Mirror of https://github.com/Palm1r/QodeAssist.git (synced 2026-02-12 10:10:44 -05:00)

Compare commits (35 commits)
| SHA1 |
|---|
| dc06ea2ed5 |
| fc5e1adc0d |
| 93e59fb2dc |
| cd2a56cde0 |
| 09cde8fd3d |
| ac8080542d |
| 7376a11a05 |
| 10e8b16caf |
| a38debb140 |
| 844ac35a59 |
| 16b77a5722 |
| c070fd5cfd |
| 882047d7b2 |
| b692402897 |
| 8102ba95f9 |
| f8bb9998ab |
| 6dab055ca2 |
| 7b31fff9f2 |
| be9156fd0e |
| 657413344d |
| 5f3deb44b9 |
| 55e2b24b8d |
| 76c17f03dd |
| 19c25043fb |
| 56b5ea8e68 |
| b475f15e3d |
| 31f4516e7b |
| bfdbc755e3 |
| 30964d90d5 |
| 1261f913bb |
| 36d5242a1f |
| 6503887091 |
| 50087aa744 |
| 4f2dc0c450 |
| 80fe388bdd |
.github/scripts/plugin.json (vendored, new file, 54 lines)

@@ -0,0 +1,54 @@
{
  "name": "QodeAssist",
  "vendor": "Petr Mironychev",
  "tags": [
    "code assistant",
    "llm",
    "ai"
  ],
  "compatibility": "Qt 6.8.1",
  "platforms": [
    "Windows",
    "macOS",
    "Linux"
  ],
  "license": "GPLv3",
  "version": "0.4.0",
  "status": "draft",
  "is_pack": false,
  "released_at": null,
  "version_history": [
    {
      "version": "0.4.0",
      "is_latest": true,
      "released_at": "2024-01-24T15:00:00Z"
    }
  ],
  "icon": "https://github.com/user-attachments/assets/dc336712-83cb-440d-8761-8d0a31de898d",
  "small_icon": "https://github.com/user-attachments/assets/8ec241bf-3186-452e-b8db-8d70543c2f41",
  "description_paragraphs": [
    {
      "header": "Description",
      "text": [
        "QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides intelligent code completion and suggestions for C++ and QML, leveraging large language models through local providers like Ollama. Enhance your coding productivity with context-aware AI assistance directly in your Qt development environment."
      ]
    }
  ],
  "description_links": [
    {
      "url": "https://github.com/Palm1r/QodeAssist",
      "link_text": "Site"
    }
  ],
  "description_images": [
    {
      "url": "https://github.com/user-attachments/assets/255a52f1-5cc0-4ca3-b05c-c4cf9cdbe25a",
      "image_label": "Code Completion"
    }
  ],
  "copyright": "(C) Petr Mironychev",
  "download_history": {
    "download_count": 0
  },
  "plugin_sets": []
}
.github/scripts/registerPlugin.js (vendored, new file, 147 lines)

@@ -0,0 +1,147 @@
const fs = require('fs');
const path = require('path');

const updatePluginData = (plugin, env, pluginQtcData) => {
    const dictionary_platform = {
        'Windows': `${env.PLUGIN_DOWNLOAD_URL}/${env.PLUGIN_NAME}-${env.QT_CREATOR_VERSION}-Windows-x64.7z`,
        'Linux': `${env.PLUGIN_DOWNLOAD_URL}/${env.PLUGIN_NAME}-${env.QT_CREATOR_VERSION}-Linux-x64.7z`,
        'macOS': `${env.PLUGIN_DOWNLOAD_URL}/${env.PLUGIN_NAME}-${env.QT_CREATOR_VERSION}-macOS-universal.7z`
    };

    plugin.core_compat_version = env.QT_CREATOR_VERSION_INTERNAL;
    plugin.core_version = env.QT_CREATOR_VERSION_INTERNAL;
    plugin.status = "draft";

    plugin.plugins.forEach(pluginsEntry => {
        pluginsEntry.url = dictionary_platform[plugin.host_os];
        pluginsEntry.meta_data = pluginQtcData;
    });
    return plugin;
};

const createNewPluginData = (env, platform, pluginQtcData) => {
    const pluginJson = {
        "status": "draft",
        "core_compat_version": "<placeholder>",
        "core_version": "<placeholder>",
        "host_os": platform,
        "host_os_version": "0", // TODO: pass the real data
        "host_os_architecture": "x86_64", // TODO: pass the real data
        "plugins": [
            {
                "url": "",
                "size": 5000, // TODO: check if it is needed, pass the real data
                "meta_data": {},
                "dependencies": []
            }
        ]
    };

    updatePluginData(pluginJson, env, pluginQtcData);
    return pluginJson;
}

const updateServerPluginJson = (endJsonData, pluginQtcData, env) => {
    // Update the global data in mainData
    endJsonData.name = pluginQtcData.Name;
    endJsonData.vendor = pluginQtcData.Vendor;
    endJsonData.version = pluginQtcData.Version;
    endJsonData.copyright = pluginQtcData.Copyright;
    endJsonData.status = "draft";

    endJsonData.version_history[0].version = pluginQtcData.Version;

    endJsonData.description_paragraphs = [
        {
            header: "Description",
            text: [
                pluginQtcData.Description
            ]
        }
    ];

    let found = false;
    // Update or Add the plugin data for the current Qt Creator version
    for (const plugin of endJsonData.plugin_sets) {
        if (plugin.core_compat_version === env.QT_CREATOR_VERSION_INTERNAL) {
            updatePluginData(plugin, env, pluginQtcData);
            found = true;
        }
    }

    if (!found) {
        for (const platform of ['Windows', 'Linux', 'macOS']) {
            endJsonData.plugin_sets.push(createNewPluginData(env, platform, pluginQtcData));
        }
    }

    // Save the updated JSON file
    const serverPluginJsonPath = path.join(__dirname, `${env.PLUGIN_NAME}.json`);
    fs.writeFileSync(serverPluginJsonPath, JSON.stringify(endJsonData, null, 2), 'utf8');
};

const request = async (type, url, token, data) => {
    const response = await fetch(url, {
        method: type,
        headers: {
            'Authorization': `Bearer ${token}`,
            'accept': 'application/json',
            'Content-Type': 'application/json'
        },
        body: data ? JSON.stringify(data) : undefined
    });
    if (!response.ok) {
        const errorResponse = await response.json();
        console.error(`${type} Request Error Response:`, errorResponse); // Log the error response
        throw new Error(`HTTP error! status: ${response.status}`);
    }
    return await response.json();
}

const put = (url, token, data) => request('PUT', url, token, data)
const post = (url, token, data) => request('POST', url, token, data)
const get = (url, token) => request('GET', url, token)

const purgeCache = async (env) => {
    try {
        await post(`${env.API_URL}api/v1/cache/purgeall`, env.TOKEN, {});
        console.log('Cache purged successfully');
    } catch (error) {
        console.error('Error:', error);
    }
};

async function main() {
    const env = {
        PLUGIN_DOWNLOAD_URL: process.env.PLUGIN_DOWNLOAD_URL || process.argv[2],
        PLUGIN_NAME: process.env.PLUGIN_NAME || process.argv[3],
        QT_CREATOR_VERSION: process.env.QT_CREATOR_VERSION || process.argv[4],
        QT_CREATOR_VERSION_INTERNAL: process.env.QT_CREATOR_VERSION_INTERNAL || process.argv[5],
        TOKEN: process.env.TOKEN || process.argv[6],
        API_URL: process.env.API_URL || process.argv[7] || ''
    };

    const pluginQtcData = require(`../../${env.PLUGIN_NAME}-origin/${env.PLUGIN_NAME}.json`);
    const templateFileData = require('./plugin.json');

    if (env.API_URL === '') {
        updateServerPluginJson(templateFileData, pluginQtcData, env);
        process.exit(0);
    }

    const response = await get(`${env.API_URL}api/v1/admin/extensions?search=${env.PLUGIN_NAME}`, env.TOKEN);
    if (response.items.length > 0 && response.items[0].extension_id !== '') {
        const pluginId = response.items[0].extension_id;
        console.log('Plugin found. Updating the plugin');
        updateServerPluginJson(response.items[0], pluginQtcData, env);

        await put(`${env.API_URL}api/v1/admin/extensions/${pluginId}`, env.TOKEN, response.items[0]);
    } else {
        console.log('No plugin found. Creating a new plugin');
        updateServerPluginJson(templateFileData, pluginQtcData, env);
        await post(`${env.API_URL}api/v1/admin/extensions`, env.TOKEN, templateFileData);
    }
    // await purgeCache(env);
}

main().then(() => console.log('JSON file updated successfully'));
.github/workflows/build_cmake.yml (vendored, 173 changed lines)

@@ -9,11 +9,12 @@ on:
pull_request:
branches:
- main

env:
PLUGIN_NAME: QodeAssist
QT_VERSION: 6.7.3
QT_CREATOR_VERSION: 14.0.2
QT_CREATOR_SNAPSHOT: NO
QT_VERSION: 6.8.1
QT_CREATOR_VERSION: 15.0.0
QT_CREATOR_VERSION_INTERNAL: 15.0.0
MACOS_DEPLOYMENT_TARGET: "11.0"
CMAKE_VERSION: "3.29.6"
NINJA_VERSION: "1.12.1"
@@ -30,74 +31,44 @@ jobs:
- {
name: "Windows Latest MSVC", artifact: "Windows-x64",
os: windows-latest,
platform: windows_x64,
cc: "cl", cxx: "cl",
environment_script: "C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Auxiliary/Build/vcvars64.bat",
}
- {
name: "Ubuntu Latest GCC", artifact: "Linux-x64",
os: ubuntu-latest,
platform: linux_x64,
cc: "gcc", cxx: "g++"
}
- {
name: "macOS Latest Clang", artifact: "macOS-universal",
os: macos-latest,
platform: mac_x64,
cc: "clang", cxx: "clang++"
}

steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '20'

- name: Checkout submodules
id: git
shell: cmake -P {0}
run: |
if (${{github.ref}} MATCHES "tags/v(.*)")
file(APPEND "$ENV{GITHUB_OUTPUT}" "tag=${CMAKE_MATCH_1}\n")
file(APPEND "$ENV{GITHUB_OUTPUT}" "tag=${CMAKE_MATCH_1}")
else()
file(APPEND "$ENV{GITHUB_OUTPUT}" "tag=${{github.run_id}}\n")
file(APPEND "$ENV{GITHUB_OUTPUT}" "tag=${{github.run_id}}")
endif()

- name: Download Ninja and CMake
shell: cmake -P {0}
run: |
set(cmake_version "$ENV{CMAKE_VERSION}")
set(ninja_version "$ENV{NINJA_VERSION}")

if ("${{ runner.os }}" STREQUAL "Windows")
set(ninja_suffix "win.zip")
set(cmake_suffix "windows-x86_64.zip")
set(cmake_dir "cmake-${cmake_version}-windows-x86_64/bin")
elseif ("${{ runner.os }}" STREQUAL "Linux")
set(ninja_suffix "linux.zip")
set(cmake_suffix "linux-x86_64.tar.gz")
set(cmake_dir "cmake-${cmake_version}-linux-x86_64/bin")
elseif ("${{ runner.os }}" STREQUAL "macOS")
set(ninja_suffix "mac.zip")
set(cmake_suffix "macos-universal.tar.gz")
set(cmake_dir "cmake-${cmake_version}-macos-universal/CMake.app/Contents/bin")
endif()

set(ninja_url "https://github.com/ninja-build/ninja/releases/download/v${ninja_version}/ninja-${ninja_suffix}")
file(DOWNLOAD "${ninja_url}" ./ninja.zip SHOW_PROGRESS)
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xvf ./ninja.zip)

set(cmake_url "https://github.com/Kitware/CMake/releases/download/v${cmake_version}/cmake-${cmake_version}-${cmake_suffix}")
file(DOWNLOAD "${cmake_url}" ./cmake.zip SHOW_PROGRESS)
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xvf ./cmake.zip)

# Add to PATH environment variable
file(TO_CMAKE_PATH "$ENV{GITHUB_WORKSPACE}/${cmake_dir}" cmake_dir)
set(path_separator ":")
if ("${{ runner.os }}" STREQUAL "Windows")
set(path_separator ";")
endif()
file(APPEND "$ENV{GITHUB_PATH}" "$ENV{GITHUB_WORKSPACE}${path_separator}${cmake_dir}")

if (NOT "${{ runner.os }}" STREQUAL "Windows")
execute_process(
COMMAND chmod +x ninja
COMMAND chmod +x ${cmake_dir}/cmake
)
endif()
uses: lukka/get-cmake@latest
with:
cmakeVersion: ${{ env.CMAKE_VERSION }}
ninjaVersion: ${{ env.NINJA_VERSION }}

- name: Install system libs
shell: cmake -P {0}
@@ -107,7 +78,7 @@ jobs:
COMMAND sudo apt update
)
execute_process(
COMMAND sudo apt install libgl1-mesa-dev libcups2-dev
COMMAND sudo apt install libgl1-mesa-dev
RESULT_VARIABLE result
)
if (NOT result EQUAL 0)
@@ -124,9 +95,9 @@ jobs:
string(REPLACE "." "" qt_version_dotless "${qt_version}")
if ("${{ runner.os }}" STREQUAL "Windows")
set(url_os "windows_x86")
set(qt_package_arch_suffix "win64_msvc2019_64")
set(qt_dir_prefix "${qt_version}/msvc2019_64")
set(qt_package_suffix "-Windows-Windows_10_22H2-MSVC2019-Windows-Windows_10_22H2-X86_64")
set(qt_package_arch_suffix "win64_msvc2022_64")
set(qt_dir_prefix "${qt_version}/msvc2022_64")
set(qt_package_suffix "-Windows-Windows_11_23H2-MSVC2022-Windows-Windows_11_23H2-X86_64")
elseif ("${{ runner.os }}" STREQUAL "Linux")
set(url_os "linux_x64")
if (qt_version VERSION_LESS "6.7.0")
@@ -135,15 +106,15 @@ jobs:
set(qt_package_arch_suffix "linux_gcc_64")
endif()
set(qt_dir_prefix "${qt_version}/gcc_64")
set(qt_package_suffix "-Linux-RHEL_8_8-GCC-Linux-RHEL_8_8-X86_64")
set(qt_package_suffix "-Linux-RHEL_8_10-GCC-Linux-RHEL_8_10-X86_64")
elseif ("${{ runner.os }}" STREQUAL "macOS")
set(url_os "mac_x64")
set(qt_package_arch_suffix "clang_64")
set(qt_dir_prefix "${qt_version}/macos")
set(qt_package_suffix "-MacOS-MacOS_13-Clang-MacOS-MacOS_13-X86_64-ARM64")
set(qt_package_suffix "-MacOS-MacOS_14-Clang-MacOS-MacOS_14-X86_64-ARM64")
endif()

set(qt_base_url "https://download.qt.io/online/qtsdkrepository/${url_os}/desktop/qt6_${qt_version_dotless}")
set(qt_base_url "https://download.qt.io/online/qtsdkrepository/${url_os}/desktop/qt6_${qt_version_dotless}/qt6_${qt_version_dotless}")
file(DOWNLOAD "${qt_base_url}/Updates.xml" ./Updates.xml SHOW_PROGRESS)

file(READ ./Updates.xml updates_xml)
@@ -153,7 +124,7 @@ jobs:
file(MAKE_DIRECTORY qt6)

# Save the path for other steps
file(TO_CMAKE_PATH "$ENV{GITHUB_WORKSPACE}/qt6/${qt_dir_prefix}" qt_dir)
file(TO_CMAKE_PATH "$ENV{GITHUB_WORKSPACE}/qt6" qt_dir)
file(APPEND "$ENV{GITHUB_OUTPUT}" "qt_dir=${qt_dir}")

message("Downloading Qt to ${qt_dir}")
@@ -172,11 +143,17 @@ jobs:

foreach(package qt5compat qtshadertools)
downloadAndExtract(
"${qt_base_url}/qt.qt6.${qt_version_dotless}.${package}.${qt_package_arch_suffix}/${qt_package_version}${package}${qt_package_suffix}.7z"
"${qt_base_url}/qt.qt6.${qt_version_dotless}.addons.${package}.${qt_package_arch_suffix}/${qt_package_version}${package}${qt_package_suffix}.7z"
${package}.7z
)
endforeach()

function(downloadAndExtractLibicu url archive)
message("Downloading ${url}")
file(DOWNLOAD "${url}" ./${archive} SHOW_PROGRESS)
execute_process(COMMAND ${CMAKE_COMMAND} -E tar xvf ../../${archive} WORKING_DIRECTORY qt6/lib)
endfunction()

# uic depends on libicu*.so
if ("${{ runner.os }}" STREQUAL "Linux")
if (qt_version VERSION_LESS "6.7.0")
@@ -184,47 +161,25 @@ jobs:
else()
set(uic_suffix "Rhel8.6-x86_64")
endif()
downloadAndExtract(
downloadAndExtractLibicu(
"${qt_base_url}/qt.qt6.${qt_version_dotless}.${qt_package_arch_suffix}/${qt_package_version}icu-linux-${uic_suffix}.7z"
icu.7z
)
endif()

- name: Download Qt Creator
uses: qt-creator/install-dev-package@v1.2
with:
version: ${{ env.QT_CREATOR_VERSION }}
unzip-to: 'qtcreator'

- name: Extract Qt Creator
id: qt_creator
shell: cmake -P {0}
run: |
string(REGEX MATCH "([0-9]+.[0-9]+).[0-9]+" outvar "$ENV{QT_CREATOR_VERSION}")

set(qtc_base_url "https://download.qt.io/official_releases/qtcreator/${CMAKE_MATCH_1}/$ENV{QT_CREATOR_VERSION}/installer_source")
set(qtc_snapshot "$ENV{QT_CREATOR_SNAPSHOT}")
if (qtc_snapshot)
set(qtc_base_url "https://download.qt.io/snapshots/qtcreator/${CMAKE_MATCH_1}/$ENV{QT_CREATOR_VERSION}/installer_source/${qtc_snapshot}")
endif()

if ("${{ runner.os }}" STREQUAL "Windows")
set(qtc_platform "windows_x64")
elseif ("${{ runner.os }}" STREQUAL "Linux")
set(qtc_platform "linux_x64")
elseif ("${{ runner.os }}" STREQUAL "macOS")
set(qtc_platform "mac_x64")
endif()

file(TO_CMAKE_PATH "$ENV{GITHUB_WORKSPACE}/qtcreator" qtc_dir)
# Save the path for other steps
file(APPEND "$ENV{GITHUB_OUTPUT}" "qtc_dir=${qtc_dir}")

file(MAKE_DIRECTORY qtcreator)

message("Downloading Qt Creator from ${qtc_base_url}/${qtc_platform}")

foreach(package qtcreator qtcreator_dev)
file(DOWNLOAD
"${qtc_base_url}/${qtc_platform}/${package}.7z" ./${package}.7z SHOW_PROGRESS)
execute_process(COMMAND
${CMAKE_COMMAND} -E tar xvf ../${package}.7z WORKING_DIRECTORY qtcreator)
endforeach()

- name: Build
shell: cmake -P {0}
run: |
@@ -283,11 +238,59 @@ jobs:
path: ./${{ env.PLUGIN_NAME }}-${{ env.QT_CREATOR_VERSION }}-${{ matrix.config.artifact }}.7z
name: ${{ env.PLUGIN_NAME}}-${{ env.QT_CREATOR_VERSION }}-${{ matrix.config.artifact }}.7z

release:
# The json is the same for all platforms, but we need to save one
- name: Upload plugin json
if: matrix.config.os == 'ubuntu-latest'
uses: actions/upload-artifact@v4
with:
name: ${{ env.PLUGIN_NAME }}-origin-json
path: ./build/build/${{ env.PLUGIN_NAME }}.json

update_json:
if: contains(github.ref, 'tags/v')
runs-on: ubuntu-latest
needs: build

steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '20'

- name: Download the JSON file
uses: actions/download-artifact@v4
with:
name: ${{ env.PLUGIN_NAME }}-origin-json
path: ./${{ env.PLUGIN_NAME }}-origin

- name: Store Release upload_url
run: |
RELEASE_HTML_URL=$(echo "${{github.event.repository.html_url}}/releases/download/v${{ needs.build.outputs.tag }}")
echo "RELEASE_HTML_URL=${RELEASE_HTML_URL}" >> $GITHUB_ENV

- name: Run the Node.js script to update JSON
env:
QT_TOKEN: ${{ secrets.TOKEN }}
API_URL: ${{ secrets.API_URL }}
run: |
node .github/scripts/registerPlugin.js ${{ env.RELEASE_HTML_URL }} ${{ env.PLUGIN_NAME }} ${{ env.QT_CREATOR_VERSION }} ${{ env.QT_CREATOR_VERSION_INTERNAL }} ${{ env.QT_TOKEN }} ${{ env.API_URL }}

- name: Delete previous json artifacts
uses: geekyeggo/delete-artifact@v5
with:
name: ${{ env.PLUGIN_NAME }}*-json

- name: Upload the modified JSON file as an artifact
uses: actions/upload-artifact@v4
with:
name: plugin-json
path: .github/scripts/${{ env.PLUGIN_NAME }}.json

release:
if: contains(github.ref, 'tags/v')
runs-on: ubuntu-latest
needs: [build, update_json]

steps:
- name: Download artifacts
uses: actions/download-artifact@v4
@@ -40,17 +40,23 @@ add_qtc_plugin(QodeAssist
QodeAssistConstants.hpp
QodeAssisttr.h
LLMClientInterface.hpp LLMClientInterface.cpp
templates/Templates.hpp
templates/CodeLlamaFim.hpp
templates/StarCoder2Fim.hpp
templates/DeepSeekCoderFim.hpp
templates/CustomFimTemplate.hpp
templates/DeepSeekCoderChat.hpp
templates/CodeLlamaChat.hpp
templates/Qwen.hpp
templates/StarCoderChat.hpp
templates/Ollama.hpp
templates/BasicChat.hpp
templates/Llama3.hpp
templates/ChatML.hpp
templates/Alpaca.hpp
templates/Llama2.hpp
providers/Providers.hpp
providers/OllamaProvider.hpp providers/OllamaProvider.cpp
providers/LMStudioProvider.hpp providers/LMStudioProvider.cpp
providers/OpenAICompatProvider.hpp providers/OpenAICompatProvider.cpp
providers/OpenRouterAIProvider.hpp providers/OpenRouterAIProvider.cpp
QodeAssist.qrc
LSPCompletion.hpp
LLMSuggestion.hpp LLMSuggestion.cpp
@@ -61,6 +67,5 @@ add_qtc_plugin(QodeAssist
chat/ChatOutputPane.h chat/ChatOutputPane.cpp
chat/NavigationPanel.hpp chat/NavigationPanel.cpp
ConfigurationManager.hpp ConfigurationManager.cpp
CodeHandler.hpp CodeHandler.cpp
)

target_link_libraries(QodeAssist PRIVATE )
@@ -68,15 +68,27 @@ void ClientInterface::sendMessage(const QString &message, bool includeCurrentFil
{
cancelRequest();

m_chatModel->addMessage(message, ChatModel::ChatRole::User, "");

auto &chatAssistantSettings = Settings::chatAssistantSettings();

auto providerName = Settings::generalSettings().caProvider();
auto provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName);

if (!provider) {
LOG_MESSAGE(QString("No provider found with name: %1").arg(providerName));
return;
}

auto templateName = Settings::generalSettings().caTemplate();
auto promptTemplate = LLMCore::PromptTemplateManager::instance().getChatTemplateByName(
templateName);

if (!promptTemplate) {
LOG_MESSAGE(QString("No template found with name: %1").arg(templateName));
return;
}

LLMCore::ContextData context;
context.prefix = message;
context.suffix = "";
@@ -94,7 +106,7 @@ void ClientInterface::sendMessage(const QString &message, bool includeCurrentFil

QJsonObject providerRequest;
providerRequest["model"] = Settings::generalSettings().caModel();
providerRequest["stream"] = true;
providerRequest["stream"] = chatAssistantSettings.stream();
providerRequest["messages"] = m_chatModel->prepareMessagesForRequest(systemPrompt);

if (promptTemplate)
@@ -114,11 +126,18 @@ void ClientInterface::sendMessage(const QString &message, bool includeCurrentFil
config.url = QString("%1%2").arg(Settings::generalSettings().caUrl(), provider->chatEndpoint());
config.providerRequest = providerRequest;
config.multiLineCompletion = false;
config.apiKey = Settings::chatAssistantSettings().apiKey();

QJsonObject request;
request["id"] = QUuid::createUuid().toString();

m_chatModel->addMessage(message, ChatModel::ChatRole::User, "");
auto errors = config.provider->validateRequest(config.providerRequest, promptTemplate->type());
if (!errors.isEmpty()) {
LOG_MESSAGE("Validate errors for chat request:");
LOG_MESSAGES(errors);
return;
}

m_requestHandler->sendLLMRequest(config, request);
}
@@ -138,11 +157,16 @@ void ClientInterface::handleLLMResponse(const QString &response,
const QJsonObject &request,
bool isComplete)
{
QString messageId = request["id"].toString();
m_chatModel->addMessage(response.trimmed(), ChatModel::ChatRole::Assistant, messageId);
const auto message = response.trimmed();

if (isComplete) {
LOG_MESSAGE("Message completed. Final response for message " + messageId + ": " + response);
if (!message.isEmpty()) {
QString messageId = request["id"].toString();
m_chatModel->addMessage(message, ChatModel::ChatRole::Assistant, messageId);

if (isComplete) {
LOG_MESSAGE(
"Message completed. Final response for message " + messageId + ": " + response);
}
}
}
CodeHandler.cpp (new file, 115 lines)

@@ -0,0 +1,115 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#include "CodeHandler.hpp"
#include <QHash>

namespace QodeAssist {

QString CodeHandler::processText(QString text)
{
    QString result;
    QStringList lines = text.split('\n');
    bool inCodeBlock = false;
    QString pendingComments;
    QString currentLanguage;

    for (const QString &line : lines) {
        if (line.trimmed().startsWith("```")) {
            if (!inCodeBlock) {
                currentLanguage = detectLanguage(line);
            }
            inCodeBlock = !inCodeBlock;
            continue;
        }

        if (inCodeBlock) {
            if (!pendingComments.isEmpty()) {
                QStringList commentLines = pendingComments.split('\n');
                QString commentPrefix = getCommentPrefix(currentLanguage);

                for (const QString &commentLine : commentLines) {
                    if (!commentLine.trimmed().isEmpty()) {
                        result += commentPrefix + " " + commentLine.trimmed() + "\n";
                    } else {
                        result += "\n";
                    }
                }
                pendingComments.clear();
            }
            result += line + "\n";
        } else {
            QString trimmed = line.trimmed();
            if (!trimmed.isEmpty()) {
                pendingComments += trimmed + "\n";
            } else {
                pendingComments += "\n";
            }
        }
    }

    return result;
}

QString CodeHandler::getCommentPrefix(const QString &language)
{
    static const QHash<QString, QString> commentPrefixes
        = {{"python", "#"}, {"py", "#"}, {"lua", "--"}, {"javascript", "//"},
           {"js", "//"}, {"typescript", "//"}, {"ts", "//"}, {"cpp", "//"},
           {"c++", "//"}, {"c", "//"}, {"java", "//"}, {"csharp", "//"},
           {"cs", "//"}, {"php", "//"}, {"ruby", "#"}, {"rb", "#"},
           {"rust", "//"}, {"rs", "//"}, {"go", "//"}, {"swift", "//"},
           {"kotlin", "//"}, {"kt", "//"}, {"scala", "//"}, {"r", "#"},
           {"shell", "#"}, {"bash", "#"}, {"sh", "#"}, {"perl", "#"},
           {"pl", "#"}, {"haskell", "--"}, {"hs", "--"}};

    return commentPrefixes.value(language.toLower(), "//");
}

QString CodeHandler::detectLanguage(const QString &line)
{
    QString trimmed = line.trimmed();
    if (trimmed.length() <= 3) { // Only ``` with no language tag
        return QString();
    }

    return trimmed.mid(3).trimmed();
}

const QRegularExpression &CodeHandler::getFullCodeBlockRegex()
{
    static const QRegularExpression
        regex(R"(```[\w\s]*\n([\s\S]*?)```)", QRegularExpression::MultilineOption);
    return regex;
}

const QRegularExpression &CodeHandler::getPartialStartBlockRegex()
{
    static const QRegularExpression
        regex(R"(```[\w\s]*\n([\s\S]*?)$)", QRegularExpression::MultilineOption);
    return regex;
}

const QRegularExpression &CodeHandler::getPartialEndBlockRegex()
{
    static const QRegularExpression regex(R"(^([\s\S]*?)```)", QRegularExpression::MultilineOption);
    return regex;
}

} // namespace QodeAssist
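For context, a minimal sketch of how the new CodeHandler is meant to be used. The main() harness, the sample model reply, and the add() snippet are illustrative only, not taken from the plugin sources:

```cpp
#include <QDebug>
#include <QString>

#include "CodeHandler.hpp"

int main()
{
    // Hypothetical chat-style reply from a model: prose followed by a fenced
    // code block. The fence marker is built at runtime only to keep this sketch
    // readable inside documentation; a real reply contains the backticks directly.
    const QString fence(3, QLatin1Char('`'));
    const QString reply = QStringLiteral("Here is one way to do it:\n")
                          + fence + QStringLiteral("cpp\n")
                          + QStringLiteral("int add(int a, int b) { return a + b; }\n")
                          + fence + QStringLiteral("\n");

    // processText() keeps code lines as-is and turns the surrounding prose into
    // comments, picking the comment prefix from the fence's language tag, so the
    // result can be inserted into the editor as a plain completion.
    qDebug().noquote() << QodeAssist::CodeHandler::processText(reply);
    // Expected output, roughly:
    //   // Here is one way to do it:
    //
    //   int add(int a, int b) { return a + b; }
    return 0;
}
```

This is the same call that LLMClientInterface::sendCompletionToClient() (further down in this diff) applies when a chat template is used for code completion and smart processing of instruct output is enabled.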
CodeHandler.hpp (new file, 42 lines)

@@ -0,0 +1,42 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include <QObject>
#include <QRegularExpression>
#include <QString>

namespace QodeAssist {

class CodeHandler
{
public:
    static QString processText(QString text);

private:
    static QString getCommentPrefix(const QString &language);
    static QString detectLanguage(const QString &line);

    static const QRegularExpression &getFullCodeBlockRegex();
    static const QRegularExpression &getPartialStartBlockRegex();
    static const QRegularExpression &getPartialEndBlockRegex();
};

} // namespace QodeAssist
@@ -210,10 +210,11 @@ LLMCore::ContextData DocumentContextReader::prepareContext(int lineNumber, int c

QString fileContext;
if (Settings::codeCompletionSettings().useFilePathInContext())
fileContext += getLanguageAndFileInfo();
fileContext.append("\n ").append(getLanguageAndFileInfo());

if (Settings::codeCompletionSettings().useProjectChangesCache())
fileContext += ChangesManager::instance().getRecentChangesContext(m_textDocument);
fileContext.append("\n ").append(
ChangesManager::instance().getRecentChangesContext(m_textDocument));

return {contextBefore, contextAfter, fileContext};
}
@@ -26,7 +26,9 @@
#include <llmcore/RequestConfig.hpp>
#include <texteditor/textdocument.h>

#include "CodeHandler.hpp"
#include "DocumentContextReader.hpp"
#include "llmcore/MessageBuilder.hpp"
#include "llmcore/PromptTemplateManager.hpp"
#include "llmcore/ProvidersManager.hpp"
#include "logger/Logger.hpp"
@@ -152,34 +154,64 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
auto providerName = Settings::generalSettings().ccProvider();
auto provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName);

if (!provider) {
LOG_MESSAGE(QString("No provider found with name: %1").arg(providerName));
return;
}

auto templateName = Settings::generalSettings().ccTemplate();
auto promptTemplate = LLMCore::PromptTemplateManager::instance().getFimTemplateByName(
templateName);

if (!promptTemplate) {
LOG_MESSAGE(QString("No template found with name: %1").arg(templateName));
return;
}

LLMCore::LLMConfig config;
config.requestType = LLMCore::RequestType::Fim;
config.requestType = LLMCore::RequestType::CodeCompletion;
config.provider = provider;
config.promptTemplate = promptTemplate;
config.url = QUrl(
QString("%1%2").arg(Settings::generalSettings().ccUrl(), provider->completionEndpoint()));
config.url = QUrl(QString("%1%2").arg(
Settings::generalSettings().ccUrl(),
promptTemplate->type() == LLMCore::TemplateType::Fim ? provider->completionEndpoint()
: provider->chatEndpoint()));
config.apiKey = Settings::codeCompletionSettings().apiKey();

config.providerRequest
= {{"model", Settings::generalSettings().ccModel()},
{"stream", Settings::codeCompletionSettings().stream()}};

config.providerRequest = {{"model", Settings::generalSettings().ccModel()},
{"stream", true},
{"stop",
QJsonArray::fromStringList(config.promptTemplate->stopWords())}};
config.multiLineCompletion = completeSettings.multiLineCompletion();

const auto stopWords = QJsonArray::fromStringList(config.promptTemplate->stopWords());
if (!stopWords.isEmpty())
config.providerRequest["stop"] = stopWords;

QString systemPrompt;
if (completeSettings.useSystemPrompt())
systemPrompt.append(completeSettings.systemPrompt());
if (!updatedContext.fileContext.isEmpty())
systemPrompt.append(updatedContext.fileContext);

config.providerRequest["system"] = systemPrompt;
auto message = LLMCore::MessageBuilder()
.addSystemMessage(systemPrompt)
.addUserMessage(updatedContext.prefix)
.addSuffix(updatedContext.suffix)
.addtTokenizer(promptTemplate);

config.promptTemplate->prepareRequest(config.providerRequest, updatedContext);
config.provider->prepareRequest(config.providerRequest, LLMCore::RequestType::Fim);
message.saveTo(
config.providerRequest,
providerName == "Ollama" ? LLMCore::ProvidersApi::Ollama : LLMCore::ProvidersApi::OpenAI);

config.provider->prepareRequest(config.providerRequest, LLMCore::RequestType::CodeCompletion);

auto errors = config.provider->validateRequest(config.providerRequest, promptTemplate->type());
if (!errors.isEmpty()) {
LOG_MESSAGE("Validate errors for fim request:");
LOG_MESSAGES(errors);
return;
}
m_requestHandler.sendLLMRequest(config, request);
}
@@ -211,19 +243,31 @@ void LLMClientInterface::sendCompletionToClient(const QString &completion,
const QJsonObject &request,
bool isComplete)
{
auto templateName = Settings::generalSettings().ccTemplate();
auto promptTemplate = LLMCore::PromptTemplateManager::instance().getFimTemplateByName(
templateName);

QJsonObject position = request["params"].toObject()["doc"].toObject()["position"].toObject();

QJsonObject response;
response["jsonrpc"] = "2.0";
response[LanguageServerProtocol::idKey] = request["id"];

QJsonObject result;
QJsonArray completions;
QJsonObject completionItem;
completionItem[LanguageServerProtocol::textKey] = completion;

QString processedCompletion
= promptTemplate->type() == LLMCore::TemplateType::Chat
&& Settings::codeCompletionSettings().smartProcessInstuctText()
? CodeHandler::processText(completion)
: completion;

completionItem[LanguageServerProtocol::textKey] = processedCompletion;
QJsonObject range;
range["start"] = position;
QJsonObject end = position;
end["character"] = position["character"].toInt() + completion.length();
end["character"] = position["character"].toInt() + processedCompletion.length();
range["end"] = end;
completionItem[LanguageServerProtocol::rangeKey] = range;
completionItem[LanguageServerProtocol::positionKey] = position;
@@ -1,8 +1,13 @@
/*
/*
* Copyright (C) 2023 The Qt Company Ltd.
* Copyright (C) 2024 Petr Mironychev
*
* This file is part of QodeAssist.
*
* The Qt Company portions:
* SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0+ OR GPL-3.0 WITH Qt-GPL-exception-1.0
*
* Petr Mironychev portions:
* QodeAssist is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
@@ -18,30 +23,25 @@
*/

#include "LLMSuggestion.hpp"

#include <QTextCursor>
#include <QtWidgets/qtoolbar.h>
#include <texteditor/texteditor.h>
#include <utils/stringutils.h>
#include <utils/tooltip/tooltip.h>

namespace QodeAssist {

LLMSuggestion::LLMSuggestion(const Completion &completion, QTextDocument *origin)
: m_completion(completion)
, m_linesCount(0)
LLMSuggestion::LLMSuggestion(
const QList<Data> &suggestions, QTextDocument *sourceDocument, int currentCompletion)
: TextEditor::CyclicSuggestion(suggestions, sourceDocument, currentCompletion)
{
int startPos = completion.range().start().toPositionInDocument(origin);
int endPos = completion.range().end().toPositionInDocument(origin);
const auto &data = suggestions[currentCompletion];

startPos = qBound(0, startPos, origin->characterCount() - 1);
endPos = qBound(startPos, endPos, origin->characterCount() - 1);
int startPos = data.range.begin.toPositionInDocument(sourceDocument);
int endPos = data.range.end.toPositionInDocument(sourceDocument);

m_start = QTextCursor(origin);
m_start.setPosition(startPos);
m_start.setKeepPositionOnInsert(true);
startPos = qBound(0, startPos, sourceDocument->characterCount() - 1);
endPos = qBound(startPos, endPos, sourceDocument->characterCount() - 1);

QTextCursor cursor(origin);
QTextCursor cursor(sourceDocument);
cursor.setPosition(startPos);
cursor.setPosition(endPos, QTextCursor::KeepAnchor);
@@ -51,74 +51,57 @@ LLMSuggestion::LLMSuggestion(const Completion &completion, QTextDocument *origin
int startPosInBlock = startPos - block.position();
int endPosInBlock = endPos - block.position();

blockText.replace(startPosInBlock, endPosInBlock - startPosInBlock, completion.text());

document()->setPlainText(blockText);

setCurrentPosition(m_start.position());
}

bool LLMSuggestion::apply()
{
QTextCursor cursor = m_completion.range().toSelection(m_start.document());
cursor.beginEditBlock();
cursor.removeSelectedText();
cursor.insertText(m_completion.text());
cursor.endEditBlock();
return true;
blockText.replace(startPosInBlock, endPosInBlock - startPosInBlock, data.text);
replacementDocument()->setPlainText(blockText);
}

bool LLMSuggestion::applyWord(TextEditor::TextEditorWidget *widget)
{
return applyNextLine(widget);
return applyPart(Word, widget);
}

bool LLMSuggestion::applyNextLine(TextEditor::TextEditorWidget *widget)
bool LLMSuggestion::applyLine(TextEditor::TextEditorWidget *widget)
{
const QString text = m_completion.text();
QStringList lines = text.split('\n');

if (m_linesCount < lines.size())
m_linesCount++;

showTooltip(widget, m_linesCount);

return m_linesCount == lines.size() && !Utils::ToolTip::isVisible();
return applyPart(Line, widget);
}

void LLMSuggestion::onCounterFinished(int count)
bool LLMSuggestion::applyPart(Part part, TextEditor::TextEditorWidget *widget)
{
Utils::ToolTip::hide();
m_linesCount = 0;
QTextCursor cursor = m_completion.range().toSelection(m_start.document());
cursor.beginEditBlock();
cursor.removeSelectedText();
const Utils::Text::Range range = suggestions()[currentSuggestion()].range;
const QTextCursor cursor = range.begin.toTextCursor(sourceDocument());
QTextCursor currentCursor = widget->textCursor();
const QString text = suggestions()[currentSuggestion()].text;

QStringList lines = m_completion.text().split('\n');
QString textToInsert = lines.mid(0, count).join('\n');
const int startPos = currentCursor.positionInBlock() - cursor.positionInBlock()
+ (cursor.selectionEnd() - cursor.selectionStart());

cursor.insertText(textToInsert);
cursor.endEditBlock();
}
int next = part == Word ? Utils::endOfNextWord(text, startPos) : text.indexOf('\n', startPos);

void LLMSuggestion::reset()
{
m_start.removeSelectedText();
}
if (next == -1)
return apply();

int LLMSuggestion::position()
{
return m_start.position();
}
if (part == Line)
++next;

void LLMSuggestion::showTooltip(TextEditor::TextEditorWidget *widget, int count)
{
Utils::ToolTip::hide();
QPoint pos = widget->mapToGlobal(widget->cursorRect().topRight());
pos += QPoint(-10, -50);
m_counterTooltip = new CounterTooltip(count);
Utils::ToolTip::show(pos, m_counterTooltip, widget);
connect(m_counterTooltip, &CounterTooltip::finished, this, &LLMSuggestion::onCounterFinished);
QString subText = text.mid(startPos, next - startPos);
if (subText.isEmpty())
return false;

currentCursor.insertText(subText);

if (const int seperatorPos = subText.lastIndexOf('\n'); seperatorPos >= 0) {
const QString newCompletionText = text.mid(startPos + seperatorPos + 1);
if (!newCompletionText.isEmpty()) {
const Utils::Text::Position newStart{int(range.begin.line + subText.count('\n')), 0};
const Utils::Text::Position
newEnd{newStart.line, int(subText.length() - seperatorPos - 1)};
const Utils::Text::Range newRange{newStart, newEnd};
const QList<Data> newSuggestion{{newRange, newEnd, newCompletionText}};
widget->insertSuggestion(
std::make_unique<LLMSuggestion>(newSuggestion, widget->document(), 0));
}
}
return false;
}

} // namespace QodeAssist
@@ -1,8 +1,13 @@
/*
/*
* Copyright (C) 2023 The Qt Company Ltd.
* Copyright (C) 2024 Petr Mironychev
*
* This file is part of QodeAssist.
*
* The Qt Company portions:
* SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0+ OR GPL-3.0 WITH Qt-GPL-exception-1.0
*
* Petr Mironychev portions:
* QodeAssist is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
@@ -19,37 +24,21 @@

#pragma once

#include <QObject>
#include "LSPCompletion.hpp"
#include <texteditor/textdocumentlayout.h>

#include "utils/CounterTooltip.hpp"
#include <texteditor/texteditor.h>
#include <texteditor/textsuggestion.h>

namespace QodeAssist {

class LLMSuggestion final : public QObject, public TextEditor::TextSuggestion
class LLMSuggestion : public TextEditor::CyclicSuggestion
{
Q_OBJECT
public:
LLMSuggestion(const Completion &completion, QTextDocument *origin);
enum Part { Word, Line };

bool apply() final;
bool applyWord(TextEditor::TextEditorWidget *widget) final;
bool applyNextLine(TextEditor::TextEditorWidget *widget);
void reset() final;
int position() final;
LLMSuggestion(
const QList<Data> &suggestions, QTextDocument *sourceDocument, int currentCompletion = 0);

const Completion &completion() const { return m_completion; }

void showTooltip(TextEditor::TextEditorWidget *widget, int count);
void onCounterFinished(int count);

private:
Completion m_completion;
QTextCursor m_start;
int m_linesCount;

CounterTooltip *m_counterTooltip = nullptr;
bool applyWord(TextEditor::TextEditorWidget *widget) override;
bool applyLine(TextEditor::TextEditorWidget *widget) override;
bool applyPart(Part part, TextEditor::TextEditorWidget *widget);
};

} // namespace QodeAssist
@@ -1,16 +1,13 @@
{
"Id" : "qodeassist",
"Name" : "QodeAssist",
"Version" : "0.3.8",
"CompatVersion" : "${IDE_VERSION_COMPAT}",
"Version" : "0.4.2",
"Vendor" : "Petr Mironychev",
"VendorId" : "petrmironychev",
"Copyright" : "(C) ${IDE_COPYRIGHT_YEAR} Petr Mironychev, (C) ${IDE_COPYRIGHT_YEAR} The Qt Company Ltd",
"License" : "GNU General Public License Usage

Alternatively, this file may be used under the terms of the GNU General Public License version 3 as published by the Free Software Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT included in the packaging of this file. Please review the following information to ensure the GNU General Public License requirements will be met: https://www.gnu.org/licenses/gpl-3.0.html.",
"Description" : ["QodeAssist is an AI-powered coding assistant for Qt Creator. It provides intelligent code completion and suggestions for your code",
"Prerequisites:",
"- One of the supported LLM providers installed (e.g., Ollama or LM Studio)",
"- A compatible large language model downloaded for your chosen provider (e.g., CodeLlama, StarCoder2)"],
"License" : "GPLv3",
"Description": "QodeAssist is an AI-powered coding assistant for Qt Creator. It provides intelligent code completion and suggestions for your code. Prerequisites: Requires one of the supported LLM providers installed (e.g., Ollama or LM Studio) and a compatible large language model downloaded for your chosen provider (e.g., CodeLlama, StarCoder2).",
"Url" : "https://github.com/Palm1r/QodeAssist",
"DocumentationUrl" : "",
${IDE_PLUGIN_DEPENDENCIES}
}
@@ -193,8 +193,8 @@ void QodeAssistClient::handleCompletions(const GetCompletionRequest::Response &r
auto isValidCompletion = [](const Completion &completion) {
return completion.isValid() && !completion.text().trimmed().isEmpty();
};
QList<Completion> completions = Utils::filtered(result->completions().toListOrEmpty(),
isValidCompletion);
QList<Completion> completions
= Utils::filtered(result->completions().toListOrEmpty(), isValidCompletion);

// remove trailing whitespaces from the end of the completions
for (Completion &completion : completions) {
@@ -211,10 +211,18 @@ void QodeAssistClient::handleCompletions(const GetCompletionRequest::Response &r
if (delta > 0)
completion.setText(completionText.chopped(delta));
}
auto suggestions = Utils::transform(completions, [](const Completion &c) {
auto toTextPos = [](const LanguageServerProtocol::Position pos) {
return Text::Position{pos.line() + 1, pos.character()};
};

Text::Range range{toTextPos(c.range().start()), toTextPos(c.range().end())};
Text::Position pos{toTextPos(c.position())};
return TextSuggestion::Data{range, pos, c.text()};
});
if (completions.isEmpty())
return;
editor->insertSuggestion(
std::make_unique<LLMSuggestion>(completions.first(), editor->document()));
editor->insertSuggestion(std::make_unique<LLMSuggestion>(suggestions, editor->document()));
}
}
README.md (93 changed lines)

@@ -3,9 +3,10 @@
[Build status](https://github.com/Palm1r/QodeAssist/actions/workflows/build_cmake.yml)

QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides intelligent code completion and suggestions for C++ and QML, leveraging large language models through local providers like Ollama. Enhance your coding productivity with context-aware AI assistance directly in your Qt development environment.

## Table of Contents
1. [Overview](#overview)
@@ -14,7 +15,6 @@ QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides
4. [Supported LLM Providers](#supported-llm-providers)
5. [Recommended Models](#recommended-models)
- [Ollama](#ollama)
- [LM Studio](#lm-studio)
6. [QtCreator Version Compatibility](#qtcreator-version-compatibility)
7. [Development Progress](#development-progress)
8. [Hotkeys](#hotkeys)
@@ -30,7 +30,7 @@ QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides
- Support for multiple LLM providers:
- Ollama
- LM Studio
- OpenAI-compatible local providers
- OpenAI-compatible providers (e.g. https://openrouter.ai)
- Extensive library of model-specific templates
- Custom template support
- Easy configuration and model selection
@@ -40,6 +40,11 @@ QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides
<img src="https://github.com/user-attachments/assets/255a52f1-5cc0-4ca3-b05c-c4cf9cdbe25a" width="600" alt="QodeAssistPreview">
</details>

<details>
<summary>Multiline Code completion: (click to expand)</summary>
<img src="https://github.com/user-attachments/assets/c18dfbd2-8c54-4a7b-90d1-66e3bb51adb0" width="600" alt="QodeAssistPreview">
</details>

<details>
<summary>Chat with LLM models in side panels: (click to expand)</summary>
<img src="https://github.com/user-attachments/assets/ead5a5d9-b40a-4f17-af05-77fa2bcb3a61" width="600" alt="QodeAssistChat">
@@ -56,13 +61,17 @@ QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides
2. Install [Ollama](https://ollama.com). Make sure to review the system requirements before installation.
3. Install a language model in Ollama via the terminal. For example, you can run:

For suggestions:
For standard computers (minimum 8GB RAM):
```
ollama run codellama:7b-code
ollama run qwen2.5-coder:7b
```
For chat:
For better performance (16GB+ RAM):
```
ollama run codellama:7b-instruct
ollama run qwen2.5-coder:14b
```
For high-end systems (32GB+ RAM):
```
ollama run qwen2.5-coder:32b
```
4. Download the QodeAssist plugin for your QtCreator.
5. Launch Qt Creator and install the plugin:
@@ -73,38 +82,41 @@ ollama run codellama:7b-instruct

## Configure Plugin

<details>
<summary>Configure plugins: (click to expand)</summary>
<img src="https://github.com/user-attachments/assets/00ad980f-b470-48eb-9aaa-077783d38798" width="600" alt="Configure QodeAssist">
</details>
QodeAssist comes with default settings that should work immediately after installing a language model. The plugin is pre-configured to use Ollama with standard templates, so you may only need to verify the settings.

1. Open Qt Creator settings
1. Open Qt Creator settings (Edit > Preferences on Linux/Windows, Qt Creator > Preferences on macOS)
2. Navigate to the "Qode Assist" tab
3. Select "General" page
4. Choose your LLM provider (e.g., Ollama)
5. Select the installed model by the "Select Model" button
- For LM Studio you will see the currently loaded model
6. Choose the prompt template that corresponds to your model
7. Apply the settings
3. On the "General" page, verify:
- Ollama is selected as your LLM provider
- The URL is set to http://localhost:11434
- Your installed model appears in the model selection
- The prompt template is Ollama Auto FIM
4. Click Apply if you made any changes

You're all set! QodeAssist is now ready to use in Qt Creator.

[Support on Ko-fi](https://ko-fi.com/P5P412V96G)

## Supported LLM Providers
QodeAssist currently supports the following LLM (Large Language Model) providers:
- [Ollama](https://ollama.com)
- [LM Studio](https://lmstudio.ai)
- [OpenRouter](https://openrouter.ai)
- OpenAI compatible providers

## Recommended Models:
QodeAssist has been thoroughly tested and optimized for use with the following language models:

- Llama
- Qwen2.5-coder
- CodeLlama
- StarCoder2
- DeepSeek-Coder-V2
- Qwen-2.5

### Model Types

FIM models (codellama:7b-code, starcoder2:7b, etc.) - Optimized for code completion and suggestions

Instruct models (codellama:7b-instruct, starcoder2:instruct, etc.) - Better for chat assistance, explanations, and code review

For best results, use FIM models with code completion and Instruct models with chat features.

### Ollama:
### For autocomplete (FIM)
@@ -114,7 +126,7 @@ ollama run starcoder2:7b
ollama run qwen2.5-coder:7b-base
ollama run deepseek-coder-v2:16b-lite-base-q3_K_M
```
### For chat
### For chat and instruct
```
ollama run codellama:7b-instruct
ollama run starcoder2:instruct
@@ -122,15 +134,31 @@ ollama run qwen2.5-coder:7b-instruct
ollama run deepseek-coder-v2
```

### LM Studio:
Similar models to those listed for Ollama
### Template-Model Compatibility

Please note that while these models have been specifically tested and confirmed to work well with QodeAssist, other models compatible with the supported providers may also work. We encourage users to experiment with different models and report their experiences.
| Template | Compatible Models | Purpose |
|----------|------------------|----------|
| CodeLlama FIM | `codellama:code` | Code completion |
| DeepSeekCoder FIM | `deepseek-coder-v2`, `deepseek-v2.5` | Code completion |
| Ollama Auto FIM | `Any Ollama base model` | Code completion |
| Qwen FIM | `Qwen 2.5 models` | Code completion |
| StarCoder2 FIM | `starcoder2 base model` | Code completion |
| Alpaca | `starcoder2:instruct` | Chat assistance |
| Basic Chat | `Messages without tokens` | Chat assistance |
| ChatML | `Qwen 2.5 models` | Chat assistance |
| Llama2 | `llama2 model family`, `codellama:instruct` | Chat assistance |
| Llama3 | `llama3 model family` | Chat assistance |
| Ollama Auto Chat | `Any Ollama chat model` | Chat assistance |

If you've successfully used a model that's not listed here, please let us know by opening an issue or submitting a pull request to update this list.
> Note:
> - FIM (Fill-in-Middle) templates are optimized for code completion
> - Chat templates are designed for interactive dialogue
> - The Ollama Auto templates automatically adapt to most Ollama models
> - Custom Template allows you to define your own prompt format

## QtCreator Version Compatibility

- QtCreator 15.0.0 - 0.4.x
- QtCreator 14.0.2 - 0.2.3 - 0.3.x
- QtCreator 14.0.1 - 0.2.2 plugin version and below
@@ -149,9 +177,7 @@ If you've successfully used a model that's not listed here, please let us know b
- on Mac: Option + Command + Q
- on Windows: Ctrl + Alt + Q
- To insert the full suggestion, you can use the TAB key
- To insert line by line, you can use the "Move cursor word right" shortcut:
- On Mac: Option + Right Arrow
- On Windows: Alt + Right Arrow
- To insert a word of the suggestion, you can use Alt + Right Arrow on Windows/Linux, or Option + Right Arrow on Mac

## Troubleshooting
@ -191,7 +217,6 @@ If you find QodeAssist helpful, there are several ways you can support the proje
|
||||
3. **Spread the Word**: Star our GitHub repository and share QodeAssist with your fellow developers.
|
||||
|
||||
4. **Financial Support**: If you'd like to support the development financially, you can make a donation using one of the following:
|
||||
- [](https://ko-fi.com/P5P412V96G)
|
||||
- Bitcoin (BTC): `bc1qndq7f0mpnlya48vk7kugvyqj5w89xrg4wzg68t`
|
||||
- Ethereum (ETH): `0xA5e8c37c94b24e25F9f1f292a01AF55F03099D8D`
|
||||
- Litecoin (LTC): `ltc1qlrxnk30s2pcjchzx4qrxvdjt5gzuervy5mv0vy`
|
||||
@ -215,3 +240,7 @@ relative or absolute path to this plugin directory.
|
||||
|
||||
QML code style: Preferably follow the following guidelines https://github.com/Furkanzmc/QML-Coding-Guide, thank you @Furkanzmc for collect them
|
||||
C++ code style: check use .clang-fortmat in project
|
||||
|
||||

|
||||

|
||||
|
||||
|
||||
@@ -7,6 +7,10 @@ add_library(LLMCore STATIC
    PromptTemplateManager.hpp PromptTemplateManager.cpp
    RequestConfig.hpp
    RequestHandler.hpp RequestHandler.cpp
    OllamaMessage.hpp OllamaMessage.cpp
    OpenAIMessage.hpp OpenAIMessage.cpp
    ValidationUtils.hpp ValidationUtils.cpp
    MessageBuilder.hpp MessageBuilder.cpp
)

target_link_libraries(LLMCore

llmcore/MessageBuilder.cpp (new file)
@@ -0,0 +1,92 @@

/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#include "MessageBuilder.hpp"

QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addSystemMessage(
    const QString &content)
{
    m_systemMessage = content;
    return *this;
}

QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addUserMessage(
    const QString &content)
{
    m_messages.append({MessageRole::User, content});
    return *this;
}

QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addSuffix(
    const QString &content)
{
    m_suffix = content;
    return *this;
}

QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addtTokenizer(
    PromptTemplate *promptTemplate)
{
    m_promptTemplate = promptTemplate;
    return *this;
}

QString QodeAssist::LLMCore::MessageBuilder::roleToString(MessageRole role) const
{
    switch (role) {
    case MessageRole::System:
        return ROLE_SYSTEM;
    case MessageRole::User:
        return ROLE_USER;
    case MessageRole::Assistant:
        return ROLE_ASSISTANT;
    default:
        return ROLE_USER;
    }
}

void QodeAssist::LLMCore::MessageBuilder::saveTo(QJsonObject &request, ProvidersApi api)
{
    if (!m_promptTemplate) {
        return;
    }

    ContextData context{
        m_messages.isEmpty() ? QString() : m_messages.last().content, m_suffix, m_systemMessage};

    if (api == ProvidersApi::Ollama) {
        if (m_promptTemplate->type() == TemplateType::Fim) {
            m_promptTemplate->prepareRequest(request, context);
        } else {
            QJsonArray messages;

            messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
            messages.append(QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
            request["messages"] = messages;
            m_promptTemplate->prepareRequest(request, context);
        }
    } else if (api == ProvidersApi::OpenAI) {
        QJsonArray messages;

        messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
        messages.append(QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
        request["messages"] = messages;
        m_promptTemplate->prepareRequest(request, context);
    }
}

llmcore/MessageBuilder.hpp (new file)
@@ -0,0 +1,68 @@

/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include <QJsonArray>
#include <QJsonObject>
#include <QString>
#include <QVector>

#include "PromptTemplate.hpp"

namespace QodeAssist::LLMCore {

enum class MessageRole { System, User, Assistant };

enum class OllamaFormat { Messages, Completions };

enum class ProvidersApi { Ollama, OpenAI };

static const QString ROLE_SYSTEM = "system";
static const QString ROLE_USER = "user";
static const QString ROLE_ASSISTANT = "assistant";

struct Message
{
    MessageRole role;
    QString content;
};

class MessageBuilder
{
public:
    MessageBuilder &addSystemMessage(const QString &content);

    MessageBuilder &addUserMessage(const QString &content);

    MessageBuilder &addSuffix(const QString &content);

    MessageBuilder &addtTokenizer(PromptTemplate *promptTemplate);

    QString roleToString(MessageRole role) const;

    void saveTo(QJsonObject &request, ProvidersApi api);

private:
    QString m_systemMessage;
    QString m_suffix;
    QVector<Message> m_messages;
    PromptTemplate *m_promptTemplate;
};
} // namespace QodeAssist::LLMCore

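
The builder above is chained fluently and then serialized into a provider request. Below is a minimal usage sketch, assuming a valid chat-style `PromptTemplate *` is already available (the template pointer, the model name, and the helper function are illustrative assumptions, not part of this diff):

```cpp
#include <QJsonObject>

#include "llmcore/MessageBuilder.hpp"

using namespace QodeAssist::LLMCore;

// Hypothetical helper: 'chatTemplate' stands in for any registered chat
// PromptTemplate; obtaining it is outside the scope of this sketch.
QJsonObject buildChatRequest(PromptTemplate *chatTemplate)
{
    QJsonObject request;
    request["model"] = "qwen2.5-coder:7b-instruct"; // example model name

    MessageBuilder builder;
    builder.addSystemMessage("You are a helpful coding assistant.")
        .addUserMessage("Explain what MessageBuilder::saveTo does.")
        .addtTokenizer(chatTemplate); // note: method name as declared in MessageBuilder.hpp

    // Fills request["messages"] and lets the template adjust the payload.
    builder.saveTo(request, ProvidersApi::OpenAI);
    return request;
}
```
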
llmcore/OllamaMessage.cpp (new file)
@@ -0,0 +1,102 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "OllamaMessage.hpp"
|
||||
#include <QJsonArray>
|
||||
#include <QJsonDocument>
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
QJsonObject OllamaMessage::parseJsonFromData(const QByteArray &data)
|
||||
{
|
||||
QByteArrayList lines = data.split('\n');
|
||||
for (const QByteArray &line : lines) {
|
||||
if (line.trimmed().isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
QJsonParseError error;
|
||||
QJsonDocument doc = QJsonDocument::fromJson(line, &error);
|
||||
if (!doc.isNull() && error.error == QJsonParseError::NoError) {
|
||||
return doc.object();
|
||||
}
|
||||
}
|
||||
return QJsonObject();
|
||||
}
|
||||
|
||||
OllamaMessage OllamaMessage::fromJson(const QByteArray &data, Type type)
|
||||
{
|
||||
OllamaMessage msg;
|
||||
QJsonObject obj = parseJsonFromData(data);
|
||||
|
||||
if (obj.isEmpty()) {
|
||||
msg.error = "Invalid JSON response";
|
||||
return msg;
|
||||
}
|
||||
|
||||
msg.model = obj["model"].toString();
|
||||
msg.createdAt = QDateTime::fromString(obj["created_at"].toString(), Qt::ISODate);
|
||||
msg.done = obj["done"].toBool();
|
||||
msg.doneReason = obj["done_reason"].toString();
|
||||
msg.error = obj["error"].toString();
|
||||
|
||||
if (type == Type::Generate) {
|
||||
auto &genResponse = msg.response.emplace<GenerateResponse>();
|
||||
genResponse.response = obj["response"].toString();
|
||||
if (msg.done && obj.contains("context")) {
|
||||
const auto array = obj["context"].toArray();
|
||||
genResponse.context.reserve(array.size());
|
||||
for (const auto &val : array) {
|
||||
genResponse.context.append(val.toInt());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
auto &chatResponse = msg.response.emplace<ChatResponse>();
|
||||
const auto msgObj = obj["message"].toObject();
|
||||
chatResponse.role = msgObj["role"].toString();
|
||||
chatResponse.content = msgObj["content"].toString();
|
||||
}
|
||||
|
||||
if (msg.done) {
|
||||
msg.metrics
|
||||
= {obj["total_duration"].toVariant().toLongLong(),
|
||||
obj["load_duration"].toVariant().toLongLong(),
|
||||
obj["prompt_eval_count"].toVariant().toLongLong(),
|
||||
obj["prompt_eval_duration"].toVariant().toLongLong(),
|
||||
obj["eval_count"].toVariant().toLongLong(),
|
||||
obj["eval_duration"].toVariant().toLongLong()};
|
||||
}
|
||||
|
||||
return msg;
|
||||
}
|
||||
|
||||
QString OllamaMessage::getContent() const
|
||||
{
|
||||
if (std::holds_alternative<GenerateResponse>(response)) {
|
||||
return std::get<GenerateResponse>(response).response;
|
||||
}
|
||||
return std::get<ChatResponse>(response).content;
|
||||
}
|
||||
|
||||
bool OllamaMessage::hasError() const
|
||||
{
|
||||
return !error.isEmpty();
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
llmcore/OllamaMessage.hpp (new file)
@@ -0,0 +1,71 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <QDateTime>
|
||||
#include <QJsonObject>
|
||||
#include <QObject>
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
class OllamaMessage
|
||||
{
|
||||
public:
|
||||
enum class Type { Generate, Chat };
|
||||
|
||||
struct Metrics
|
||||
{
|
||||
qint64 totalDuration{0};
|
||||
qint64 loadDuration{0};
|
||||
qint64 promptEvalCount{0};
|
||||
qint64 promptEvalDuration{0};
|
||||
qint64 evalCount{0};
|
||||
qint64 evalDuration{0};
|
||||
};
|
||||
|
||||
struct GenerateResponse
|
||||
{
|
||||
QString response;
|
||||
QVector<int> context;
|
||||
};
|
||||
|
||||
struct ChatResponse
|
||||
{
|
||||
QString role;
|
||||
QString content;
|
||||
};
|
||||
|
||||
QString model;
|
||||
QDateTime createdAt;
|
||||
std::variant<GenerateResponse, ChatResponse> response;
|
||||
bool done{false};
|
||||
QString doneReason;
|
||||
QString error;
|
||||
Metrics metrics;
|
||||
|
||||
static OllamaMessage fromJson(const QByteArray &data, Type type);
|
||||
QString getContent() const;
|
||||
bool hasError() const;
|
||||
|
||||
private:
|
||||
static QJsonObject parseJsonFromData(const QByteArray &data);
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
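
OllamaMessage wraps one line of Ollama's streaming NDJSON output. A small sketch of feeding it a raw chat-endpoint chunk follows; the sample JSON is illustrative, not captured from a real run:

```cpp
#include <QByteArray>
#include <QDebug>

#include "llmcore/OllamaMessage.hpp"

using namespace QodeAssist::LLMCore;

void demoOllamaChunk()
{
    // One line of an /api/chat stream; the fields match what fromJson() reads above.
    const QByteArray chunk =
        R"({"model":"codellama:7b-instruct","created_at":"2024-01-01T00:00:00Z",)"
        R"("message":{"role":"assistant","content":"Hello"},"done":false})";

    auto msg = OllamaMessage::fromJson(chunk, OllamaMessage::Type::Chat);
    if (msg.hasError()) {
        qWarning() << "Ollama error:" << msg.error;
        return;
    }

    qDebug() << "delta:" << msg.getContent(); // "Hello"
    qDebug() << "stream finished:" << msg.done; // false until the final chunk
}
```
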
llmcore/OpenAIMessage.cpp (new file)
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "OpenAIMessage.hpp"
|
||||
#include <QJsonArray>
|
||||
#include <QJsonDocument>
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
OpenAIMessage OpenAIMessage::fromJson(const QJsonObject &obj)
|
||||
{
|
||||
OpenAIMessage msg;
|
||||
|
||||
if (obj.contains("error")) {
|
||||
msg.error = obj["error"].toObject()["message"].toString();
|
||||
return msg;
|
||||
}
|
||||
|
||||
if (obj.contains("choices")) {
|
||||
auto choices = obj["choices"].toArray();
|
||||
if (!choices.isEmpty()) {
|
||||
auto choiceObj = choices[0].toObject();
|
||||
|
||||
if (choiceObj.contains("delta")) {
|
||||
QJsonObject delta = choiceObj["delta"].toObject();
|
||||
msg.choice.content = delta["content"].toString();
|
||||
} else if (choiceObj.contains("message")) {
|
||||
QJsonObject message = choiceObj["message"].toObject();
|
||||
msg.choice.content = message["content"].toString();
|
||||
}
|
||||
|
||||
msg.choice.finishReason = choiceObj["finish_reason"].toString();
|
||||
if (!msg.choice.finishReason.isEmpty()) {
|
||||
msg.done = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (obj.contains("usage")) {
|
||||
QJsonObject usage = obj["usage"].toObject();
|
||||
msg.usage.promptTokens = usage["prompt_tokens"].toInt();
|
||||
msg.usage.completionTokens = usage["completion_tokens"].toInt();
|
||||
msg.usage.totalTokens = usage["total_tokens"].toInt();
|
||||
}
|
||||
|
||||
return msg;
|
||||
}
|
||||
|
||||
QString OpenAIMessage::getContent() const
|
||||
{
|
||||
return choice.content;
|
||||
}
|
||||
|
||||
bool OpenAIMessage::hasError() const
|
||||
{
|
||||
return !error.isEmpty();
|
||||
}
|
||||
|
||||
bool OpenAIMessage::isDone() const
|
||||
{
|
||||
return done
|
||||
|| (!choice.finishReason.isEmpty()
|
||||
&& (choice.finishReason == "stop" || choice.finishReason == "length"));
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
llmcore/OpenAIMessage.hpp (new file)
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <QByteArray>
|
||||
#include <QJsonObject>
|
||||
#include <QString>
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
class OpenAIMessage
|
||||
{
|
||||
public:
|
||||
struct Choice
|
||||
{
|
||||
QString content;
|
||||
QString finishReason;
|
||||
};
|
||||
|
||||
struct Usage
|
||||
{
|
||||
int promptTokens{0};
|
||||
int completionTokens{0};
|
||||
int totalTokens{0};
|
||||
};
|
||||
|
||||
Choice choice;
|
||||
QString error;
|
||||
bool done{false};
|
||||
Usage usage;
|
||||
|
||||
QString getContent() const;
|
||||
bool hasError() const;
|
||||
bool isDone() const;
|
||||
|
||||
static OpenAIMessage fromJson(const QJsonObject &obj);
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
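
OpenAIMessage expects a single JSON object, so a streaming consumer first strips the SSE `data: ` prefix, as the providers further down do. A sketch under that assumption; the sample payload is illustrative:

```cpp
#include <QByteArray>
#include <QDebug>
#include <QJsonDocument>

#include "llmcore/OpenAIMessage.hpp"

using namespace QodeAssist::LLMCore;

void demoOpenAIChunk()
{
    QByteArray chunk =
        R"(data: {"choices":[{"delta":{"content":"int main()"},"finish_reason":null}]})";

    if (chunk.startsWith("data: "))
        chunk = chunk.mid(6); // strip the SSE prefix, mirroring the providers

    const QJsonDocument doc = QJsonDocument::fromJson(chunk);
    if (doc.isNull())
        return;

    const auto msg = OpenAIMessage::fromJson(doc.object());
    if (msg.hasError()) {
        qDebug() << "error:" << msg.error;
        return;
    }

    qDebug() << "delta:" << msg.getContent(); // "int main()"
    qDebug() << "finished:" << msg.isDone();  // false: finish_reason is null here
}
```
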
@ -38,5 +38,6 @@ public:
|
||||
virtual QString promptTemplate() const = 0;
|
||||
virtual QStringList stopWords() const = 0;
|
||||
virtual void prepareRequest(QJsonObject &request, const ContextData &context) const = 0;
|
||||
virtual QString description() const = 0;
|
||||
};
|
||||
} // namespace QodeAssist::LLMCore
|
||||
|
||||
@ -40,7 +40,6 @@ QStringList PromptTemplateManager::chatTemplatesNames() const
|
||||
PromptTemplateManager::~PromptTemplateManager()
|
||||
{
|
||||
qDeleteAll(m_fimTemplates);
|
||||
qDeleteAll(m_chatTemplates);
|
||||
}
|
||||
|
||||
PromptTemplate *PromptTemplateManager::getFimTemplateByName(const QString &templateName)
|
||||
|
||||
@ -39,9 +39,8 @@ public:
|
||||
"T must inherit from PromptTemplate");
|
||||
T *template_ptr = new T();
|
||||
QString name = template_ptr->name();
|
||||
if (template_ptr->type() == TemplateType::Fim) {
|
||||
m_fimTemplates[name] = template_ptr;
|
||||
} else if (template_ptr->type() == TemplateType::Chat) {
|
||||
m_fimTemplates[name] = template_ptr;
|
||||
if (template_ptr->type() == TemplateType::Chat) {
|
||||
m_chatTemplates[name] = template_ptr;
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,9 +20,11 @@
|
||||
#pragma once
|
||||
|
||||
#include <QString>
|
||||
#include "RequestType.hpp"
|
||||
#include <utils/environment.h>
|
||||
|
||||
#include "PromptTemplate.hpp"
|
||||
#include "RequestType.hpp"
|
||||
|
||||
class QNetworkReply;
|
||||
class QJsonObject;
|
||||
|
||||
@ -42,6 +44,7 @@ public:
|
||||
virtual void prepareRequest(QJsonObject &request, RequestType type) = 0;
|
||||
virtual bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) = 0;
|
||||
virtual QList<QString> getInstalledModels(const QString &url) = 0;
|
||||
virtual QList<QString> validateRequest(const QJsonObject &request, TemplateType type) = 0;
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
|
||||
@ -35,6 +35,7 @@ struct LLMConfig
|
||||
QJsonObject providerRequest;
|
||||
RequestType requestType;
|
||||
bool multiLineCompletion;
|
||||
QString apiKey;
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
|
||||
@ -38,7 +38,7 @@ void RequestHandler::sendLLMRequest(const LLMConfig &config, const QJsonObject &
|
||||
QJsonDocument(config.providerRequest).toJson(QJsonDocument::Indented))));
|
||||
|
||||
QNetworkRequest networkRequest(config.url);
|
||||
prepareNetworkRequest(networkRequest, config.providerRequest);
|
||||
prepareNetworkRequest(networkRequest, config.apiKey);
|
||||
|
||||
QNetworkReply *reply = m_manager->post(networkRequest,
|
||||
QJsonDocument(config.providerRequest).toJson());
|
||||
@ -75,7 +75,7 @@ void RequestHandler::handleLLMResponse(QNetworkReply *reply,
|
||||
|
||||
bool isComplete = config.provider->handleResponse(reply, accumulatedResponse);
|
||||
|
||||
if (config.requestType == RequestType::Fim) {
|
||||
if (config.requestType == RequestType::CodeCompletion) {
|
||||
if (!config.multiLineCompletion
|
||||
&& processSingleLineCompletion(reply, request, accumulatedResponse, config)) {
|
||||
return;
|
||||
@ -84,6 +84,7 @@ void RequestHandler::handleLLMResponse(QNetworkReply *reply,
|
||||
if (isComplete) {
|
||||
auto cleanedCompletion = removeStopWords(accumulatedResponse,
|
||||
config.promptTemplate->stopWords());
|
||||
|
||||
emit completionReceived(cleanedCompletion, request, true);
|
||||
}
|
||||
} else if (config.requestType == RequestType::Chat) {
|
||||
@ -107,33 +108,32 @@ bool RequestHandler::cancelRequest(const QString &id)
|
||||
return false;
|
||||
}
|
||||
|
||||
void RequestHandler::prepareNetworkRequest(QNetworkRequest &networkRequest,
|
||||
const QJsonObject &providerRequest)
|
||||
void RequestHandler::prepareNetworkRequest(
|
||||
QNetworkRequest &networkRequest, const QString &apiKey) const
|
||||
{
|
||||
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
|
||||
|
||||
if (providerRequest.contains("api_key")) {
|
||||
QString apiKey = providerRequest["api_key"].toString();
|
||||
if (!apiKey.isEmpty()) {
|
||||
networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey).toUtf8());
|
||||
}
|
||||
}
|
||||
|
||||
bool RequestHandler::processSingleLineCompletion(QNetworkReply *reply,
|
||||
const QJsonObject &request,
|
||||
const QString &accumulatedResponse,
|
||||
const LLMConfig &config)
|
||||
bool RequestHandler::processSingleLineCompletion(
|
||||
QNetworkReply *reply,
|
||||
const QJsonObject &request,
|
||||
const QString &accumulatedResponse,
|
||||
const LLMConfig &config)
|
||||
{
|
||||
int newlinePos = accumulatedResponse.indexOf('\n');
|
||||
QString cleanedResponse = accumulatedResponse;
|
||||
|
||||
int newlinePos = cleanedResponse.indexOf('\n');
|
||||
if (newlinePos != -1) {
|
||||
QString singleLineCompletion = accumulatedResponse.left(newlinePos).trimmed();
|
||||
singleLineCompletion = removeStopWords(singleLineCompletion,
|
||||
config.promptTemplate->stopWords());
|
||||
|
||||
QString singleLineCompletion = cleanedResponse.left(newlinePos).trimmed();
|
||||
singleLineCompletion
|
||||
= removeStopWords(singleLineCompletion, config.promptTemplate->stopWords());
|
||||
emit completionReceived(singleLineCompletion, request, true);
|
||||
m_accumulatedResponses.remove(reply);
|
||||
reply->abort();
|
||||
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -150,4 +150,36 @@ QString RequestHandler::removeStopWords(const QStringView &completion, const QSt
|
||||
return filteredCompletion;
|
||||
}
|
||||
|
||||
void RequestHandler::removeCodeBlockWrappers(QString &response)
|
||||
{
|
||||
static const QRegularExpression
|
||||
fullCodeBlockRegex(R"(```[\w\s]*\n([\s\S]*?)```)", QRegularExpression::MultilineOption);
|
||||
static const QRegularExpression
|
||||
partialStartBlockRegex(R"(```[\w\s]*\n([\s\S]*?)$)", QRegularExpression::MultilineOption);
|
||||
static const QRegularExpression
|
||||
partialEndBlockRegex(R"(^([\s\S]*?)```)", QRegularExpression::MultilineOption);
|
||||
|
||||
QRegularExpressionMatchIterator matchIterator = fullCodeBlockRegex.globalMatch(response);
|
||||
while (matchIterator.hasNext()) {
|
||||
QRegularExpressionMatch match = matchIterator.next();
|
||||
QString codeBlock = match.captured(0);
|
||||
QString codeContent = match.captured(1).trimmed();
|
||||
response.replace(codeBlock, codeContent);
|
||||
}
|
||||
|
||||
QRegularExpressionMatch startMatch = partialStartBlockRegex.match(response);
|
||||
if (startMatch.hasMatch()) {
|
||||
QString partialBlock = startMatch.captured(0);
|
||||
QString codeContent = startMatch.captured(1).trimmed();
|
||||
response.replace(partialBlock, codeContent);
|
||||
}
|
||||
|
||||
QRegularExpressionMatch endMatch = partialEndBlockRegex.match(response);
|
||||
if (endMatch.hasMatch()) {
|
||||
QString partialBlock = endMatch.captured(0);
|
||||
QString codeContent = endMatch.captured(1).trimmed();
|
||||
response.replace(partialBlock, codeContent);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
|
||||
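
removeCodeBlockWrappers itself is a private helper of RequestHandler, but its effect is easy to reproduce with the same full-block regular expression. A standalone sketch; the sample input string is made up:

````cpp
#include <QDebug>
#include <QRegularExpression>
#include <QString>

void demoStripCodeFence()
{
    // Same pattern as fullCodeBlockRegex above.
    static const QRegularExpression fullCodeBlockRegex(
        R"(```[\w\s]*\n([\s\S]*?)```)", QRegularExpression::MultilineOption);

    QString response = "```cpp\nint add(int a, int b) { return a + b; }\n```";

    QRegularExpressionMatch match = fullCodeBlockRegex.match(response);
    if (match.hasMatch())
        response.replace(match.captured(0), match.captured(1).trimmed());

    qDebug().noquote() << response; // int add(int a, int b) { return a + b; }
}
````
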
@ -52,12 +52,13 @@ private:
|
||||
QMap<QString, QNetworkReply *> m_activeRequests;
|
||||
QMap<QNetworkReply *, QString> m_accumulatedResponses;
|
||||
|
||||
void prepareNetworkRequest(QNetworkRequest &networkRequest, const QJsonObject &providerRequest);
|
||||
void prepareNetworkRequest(QNetworkRequest &networkRequest, const QString &apiKey) const;
|
||||
bool processSingleLineCompletion(QNetworkReply *reply,
|
||||
const QJsonObject &request,
|
||||
const QString &accumulatedResponse,
|
||||
const LLMConfig &config);
|
||||
QString removeStopWords(const QStringView &completion, const QStringList &stopWords);
|
||||
void removeCodeBlockWrappers(QString &response);
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
|
||||
@ -21,5 +21,5 @@
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
enum RequestType { Fim, Chat };
|
||||
enum RequestType { CodeCompletion, Chat };
|
||||
}
|
||||
|
||||
llmcore/ValidationUtils.cpp (new file)
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "ValidationUtils.hpp"
|
||||
|
||||
#include <QJsonArray>
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
QStringList ValidationUtils::validateRequestFields(
|
||||
const QJsonObject &request, const QJsonObject &templateObj)
|
||||
{
|
||||
QStringList errors;
|
||||
validateFields(request, templateObj, errors);
|
||||
validateNestedObjects(request, templateObj, errors);
|
||||
return errors;
|
||||
}
|
||||
|
||||
void ValidationUtils::validateFields(
|
||||
const QJsonObject &request, const QJsonObject &templateObj, QStringList &errors)
|
||||
{
|
||||
for (auto it = request.begin(); it != request.end(); ++it) {
|
||||
if (!templateObj.contains(it.key())) {
|
||||
errors << QString("unknown field '%1'").arg(it.key());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ValidationUtils::validateNestedObjects(
|
||||
const QJsonObject &request, const QJsonObject &templateObj, QStringList &errors)
|
||||
{
|
||||
for (auto it = request.begin(); it != request.end(); ++it) {
|
||||
if (templateObj.contains(it.key()) && it.value().isObject()
|
||||
&& templateObj[it.key()].isObject()) {
|
||||
validateFields(it.value().toObject(), templateObj[it.key()].toObject(), errors);
|
||||
validateNestedObjects(it.value().toObject(), templateObj[it.key()].toObject(), errors);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
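
validateRequestFields compares a request against a template object that whitelists the allowed keys, including nested ones. A small sketch of how an unexpected field is reported; the field values are illustrative:

```cpp
#include <QDebug>
#include <QJsonObject>

#include "llmcore/ValidationUtils.hpp"

using namespace QodeAssist::LLMCore;

void demoValidation()
{
    // Whitelist: only "model" and "options.temperature" are known fields.
    const QJsonObject templateObj{
        {"model", {}},
        {"options", QJsonObject{{"temperature", {}}}}};

    const QJsonObject request{
        {"model", "starcoder2:7b"},
        {"options", QJsonObject{{"temperature", 0.2}, {"seed", 42}}}};

    const QStringList errors = ValidationUtils::validateRequestFields(request, templateObj);
    for (const QString &err : errors)
        qDebug() << err; // "unknown field 'seed'"
}
```
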
llmcore/ValidationUtils.hpp (new file)
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <QJsonObject>
|
||||
#include <QStringList>
|
||||
|
||||
namespace QodeAssist::LLMCore {
|
||||
|
||||
class ValidationUtils
|
||||
{
|
||||
public:
|
||||
static QStringList validateRequestFields(
|
||||
const QJsonObject &request, const QJsonObject &templateObj);
|
||||
|
||||
private:
|
||||
static void validateFields(
|
||||
const QJsonObject &request, const QJsonObject &templateObj, QStringList &errors);
|
||||
|
||||
static void validateNestedObjects(
|
||||
const QJsonObject &request, const QJsonObject &templateObj, QStringList &errors);
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::LLMCore
|
||||
@ -25,6 +25,8 @@
|
||||
#include <QJsonObject>
|
||||
#include <QNetworkReply>
|
||||
|
||||
#include "llmcore/OpenAIMessage.hpp"
|
||||
#include "llmcore/ValidationUtils.hpp"
|
||||
#include "logger/Logger.hpp"
|
||||
#include "settings/ChatAssistantSettings.hpp"
|
||||
#include "settings/CodeCompletionSettings.hpp"
|
||||
@ -92,7 +94,7 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
|
||||
request["messages"] = std::move(messages);
|
||||
}
|
||||
|
||||
if (type == LLMCore::RequestType::Fim) {
|
||||
if (type == LLMCore::RequestType::CodeCompletion) {
|
||||
applyModelParams(Settings::codeCompletionSettings());
|
||||
} else {
|
||||
applyModelParams(Settings::chatAssistantSettings());
|
||||
@ -101,43 +103,40 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
|
||||
|
||||
bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
|
||||
{
|
||||
bool isComplete = false;
|
||||
while (reply->canReadLine()) {
|
||||
QByteArray line = reply->readLine().trimmed();
|
||||
if (line.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
if (line == "data: [DONE]") {
|
||||
isComplete = true;
|
||||
break;
|
||||
}
|
||||
if (line.startsWith("data: ")) {
|
||||
line = line.mid(6); // Remove "data: " prefix
|
||||
}
|
||||
QJsonDocument jsonResponse = QJsonDocument::fromJson(line);
|
||||
if (jsonResponse.isNull()) {
|
||||
qWarning() << "Invalid JSON response from LM Studio:" << line;
|
||||
continue;
|
||||
}
|
||||
QJsonObject responseObj = jsonResponse.object();
|
||||
if (responseObj.contains("choices")) {
|
||||
QJsonArray choices = responseObj["choices"].toArray();
|
||||
if (!choices.isEmpty()) {
|
||||
QJsonObject choice = choices.first().toObject();
|
||||
QJsonObject delta = choice["delta"].toObject();
|
||||
if (delta.contains("content")) {
|
||||
QString completion = delta["content"].toString();
|
||||
|
||||
accumulatedResponse += completion;
|
||||
}
|
||||
if (choice["finish_reason"].toString() == "stop") {
|
||||
isComplete = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
QByteArray data = reply->readAll();
|
||||
if (data.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
return isComplete;
|
||||
|
||||
QByteArrayList chunks = data.split('\n');
|
||||
for (const QByteArray &chunk : chunks) {
|
||||
if (chunk.trimmed().isEmpty() || chunk == "data: [DONE]") {
|
||||
continue;
|
||||
}
|
||||
|
||||
QByteArray jsonData = chunk;
|
||||
if (chunk.startsWith("data: ")) {
|
||||
jsonData = chunk.mid(6);
|
||||
}
|
||||
|
||||
QJsonParseError error;
|
||||
QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
|
||||
|
||||
if (doc.isNull()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
|
||||
if (message.hasError()) {
|
||||
LOG_MESSAGE("Error in LMStudioProvider response: " + message.error);
|
||||
continue;
|
||||
}
|
||||
|
||||
accumulatedResponse += message.getContent();
|
||||
return message.isDone();
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
QList<QString> LMStudioProvider::getInstalledModels(const QString &url)
|
||||
@ -171,4 +170,22 @@ QList<QString> LMStudioProvider::getInstalledModels(const QString &url)
|
||||
return models;
|
||||
}
|
||||
|
||||
QList<QString> LMStudioProvider::validateRequest(
|
||||
const QJsonObject &request, LLMCore::TemplateType type)
|
||||
{
|
||||
const auto templateReq = QJsonObject{
|
||||
{"model", {}},
|
||||
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
|
||||
{"temperature", {}},
|
||||
{"max_tokens", {}},
|
||||
{"top_p", {}},
|
||||
{"top_k", {}},
|
||||
{"frequency_penalty", {}},
|
||||
{"presence_penalty", {}},
|
||||
{"stop", QJsonArray{}},
|
||||
{"stream", {}}};
|
||||
|
||||
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
|
||||
@ -36,6 +36,7 @@ public:
|
||||
void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
|
||||
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
|
||||
QList<QString> getInstalledModels(const QString &url) override;
|
||||
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
|
||||
@ -25,6 +25,8 @@
|
||||
#include <QNetworkReply>
|
||||
#include <QtCore/qeventloop.h>
|
||||
|
||||
#include "llmcore/OllamaMessage.hpp"
|
||||
#include "llmcore/ValidationUtils.hpp"
|
||||
#include "logger/Logger.hpp"
|
||||
#include "settings/ChatAssistantSettings.hpp"
|
||||
#include "settings/CodeCompletionSettings.hpp"
|
||||
@ -64,6 +66,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
|
||||
QJsonObject options;
|
||||
options["num_predict"] = settings.maxTokens();
|
||||
options["temperature"] = settings.temperature();
|
||||
options["stop"] = request.take("stop");
|
||||
|
||||
if (settings.useTopP())
|
||||
options["top_p"] = settings.topP();
|
||||
@ -78,7 +81,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
|
||||
request["keep_alive"] = settings.ollamaLivetime();
|
||||
};
|
||||
|
||||
if (type == LLMCore::RequestType::Fim) {
|
||||
if (type == LLMCore::RequestType::CodeCompletion) {
|
||||
applySettings(Settings::codeCompletionSettings());
|
||||
} else {
|
||||
applySettings(Settings::chatAssistantSettings());
|
||||
@ -87,53 +90,23 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
|
||||
|
||||
bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
|
||||
{
|
||||
QString endpoint = reply->url().path();
|
||||
|
||||
bool isComplete = false;
|
||||
while (reply->canReadLine()) {
|
||||
QByteArray line = reply->readLine().trimmed();
|
||||
if (line.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
QJsonDocument doc = QJsonDocument::fromJson(line);
|
||||
if (doc.isNull()) {
|
||||
LOG_MESSAGE("Invalid JSON response from Ollama: " + QString::fromUtf8(line));
|
||||
continue;
|
||||
}
|
||||
|
||||
QJsonObject responseObj = doc.object();
|
||||
|
||||
if (responseObj.contains("error")) {
|
||||
QString errorMessage = responseObj["error"].toString();
|
||||
LOG_MESSAGE("Error in Ollama response: " + errorMessage);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (endpoint == completionEndpoint()) {
|
||||
if (responseObj.contains("response")) {
|
||||
QString completion = responseObj["response"].toString();
|
||||
accumulatedResponse += completion;
|
||||
}
|
||||
} else if (endpoint == chatEndpoint()) {
|
||||
if (responseObj.contains("message")) {
|
||||
QJsonObject message = responseObj["message"].toObject();
|
||||
if (message.contains("content")) {
|
||||
QString content = message["content"].toString();
|
||||
accumulatedResponse += content;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
LOG_MESSAGE("Unknown endpoint: " + endpoint);
|
||||
}
|
||||
|
||||
if (responseObj.contains("done") && responseObj["done"].toBool()) {
|
||||
isComplete = true;
|
||||
break;
|
||||
}
|
||||
QByteArray data = reply->readAll();
|
||||
if (data.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return isComplete;
|
||||
const QString endpoint = reply->url().path();
|
||||
auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate
|
||||
: LLMCore::OllamaMessage::Type::Chat;
|
||||
|
||||
auto message = LLMCore::OllamaMessage::fromJson(data, messageType);
|
||||
if (message.hasError()) {
|
||||
LOG_MESSAGE("Error in Ollama response: " + message.error);
|
||||
return false;
|
||||
}
|
||||
|
||||
accumulatedResponse += message.getContent();
|
||||
return message.done;
|
||||
}
|
||||
|
||||
QList<QString> OllamaProvider::getInstalledModels(const QString &url)
|
||||
@ -166,4 +139,42 @@ QList<QString> OllamaProvider::getInstalledModels(const QString &url)
|
||||
return models;
|
||||
}
|
||||
|
||||
QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type)
|
||||
{
|
||||
const auto fimReq = QJsonObject{
|
||||
{"keep_alive", {}},
|
||||
{"model", {}},
|
||||
{"stream", {}},
|
||||
{"prompt", {}},
|
||||
{"suffix", {}},
|
||||
{"system", {}},
|
||||
{"options",
|
||||
QJsonObject{
|
||||
{"temperature", {}},
|
||||
{"stop", {}},
|
||||
{"top_p", {}},
|
||||
{"top_k", {}},
|
||||
{"num_predict", {}},
|
||||
{"frequency_penalty", {}},
|
||||
{"presence_penalty", {}}}}};
|
||||
|
||||
const auto messageReq = QJsonObject{
|
||||
{"keep_alive", {}},
|
||||
{"model", {}},
|
||||
{"stream", {}},
|
||||
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
|
||||
{"options",
|
||||
QJsonObject{
|
||||
{"temperature", {}},
|
||||
{"stop", {}},
|
||||
{"top_p", {}},
|
||||
{"top_k", {}},
|
||||
{"num_predict", {}},
|
||||
{"frequency_penalty", {}},
|
||||
{"presence_penalty", {}}}}};
|
||||
|
||||
return LLMCore::ValidationUtils::validateRequestFields(
|
||||
request, type == LLMCore::TemplateType::Fim ? fimReq : messageReq);
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
|
||||
@ -36,6 +36,7 @@ public:
|
||||
void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
|
||||
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
|
||||
QList<QString> getInstalledModels(const QString &url) override;
|
||||
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
|
||||
@ -26,13 +26,17 @@
|
||||
#include <QJsonObject>
|
||||
#include <QNetworkReply>
|
||||
|
||||
#include "llmcore/OpenAIMessage.hpp"
|
||||
#include "llmcore/ValidationUtils.hpp"
|
||||
#include "logger/Logger.hpp"
|
||||
|
||||
namespace QodeAssist::Providers {
|
||||
|
||||
OpenAICompatProvider::OpenAICompatProvider() {}
|
||||
|
||||
QString OpenAICompatProvider::name() const
|
||||
{
|
||||
return "OpenAI Compatible (experimental)";
|
||||
return "OpenAI Compatible";
|
||||
}
|
||||
|
||||
QString OpenAICompatProvider::url() const
|
||||
@ -82,10 +86,6 @@ void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::Request
|
||||
request["frequency_penalty"] = settings.frequencyPenalty();
|
||||
if (settings.usePresencePenalty())
|
||||
request["presence_penalty"] = settings.presencePenalty();
|
||||
const QString &apiKey = settings.apiKey();
|
||||
if (!apiKey.isEmpty()) {
|
||||
request["api_key"] = apiKey;
|
||||
}
|
||||
};
|
||||
|
||||
QJsonArray messages = prepareMessages(request);
|
||||
@ -93,7 +93,7 @@ void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::Request
|
||||
request["messages"] = std::move(messages);
|
||||
}
|
||||
|
||||
if (type == LLMCore::RequestType::Fim) {
|
||||
if (type == LLMCore::RequestType::CodeCompletion) {
|
||||
applyModelParams(Settings::codeCompletionSettings());
|
||||
} else {
|
||||
applyModelParams(Settings::chatAssistantSettings());
|
||||
@ -102,43 +102,40 @@ void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::Request
|
||||
|
||||
bool OpenAICompatProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
|
||||
{
|
||||
bool isComplete = false;
|
||||
while (reply->canReadLine()) {
|
||||
QByteArray line = reply->readLine().trimmed();
|
||||
if (line.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
if (line == "data: [DONE]") {
|
||||
isComplete = true;
|
||||
break;
|
||||
}
|
||||
if (line.startsWith("data: ")) {
|
||||
line = line.mid(6); // Remove "data: " prefix
|
||||
}
|
||||
QJsonDocument jsonResponse = QJsonDocument::fromJson(line);
|
||||
if (jsonResponse.isNull()) {
|
||||
qWarning() << "Invalid JSON response from LM Studio:" << line;
|
||||
continue;
|
||||
}
|
||||
QJsonObject responseObj = jsonResponse.object();
|
||||
if (responseObj.contains("choices")) {
|
||||
QJsonArray choices = responseObj["choices"].toArray();
|
||||
if (!choices.isEmpty()) {
|
||||
QJsonObject choice = choices.first().toObject();
|
||||
QJsonObject delta = choice["delta"].toObject();
|
||||
if (delta.contains("content")) {
|
||||
QString completion = delta["content"].toString();
|
||||
|
||||
accumulatedResponse += completion;
|
||||
}
|
||||
if (choice["finish_reason"].toString() == "stop") {
|
||||
isComplete = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
QByteArray data = reply->readAll();
|
||||
if (data.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
return isComplete;
|
||||
|
||||
QByteArrayList chunks = data.split('\n');
|
||||
for (const QByteArray &chunk : chunks) {
|
||||
if (chunk.trimmed().isEmpty() || chunk == "data: [DONE]") {
|
||||
continue;
|
||||
}
|
||||
|
||||
QByteArray jsonData = chunk;
|
||||
if (chunk.startsWith("data: ")) {
|
||||
jsonData = chunk.mid(6);
|
||||
}
|
||||
|
||||
QJsonParseError error;
|
||||
QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
|
||||
|
||||
if (doc.isNull()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
|
||||
if (message.hasError()) {
|
||||
LOG_MESSAGE("Error in OpenAI response: " + message.error);
|
||||
continue;
|
||||
}
|
||||
|
||||
accumulatedResponse += message.getContent();
|
||||
return message.isDone();
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
QList<QString> OpenAICompatProvider::getInstalledModels(const QString &url)
|
||||
@ -146,4 +143,22 @@ QList<QString> OpenAICompatProvider::getInstalledModels(const QString &url)
|
||||
return QStringList();
|
||||
}
|
||||
|
||||
QList<QString> OpenAICompatProvider::validateRequest(
|
||||
const QJsonObject &request, LLMCore::TemplateType type)
|
||||
{
|
||||
const auto templateReq = QJsonObject{
|
||||
{"model", {}},
|
||||
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
|
||||
{"temperature", {}},
|
||||
{"max_tokens", {}},
|
||||
{"top_p", {}},
|
||||
{"top_k", {}},
|
||||
{"frequency_penalty", {}},
|
||||
{"presence_penalty", {}},
|
||||
{"stop", QJsonArray{}},
|
||||
{"stream", {}}};
|
||||
|
||||
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
|
||||
@ -36,6 +36,7 @@ public:
|
||||
void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
|
||||
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
|
||||
QList<QString> getInstalledModels(const QString &url) override;
|
||||
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
|
||||
providers/OpenRouterAIProvider.cpp (new file)
@@ -0,0 +1,126 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "OpenRouterAIProvider.hpp"
|
||||
#include "settings/ChatAssistantSettings.hpp"
|
||||
#include "settings/CodeCompletionSettings.hpp"
|
||||
|
||||
#include <QJsonArray>
|
||||
#include <QJsonDocument>
|
||||
#include <QJsonObject>
|
||||
#include <QNetworkReply>
|
||||
|
||||
#include "llmcore/OpenAIMessage.hpp"
|
||||
#include "logger/Logger.hpp"
|
||||
|
||||
namespace QodeAssist::Providers {
|
||||
|
||||
OpenRouterProvider::OpenRouterProvider() {}
|
||||
|
||||
QString OpenRouterProvider::name() const
|
||||
{
|
||||
return "OpenRouter";
|
||||
}
|
||||
|
||||
QString OpenRouterProvider::url() const
|
||||
{
|
||||
return "https://openrouter.ai/api";
|
||||
}
|
||||
|
||||
void OpenRouterProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
|
||||
{
|
||||
auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
|
||||
QJsonArray messages;
|
||||
if (req.contains("system")) {
|
||||
messages.append(
|
||||
QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
|
||||
}
|
||||
if (req.contains("prompt")) {
|
||||
messages.append(
|
||||
QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
|
||||
}
|
||||
return messages;
|
||||
};
|
||||
|
||||
auto applyModelParams = [&request](const auto &settings) {
|
||||
request["max_tokens"] = settings.maxTokens();
|
||||
request["temperature"] = settings.temperature();
|
||||
|
||||
if (settings.useTopP())
|
||||
request["top_p"] = settings.topP();
|
||||
if (settings.useTopK())
|
||||
request["top_k"] = settings.topK();
|
||||
if (settings.useFrequencyPenalty())
|
||||
request["frequency_penalty"] = settings.frequencyPenalty();
|
||||
if (settings.usePresencePenalty())
|
||||
request["presence_penalty"] = settings.presencePenalty();
|
||||
};
|
||||
|
||||
QJsonArray messages = prepareMessages(request);
|
||||
if (!messages.isEmpty()) {
|
||||
request["messages"] = std::move(messages);
|
||||
}
|
||||
|
||||
if (type == LLMCore::RequestType::CodeCompletion) {
|
||||
applyModelParams(Settings::codeCompletionSettings());
|
||||
} else {
|
||||
applyModelParams(Settings::chatAssistantSettings());
|
||||
}
|
||||
}
|
||||
|
||||
bool OpenRouterProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
|
||||
{
|
||||
QByteArray data = reply->readAll();
|
||||
if (data.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
QByteArrayList chunks = data.split('\n');
|
||||
for (const QByteArray &chunk : chunks) {
|
||||
if (chunk.trimmed().isEmpty() || chunk.contains("OPENROUTER PROCESSING")
|
||||
|| chunk == "data: [DONE]") {
|
||||
continue;
|
||||
}
|
||||
|
||||
QByteArray jsonData = chunk;
|
||||
if (chunk.startsWith("data: ")) {
|
||||
jsonData = chunk.mid(6);
|
||||
}
|
||||
|
||||
QJsonParseError error;
|
||||
QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
|
||||
|
||||
if (doc.isNull()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
|
||||
if (message.hasError()) {
|
||||
LOG_MESSAGE("Error in OpenRouter response: " + message.error);
|
||||
continue;
|
||||
}
|
||||
|
||||
accumulatedResponse += message.getContent();
|
||||
return message.isDone();
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
providers/OpenRouterAIProvider.hpp (new file)
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "llmcore/Provider.hpp"
|
||||
#include "providers/OpenAICompatProvider.hpp"
|
||||
|
||||
namespace QodeAssist::Providers {
|
||||
|
||||
class OpenRouterProvider : public OpenAICompatProvider
|
||||
{
|
||||
public:
|
||||
OpenRouterProvider();
|
||||
|
||||
QString name() const override;
|
||||
QString url() const override;
|
||||
void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
|
||||
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
|
||||
};
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
providers/Providers.hpp (new file)
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright (C) 2024 Petr Mironychev
|
||||
*
|
||||
* This file is part of QodeAssist.
|
||||
*
|
||||
* QodeAssist is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* QodeAssist is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "llmcore/ProvidersManager.hpp"
|
||||
#include "providers/LMStudioProvider.hpp"
|
||||
#include "providers/OllamaProvider.hpp"
|
||||
#include "providers/OpenAICompatProvider.hpp"
|
||||
#include "providers/OpenRouterAIProvider.hpp"
|
||||
|
||||
namespace QodeAssist::Providers {
|
||||
|
||||
inline void registerProviders()
|
||||
{
|
||||
auto &providerManager = LLMCore::ProvidersManager::instance();
|
||||
providerManager.registerProvider<OllamaProvider>();
|
||||
providerManager.registerProvider<LMStudioProvider>();
|
||||
providerManager.registerProvider<OpenAICompatProvider>();
|
||||
providerManager.registerProvider<OpenRouterProvider>();
|
||||
}
|
||||
|
||||
} // namespace QodeAssist::Providers
|
||||
@ -43,20 +43,9 @@
|
||||
#include "QodeAssistClient.hpp"
|
||||
#include "chat/ChatOutputPane.h"
|
||||
#include "chat/NavigationPanel.hpp"
|
||||
#include "llmcore/PromptTemplateManager.hpp"
|
||||
#include "llmcore/ProvidersManager.hpp"
|
||||
#include "providers/LMStudioProvider.hpp"
|
||||
#include "providers/OllamaProvider.hpp"
|
||||
#include "providers/OpenAICompatProvider.hpp"
|
||||
|
||||
#include "templates/CodeLlamaChat.hpp"
|
||||
#include "templates/CodeLlamaFim.hpp"
|
||||
#include "templates/CustomFimTemplate.hpp"
|
||||
#include "templates/DeepSeekCoderChat.hpp"
|
||||
#include "templates/DeepSeekCoderFim.hpp"
|
||||
#include "templates/Qwen.hpp"
|
||||
#include "templates/StarCoder2Fim.hpp"
|
||||
#include "templates/StarCoderChat.hpp"
|
||||
#include "providers/Providers.hpp"
|
||||
#include "templates/Templates.hpp"
|
||||
|
||||
using namespace Utils;
|
||||
using namespace Core;
|
||||
@ -83,22 +72,8 @@ public:
|
||||
|
||||
void initialize() final
|
||||
{
|
||||
auto &providerManager = LLMCore::ProvidersManager::instance();
|
||||
providerManager.registerProvider<Providers::OllamaProvider>();
|
||||
providerManager.registerProvider<Providers::LMStudioProvider>();
|
||||
providerManager.registerProvider<Providers::OpenAICompatProvider>();
|
||||
|
||||
auto &templateManager = LLMCore::PromptTemplateManager::instance();
|
||||
templateManager.registerTemplate<Templates::CodeLlamaFim>();
|
||||
templateManager.registerTemplate<Templates::StarCoder2Fim>();
|
||||
templateManager.registerTemplate<Templates::DeepSeekCoderFim>();
|
||||
templateManager.registerTemplate<Templates::CustomTemplate>();
|
||||
templateManager.registerTemplate<Templates::DeepSeekCoderChat>();
|
||||
templateManager.registerTemplate<Templates::CodeLlamaChat>();
|
||||
templateManager.registerTemplate<Templates::LlamaChat>();
|
||||
templateManager.registerTemplate<Templates::StarCoderChat>();
|
||||
templateManager.registerTemplate<Templates::QwenChat>();
|
||||
templateManager.registerTemplate<Templates::QwenFim>();
|
||||
Providers::registerProviders();
|
||||
Templates::registerTemplates();
|
||||
|
||||
Utils::Icon QCODEASSIST_ICON(
|
||||
{{":/resources/images/qoderassist-icon.png", Utils::Theme::IconsBaseColor}});
|
||||
|
||||
@ -32,7 +32,7 @@ public:
|
||||
: Utils::BaseAspect(container)
|
||||
{}
|
||||
|
||||
void addToLayout(Layouting::Layout &parent) override
|
||||
void addToLayoutImpl(Layouting::Layout &parent) override
|
||||
{
|
||||
auto button = new QPushButton(m_buttonText);
|
||||
connect(button, &QPushButton::clicked, this, &ButtonAspect::clicked);
|
||||
|
||||
@ -54,6 +54,10 @@ ChatAssistantSettings::ChatAssistantSettings()
|
||||
sharingCurrentFile.setLabelText(Tr::tr("Share Current File With Assistant by Default"));
|
||||
sharingCurrentFile.setDefaultValue(true);
|
||||
|
||||
stream.setSettingsKey(Constants::CA_STREAM);
|
||||
stream.setDefaultValue(true);
|
||||
stream.setLabelText(Tr::tr("Enable stream option"));
|
||||
|
||||
// General Parameters Settings
|
||||
temperature.setSettingsKey(Constants::CA_TEMPERATURE);
|
||||
temperature.setLabelText(Tr::tr("Temperature:"));
|
||||
@ -158,28 +162,30 @@ ChatAssistantSettings::ChatAssistantSettings()
|
||||
ollamaGrid.addRow({ollamaLivetime});
|
||||
ollamaGrid.addRow({contextWindow});
|
||||
|
||||
return Column{Row{Stretch{1}, resetToDefaults},
|
||||
Space{8},
|
||||
Group{title(Tr::tr("Chat Settings")),
|
||||
Column{Row{chatTokensThreshold, Stretch{1}}, sharingCurrentFile}},
|
||||
Space{8},
|
||||
Group{
|
||||
title(Tr::tr("General Parameters")),
|
||||
Row{genGrid, Stretch{1}},
|
||||
},
|
||||
Space{8},
|
||||
Group{title(Tr::tr("Advanced Parameters")),
|
||||
Column{Row{advancedGrid, Stretch{1}}}},
|
||||
Space{8},
|
||||
Group{title(Tr::tr("Context Settings")),
|
||||
Column{
|
||||
Row{useSystemPrompt, Stretch{1}},
|
||||
systemPrompt,
|
||||
}},
|
||||
Group{title(Tr::tr("Ollama Settings")), Column{Row{ollamaGrid, Stretch{1}}}},
|
||||
Space{8},
|
||||
Group{title(Tr::tr("API Configuration")), Column{apiKey}},
|
||||
Stretch{1}};
|
||||
return Column{
|
||||
Row{Stretch{1}, resetToDefaults},
|
||||
Space{8},
|
||||
Group{
|
||||
title(Tr::tr("Chat Settings")),
|
||||
Column{Row{chatTokensThreshold, Stretch{1}}, sharingCurrentFile, stream}},
|
||||
Space{8},
|
||||
Group{
|
||||
title(Tr::tr("General Parameters")),
|
||||
Row{genGrid, Stretch{1}},
|
||||
},
|
||||
Space{8},
|
||||
Group{title(Tr::tr("Advanced Parameters")), Column{Row{advancedGrid, Stretch{1}}}},
|
||||
Space{8},
|
||||
Group{
|
||||
title(Tr::tr("Context Settings")),
|
||||
Column{
|
||||
Row{useSystemPrompt, Stretch{1}},
|
||||
systemPrompt,
|
||||
}},
|
||||
Group{title(Tr::tr("Ollama Settings")), Column{Row{ollamaGrid, Stretch{1}}}},
|
||||
Space{8},
|
||||
Group{title(Tr::tr("API Configuration")), Column{apiKey}},
|
||||
Stretch{1}};
|
||||
});
|
||||
}
|
||||
|
||||
@@ -201,6 +207,7 @@ void ChatAssistantSettings::resetSettingsToDefaults()
QMessageBox::Yes | QMessageBox::No);

if (reply == QMessageBox::Yes) {
resetAspect(stream);
resetAspect(chatTokensThreshold);
resetAspect(temperature);
resetAspect(maxTokens);

@@ -35,6 +35,7 @@ public:
// Chat settings
Utils::IntegerAspect chatTokensThreshold{this};
Utils::BoolAspect sharingCurrentFile{this};
Utils::BoolAspect stream{this};

// General Parameters Settings
Utils::DoubleAspect temperature{this};

@@ -48,8 +48,16 @@ CodeCompletionSettings::CodeCompletionSettings()
autoCompletion.setDefaultValue(true);

multiLineCompletion.setSettingsKey(Constants::CC_MULTILINE_COMPLETION);
multiLineCompletion.setDefaultValue(false);
multiLineCompletion.setLabelText(Tr::tr("Enable Multiline Completion(experimental)"));
multiLineCompletion.setDefaultValue(true);
multiLineCompletion.setLabelText(Tr::tr("Enable Multiline Completion"));

stream.setSettingsKey(Constants::CC_STREAM);
stream.setDefaultValue(true);
stream.setLabelText(Tr::tr("Enable stream option"));

smartProcessInstuctText.setSettingsKey(Constants::CC_SMART_PROCESS_INSTRUCT_TEXT);
smartProcessInstuctText.setDefaultValue(true);
smartProcessInstuctText.setLabelText(Tr::tr("Enable smart process text from instruct model"));

startSuggestionTimer.setSettingsKey(Constants::СС_START_SUGGESTION_TIMER);
startSuggestionTimer.setLabelText(Tr::tr("with delay(ms)"));
@@ -143,9 +151,8 @@ CodeCompletionSettings::CodeCompletionSettings()

systemPrompt.setSettingsKey(Constants::CC_SYSTEM_PROMPT);
systemPrompt.setDisplayStyle(Utils::StringAspect::TextEditDisplay);
systemPrompt.setDefaultValue(
"You are an expert C++, Qt, and QML code completion AI. Your task is to provide accurate "
"and contextually appropriate code suggestions.");
systemPrompt.setDefaultValue("You are an expert C++, Qt, and QML code completion AI. Answer "
"should be ONLY in CODE and without repeating current.");

useFilePathInContext.setSettingsKey(Constants::CC_USE_FILE_PATH_IN_CONTEXT);
useFilePathInContext.setDefaultValue(true);
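Note (not part of the diff): a hedged sketch of how the new aspects above are typically read elsewhere in the plugin. The Settings::codeCompletionSettings() accessor below is an assumption modeled on the Settings::customPromptSettings() call that appears later in this comparison; value() is the standard Utils::BoolAspect getter.

// Sketch only — the accessor name is assumed, not taken from this diff.
const auto &ccSettings = Settings::codeCompletionSettings();
if (ccSettings.stream.value()) {
    // ask the provider for a streamed completion response
}
if (ccSettings.smartProcessInstuctText.value()) {
    // post-process instruct-model output before inserting it into the editor
}
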
@@ -214,31 +221,36 @@ CodeCompletionSettings::CodeCompletionSettings()
Row{useFilePathInContext, Stretch{1}},
Row{useProjectChangesCache, maxChangesCacheSize, Stretch{1}}};

return Column{Row{Stretch{1}, resetToDefaults},
Space{8},
Group{title(Tr::tr("Auto Completion Settings")),
Column{autoCompletion,
Space{8},
multiLineCompletion,
Row{autoCompletionCharThreshold,
autoCompletionTypingInterval,
startSuggestionTimer,
Stretch{1}}}},
Space{8},
Group{title(Tr::tr("General Parameters")),
Column{
Row{genGrid, Stretch{1}},
}},
Space{8},
Group{title(Tr::tr("Advanced Parameters")),
Column{Row{advancedGrid, Stretch{1}}}},
Space{8},
Group{title(Tr::tr("Context Settings")), contextItem},
Space{8},
Group{title(Tr::tr("Ollama Settings")), Column{Row{ollamaGrid, Stretch{1}}}},
Space{8},
Group{title(Tr::tr("API Configuration")), Column{apiKey}},
Stretch{1}};
return Column{
Row{Stretch{1}, resetToDefaults},
Space{8},
Group{
title(TrConstants::AUTO_COMPLETION_SETTINGS),
Column{
autoCompletion,
Space{8},
multiLineCompletion,
stream,
smartProcessInstuctText,
Row{autoCompletionCharThreshold,
autoCompletionTypingInterval,
startSuggestionTimer,
Stretch{1}}}},
Space{8},
Group{
title(Tr::tr("General Parameters")),
Column{
Row{genGrid, Stretch{1}},
}},
Space{8},
Group{title(Tr::tr("Advanced Parameters")), Column{Row{advancedGrid, Stretch{1}}}},
Space{8},
Group{title(Tr::tr("Context Settings")), contextItem},
Space{8},
Group{title(Tr::tr("Ollama Settings")), Column{Row{ollamaGrid, Stretch{1}}}},
Space{8},
Group{title(Tr::tr("API Configuration")), Column{apiKey}},
Stretch{1}};
});
}

@@ -276,6 +288,7 @@ void CodeCompletionSettings::resetSettingsToDefaults()
if (reply == QMessageBox::Yes) {
resetAspect(autoCompletion);
resetAspect(multiLineCompletion);
resetAspect(stream);
resetAspect(temperature);
resetAspect(maxTokens);
resetAspect(useTopP);

@@ -35,6 +35,8 @@ public:
// Auto Completion Settings
Utils::BoolAspect autoCompletion{this};
Utils::BoolAspect multiLineCompletion{this};
Utils::BoolAspect stream{this};
Utils::BoolAspect smartProcessInstuctText{this};

Utils::IntegerAspect startSuggestionTimer{this};
Utils::IntegerAspect autoCompletionCharThreshold{this};

@@ -66,11 +66,11 @@ GeneralSettings::GeneralSettings()
ccProvider.setReadOnly(true);
ccSelectProvider.m_buttonText = TrConstants::SELECT;

initStringAspect(ccModel, Constants::CC_MODEL, TrConstants::MODEL, "codellama:7b-code");
initStringAspect(ccModel, Constants::CC_MODEL, TrConstants::MODEL, "qwen2.5-coder:7b");
ccModel.setHistoryCompleter(Constants::CC_MODEL_HISTORY);
ccSelectModel.m_buttonText = TrConstants::SELECT;

initStringAspect(ccTemplate, Constants::CC_TEMPLATE, TrConstants::TEMPLATE, "CodeLlama FIM");
initStringAspect(ccTemplate, Constants::CC_TEMPLATE, TrConstants::TEMPLATE, "Ollama Auto FIM");
ccTemplate.setReadOnly(true);
ccSelectTemplate.m_buttonText = TrConstants::SELECT;

@@ -87,11 +87,11 @@ GeneralSettings::GeneralSettings()
caProvider.setReadOnly(true);
caSelectProvider.m_buttonText = TrConstants::SELECT;

initStringAspect(caModel, Constants::CA_MODEL, TrConstants::MODEL, "codellama:7b-instruct");
initStringAspect(caModel, Constants::CA_MODEL, TrConstants::MODEL, "qwen2.5-coder:7b");
caModel.setHistoryCompleter(Constants::CA_MODEL_HISTORY);
caSelectModel.m_buttonText = TrConstants::SELECT;

initStringAspect(caTemplate, Constants::CA_TEMPLATE, TrConstants::TEMPLATE, "CodeLlama Chat");
initStringAspect(caTemplate, Constants::CA_TEMPLATE, TrConstants::TEMPLATE, "Ollama Auto Chat");
caTemplate.setReadOnly(true);

caSelectTemplate.m_buttonText = TrConstants::SELECT;
@@ -119,14 +119,12 @@ GeneralSettings::GeneralSettings()
ccGrid.addRow({ccUrl, ccSetUrl});
ccGrid.addRow({ccModel, ccSelectModel});
ccGrid.addRow({ccTemplate, ccSelectTemplate});
ccGrid.addRow({ccStatus, ccTest});

auto caGrid = Grid{};
caGrid.addRow({caProvider, caSelectProvider});
caGrid.addRow({caUrl, caSetUrl});
caGrid.addRow({caModel, caSelectModel});
caGrid.addRow({caTemplate, caSelectTemplate});
caGrid.addRow({caStatus, caTest});

auto ccGroup = Group{title(TrConstants::CODE_COMPLETION), ccGrid};
auto caGroup = Group{title(TrConstants::CHAT_ASSISTANT), caGrid};

@@ -49,9 +49,12 @@ const char СС_AUTO_COMPLETION_CHAR_THRESHOLD[] = "QodeAssist.autoCompletionCha
const char СС_AUTO_COMPLETION_TYPING_INTERVAL[] = "QodeAssist.autoCompletionTypingInterval";
const char MAX_FILE_THRESHOLD[] = "QodeAssist.maxFileThreshold";
const char CC_MULTILINE_COMPLETION[] = "QodeAssist.ccMultilineCompletion";
const char CC_STREAM[] = "QodeAssist.ccStream";
const char CC_SMART_PROCESS_INSTRUCT_TEXT[] = "QodeAssist.ccSmartProcessInstructText";
const char CUSTOM_JSON_TEMPLATE[] = "QodeAssist.customJsonTemplate";
const char CA_TOKENS_THRESHOLD[] = "QodeAssist.caTokensThreshold";
const char CA_SHARING_CURRENT_FILE[] = "QodeAssist.caSharingCurrentFile";
const char CA_STREAM[] = "QodeAssist.caStream";

const char QODE_ASSIST_GENERAL_OPTIONS_ID[] = "QodeAssist.GeneralOptions";
const char QODE_ASSIST_GENERAL_SETTINGS_PAGE_ID[] = "QodeAssist.1GeneralSettingsPageId";

@@ -79,6 +79,8 @@ inline const char PREDEFINED_URL[]
inline const char CUSTOM_URL[] = QT_TRANSLATE_NOOP("QtC::QodeAssist", "Enter custom URL");
inline const char ENTER_MODEL_MANUALLY_BUTTON[]
= QT_TRANSLATE_NOOP("QtC::QodeAssist", "Enter Model Name Manually");
inline const char AUTO_COMPLETION_SETTINGS[]
= QT_TRANSLATE_NOOP("QtC::QodeAssist", "Auto Completion Settings");

} // namespace TrConstants

templates/Alpaca.hpp (new file, 67 lines)
@@ -0,0 +1,67 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include "llmcore/PromptTemplate.hpp"
#include <QJsonArray>

namespace QodeAssist::Templates {

class Alpaca : public LLMCore::PromptTemplate
{
public:
    QString name() const override { return "Alpaca"; }
    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
    QString promptTemplate() const override { return {}; }
    QStringList stopWords() const override
    {
        return QStringList() << "### Instruction:" << "### Response:";
    }
    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
    {
        QJsonArray messages = request["messages"].toArray();

        for (int i = 0; i < messages.size(); ++i) {
            QJsonObject message = messages[i].toObject();
            QString role = message["role"].toString();
            QString content = message["content"].toString();

            QString formattedContent;
            if (role == "system") {
                formattedContent = content + "\n\n";
            } else if (role == "user") {
                formattedContent = "### Instruction:\n" + content + "\n\n";
            } else if (role == "assistant") {
                formattedContent = "### Response:\n" + content + "\n\n";
            }

            message["content"] = formattedContent;
            messages[i] = message;
        }

        request["messages"] = messages;
    }
    QString description() const override
    {
        return "The message will contain the following tokens: ### Instruction:\n### Response:\n";
    }
};

} // namespace QodeAssist::Templates
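As a quick illustration (not part of the commit), a minimal sketch of the rewrite Alpaca::prepareRequest() performs on an OpenAI-style messages array. It assumes only the headers shown above and that LLMCore::ContextData is default-constructible; the message text is invented.

#include <QJsonArray>
#include <QJsonObject>
#include "templates/Alpaca.hpp"

// Sketch only: shows the content rewrite performed by Alpaca::prepareRequest().
void alpacaExample()
{
    using namespace QodeAssist;

    QJsonObject request;
    QJsonArray messages;
    messages.append(QJsonObject{{"role", "user"}, {"content", "Explain QObject."}});
    request["messages"] = messages;

    Templates::Alpaca tmpl;
    LLMCore::ContextData context; // unused by this chat template
    tmpl.prepareRequest(request, context);

    // The user message content is now:
    //   "### Instruction:\nExplain QObject.\n\n"
}
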
@@ -19,37 +19,22 @@

#pragma once

#include <QtCore/qjsonarray.h>
#include <QJsonArray>

#include "llmcore/PromptTemplate.hpp"

namespace QodeAssist::Templates {

class CodeLlamaChat : public LLMCore::PromptTemplate
class BasicChat : public LLMCore::PromptTemplate
{
public:
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
QString name() const override { return "CodeLlama Chat"; }
QString promptTemplate() const override { return "[INST] %1 [/INST]"; }
QStringList stopWords() const override { return QStringList() << "[INST]" << "[/INST]"; }

QString name() const override { return "Basic Chat"; }
QString promptTemplate() const override { return {}; }
QStringList stopWords() const override { return QStringList(); }
void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QString formattedPrompt = promptTemplate().arg(context.prefix);
QJsonArray messages = request["messages"].toArray();

QJsonObject newMessage;
newMessage["role"] = "user";
newMessage["content"] = formattedPrompt;
messages.append(newMessage);

request["messages"] = messages;
}
};

class LlamaChat : public CodeLlamaChat
{
public:
QString name() const override { return "Llama Chat"; }
{}
QString description() const override { return "chat without tokens"; }
};

} // namespace QodeAssist::Templates
@@ -20,35 +20,41 @@
#pragma once

#include <QJsonArray>

#include "llmcore/PromptTemplate.hpp"

namespace QodeAssist::Templates {

class DeepSeekCoderChat : public LLMCore::PromptTemplate
class ChatML : public LLMCore::PromptTemplate
{
public:
QString name() const override { return "DeepSeekCoder Chat"; }
QString name() const override { return "ChatML"; }
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }

QString promptTemplate() const override { return "### Instruction:\n%1\n### Response:\n"; }

QString promptTemplate() const override { return {}; }
QStringList stopWords() const override
{
return QStringList() << "### Instruction:" << "### Response:" << "\n\n### " << "<|EOT|>";
return QStringList() << "<|im_start|>" << "<|im_end|>";
}

void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QString formattedPrompt = promptTemplate().arg(context.prefix);
QJsonArray messages = request["messages"].toArray();

QJsonObject newMessage;
newMessage["role"] = "user";
newMessage["content"] = formattedPrompt;
messages.append(newMessage);
for (int i = 0; i < messages.size(); ++i) {
QJsonObject message = messages[i].toObject();
QString role = message["role"].toString();
QString content = message["content"].toString();

message["content"] = QString("<|im_start|>%1\n%2\n<|im_end|>").arg(role, content);

messages[i] = message;
}

request["messages"] = messages;
}
QString description() const override
{
return "The message will contain the following tokens: <|im_start|>%1\n%2\n<|im_end|>";
}
};

} // namespace QodeAssist::Templates
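For reference, the per-message rewrite ChatML::prepareRequest() applies to request["messages"] looks like this (values illustrative):

// before: {"role": "user", "content": "Hello"}
// after:  {"role": "user", "content": "<|im_start|>user\nHello\n<|im_end|>"}
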
@@ -33,12 +33,15 @@ public:
{
return QStringList() << "<EOT>" << "<PRE>" << "<SUF" << "<MID>";
}

void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QString formattedPrompt = promptTemplate().arg(context.prefix, context.suffix);
request["prompt"] = formattedPrompt;
}
QString description() const override
{
return "The message will contain the following tokens: <PRE> %1 <SUF>%2 <MID>";
}
};

} // namespace QodeAssist::Templates

@@ -39,7 +39,6 @@ public:
return Settings::customPromptSettings().customJsonTemplate();
}
QStringList stopWords() const override { return QStringList(); }

void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QJsonDocument doc = QJsonDocument::fromJson(promptTemplate().toUtf8());
@@ -56,6 +55,7 @@ public:
request[it.key()] = it.value();
}
}
QString description() const override { return promptTemplate(); }

private:
QJsonValue processJsonValue(const QJsonValue &value, const LLMCore::ContextData &context) const

@@ -38,6 +38,11 @@ public:
QString formattedPrompt = promptTemplate().arg(context.prefix, context.suffix);
request["prompt"] = formattedPrompt;
}
QString description() const override
{
return "The message will contain the following tokens: "
"<|fim▁begin|>%1<|fim▁hole|>%2<|fim▁end|>";
}
};

} // namespace QodeAssist::Templates

templates/Llama2.hpp (new file, 64 lines)
@@ -0,0 +1,64 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include "llmcore/PromptTemplate.hpp"
#include <QJsonArray>

namespace QodeAssist::Templates {

class Llama2 : public LLMCore::PromptTemplate
{
public:
    QString name() const override { return "Llama 2"; }
    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
    QString promptTemplate() const override { return {}; }
    QStringList stopWords() const override { return QStringList() << "[INST]"; }
    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
    {
        QJsonArray messages = request["messages"].toArray();

        for (int i = 0; i < messages.size(); ++i) {
            QJsonObject message = messages[i].toObject();
            QString role = message["role"].toString();
            QString content = message["content"].toString();

            QString formattedContent;
            if (role == "system") {
                formattedContent = QString("[INST]<<SYS>>\n%1\n<</SYS>>[/INST]\n").arg(content);
            } else if (role == "user") {
                formattedContent = QString("[INST]%1[/INST]\n").arg(content);
            } else if (role == "assistant") {
                formattedContent = content + "\n";
            }

            message["content"] = formattedContent;
            messages[i] = message;
        }

        request["messages"] = messages;
    }
    QString description() const override
    {
        return "The message will contain the following tokens: [INST]%1[/INST]\n";
    }
};

} // namespace QodeAssist::Templates
@@ -20,32 +20,43 @@
#pragma once

#include <QJsonArray>

#include "llmcore/PromptTemplate.hpp"

namespace QodeAssist::Templates {

class StarCoderChat : public LLMCore::PromptTemplate
class Llama3 : public LLMCore::PromptTemplate
{
public:
QString name() const override { return "StarCoder Chat"; }
QString name() const override { return "Llama 3"; }
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
QString promptTemplate() const override { return "### Instruction:\n%1\n### Response:\n"; }
QString promptTemplate() const override { return ""; }
QStringList stopWords() const override
{
return QStringList() << "###"
<< "<|endoftext|>" << "<file_sep>";
return QStringList() << "<|start_header_id|>" << "<|end_header_id|>" << "<|eot_id|>";
}
void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QString formattedPrompt = promptTemplate().arg(context.prefix);
QJsonArray messages = request["messages"].toArray();

QJsonObject newMessage;
newMessage["role"] = "user";
newMessage["content"] = formattedPrompt;
messages.append(newMessage);
for (int i = 0; i < messages.size(); ++i) {
QJsonObject message = messages[i].toObject();
QString role = message["role"].toString();
QString content = message["content"].toString();

message["content"]
= QString("<|start_header_id|>%1<|end_header_id|>%2<|eot_id|>").arg(role, content);

messages[i] = message;
}

request["messages"] = messages;
}
QString description() const override
{
return "The message will contain the following tokens: "
"<|start_header_id|>%1<|end_header_id|>%2<|eot_id|>";
}
};

} // namespace QodeAssist::Templates
templates/Ollama.hpp (new file, 65 lines)
@@ -0,0 +1,65 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include <QJsonArray>

#include "llmcore/PromptTemplate.hpp"

namespace QodeAssist::Templates {

class OllamaAutoFim : public LLMCore::PromptTemplate
{
public:
    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Fim; }
    QString name() const override { return "Ollama Auto FIM"; }
    QString promptTemplate() const override { return {}; }
    QStringList stopWords() const override { return QStringList(); }
    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
    {
        request["prompt"] = context.prefix;
        request["suffix"] = context.suffix;
    }
    QString description() const override { return "template will take from ollama modelfile"; }
};

class OllamaAutoChat : public LLMCore::PromptTemplate
{
public:
    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
    QString name() const override { return "Ollama Auto Chat"; }
    QString promptTemplate() const override { return {}; }
    QStringList stopWords() const override { return QStringList(); }

    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
    {
        QJsonArray messages = request["messages"].toArray();

        QJsonObject newMessage;
        newMessage["role"] = "user";
        newMessage["content"] = context.prefix;
        messages.append(newMessage);

        request["messages"] = messages;
    }
    QString description() const override { return "template will take from ollama modelfile"; }
};

} // namespace QodeAssist::Templates
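A short note on the two templates above: OllamaAutoFim defines no prompt template of its own and simply forwards the surrounding text, relying on the template baked into the Ollama modelfile. A hedged sketch of the resulting request body (field values illustrative; prefix/suffix follow their use elsewhere in this diff):

// Produced by OllamaAutoFim::prepareRequest():
// {
//   "prompt": "<text before the cursor>",   // context.prefix
//   "suffix": "<text after the cursor>"     // context.suffix
// }
// OllamaAutoChat instead appends {"role": "user", "content": context.prefix}
// to the "messages" array.
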
@@ -24,33 +24,6 @@

namespace QodeAssist::Templates {

class QwenChat : public LLMCore::PromptTemplate
{
public:
QString name() const override { return "Qwen Chat"; }
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }

QString promptTemplate() const override { return "### Instruction:\n%1\n### Response:\n"; }

QStringList stopWords() const override
{
return QStringList() << "### Instruction:" << "### Response:" << "\n\n### " << "<|EOT|>";
}

void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QString formattedPrompt = promptTemplate().arg(context.prefix);
QJsonArray messages = request["messages"].toArray();

QJsonObject newMessage;
newMessage["role"] = "user";
newMessage["content"] = formattedPrompt;
messages.append(newMessage);

request["messages"] = messages;
}
};

class QwenFim : public LLMCore::PromptTemplate
{
public:
@@ -66,6 +39,11 @@ public:
QString formattedPrompt = promptTemplate().arg(context.prefix, context.suffix);
request["prompt"] = formattedPrompt;
}
QString description() const override
{
return "The message will contain the following tokens: "
"<|fim_prefix|>%1<|fim_suffix|>%2<|fim_middle|>";
}
};

} // namespace QodeAssist::Templates

@@ -39,6 +39,11 @@ public:
QString formattedPrompt = promptTemplate().arg(context.prefix, context.suffix);
request["prompt"] = formattedPrompt;
}
QString description() const override
{
return "The message will contain the following tokens: "
"<fim_prefix>%1<fim_suffix>%2<fim_middle>";
}
};

} // namespace QodeAssist::Templates

templates/Templates.hpp (new file, 54 lines)
@@ -0,0 +1,54 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include "llmcore/PromptTemplateManager.hpp"
#include "templates/Alpaca.hpp"
#include "templates/BasicChat.hpp"
#include "templates/ChatML.hpp"
#include "templates/CodeLlamaFim.hpp"
#include "templates/CustomFimTemplate.hpp"
#include "templates/DeepSeekCoderFim.hpp"
#include "templates/Llama2.hpp"
#include "templates/Llama3.hpp"
#include "templates/Ollama.hpp"
#include "templates/Qwen.hpp"
#include "templates/StarCoder2Fim.hpp"

namespace QodeAssist::Templates {

inline void registerTemplates()
{
    auto &templateManager = LLMCore::PromptTemplateManager::instance();
    templateManager.registerTemplate<CodeLlamaFim>();
    templateManager.registerTemplate<StarCoder2Fim>();
    templateManager.registerTemplate<DeepSeekCoderFim>();
    templateManager.registerTemplate<CustomTemplate>();
    templateManager.registerTemplate<QwenFim>();
    templateManager.registerTemplate<OllamaAutoFim>();
    templateManager.registerTemplate<OllamaAutoChat>();
    templateManager.registerTemplate<BasicChat>();
    templateManager.registerTemplate<Llama3>();
    templateManager.registerTemplate<ChatML>();
    templateManager.registerTemplate<Alpaca>();
    templateManager.registerTemplate<Llama2>();
}

} // namespace QodeAssist::Templates
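To tie this header back to the plugin change at the top of the comparison, a minimal sketch of the intended call site. The helper name and the Providers.hpp include path are assumptions; the two register calls are taken verbatim from the plugin hunk above.

#include "providers/Providers.hpp" // assumed header for Providers::registerProviders()
#include "templates/Templates.hpp"

namespace QodeAssist {

// Sketch only: one-shot registration at plugin startup, replacing the long list of
// individual templateManager.registerTemplate<...>() calls removed in this commit.
inline void registerLlmComponents()
{
    Providers::registerProviders();
    Templates::registerTemplates();
}

} // namespace QodeAssist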