Mirror of https://github.com/Palm1r/QodeAssist.git
✨ feat: Add support for using an instruct model in code completion
* ✨ feat: Add MessageBuilder for code completion
* ✨ feat: Add moving of text from the request into comments
* ✨ feat: Add settings for processing instruct-model text
* 🐛 fix: Add stop to the Ollama request validator
* 🐛 fix: Fix template double delete
```diff
@@ -94,7 +94,7 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
         request["messages"] = std::move(messages);
     }
 
-    if (type == LLMCore::RequestType::Fim) {
+    if (type == LLMCore::RequestType::CodeCompletion) {
         applyModelParams(Settings::codeCompletionSettings());
     } else {
         applyModelParams(Settings::chatAssistantSettings());
```
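All four provider hunks here make the same rename: the completion request type changes from `Fim` (fill-in-the-middle) to `CodeCompletion`. The enum itself is not part of this diff, so the following is a purely hypothetical reconstruction of how `LLMCore::RequestType` might look after the commit:

```cpp
namespace LLMCore {

// Hypothetical sketch: only the two values visible in the hunks are listed;
// the real enum in QodeAssist may define more.
enum class RequestType {
    CodeCompletion, // formerly Fim (fill-in-the-middle)
    Chat
};

} // namespace LLMCore
```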
```diff
@@ -66,6 +66,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
         QJsonObject options;
         options["num_predict"] = settings.maxTokens();
         options["temperature"] = settings.temperature();
+        options["stop"] = request.take("stop");
 
         if (settings.useTopP())
             options["top_p"] = settings.topP();
```
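The added line uses `QJsonObject::take()`, which removes the key from the object and returns its value, so the top-level `stop` field is moved (not copied) into Ollama's nested `options` object. A small self-contained illustration; the stop sequences here are made up:

```cpp
#include <QJsonArray>
#include <QJsonObject>

int main()
{
    QJsonObject request;
    request["stop"] = QJsonArray{"\n\n", "</s>"}; // hypothetical stop sequences

    QJsonObject options;
    // take() removes "stop" from request and returns its value, so the
    // sequences end up only under "options", where the Ollama API reads them.
    options["stop"] = request.take("stop");
    request["options"] = options;

    return request.contains("stop") ? 1 : 0; // returns 0: the move succeeded
}
```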
```diff
@@ -80,7 +81,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
         request["keep_alive"] = settings.ollamaLivetime();
     };
 
-    if (type == LLMCore::RequestType::Fim) {
+    if (type == LLMCore::RequestType::CodeCompletion) {
         applySettings(Settings::codeCompletionSettings());
     } else {
         applySettings(Settings::chatAssistantSettings());
@@ -150,6 +151,7 @@ QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCo
        {"options",
         QJsonObject{
             {"temperature", {}},
+            {"stop", {}},
             {"top_p", {}},
             {"top_k", {}},
             {"num_predict", {}},
@@ -164,6 +166,7 @@ QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCo
        {"options",
         QJsonObject{
             {"temperature", {}},
+            {"stop", {}},
             {"top_p", {}},
             {"top_k", {}},
             {"num_predict", {}},
```
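Both validator templates gain a `stop` entry so that requests carrying the relocated stop sequences no longer trip validation. The diff does not show how `validateRequest` consumes these templates; the sketch below assumes it reports any request field missing from the template, and every name in it is illustrative rather than the project's actual API:

```cpp
#include <QJsonObject>
#include <QList>
#include <QString>

// Hypothetical helper: collect request keys that the template does not allow.
QList<QString> unknownKeys(const QJsonObject &request, const QJsonObject &tmpl)
{
    QList<QString> unknown;
    for (auto it = request.constBegin(); it != request.constEnd(); ++it) {
        if (!tmpl.contains(it.key())) {
            unknown << it.key();
        } else if (it.value().isObject() && tmpl.value(it.key()).isObject()) {
            // Recurse into nested objects such as "options", where the new
            // "stop" entry lives.
            unknown << unknownKeys(it.value().toObject(),
                                   tmpl.value(it.key()).toObject());
        }
    }
    return unknown;
}
```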
```diff
@@ -93,7 +93,7 @@ void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::Request
         request["messages"] = std::move(messages);
     }
 
-    if (type == LLMCore::RequestType::Fim) {
+    if (type == LLMCore::RequestType::CodeCompletion) {
         applyModelParams(Settings::codeCompletionSettings());
     } else {
         applyModelParams(Settings::chatAssistantSettings());
```
```diff
@@ -77,7 +77,7 @@ void OpenRouterProvider::prepareRequest(QJsonObject &request, LLMCore::RequestTy
         request["messages"] = std::move(messages);
     }
 
-    if (type == LLMCore::RequestType::Fim) {
+    if (type == LLMCore::RequestType::CodeCompletion) {
         applyModelParams(Settings::codeCompletionSettings());
     } else {
         applyModelParams(Settings::chatAssistantSettings());
```
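Note where the stop sequences live in each payload: the OpenAI-style providers keep `stop` at the top level of the request, as the OpenAI API defines it, while Ollama expects it nested under `options`; that difference is what the `request.take("stop")` move above accounts for. A hedged side-by-side sketch, with made-up field values:

```cpp
#include <QJsonArray>
#include <QJsonObject>

int main()
{
    const QJsonArray stops{"\n\n"}; // hypothetical stop sequence

    // OpenAI-compatible payload: "stop" stays at the top level.
    QJsonObject openAiStyle;
    openAiStyle["stop"] = stops;

    // Ollama payload: "stop" sits inside the nested "options" object.
    QJsonObject ollamaOptions;
    ollamaOptions["stop"] = stops;
    QJsonObject ollamaStyle;
    ollamaStyle["options"] = ollamaOptions;

    return 0;
}
```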