Refactor llm providers to use internal http client (#227)

* refactor: Move http client into provider (see the sketch after the change stats below)

* refactor: Rework Ollama provider to work with internal http client

* refactor: Rework LM Studio provider to work with internal http client

* refactor: Rework Mistral AI to work with internal http client

* fix: Move url and headers into QNetworkRequest

* refactor: Rework Google provider to use internal http client

* refactor: Switch OpenAI-compatible providers to the internal http client

* fix: Remove m_requestHandler from tests

* refactor: Remove old handleData method

* fix: Remove LLMClientInterfaceTest
Petr Mironychev
2025-09-03 10:56:05 +02:00
committed by GitHub
parent 5969d530bd
commit 76309be0a6
34 changed files with 1144 additions and 909 deletions
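
The common thread in the provider reworks above is that each provider now owns its HTTP transport and streams results back keyed by a request id, instead of routing everything through a shared RequestHandler. Below is a minimal sketch of that shape in Qt; the class name, signal names, and the Ollama-style endpoint URL are hypothetical stand-ins for illustration, not the actual QodeAssist API.

```cpp
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QObject>
#include <QUrl>

// Hypothetical provider that owns its own HTTP client. The real
// QodeAssist providers implement the richer LLMCore::Provider API.
class SketchProvider : public QObject
{
    Q_OBJECT
public:
    explicit SketchProvider(QObject *parent = nullptr)
        : QObject(parent)
        , m_network(new QNetworkAccessManager(this))
    {}

    void sendRequest(const QString &requestId, const QJsonObject &payload)
    {
        // URL and headers travel inside the QNetworkRequest itself,
        // mirroring the "url and headers into QNetworkRequest" fix above.
        QNetworkRequest request(QUrl(QStringLiteral("http://localhost:11434/api/chat")));
        request.setHeader(QNetworkRequest::ContentTypeHeader,
                          QStringLiteral("application/json"));

        QNetworkReply *reply = m_network->post(request, QJsonDocument(payload).toJson());

        connect(reply, &QNetworkReply::readyRead, this, [this, reply, requestId] {
            // Streaming backends deliver the answer chunk by chunk.
            emit partialResponse(requestId, QString::fromUtf8(reply->readAll()));
        });
        connect(reply, &QNetworkReply::finished, this, [this, reply, requestId] {
            if (reply->error() != QNetworkReply::NoError)
                emit requestFailed(requestId, reply->errorString());
            else
                emit fullResponse(requestId);
            reply->deleteLater();
        });
    }

signals:
    void partialResponse(const QString &requestId, const QString &partialText);
    void fullResponse(const QString &requestId);
    void requestFailed(const QString &requestId, const QString &error);

private:
    QNetworkAccessManager *m_network;
};
```

A caller would connect these per-request signals to slots like the ones declared in the header diff below.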

@@ -24,7 +24,7 @@
#include <QVector>
#include "ChatModel.hpp"
#include "RequestHandler.hpp"
#include "Provider.hpp"
#include "llmcore/IPromptProvider.hpp"
#include <context/ContextManager.hpp>
@@ -52,16 +52,29 @@ signals:
    void errorOccurred(const QString &error);
    void messageReceivedCompletely();

private slots:
    void handlePartialResponse(const QString &requestId, const QString &partialText);
    void handleFullResponse(const QString &requestId, const QString &fullText);
    void handleRequestFailed(const QString &requestId, const QString &error);

private:
    void handleLLMResponse(const QString &response, const QJsonObject &request, bool isComplete);
    QString getCurrentFileContext() const;
    QString getSystemPromptWithLinkedFiles(
        const QString &basePrompt, const QList<QString> &linkedFiles) const;

    struct RequestContext
    {
        QJsonObject originalRequest;
        LLMCore::Provider *provider;
    };

    LLMCore::IPromptProvider *m_promptProvider = nullptr;
    ChatModel *m_chatModel;
    LLMCore::RequestHandler *m_requestHandler;
    Context::ContextManager *m_contextManager;
    QHash<QString, RequestContext> m_activeRequests;
    QHash<QString, QString> m_accumulatedResponses;
};
} // namespace QodeAssist::Chat
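
Taken together, m_activeRequests remembers which provider serves each in-flight request while m_accumulatedResponses buffers streamed chunks per request id. The slot bodies below are a plausible sketch of that bookkeeping, matching the declarations in the header above; the class name ChatClient is a stand-in, and this is not the actual implementation from this commit.

```cpp
// Hypothetical slot bodies for the declarations shown in the header
// diff above; the real .cpp in this commit may differ in detail.
void ChatClient::handlePartialResponse(const QString &requestId, const QString &partialText)
{
    if (!m_activeRequests.contains(requestId))
        return; // stale or already-cancelled request

    // Grow the per-request buffer; the real code would also push the
    // partial text into m_chatModel so the UI updates while streaming.
    m_accumulatedResponses[requestId] += partialText;
}

void ChatClient::handleFullResponse(const QString &requestId, const QString &fullText)
{
    Q_UNUSED(fullText)
    m_accumulatedResponses.remove(requestId);
    m_activeRequests.remove(requestId);
    emit messageReceivedCompletely();
}

void ChatClient::handleRequestFailed(const QString &requestId, const QString &error)
{
    m_accumulatedResponses.remove(requestId);
    m_activeRequests.remove(requestId);
    emit errorOccurred(error);
}
```

Keying both hashes on the same request id keeps cancellation cheap: removing the two entries orphans any late chunks, which handlePartialResponse then ignores.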