Refactor LLM providers to use internal HTTP client (#227)

* refactor: Move http client into provider

* refactor: Rework Ollama provider to work with internal http client

* refactor: Rework LM Studio provider to work with internal http client

* refactor: Rework Mistral AI to work with internal http client

* fix: Replace separate URL and header arguments with QNetworkRequest

* refactor: Rework Google provider to use internal http client

* refactor: Switch OpenAI-compatible providers to the internal http client

* fix: Remove m_requestHandler from tests

* refactor: Remove old handleData method

* fix: Remove LLMClientInterfaceTest
Author: Petr Mironychev
Date: 2025-09-03 10:56:05 +02:00 (committed by GitHub)
Parent: 5969d530bd
Commit: 76309be0a6
34 changed files with 1144 additions and 909 deletions


@@ -58,57 +58,6 @@ bool LMStudioProvider::supportsModelListing() const
    return true;
}

bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();
    if (data.isEmpty()) {
        return false;
    }

    bool isDone = false;
    QByteArrayList lines = data.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }

        if (line == "data: [DONE]") {
            isDone = true;
            continue;
        }

        QByteArray jsonData = line;
        if (line.startsWith("data: ")) {
            jsonData = line.mid(6);
        }

        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
        if (doc.isNull()) {
            continue;
        }

        auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
        if (message.hasError()) {
            LOG_MESSAGE("Error in OpenAI response: " + message.error);
            continue;
        }

        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
        }

        if (message.isDone()) {
            isDone = true;
        }
    }
    return isDone;
}
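
For context, the loop above (and its replacement further down) consumes OpenAI-compatible streaming output, which LM Studio emits as newline-separated "data: " events. A minimal illustration of one network chunk, with the JSON field values invented for the example:

    // Illustrative chunk only; exact fields depend on the LM Studio build.
    const QByteArray chunk =
        "data: {\"choices\":[{\"delta\":{\"content\":\"Hel\"}}]}\n"
        "data: {\"choices\":[{\"delta\":{\"content\":\"lo\"},\"finish_reason\":\"stop\"}]}\n"
        "data: [DONE]\n";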
QList<QString> LMStudioProvider::getInstalledModels(const QString &url)
{
    QList<QString> models;
@@ -173,6 +122,94 @@ LLMCore::ProviderID LMStudioProvider::providerID() const
    return LLMCore::ProviderID::LMStudio;
}
void LMStudioProvider::sendRequest(
    const QString &requestId, const QUrl &url, const QJsonObject &payload)
{
    QNetworkRequest networkRequest(url);
    prepareNetworkRequest(networkRequest);

    LLMCore::HttpRequest
        request{.networkRequest = networkRequest, .requestId = requestId, .payload = payload};

    LOG_MESSAGE(
        QString("LMStudioProvider: Sending request %1 to %2").arg(requestId, url.toString()));

    emit httpClient()->sendRequest(request);
}
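
From this usage, the internal client appears to take a plain aggregate and communicate over signals. A minimal sketch of what the LLMCore side might declare, with field and signal names inferred from this file rather than taken from the actual header (which lives elsewhere in the diff):

    // Hypothetical reconstruction -- inferred from usage, not the real declaration.
    namespace LLMCore {

    struct HttpRequest
    {
        QNetworkRequest networkRequest; // URL and headers prepared by the provider
        QString requestId;              // correlates streamed chunks with the caller
        QJsonObject payload;            // JSON body for the POST
    };

    class HttpClient : public QObject
    {
        Q_OBJECT
    signals:
        void sendRequest(const HttpRequest &request);
        void dataReceived(const QString &requestId, const QByteArray &data);
        void requestFinished(const QString &requestId, bool success, const QString &error);
    };

    } // namespace LLMCore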
void LMStudioProvider::onDataReceived(const QString &requestId, const QByteArray &data)
{
    QString &accumulatedResponse = m_accumulatedResponses[requestId];
    if (data.isEmpty()) {
        return;
    }

    bool isDone = false;
    QByteArrayList lines = data.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }

        if (line == "data: [DONE]") {
            isDone = true;
            continue;
        }

        QByteArray jsonData = line;
        if (line.startsWith("data: ")) {
            jsonData = line.mid(6);
        }

        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
        if (doc.isNull()) {
            continue;
        }

        auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
        if (message.hasError()) {
            LOG_MESSAGE("Error in LMStudio response: " + message.error);
            continue;
        }

        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
            emit partialResponseReceived(requestId, content);
        }

        if (message.isDone()) {
            isDone = true;
        }
    }

    if (isDone) {
        emit fullResponseReceived(requestId, accumulatedResponse);
        m_accumulatedResponses.remove(requestId);
    }
}
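
A hedged sketch of how a consumer might observe the two signals emitted above; the provider setup is elided and the lambda bodies are placeholders, not code from this commit:

    // Illustrative consumer wiring -- handler bodies are assumptions.
    QObject::connect(provider, &LMStudioProvider::partialResponseReceived,
                     [](const QString &requestId, const QString &chunk) {
                         // append chunk to the streaming view for requestId
                     });
    QObject::connect(provider, &LMStudioProvider::fullResponseReceived,
                     [](const QString &requestId, const QString &text) {
                         // finalize the completed response
                     });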
void LMStudioProvider::onRequestFinished(const QString &requestId, bool success, const QString &error)
{
    if (!success) {
        LOG_MESSAGE(QString("LMStudioProvider request %1 failed: %2").arg(requestId, error));
        emit requestFailed(requestId, error);
    } else {
        if (m_accumulatedResponses.contains(requestId)) {
            const QString fullResponse = m_accumulatedResponses[requestId];
            if (!fullResponse.isEmpty()) {
                emit fullResponseReceived(requestId, fullResponse);
            }
        }
    }
    m_accumulatedResponses.remove(requestId);
}
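
These slots presumably hang off the shared client's signals; a sketch of the connections a provider (or its base class) might make, with HttpClient signal names inferred from the handlers rather than confirmed by this hunk:

    // Assumed wiring -- signal names are inferred, not shown in this diff.
    connect(httpClient(), &LLMCore::HttpClient::dataReceived,
            this, &LMStudioProvider::onDataReceived);
    connect(httpClient(), &LLMCore::HttpClient::requestFinished,
            this, &LMStudioProvider::onRequestFinished);

Note that onDataReceived removes a request's accumulated text once it emits fullResponseReceived, so the flush in onRequestFinished only fires for streams that ended without a [DONE] marker.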
void QodeAssist::Providers::LMStudioProvider::prepareRequest(
    QJsonObject &request,
    LLMCore::PromptTemplate *prompt,