diff --git a/src/api/providers/__tests__/lm-studio-timeout.spec.ts b/src/api/providers/__tests__/lm-studio-timeout.spec.ts index 659fcaaf670..0e0687c905f 100644 --- a/src/api/providers/__tests__/lm-studio-timeout.spec.ts +++ b/src/api/providers/__tests__/lm-studio-timeout.spec.ts @@ -47,7 +47,7 @@ describe("LmStudioHandler timeout configuration", () => { expect(getApiRequestTimeout).toHaveBeenCalled() expect(mockOpenAIConstructor).toHaveBeenCalledWith( expect.objectContaining({ - baseURL: "http://localhost:1234/v1", + baseURL: "http://localhost:1234/api/v1", apiKey: "noop", timeout: 600000, // 600 seconds in milliseconds }), diff --git a/src/api/providers/fetchers/__tests__/lmstudio.test.ts b/src/api/providers/fetchers/__tests__/lmstudio.test.ts index a1f06d2e251..e3787d374af 100644 --- a/src/api/providers/fetchers/__tests__/lmstudio.test.ts +++ b/src/api/providers/fetchers/__tests__/lmstudio.test.ts @@ -113,7 +113,7 @@ describe("LMStudio Fetcher", () => { const result = await getLMStudioModels(baseUrl) expect(mockedAxios.get).toHaveBeenCalledTimes(1) - expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`) + expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/v1/models`) expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1) expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl }) expect(mockListDownloadedModels).toHaveBeenCalledTimes(1) @@ -133,7 +133,7 @@ describe("LMStudio Fetcher", () => { const result = await getLMStudioModels(baseUrl) expect(mockedAxios.get).toHaveBeenCalledTimes(1) - expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`) + expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/v1/models`) expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1) expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl }) expect(mockListDownloadedModels).toHaveBeenCalledTimes(1) @@ -373,7 +373,7 @@ describe("LMStudio Fetcher", () => { await 
getLMStudioModels("") - expect(mockedAxios.get).toHaveBeenCalledWith(`${defaultBaseUrl}/v1/models`) + expect(mockedAxios.get).toHaveBeenCalledWith(`${defaultBaseUrl}/api/v1/models`) expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: defaultLmsUrl }) }) @@ -385,7 +385,7 @@ describe("LMStudio Fetcher", () => { await getLMStudioModels(httpsBaseUrl) - expect(mockedAxios.get).toHaveBeenCalledWith(`${httpsBaseUrl}/v1/models`) + expect(mockedAxios.get).toHaveBeenCalledWith(`${httpsBaseUrl}/api/v1/models`) expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: wssLmsUrl }) }) @@ -407,7 +407,7 @@ describe("LMStudio Fetcher", () => { const result = await getLMStudioModels(baseUrl) expect(mockedAxios.get).toHaveBeenCalledTimes(1) - expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`) + expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/v1/models`) expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled() expect(mockListLoaded).not.toHaveBeenCalled() expect(consoleErrorSpy).toHaveBeenCalledWith( @@ -426,7 +426,7 @@ describe("LMStudio Fetcher", () => { const result = await getLMStudioModels(baseUrl) expect(mockedAxios.get).toHaveBeenCalledTimes(1) - expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`) + expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/v1/models`) expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled() expect(mockListLoaded).not.toHaveBeenCalled() expect(consoleInfoSpy).toHaveBeenCalledWith(`Error connecting to LMStudio at ${baseUrl}`) diff --git a/src/api/providers/fetchers/lmstudio.ts b/src/api/providers/fetchers/lmstudio.ts index 73cb60e88e3..93901a51f72 100644 --- a/src/api/providers/fetchers/lmstudio.ts +++ b/src/api/providers/fetchers/lmstudio.ts @@ -13,7 +13,7 @@ export const forceFullModelDetailsLoad = async (baseUrl: string, modelId: string try { // Test the connection to LM Studio first - // Crrors will be caught further down. + // Errors will be caught further down. 
- await axios.get(`${baseUrl}/v1/models`) + await axios.get(`${baseUrl}/api/v1/models`) const lmsUrl = baseUrl.replace(/^http:\/\//, "ws://").replace(/^https:\/\//, "wss://") const client = new LMStudioClient({ baseUrl: lmsUrl }) @@ -66,7 +66,7 @@ export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Prom // test the connection to LM Studio first // errors will be caught further down - await axios.get(`${baseUrl}/v1/models`) + await axios.get(`${baseUrl}/api/v1/models`) const client = new LMStudioClient({ baseUrl: lmsUrl }) diff --git a/src/api/providers/lm-studio.ts b/src/api/providers/lm-studio.ts index a771394c535..18aa065ffff 100644 --- a/src/api/providers/lm-studio.ts +++ b/src/api/providers/lm-studio.ts @@ -31,7 +31,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan const apiKey = "noop" this.client = new OpenAI({ - baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1", + baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/api/v1", apiKey: apiKey, timeout: getApiRequestTimeout(), }) @@ -221,7 +221,7 @@ export async function getLmStudioModels(baseUrl = "http://localhost:1234") { return [] } - const response = await axios.get(`${baseUrl}/v1/models`) + const response = await axios.get(`${baseUrl}/api/v1/models`) const modelsArray = response.data?.data?.map((model: any) => model.id) || [] return [...new Set(modelsArray)] } catch (error) {