Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,14 @@ vitest.mock("../utils/timeout-config", () => ({
getApiRequestTimeout: vitest.fn(),
}))

// Mock the undici-fetch utility
const mockFetchFn = vitest.fn()
vitest.mock("../utils/undici-fetch", () => ({
createFetchWithUndiciTimeout: vitest.fn(() => mockFetchFn),
}))

import { getApiRequestTimeout } from "../utils/timeout-config"
import { createFetchWithUndiciTimeout } from "../utils/undici-fetch"

// Mock OpenAI and capture constructor calls
const mockOpenAIConstructor = vitest.fn()
Expand Down Expand Up @@ -65,6 +72,7 @@ describe("BaseOpenAiCompatibleProvider Timeout Configuration", () => {
new TestOpenAiCompatibleProvider("test-api-key")

expect(getApiRequestTimeout).toHaveBeenCalled()
expect(createFetchWithUndiciTimeout).toHaveBeenCalled()
})

it("should pass the default timeout to the OpenAI client constructor", () => {
Expand All @@ -77,6 +85,7 @@ describe("BaseOpenAiCompatibleProvider Timeout Configuration", () => {
baseURL: "https://test.example.com/v1",
apiKey: "test-api-key",
timeout: 600000,
fetch: mockFetchFn,
}),
)
})
Expand Down
9 changes: 9 additions & 0 deletions src/api/providers/__tests__/lm-studio-timeout.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,14 @@ vitest.mock("../utils/timeout-config", () => ({
getApiRequestTimeout: vitest.fn(),
}))

// Mock the undici-fetch utility
const mockFetchFn = vitest.fn()
vitest.mock("../utils/undici-fetch", () => ({
createFetchWithUndiciTimeout: vitest.fn(() => mockFetchFn),
}))

import { getApiRequestTimeout } from "../utils/timeout-config"
import { createFetchWithUndiciTimeout } from "../utils/undici-fetch"

// Mock OpenAI
const mockOpenAIConstructor = vitest.fn()
Expand Down Expand Up @@ -45,11 +52,13 @@ describe("LmStudioHandler timeout configuration", () => {
new LmStudioHandler(options)

expect(getApiRequestTimeout).toHaveBeenCalled()
expect(createFetchWithUndiciTimeout).toHaveBeenCalled()
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
expect.objectContaining({
baseURL: "http://localhost:1234/v1",
apiKey: "noop",
timeout: 600000, // 600 seconds in milliseconds
fetch: mockFetchFn,
}),
)
})
Expand Down
10 changes: 10 additions & 0 deletions src/api/providers/__tests__/openai-timeout.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,14 @@ vitest.mock("../utils/timeout-config", () => ({
getApiRequestTimeout: vitest.fn(),
}))

// Mock the undici-fetch utility
const mockFetchFn = vitest.fn()
vitest.mock("../utils/undici-fetch", () => ({
createFetchWithUndiciTimeout: vitest.fn(() => mockFetchFn),
}))

import { getApiRequestTimeout } from "../utils/timeout-config"
import { createFetchWithUndiciTimeout } from "../utils/undici-fetch"

// Mock OpenAI and AzureOpenAI
const mockOpenAIConstructor = vitest.fn()
Expand Down Expand Up @@ -57,11 +64,13 @@ describe("OpenAiHandler timeout configuration", () => {
new OpenAiHandler(options)

expect(getApiRequestTimeout).toHaveBeenCalled()
expect(createFetchWithUndiciTimeout).toHaveBeenCalled()
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
expect.objectContaining({
baseURL: "https://api.openai.com/v1",
apiKey: "test-key",
timeout: 600000, // 600 seconds in milliseconds
fetch: mockFetchFn,
}),
)
})
Expand Down Expand Up @@ -102,6 +111,7 @@ describe("OpenAiHandler timeout configuration", () => {
expect(mockAzureOpenAIConstructor).toHaveBeenCalledWith(
expect.objectContaining({
timeout: 900000, // 900 seconds in milliseconds
fetch: mockFetchFn,
}),
)
})
Expand Down
2 changes: 2 additions & 0 deletions src/api/providers/base-openai-compatible-provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import { BaseProvider } from "./base-provider"
import { handleOpenAIError } from "./utils/openai-error-handler"
import { calculateApiCostOpenAI } from "../../shared/cost"
import { getApiRequestTimeout } from "./utils/timeout-config"
import { createFetchWithUndiciTimeout } from "./utils/undici-fetch"

type BaseOpenAiCompatibleProviderOptions<ModelName extends string> = ApiHandlerOptions & {
providerName: string
Expand Down Expand Up @@ -64,6 +65,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
apiKey: this.options.apiKey,
defaultHeaders: DEFAULT_HEADERS,
timeout: getApiRequestTimeout(),
fetch: createFetchWithUndiciTimeout(),
})
}

Expand Down
2 changes: 2 additions & 0 deletions src/api/providers/lm-studio.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ import { BaseProvider } from "./base-provider"
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
import { getModelsFromCache } from "./fetchers/modelCache"
import { getApiRequestTimeout } from "./utils/timeout-config"
import { createFetchWithUndiciTimeout } from "./utils/undici-fetch"
import { handleOpenAIError } from "./utils/openai-error-handler"

export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
Expand All @@ -34,6 +35,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
apiKey: apiKey,
timeout: getApiRequestTimeout(),
fetch: createFetchWithUndiciTimeout(),
})
}

Expand Down
5 changes: 5 additions & 0 deletions src/api/providers/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import { DEFAULT_HEADERS } from "./constants"
import { BaseProvider } from "./base-provider"
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
import { getApiRequestTimeout } from "./utils/timeout-config"
import { createFetchWithUndiciTimeout } from "./utils/undici-fetch"
import { handleOpenAIError } from "./utils/openai-error-handler"

// TODO: Rename this to OpenAICompatibleHandler. Also, I think the
Expand All @@ -49,6 +50,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
}

const timeout = getApiRequestTimeout()
const fetchWithTimeout = createFetchWithUndiciTimeout()

if (isAzureAiInference) {
// Azure AI Inference Service (e.g., for DeepSeek) uses a different path structure
Expand All @@ -58,6 +60,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
defaultHeaders: headers,
defaultQuery: { "api-version": this.options.azureApiVersion || "2024-05-01-preview" },
timeout,
fetch: fetchWithTimeout,
})
} else if (isAzureOpenAi) {
// Azure API shape slightly differs from the core API shape:
Expand All @@ -68,13 +71,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
defaultHeaders: headers,
timeout,
fetch: fetchWithTimeout,
})
} else {
this.client = new OpenAI({
baseURL,
apiKey,
defaultHeaders: headers,
timeout,
fetch: fetchWithTimeout,
})
}
}
Expand Down
95 changes: 95 additions & 0 deletions src/api/providers/utils/__tests__/undici-fetch.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
// npx vitest run api/providers/utils/__tests__/undici-fetch.spec.ts

// Mock vscode before any imports that depend on it
vitest.mock("vscode", () => ({
	workspace: {
		// timeout-config reads the "apiRequestTimeout" setting via
		// workspace.getConfiguration(...).get(...); stub the chain so each
		// test can control the configured timeout value.
		getConfiguration: vitest.fn().mockReturnValue({
			get: vitest.fn(),
		}),
	},
}))

// Mock undici Agent
// Shared sentinel object returned by the mocked Agent constructor; it records
// the last headersTimeout/bodyTimeout it was constructed with so tests can
// both inspect the values and assert the dispatcher identity on fetch calls.
const mockAgent = { headersTimeout: 0, bodyTimeout: 0 }
vitest.mock("undici", () => ({
	// Capture the constructor options instead of building a real connection
	// pool — no network activity should happen in these tests.
	Agent: vitest.fn().mockImplementation((opts) => {
		mockAgent.headersTimeout = opts.headersTimeout
		mockAgent.bodyTimeout = opts.bodyTimeout
		return mockAgent
	}),
	// Resolve every request immediately with a trivial 200 response.
	fetch: vitest.fn().mockResolvedValue(new Response("ok")),
}))

import { Agent, fetch as undiciFetch } from "undici"
import * as vscode from "vscode"

import { createFetchWithUndiciTimeout } from "../undici-fetch"

describe("createFetchWithUndiciTimeout", () => {
	// Fresh per-test mock for vscode configuration's `get`, so each test can
	// dictate the configured apiRequestTimeout (in seconds).
	let configGet: any

	beforeEach(() => {
		vitest.clearAllMocks()
		configGet = vitest.fn()
		;(vscode.workspace.getConfiguration as any).mockReturnValue({ get: configGet })
	})

	it("should create an Undici Agent with headersTimeout and bodyTimeout matching the configured timeout", () => {
		// 600 seconds configured → 600000 ms on the Agent.
		configGet.mockReturnValue(600)

		createFetchWithUndiciTimeout()

		expect(Agent).toHaveBeenCalledWith({ headersTimeout: 600000, bodyTimeout: 600000 })
	})

	it("should set Agent timeouts to 0 (no timeout) when apiRequestTimeout is disabled", () => {
		configGet.mockReturnValue(0) // disabled

		createFetchWithUndiciTimeout()

		// getApiRequestTimeout returns undefined for 0/negative, and undici-fetch maps undefined to 0
		expect(Agent).toHaveBeenCalledWith({ headersTimeout: 0, bodyTimeout: 0 })
	})

	it("should return a function with the same signature as fetch", () => {
		configGet.mockReturnValue(600)

		expect(typeof createFetchWithUndiciTimeout()).toBe("function")
	})

	it("should call undici fetch with the custom dispatcher when invoked", async () => {
		configGet.mockReturnValue(600)

		const wrappedFetch = createFetchWithUndiciTimeout()
		await wrappedFetch("https://example.com/api", { method: "POST" })

		// The wrapper must forward the caller's init and inject the Agent
		// it built as the dispatcher.
		expect(undiciFetch).toHaveBeenCalledWith(
			"https://example.com/api",
			expect.objectContaining({ method: "POST", dispatcher: mockAgent }),
		)
	})

	it("should use custom timeout values", () => {
		configGet.mockReturnValue(1200) // 20 minutes

		createFetchWithUndiciTimeout()

		expect(Agent).toHaveBeenCalledWith({ headersTimeout: 1200000, bodyTimeout: 1200000 })
	})
})
48 changes: 48 additions & 0 deletions src/api/providers/utils/undici-fetch.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import { Agent, fetch as undiciFetch } from "undici"

import { getApiRequestTimeout } from "./timeout-config"

/**
* Default Undici headersTimeout is 300 seconds (5 minutes), which can cause
* premature HeadersTimeoutError for slow providers (e.g. local models via
* LM Studio, Ollama) even when the SDK-level timeout is set higher.
*
* This utility creates a custom `fetch` function backed by an Undici `Agent`
* whose `headersTimeout` and `bodyTimeout` are aligned with the user's
* configured `apiRequestTimeout`. This ensures the transport layer won't
* abort the connection before the SDK-level timeout fires.
*
* @see https://github.com/RooCodeInc/Roo-Code/issues/12244
*/

/**
* Builds a `fetch` function that uses an Undici Agent with headersTimeout
* and bodyTimeout set to match the configured API request timeout.
*
* The returned function has the same signature as the global `fetch` and
* can be passed directly to the OpenAI SDK's `fetch` constructor option.
*/
export function createFetchWithUndiciTimeout(): typeof globalThis.fetch {
const timeoutMs = getApiRequestTimeout()

// When timeout is undefined (user disabled it), use 0 which means
// "no timeout" in Undici.
const agentTimeout = timeoutMs ?? 0

const agent = new Agent({
headersTimeout: agentTimeout,
bodyTimeout: agentTimeout,
})

// Return a fetch wrapper that injects the custom dispatcher.
// The OpenAI SDK expects the standard fetch signature.
return ((input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
return undiciFetch(
input as any,
{
...init,
dispatcher: agent,
} as any,
) as unknown as Promise<Response>
}) as typeof globalThis.fetch
}
Loading