From 33c0cd1378fcd3ddf84fce369ea1f82434d5f318 Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Fri, 24 Apr 2026 08:16:37 +0100 Subject: [PATCH] fix: improve codex model discovery --- CHANGELOG.md | 1 + extensions/codex/provider.test.ts | 77 ++++++++++- extensions/codex/provider.ts | 40 ++++-- .../codex/src/app-server/models.test.ts | 130 ++++++++++++++++++ extensions/codex/src/app-server/models.ts | 67 +++++++-- extensions/codex/src/command-formatters.ts | 8 +- extensions/codex/src/command-handlers.ts | 6 +- extensions/codex/src/commands.test.ts | 21 +++ extensions/openai/default-models.ts | 2 +- .../openai/openai-codex-provider.test.ts | 2 +- extensions/openai/openai-provider.ts | 2 +- extensions/zalo/src/monitor.lifecycle.test.ts | 2 +- .../reply/agent-runner-execution.test.ts | 4 +- .../reply/agent-runner-execution.ts | 2 +- src/commands/models/auth.test.ts | 13 +- src/plugins/provider-model-defaults.ts | 2 +- .../helpers/plugins/provider-auth-contract.ts | 4 +- 17 files changed, 343 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71c2b78f53d..9bde0f38b0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ Docs: https://docs.openclaw.ai - Voice-call/Telnyx: preserve inbound/outbound callback metadata and read transcription text from Telnyx's current `transcription_data` payload. - Codex harness: send verbose tool progress to chat channels for native app-server runs, matching the Pi harness `/verbose on` and `/verbose full` behavior. (#70966) Thanks @jalehman. +- Codex models: fetch paginated Codex app-server model catalogs, mark truncated `/codex models` output, and keep ChatGPT OAuth defaults on the `openai-codex/gpt-5.5` route instead of the OpenAI API-key route. - Codex harness: route native `request_user_input` prompts back to the originating chat, preserve queued follow-up answers, and honor newer app-server command approval amendment decisions. - Codex status: report Codex CLI OAuth as `oauth (codex-cli)` for native `codex/*` sessions instead of showing unknown auth. Fixes #70688. Thanks @jb510. - Codex harness/context-engine: redact context-engine assembly failures before logging, so fallback warnings do not serialize raw error objects. (#70809) Thanks @jalehman. 
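[Reviewer note] The discovery change below pages through the app-server `model/list` cursor until it is exhausted, with a page cap as a safety valve. A minimal TypeScript sketch of that paging pattern, assuming illustrative names (`Page`, `fetchPage`, `collectAllPages`) that are not part of the Codex app-server API:

    // Generic cursor-paging loop: keep fetching while the server returns
    // a nextCursor, and stop at a page cap so a buggy or hostile server
    // cannot keep us looping forever.
    type Page<T> = { items: T[]; nextCursor?: string };

    async function collectAllPages<T>(
      fetchPage: (cursor?: string) => Promise<Page<T>>,
      maxPages = 20, // assumption: mirrors the default cap in normalizeMaxPages below
    ): Promise<{ items: T[]; truncated: boolean }> {
      const items: T[] = [];
      let cursor: string | undefined;
      for (let page = 0; page < maxPages; page += 1) {
        const result = await fetchPage(cursor);
        items.push(...result.items);
        if (!result.nextCursor) {
          return { items, truncated: false };
        }
        cursor = result.nextCursor;
      }
      return { items, truncated: true }; // cap hit; more pages may remain
    }
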
diff --git a/extensions/codex/provider.test.ts b/extensions/codex/provider.test.ts index 71571cab11b..1b4698fb8a6 100644 --- a/extensions/codex/provider.test.ts +++ b/extensions/codex/provider.test.ts @@ -137,6 +137,66 @@ describe("codex provider", () => { }); }); + it("pages through live discovery before building the provider catalog", async () => { + const listModels = vi + .fn() + .mockResolvedValueOnce({ + models: [ + { + id: "gpt-5.4", + model: "gpt-5.4", + hidden: false, + inputModalities: ["text", "image"], + supportedReasoningEfforts: ["medium"], + }, + ], + nextCursor: "page-2", + }) + .mockResolvedValueOnce({ + models: [ + { + id: "gpt-5.2", + model: "gpt-5.2", + hidden: false, + inputModalities: ["text"], + supportedReasoningEfforts: [], + }, + ], + }); + + const result = await buildCodexProviderCatalog({ + env: {}, + listModels, + }); + + expect(listModels).toHaveBeenNthCalledWith( + 1, + expect.objectContaining({ cursor: undefined, limit: 100, sharedClient: false }), + ); + expect(listModels).toHaveBeenNthCalledWith( + 2, + expect.objectContaining({ cursor: "page-2", limit: 100, sharedClient: false }), + ); + expect(result.provider.models.map((model) => model.id)).toEqual(["gpt-5.4", "gpt-5.2"]); + }); + + it("reports discovery failures before using the fallback catalog", async () => { + const error = new Error("app-server down"); + const onDiscoveryFailure = vi.fn(); + const listModels = vi.fn(async () => { + throw error; + }); + + const result = await buildCodexProviderCatalog({ + env: {}, + listModels, + onDiscoveryFailure, + }); + + expect(onDiscoveryFailure).toHaveBeenCalledWith(error); + expectStaticFallbackCatalog(result); + }); + it("keeps a static fallback catalog when live discovery is explicitly disabled by env", async () => { const listModels = vi.fn(); @@ -176,7 +236,7 @@ describe("codex provider", () => { expect(discoveryClient.close).toHaveBeenCalledTimes(1); }); - it("resolves arbitrary Codex app-server model ids through the codex provider", () => { + it("resolves arbitrary Codex app-server model ids as text-only until discovered", () => { const provider = buildCodexProvider(); const model = provider.resolveDynamicModel?.({ @@ -190,6 +250,21 @@ describe("codex provider", () => { provider: "codex", api: "openai-codex-responses", baseUrl: "https://chatgpt.com/backend-api", + input: ["text"], + }); + }); + + it("keeps fallback Codex app-server models image-capable", () => { + const provider = buildCodexProvider(); + + const model = provider.resolveDynamicModel?.({ + provider: "codex", + modelId: "gpt-5.5", + modelRegistry: { find: () => null }, + } as never); + + expect(model).toMatchObject({ + id: "gpt-5.5", input: ["text", "image"], }); }); diff --git a/extensions/codex/provider.ts b/extensions/codex/provider.ts index 1c4cf0ee9a8..4c4ec743df8 100644 --- a/extensions/codex/provider.ts +++ b/extensions/codex/provider.ts @@ -1,4 +1,5 @@ import { resolvePluginConfigObject } from "openclaw/plugin-sdk/config-runtime"; +import { createSubsystemLogger } from "openclaw/plugin-sdk/core"; import type { ProviderRuntimeModel } from "openclaw/plugin-sdk/plugin-entry"; import { normalizeModelCompat, @@ -26,10 +27,13 @@ import type { const DEFAULT_DISCOVERY_TIMEOUT_MS = 2500; const LIVE_DISCOVERY_ENV = "OPENCLAW_CODEX_DISCOVERY_LIVE"; +const MODEL_DISCOVERY_PAGE_LIMIT = 100; +const codexCatalogLog = createSubsystemLogger("codex/catalog"); type CodexModelLister = (options: { timeoutMs: number; limit?: number; + cursor?: string; startOptions?: CodexAppServerStartOptions; 
  sharedClient?: boolean;
}) => Promise<CodexAppServerModelListResult>;

@@ -43,6 +47,7 @@ type BuildCatalogOptions = {
   env?: NodeJS.ProcessEnv;
   pluginConfig?: unknown;
   listModels?: CodexModelLister;
+  onDiscoveryFailure?: (error: unknown) => void;
 };
 
 export function buildCodexProvider(options: BuildCodexProviderOptions = {}): ProviderPlugin {
@@ -103,6 +108,7 @@ export async function buildCodexProviderCatalog(
       listModels: options.listModels ?? listCodexAppServerModelsLazy,
       timeoutMs,
       startOptions: appServer.start,
+      onDiscoveryFailure: options.onDiscoveryFailure,
     });
   }
   return {
@@ -115,12 +121,15 @@ function resolveCodexDynamicModel(modelId: string) {
   if (!id) {
     return undefined;
   }
+  const fallbackModel = FALLBACK_CODEX_MODELS.find((model) => model.id === id);
   return normalizeModelCompat({
     ...buildCodexModelDefinition({
       id,
       model: id,
-      inputModalities: ["text", "image"],
-      supportedReasoningEfforts: shouldDefaultToReasoningModel(id) ? ["medium"] : [],
+      inputModalities: fallbackModel?.inputModalities ?? ["text"],
+      supportedReasoningEfforts:
+        fallbackModel?.supportedReasoningEfforts ??
+        (shouldDefaultToReasoningModel(id) ? ["medium"] : []),
     }),
     provider: CODEX_PROVIDER_ID,
     baseUrl: CODEX_BASE_URL,
@@ -131,16 +140,28 @@ async function listModelsBestEffort(params: {
   listModels: CodexModelLister;
   timeoutMs: number;
   startOptions: CodexAppServerStartOptions;
+  onDiscoveryFailure?: (error: unknown) => void;
 }): Promise<CodexAppServerModel[]> {
   try {
-    const result = await params.listModels({
-      timeoutMs: params.timeoutMs,
-      limit: 100,
-      startOptions: params.startOptions,
-      sharedClient: false,
+    const models: CodexAppServerModel[] = [];
+    let cursor: string | undefined;
+    do {
+      const result = await params.listModels({
+        timeoutMs: params.timeoutMs,
+        limit: MODEL_DISCOVERY_PAGE_LIMIT,
+        cursor,
+        startOptions: params.startOptions,
+        sharedClient: false,
+      });
+      models.push(...result.models.filter((model) => !model.hidden));
+      cursor = result.nextCursor;
+    } while (cursor);
+    return models;
+  } catch (error) {
+    params.onDiscoveryFailure?.(error);
+    codexCatalogLog.debug("codex model discovery failed; using fallback catalog", {
+      error: error instanceof Error ?
error.message : String(error),
     });
-    return result.models.filter((model) => !model.hidden);
-  } catch {
     return [];
   }
 }
@@ -148,6 +169,7 @@ async function listModelsBestEffort(params: {
 async function listCodexAppServerModelsLazy(options: {
   timeoutMs: number;
   limit?: number;
+  cursor?: string;
   startOptions?: CodexAppServerStartOptions;
   sharedClient?: boolean;
 }): Promise<CodexAppServerModelListResult> {
diff --git a/extensions/codex/src/app-server/models.test.ts b/extensions/codex/src/app-server/models.test.ts
index 2e80806757f..a9a18127b15 100644
--- a/extensions/codex/src/app-server/models.test.ts
+++ b/extensions/codex/src/app-server/models.test.ts
@@ -21,11 +21,13 @@ vi.mock("openclaw/plugin-sdk/provider-auth", () => ({
 }));
 
 let listCodexAppServerModels: typeof import("./models.js").listCodexAppServerModels;
+let listAllCodexAppServerModels: typeof import("./models.js").listAllCodexAppServerModels;
 let resetSharedCodexAppServerClientForTests: typeof import("./shared-client.js").resetSharedCodexAppServerClientForTests;
 
 describe("listCodexAppServerModels", () => {
   beforeAll(async () => {
     ({ listCodexAppServerModels } = await import("./models.js"));
+    ({ listAllCodexAppServerModels } = await import("./models.js"));
     ({ resetSharedCodexAppServerClientForTests } = await import("./shared-client.js"));
   });
 
@@ -97,4 +99,132 @@ describe("listCodexAppServerModels", () => {
     harness.client.close();
     startSpy.mockRestore();
   });
+
+  it("lists all app-server model pages through one client", async () => {
+    const harness = createClientHarness();
+    const startSpy = vi.spyOn(CodexAppServerClient, "start").mockReturnValue(harness.client);
+
+    const listPromise = listAllCodexAppServerModels({ limit: 1, timeoutMs: 1000 });
+    await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(1));
+    const initialize = JSON.parse(harness.writes[0] ?? "{}") as { id?: number };
+    harness.send({
+      id: initialize.id,
+      result: { userAgent: "openclaw/0.118.0 (macOS; test)" },
+    });
+    await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(3));
+    const firstList = JSON.parse(harness.writes[2] ?? "{}") as {
+      id?: number;
+      params?: { cursor?: string | null };
+    };
+    expect(firstList.params?.cursor).toBeNull();
+
+    harness.send({
+      id: firstList.id,
+      result: {
+        data: [
+          {
+            id: "gpt-5.4",
+            model: "gpt-5.4",
+            upgrade: null,
+            upgradeInfo: null,
+            availabilityNux: null,
+            displayName: "gpt-5.4",
+            description: "GPT-5.4",
+            hidden: false,
+            inputModalities: ["text"],
+            supportedReasoningEfforts: [],
+            defaultReasoningEffort: "medium",
+            supportsPersonality: false,
+            additionalSpeedTiers: [],
+            isDefault: false,
+          },
+        ],
+        nextCursor: "page-2",
+      },
+    });
+    await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(4));
+    const secondList = JSON.parse(harness.writes[3] ??
"{}") as { + id?: number; + params?: { cursor?: string | null }; + }; + expect(secondList.params?.cursor).toBe("page-2"); + + harness.send({ + id: secondList.id, + result: { + data: [ + { + id: "gpt-5.2", + model: "gpt-5.2", + upgrade: null, + upgradeInfo: null, + availabilityNux: null, + displayName: "gpt-5.2", + description: "GPT-5.2", + hidden: false, + inputModalities: ["text", "image"], + supportedReasoningEfforts: [], + defaultReasoningEffort: "medium", + supportsPersonality: false, + additionalSpeedTiers: [], + isDefault: false, + }, + ], + nextCursor: null, + }, + }); + + await expect(listPromise).resolves.toMatchObject({ + models: [{ id: "gpt-5.4" }, { id: "gpt-5.2" }], + }); + harness.client.close(); + startSpy.mockRestore(); + }); + + it("marks all-model listing truncated after the page cap", async () => { + const harness = createClientHarness(); + const startSpy = vi.spyOn(CodexAppServerClient, "start").mockReturnValue(harness.client); + + const listPromise = listAllCodexAppServerModels({ limit: 1, timeoutMs: 1000, maxPages: 1 }); + await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(1)); + const initialize = JSON.parse(harness.writes[0] ?? "{}") as { id?: number }; + harness.send({ + id: initialize.id, + result: { userAgent: "openclaw/0.118.0 (macOS; test)" }, + }); + await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(3)); + const firstList = JSON.parse(harness.writes[2] ?? "{}") as { id?: number }; + harness.send({ + id: firstList.id, + result: { + data: [ + { + id: "gpt-5.4", + model: "gpt-5.4", + upgrade: null, + upgradeInfo: null, + availabilityNux: null, + displayName: "gpt-5.4", + description: "GPT-5.4", + hidden: false, + inputModalities: ["text"], + supportedReasoningEfforts: [], + defaultReasoningEffort: "medium", + supportsPersonality: false, + additionalSpeedTiers: [], + isDefault: false, + }, + ], + nextCursor: "page-2", + }, + }); + + await expect(listPromise).resolves.toMatchObject({ + models: [{ id: "gpt-5.4" }], + nextCursor: "page-2", + truncated: true, + }); + harness.client.close(); + startSpy.mockRestore(); + }); }); diff --git a/extensions/codex/src/app-server/models.ts b/extensions/codex/src/app-server/models.ts index b1ce271bd72..a3a5d40d14c 100644 --- a/extensions/codex/src/app-server/models.ts +++ b/extensions/codex/src/app-server/models.ts @@ -1,3 +1,4 @@ +import type { CodexAppServerClient } from "./client.js"; import type { CodexAppServerStartOptions } from "./config.js"; import type { v2 } from "./protocol-generated/typescript/index.js"; import { readCodexModelListResponse } from "./protocol-validators.js"; @@ -17,6 +18,7 @@ export type CodexAppServerModel = { export type CodexAppServerModelListResult = { models: CodexAppServerModel[]; nextCursor?: string; + truncated?: boolean; }; export type CodexAppServerListModelsOptions = { @@ -32,6 +34,40 @@ export type CodexAppServerListModelsOptions = { export async function listCodexAppServerModels( options: CodexAppServerListModelsOptions = {}, ): Promise { + return await withCodexAppServerModelClient(options, async ({ client, timeoutMs }) => + requestModelListPage(client, { ...options, timeoutMs }), + ); +} + +export async function listAllCodexAppServerModels( + options: CodexAppServerListModelsOptions & { maxPages?: number } = {}, +): Promise { + const maxPages = normalizeMaxPages(options.maxPages); + return await withCodexAppServerModelClient(options, async ({ client, timeoutMs }) => { + const models: CodexAppServerModel[] = []; + let cursor = 
options.cursor;
+    let nextCursor: string | undefined;
+    for (let page = 0; page < maxPages; page += 1) {
+      const result = await requestModelListPage(client, {
+        ...options,
+        timeoutMs,
+        cursor,
+      });
+      models.push(...result.models);
+      nextCursor = result.nextCursor;
+      if (!nextCursor) {
+        return { models };
+      }
+      cursor = nextCursor;
+    }
+    return { models, nextCursor, truncated: true };
+  });
+}
+
+async function withCodexAppServerModelClient<T>(
+  options: CodexAppServerListModelsOptions,
+  run: (params: { client: CodexAppServerClient; timeoutMs: number }) => Promise<T>,
+): Promise<T> {
   const timeoutMs = options.timeoutMs ?? 2500;
   const useSharedClient = options.sharedClient !== false;
   const { createIsolatedCodexAppServerClient, getSharedCodexAppServerClient } =
@@ -48,16 +84,7 @@ export async function listCodexAppServerModels(
     authProfileId: options.authProfileId,
   });
   try {
-    const response = await client.request(
-      "model/list",
-      {
-        limit: options.limit ?? null,
-        cursor: options.cursor ?? null,
-        includeHidden: options.includeHidden ?? null,
-      },
-      { timeoutMs },
-    );
-    return readModelListResult(response);
+    return await run({ client, timeoutMs });
   } finally {
     if (!useSharedClient) {
       client.close();
@@ -65,6 +92,22 @@
   }
 }
 
+async function requestModelListPage(
+  client: CodexAppServerClient,
+  options: CodexAppServerListModelsOptions & { timeoutMs: number },
+): Promise<CodexAppServerModelListResult> {
+  const response = await client.request(
+    "model/list",
+    {
+      limit: options.limit ?? null,
+      cursor: options.cursor ?? null,
+      includeHidden: options.includeHidden ?? null,
+    },
+    { timeoutMs: options.timeoutMs },
+  );
+  return readModelListResult(response);
+}
+
 export function readModelListResult(value: unknown): CodexAppServerModelListResult {
   const response = readCodexModelListResponse(value);
   if (!response) {
@@ -116,3 +159,7 @@ function readNonEmptyString(value: unknown): string | undefined {
   const trimmed = value.trim();
   return trimmed || undefined;
 }
+
+function normalizeMaxPages(value: unknown): number {
+  return typeof value === "number" && Number.isFinite(value) && value > 0 ? Math.floor(value) : 20;
+}
diff --git a/extensions/codex/src/command-formatters.ts b/extensions/codex/src/command-formatters.ts
index b2f2715601c..d84c3d52336 100644
--- a/extensions/codex/src/command-formatters.ts
+++ b/extensions/codex/src/command-formatters.ts
@@ -45,10 +45,14 @@ export function formatModels(result: CodexAppServerModelListResult): string {
   if (result.models.length === 0) {
     return "No Codex app-server models returned.";
   }
-  return [
+  const lines = [
     "Codex models:",
     ...result.models.map((model) => `- ${model.id}${model.isDefault ?
" (default)" : ""}`), - ].join("\n"); + ]; + if (result.truncated) { + lines.push("- More models available; output truncated."); + } + return lines.join("\n"); } export function formatThreads(response: JsonValue | undefined): string { diff --git a/extensions/codex/src/command-handlers.ts b/extensions/codex/src/command-handlers.ts index 30d134c1677..59278456826 100644 --- a/extensions/codex/src/command-handlers.ts +++ b/extensions/codex/src/command-handlers.ts @@ -1,6 +1,6 @@ import type { PluginCommandContext, PluginCommandResult } from "openclaw/plugin-sdk/plugin-entry"; import { CODEX_CONTROL_METHODS, type CodexControlMethod } from "./app-server/capabilities.js"; -import { listCodexAppServerModels } from "./app-server/models.js"; +import { listAllCodexAppServerModels } from "./app-server/models.js"; import { isJsonObject, type JsonValue } from "./app-server/protocol.js"; import { clearCodexAppServerBinding, @@ -42,7 +42,7 @@ import { export type CodexCommandDeps = { codexControlRequest: CodexControlRequestFn; - listCodexAppServerModels: typeof listCodexAppServerModels; + listCodexAppServerModels: typeof listAllCodexAppServerModels; readCodexStatusProbes: typeof readCodexStatusProbes; readCodexAppServerBinding: typeof readCodexAppServerBinding; requestOptions: typeof requestOptions; @@ -73,7 +73,7 @@ type SafeCodexControlRequestFn = ( const defaultCodexCommandDeps: CodexCommandDeps = { codexControlRequest, - listCodexAppServerModels, + listCodexAppServerModels: listAllCodexAppServerModels, readCodexStatusProbes, readCodexAppServerBinding, requestOptions, diff --git a/extensions/codex/src/commands.test.ts b/extensions/codex/src/commands.test.ts index 27b10aaca0b..f8ec23bd407 100644 --- a/extensions/codex/src/commands.test.ts +++ b/extensions/codex/src/commands.test.ts @@ -112,6 +112,27 @@ describe("codex command", () => { }); }); + it("shows when Codex app-server model output is truncated", async () => { + const deps = createDeps({ + listCodexAppServerModels: vi.fn(async () => ({ + models: [ + { + id: "gpt-5.4", + model: "gpt-5.4", + inputModalities: ["text"], + supportedReasoningEfforts: ["medium"], + }, + ], + nextCursor: "page-2", + truncated: true, + })), + }); + + await expect(handleCodexCommand(createContext("models"), { deps })).resolves.toEqual({ + text: "Codex models:\n- gpt-5.4\n- More models available; output truncated.", + }); + }); + it("reports status unavailable when every Codex probe fails", async () => { const offline = { ok: false as const, error: "offline" }; const deps = createDeps({ diff --git a/extensions/openai/default-models.ts b/extensions/openai/default-models.ts index e4157a3caf3..7034c2053c0 100644 --- a/extensions/openai/default-models.ts +++ b/extensions/openai/default-models.ts @@ -5,7 +5,7 @@ import { } from "openclaw/plugin-sdk/provider-onboard"; export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5"; -export const OPENAI_CODEX_DEFAULT_MODEL = "openai/gpt-5.5"; +export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5"; export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2"; export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts"; export const OPENAI_DEFAULT_TTS_VOICE = "alloy"; diff --git a/extensions/openai/openai-codex-provider.test.ts b/extensions/openai/openai-codex-provider.test.ts index 0ffac736c59..bab32e65eaa 100644 --- a/extensions/openai/openai-codex-provider.test.ts +++ b/extensions/openai/openai-codex-provider.test.ts @@ -224,7 +224,7 @@ describe("openai codex provider", () => { }, }, ], - defaultModel: "openai/gpt-5.5", + defaultModel: 
"openai-codex/gpt-5.5", }); expect(result?.profiles[0]?.credential).not.toHaveProperty("idToken"); expect(result?.profiles[0]?.credential).not.toHaveProperty("accountId"); diff --git a/extensions/openai/openai-provider.ts b/extensions/openai/openai-provider.ts index 2db1b79fa82..0595777ea7c 100644 --- a/extensions/openai/openai-provider.ts +++ b/extensions/openai/openai-provider.ts @@ -258,7 +258,7 @@ export function buildOpenAIProvider(): ProviderPlugin { if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) { return undefined; } - return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai/gpt-5.5 with the Codex OAuth profile, or set OPENAI_API_KEY for direct OpenAI API access.'; + return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5, or set OPENAI_API_KEY for direct OpenAI API access.'; }, suppressBuiltInModel: (ctx) => { if ( diff --git a/extensions/zalo/src/monitor.lifecycle.test.ts b/extensions/zalo/src/monitor.lifecycle.test.ts index 8c5bab952d7..7d17b9d9cc8 100644 --- a/extensions/zalo/src/monitor.lifecycle.test.ts +++ b/extensions/zalo/src/monitor.lifecycle.test.ts @@ -147,7 +147,7 @@ describe("monitorZaloProvider lifecycle", () => { settled = true; }); - await vi.waitFor(() => expect(setWebhookMock).toHaveBeenCalledTimes(1)); + await vi.waitFor(() => expect(setWebhookMock).toHaveBeenCalledTimes(1), { timeout: 5_000 }); expect(registry.httpRoutes).toHaveLength(2); abort.abort(); diff --git a/src/auto-reply/reply/agent-runner-execution.test.ts b/src/auto-reply/reply/agent-runner-execution.test.ts index 677c66574ca..d2189196317 100644 --- a/src/auto-reply/reply/agent-runner-execution.test.ts +++ b/src/auto-reply/reply/agent-runner-execution.test.ts @@ -1669,7 +1669,7 @@ describe("runAgentTurnWithFallback", () => { it("surfaces direct provider auth guidance for missing API keys", async () => { state.runEmbeddedPiAgentMock.mockRejectedValueOnce( new Error( - 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai/gpt-5.5 with the Codex OAuth profile, or set OPENAI_API_KEY for direct OpenAI API access. | No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai/gpt-5.5 with the Codex OAuth profile, or set OPENAI_API_KEY for direct OpenAI API access.', + 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5, or set OPENAI_API_KEY for direct OpenAI API access. | No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5, or set OPENAI_API_KEY for direct OpenAI API access.', ), ); @@ -1701,7 +1701,7 @@ describe("runAgentTurnWithFallback", () => { expect(result.kind).toBe("final"); if (result.kind === "final") { expect(result.payload.text).toBe( - "⚠️ Missing API key for OpenAI on the gateway. Use `openai/gpt-5.5` with the Codex OAuth profile, or set `OPENAI_API_KEY`, then try again.", + "⚠️ Missing API key for OpenAI on the gateway. 
Use `openai-codex/gpt-5.5`, or set `OPENAI_API_KEY`, then try again.", ); } }); diff --git a/src/auto-reply/reply/agent-runner-execution.ts b/src/auto-reply/reply/agent-runner-execution.ts index 4f54d83dee4..13b27eefea8 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -357,7 +357,7 @@ function buildMissingApiKeyFailureText(message: string): string | null { return null; } if (provider === "openai" && normalizedMessage.includes("OpenAI Codex OAuth")) { - return "⚠️ Missing API key for OpenAI on the gateway. Use `openai/gpt-5.5` with the Codex OAuth profile, or set `OPENAI_API_KEY`, then try again."; + return "⚠️ Missing API key for OpenAI on the gateway. Use `openai-codex/gpt-5.5`, or set `OPENAI_API_KEY`, then try again."; } if (SAFE_MISSING_API_KEY_PROVIDERS.has(provider)) { return `⚠️ Missing API key for provider "${provider}". Configure the gateway auth for that provider, then try again.`; diff --git a/src/commands/models/auth.test.ts b/src/commands/models/auth.test.ts index 08663a19acd..173ddba7c63 100644 --- a/src/commands/models/auth.test.ts +++ b/src/commands/models/auth.test.ts @@ -298,7 +298,7 @@ describe("modelsAuthLoginCommand", () => { }, }, ], - defaultModel: "openai/gpt-5.5", + defaultModel: "openai-codex/gpt-5.5", }); mocks.resolvePluginProviders.mockReturnValue([ createProvider({ @@ -365,7 +365,7 @@ describe("modelsAuthLoginCommand", () => { "Auth profile: openai-codex:user@example.com (openai-codex/oauth)", ); expect(runtime.log).toHaveBeenCalledWith( - "Default model available: openai/gpt-5.5 (use --set-default to apply)", + "Default model available: openai-codex/gpt-5.5 (use --set-default to apply)", ); expect(runtime.log).toHaveBeenCalledWith( "Tip: Codex-capable models can use native Codex web search. Enable it with openclaw configure --section web (recommended mode: cached). 
Docs: https://docs.openclaw.ai/tools/web", @@ -602,13 +602,16 @@ describe("modelsAuthLoginCommand", () => { }, }, ], - configPatch: { agents: { defaults: { models: { "openai/gpt-5.5": {} } } } }, - defaultModel: "openai/gpt-5.5", + configPatch: { agents: { defaults: { models: { "openai-codex/gpt-5.5": {} } } } }, + defaultModel: "openai-codex/gpt-5.5", }); await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime); - expect(lastUpdatedConfig?.agents?.defaults?.models).toEqual(existingModels); + expect(lastUpdatedConfig?.agents?.defaults?.models).toEqual({ + ...existingModels, + "openai-codex/gpt-5.5": {}, + }); }); it("survives lockout clearing failure without blocking login", async () => { diff --git a/src/plugins/provider-model-defaults.ts b/src/plugins/provider-model-defaults.ts index 744b12f61c0..8cecb0b4ea5 100644 --- a/src/plugins/provider-model-defaults.ts +++ b/src/plugins/provider-model-defaults.ts @@ -7,7 +7,7 @@ import { ensureModelAllowlistEntry } from "./provider-model-allowlist.js"; import { applyAgentDefaultPrimaryModel } from "./provider-model-primary.js"; export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5"; -export const OPENAI_CODEX_DEFAULT_MODEL = "openai/gpt-5.5"; +export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5"; export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2"; export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts"; export const OPENAI_DEFAULT_TTS_VOICE = "alloy"; diff --git a/test/helpers/plugins/provider-auth-contract.ts b/test/helpers/plugins/provider-auth-contract.ts index 209c918b8d0..666c9e7715c 100644 --- a/test/helpers/plugins/provider-auth-contract.ts +++ b/test/helpers/plugins/provider-auth-contract.ts @@ -119,12 +119,12 @@ function buildOpenAICodexOAuthResult(params: { agents: { defaults: { models: { - "openai/gpt-5.5": {}, + "openai-codex/gpt-5.5": {}, }, }, }, }, - defaultModel: "openai/gpt-5.5", + defaultModel: "openai-codex/gpt-5.5", notes: undefined, }; }
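[Reviewer note] A hypothetical usage sketch for the APIs this patch introduces; the import paths and option values are illustrative assumptions, not prescribed by the patch:

    import { listAllCodexAppServerModels } from "./app-server/models.js"; // assumed path
    import { formatModels } from "./command-formatters.js"; // assumed path

    // Fetch every model page (up to the page cap) over one isolated
    // app-server client, then render the /codex models listing.
    async function printCodexModels(): Promise<void> {
      const result = await listAllCodexAppServerModels({
        limit: 100,      // page size per model/list request
        maxPages: 5,     // stop after five pages; truncated is set if more remain
        timeoutMs: 2500, // matches DEFAULT_DISCOVERY_TIMEOUT_MS in provider.ts
        sharedClient: false,
      });
      // When the cap is hit with a nextCursor still pending, formatModels
      // appends "- More models available; output truncated."
      console.log(formatModels(result));
    }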