fix: improve codex model discovery

Peter Steinberger
2026-04-24 08:16:37 +01:00
parent 81666e586a
commit 33c0cd1378
17 changed files with 343 additions and 40 deletions

View File

@@ -31,6 +31,7 @@ Docs: https://docs.openclaw.ai
- Voice-call/Telnyx: preserve inbound/outbound callback metadata and read transcription text from Telnyx's current `transcription_data` payload.
- Codex harness: send verbose tool progress to chat channels for native app-server runs, matching the Pi harness `/verbose on` and `/verbose full` behavior. (#70966) Thanks @jalehman.
- Codex models: fetch paginated Codex app-server model catalogs, mark truncated `/codex models` output, and keep ChatGPT OAuth defaults on the `openai-codex/gpt-5.5` route instead of the OpenAI API-key route.
- Codex harness: route native `request_user_input` prompts back to the originating chat, preserve queued follow-up answers, and honor newer app-server command approval amendment decisions.
- Codex status: report Codex CLI OAuth as `oauth (codex-cli)` for native `codex/*` sessions instead of showing unknown auth. Fixes #70688. Thanks @jb510.
- Codex harness/context-engine: redact context-engine assembly failures before logging, so fallback warnings do not serialize raw error objects. (#70809) Thanks @jalehman.
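
The paging behavior called out in the "Codex models" entry above reduces to one contract: request a page, then follow `nextCursor` until the app server stops returning one. A minimal sketch of that loop, assuming only the option and result shapes this commit introduces (the import path mirrors the command module later in this diff):

    import { listCodexAppServerModels, type CodexAppServerModel } from "./app-server/models.js";

    // Follow nextCursor until discovery is exhausted; hidden-model filtering
    // and page caps are handled by the shipped helpers, not this sketch.
    async function collectCodexModels(): Promise<CodexAppServerModel[]> {
      const models: CodexAppServerModel[] = [];
      let cursor: string | undefined;
      do {
        const page = await listCodexAppServerModels({ limit: 100, cursor });
        models.push(...page.models);
        cursor = page.nextCursor;
      } while (cursor);
      return models;
    }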

View File

@@ -137,6 +137,66 @@ describe("codex provider", () => {
});
});
it("pages through live discovery before building the provider catalog", async () => {
const listModels = vi
.fn()
.mockResolvedValueOnce({
models: [
{
id: "gpt-5.4",
model: "gpt-5.4",
hidden: false,
inputModalities: ["text", "image"],
supportedReasoningEfforts: ["medium"],
},
],
nextCursor: "page-2",
})
.mockResolvedValueOnce({
models: [
{
id: "gpt-5.2",
model: "gpt-5.2",
hidden: false,
inputModalities: ["text"],
supportedReasoningEfforts: [],
},
],
});
const result = await buildCodexProviderCatalog({
env: {},
listModels,
});
expect(listModels).toHaveBeenNthCalledWith(
1,
expect.objectContaining({ cursor: undefined, limit: 100, sharedClient: false }),
);
expect(listModels).toHaveBeenNthCalledWith(
2,
expect.objectContaining({ cursor: "page-2", limit: 100, sharedClient: false }),
);
expect(result.provider.models.map((model) => model.id)).toEqual(["gpt-5.4", "gpt-5.2"]);
});
it("reports discovery failures before using the fallback catalog", async () => {
const error = new Error("app-server down");
const onDiscoveryFailure = vi.fn();
const listModels = vi.fn(async () => {
throw error;
});
const result = await buildCodexProviderCatalog({
env: {},
listModels,
onDiscoveryFailure,
});
expect(onDiscoveryFailure).toHaveBeenCalledWith(error);
expectStaticFallbackCatalog(result);
});
it("keeps a static fallback catalog when live discovery is explicitly disabled by env", async () => {
const listModels = vi.fn();
@@ -176,7 +236,7 @@ describe("codex provider", () => {
expect(discoveryClient.close).toHaveBeenCalledTimes(1);
});
it("resolves arbitrary Codex app-server model ids through the codex provider", () => {
it("resolves arbitrary Codex app-server model ids as text-only until discovered", () => {
const provider = buildCodexProvider();
const model = provider.resolveDynamicModel?.({
@@ -190,6 +250,21 @@ describe("codex provider", () => {
provider: "codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
});
});
it("keeps fallback Codex app-server models image-capable", () => {
const provider = buildCodexProvider();
const model = provider.resolveDynamicModel?.({
provider: "codex",
modelId: "gpt-5.5",
modelRegistry: { find: () => null },
} as never);
expect(model).toMatchObject({
id: "gpt-5.5",
input: ["text", "image"],
});
});

View File

@@ -1,4 +1,5 @@
import { resolvePluginConfigObject } from "openclaw/plugin-sdk/config-runtime";
import { createSubsystemLogger } from "openclaw/plugin-sdk/core";
import type { ProviderRuntimeModel } from "openclaw/plugin-sdk/plugin-entry";
import {
normalizeModelCompat,
@@ -26,10 +27,13 @@ import type {
const DEFAULT_DISCOVERY_TIMEOUT_MS = 2500;
const LIVE_DISCOVERY_ENV = "OPENCLAW_CODEX_DISCOVERY_LIVE";
const MODEL_DISCOVERY_PAGE_LIMIT = 100;
const codexCatalogLog = createSubsystemLogger("codex/catalog");
type CodexModelLister = (options: {
timeoutMs: number;
limit?: number;
cursor?: string;
startOptions?: CodexAppServerStartOptions;
sharedClient?: boolean;
}) => Promise<CodexAppServerModelListResult>;
@@ -43,6 +47,7 @@ type BuildCatalogOptions = {
env?: NodeJS.ProcessEnv;
pluginConfig?: unknown;
listModels?: CodexModelLister;
onDiscoveryFailure?: (error: unknown) => void;
};
export function buildCodexProvider(options: BuildCodexProviderOptions = {}): ProviderPlugin {
@@ -103,6 +108,7 @@ export async function buildCodexProviderCatalog(
listModels: options.listModels ?? listCodexAppServerModelsLazy,
timeoutMs,
startOptions: appServer.start,
onDiscoveryFailure: options.onDiscoveryFailure,
});
}
return {
@@ -115,12 +121,15 @@ function resolveCodexDynamicModel(modelId: string) {
if (!id) {
return undefined;
}
+ const fallbackModel = FALLBACK_CODEX_MODELS.find((model) => model.id === id);
return normalizeModelCompat({
...buildCodexModelDefinition({
id,
model: id,
- inputModalities: ["text", "image"],
- supportedReasoningEfforts: shouldDefaultToReasoningModel(id) ? ["medium"] : [],
+ inputModalities: fallbackModel?.inputModalities ?? ["text"],
+ supportedReasoningEfforts:
+ fallbackModel?.supportedReasoningEfforts ??
+ (shouldDefaultToReasoningModel(id) ? ["medium"] : []),
}),
provider: CODEX_PROVIDER_ID,
baseUrl: CODEX_BASE_URL,
@@ -131,16 +140,28 @@ async function listModelsBestEffort(params: {
listModels: CodexModelLister;
timeoutMs: number;
startOptions: CodexAppServerStartOptions;
onDiscoveryFailure?: (error: unknown) => void;
}): Promise<CodexAppServerModel[]> {
try {
- const result = await params.listModels({
- timeoutMs: params.timeoutMs,
- limit: 100,
- startOptions: params.startOptions,
- sharedClient: false,
+ const models: CodexAppServerModel[] = [];
+ let cursor: string | undefined;
+ do {
+ const result = await params.listModels({
+ timeoutMs: params.timeoutMs,
+ limit: MODEL_DISCOVERY_PAGE_LIMIT,
+ cursor,
+ startOptions: params.startOptions,
+ sharedClient: false,
+ });
+ models.push(...result.models.filter((model) => !model.hidden));
+ cursor = result.nextCursor;
+ } while (cursor);
+ return models;
+ } catch (error) {
+ params.onDiscoveryFailure?.(error);
+ codexCatalogLog.debug("codex model discovery failed; using fallback catalog", {
+ error: error instanceof Error ? error.message : String(error),
});
- return result.models.filter((model) => !model.hidden);
- } catch {
return [];
}
}
@@ -148,6 +169,7 @@ async function listModelsBestEffort(params: {
async function listCodexAppServerModelsLazy(options: {
timeoutMs: number;
limit?: number;
cursor?: string;
startOptions?: CodexAppServerStartOptions;
sharedClient?: boolean;
}): Promise<CodexAppServerModelListResult> {

View File

@@ -21,11 +21,13 @@ vi.mock("openclaw/plugin-sdk/provider-auth", () => ({
}));
let listCodexAppServerModels: typeof import("./models.js").listCodexAppServerModels;
let listAllCodexAppServerModels: typeof import("./models.js").listAllCodexAppServerModels;
let resetSharedCodexAppServerClientForTests: typeof import("./shared-client.js").resetSharedCodexAppServerClientForTests;
describe("listCodexAppServerModels", () => {
beforeAll(async () => {
({ listCodexAppServerModels } = await import("./models.js"));
({ listAllCodexAppServerModels } = await import("./models.js"));
({ resetSharedCodexAppServerClientForTests } = await import("./shared-client.js"));
});
@@ -97,4 +99,132 @@ describe("listCodexAppServerModels", () => {
harness.client.close();
startSpy.mockRestore();
});
it("lists all app-server model pages through one client", async () => {
const harness = createClientHarness();
const startSpy = vi.spyOn(CodexAppServerClient, "start").mockReturnValue(harness.client);
const listPromise = listAllCodexAppServerModels({ limit: 1, timeoutMs: 1000 });
await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(1));
const initialize = JSON.parse(harness.writes[0] ?? "{}") as { id?: number };
harness.send({
id: initialize.id,
result: { userAgent: "openclaw/0.118.0 (macOS; test)" },
});
await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(3));
const firstList = JSON.parse(harness.writes[2] ?? "{}") as {
id?: number;
params?: { cursor?: string | null };
};
expect(firstList.params?.cursor).toBeNull();
harness.send({
id: firstList.id,
result: {
data: [
{
id: "gpt-5.4",
model: "gpt-5.4",
upgrade: null,
upgradeInfo: null,
availabilityNux: null,
displayName: "gpt-5.4",
description: "GPT-5.4",
hidden: false,
inputModalities: ["text"],
supportedReasoningEfforts: [],
defaultReasoningEffort: "medium",
supportsPersonality: false,
additionalSpeedTiers: [],
isDefault: false,
},
],
nextCursor: "page-2",
},
});
await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(4));
const secondList = JSON.parse(harness.writes[3] ?? "{}") as {
id?: number;
params?: { cursor?: string | null };
};
expect(secondList.params?.cursor).toBe("page-2");
harness.send({
id: secondList.id,
result: {
data: [
{
id: "gpt-5.2",
model: "gpt-5.2",
upgrade: null,
upgradeInfo: null,
availabilityNux: null,
displayName: "gpt-5.2",
description: "GPT-5.2",
hidden: false,
inputModalities: ["text", "image"],
supportedReasoningEfforts: [],
defaultReasoningEffort: "medium",
supportsPersonality: false,
additionalSpeedTiers: [],
isDefault: false,
},
],
nextCursor: null,
},
});
await expect(listPromise).resolves.toMatchObject({
models: [{ id: "gpt-5.4" }, { id: "gpt-5.2" }],
});
harness.client.close();
startSpy.mockRestore();
});
it("marks all-model listing truncated after the page cap", async () => {
const harness = createClientHarness();
const startSpy = vi.spyOn(CodexAppServerClient, "start").mockReturnValue(harness.client);
const listPromise = listAllCodexAppServerModels({ limit: 1, timeoutMs: 1000, maxPages: 1 });
await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(1));
const initialize = JSON.parse(harness.writes[0] ?? "{}") as { id?: number };
harness.send({
id: initialize.id,
result: { userAgent: "openclaw/0.118.0 (macOS; test)" },
});
await vi.waitFor(() => expect(harness.writes.length).toBeGreaterThanOrEqual(3));
const firstList = JSON.parse(harness.writes[2] ?? "{}") as { id?: number };
harness.send({
id: firstList.id,
result: {
data: [
{
id: "gpt-5.4",
model: "gpt-5.4",
upgrade: null,
upgradeInfo: null,
availabilityNux: null,
displayName: "gpt-5.4",
description: "GPT-5.4",
hidden: false,
inputModalities: ["text"],
supportedReasoningEfforts: [],
defaultReasoningEffort: "medium",
supportsPersonality: false,
additionalSpeedTiers: [],
isDefault: false,
},
],
nextCursor: "page-2",
},
});
await expect(listPromise).resolves.toMatchObject({
models: [{ id: "gpt-5.4" }],
nextCursor: "page-2",
truncated: true,
});
harness.client.close();
startSpy.mockRestore();
});
});

View File

@@ -1,3 +1,4 @@
import type { CodexAppServerClient } from "./client.js";
import type { CodexAppServerStartOptions } from "./config.js";
import type { v2 } from "./protocol-generated/typescript/index.js";
import { readCodexModelListResponse } from "./protocol-validators.js";
@@ -17,6 +18,7 @@ export type CodexAppServerModel = {
export type CodexAppServerModelListResult = {
models: CodexAppServerModel[];
nextCursor?: string;
truncated?: boolean;
};
export type CodexAppServerListModelsOptions = {
@@ -32,6 +34,40 @@ export type CodexAppServerListModelsOptions = {
export async function listCodexAppServerModels(
options: CodexAppServerListModelsOptions = {},
): Promise<CodexAppServerModelListResult> {
return await withCodexAppServerModelClient(options, async ({ client, timeoutMs }) =>
requestModelListPage(client, { ...options, timeoutMs }),
);
}
export async function listAllCodexAppServerModels(
options: CodexAppServerListModelsOptions & { maxPages?: number } = {},
): Promise<CodexAppServerModelListResult> {
const maxPages = normalizeMaxPages(options.maxPages);
return await withCodexAppServerModelClient(options, async ({ client, timeoutMs }) => {
const models: CodexAppServerModel[] = [];
let cursor = options.cursor;
let nextCursor: string | undefined;
for (let page = 0; page < maxPages; page += 1) {
const result = await requestModelListPage(client, {
...options,
timeoutMs,
cursor,
});
models.push(...result.models);
nextCursor = result.nextCursor;
if (!nextCursor) {
return { models };
}
cursor = nextCursor;
}
return { models, nextCursor, truncated: true };
});
}
async function withCodexAppServerModelClient<T>(
options: CodexAppServerListModelsOptions,
run: (params: { client: CodexAppServerClient; timeoutMs: number }) => Promise<T>,
): Promise<T> {
const timeoutMs = options.timeoutMs ?? 2500;
const useSharedClient = options.sharedClient !== false;
const { createIsolatedCodexAppServerClient, getSharedCodexAppServerClient } =
@@ -48,16 +84,7 @@ export async function listCodexAppServerModels(
authProfileId: options.authProfileId,
});
try {
- const response = await client.request(
- "model/list",
- {
- limit: options.limit ?? null,
- cursor: options.cursor ?? null,
- includeHidden: options.includeHidden ?? null,
- },
- { timeoutMs },
- );
- return readModelListResult(response);
+ return await run({ client, timeoutMs });
} finally {
if (!useSharedClient) {
client.close();
@@ -65,6 +92,22 @@ export async function listCodexAppServerModels(
}
}
async function requestModelListPage(
client: CodexAppServerClient,
options: CodexAppServerListModelsOptions & { timeoutMs: number },
): Promise<CodexAppServerModelListResult> {
const response = await client.request(
"model/list",
{
limit: options.limit ?? null,
cursor: options.cursor ?? null,
includeHidden: options.includeHidden ?? null,
},
{ timeoutMs: options.timeoutMs },
);
return readModelListResult(response);
}
export function readModelListResult(value: unknown): CodexAppServerModelListResult {
const response = readCodexModelListResponse(value);
if (!response) {
@@ -116,3 +159,7 @@ function readNonEmptyString(value: unknown): string | undefined {
const trimmed = value.trim();
return trimmed || undefined;
}
function normalizeMaxPages(value: unknown): number {
return typeof value === "number" && Number.isFinite(value) && value > 0 ? Math.floor(value) : 20;
}
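
A quick usage sketch for the new `listAllCodexAppServerModels`: `maxPages` bounds the page loop (defaulting to 20 via `normalizeMaxPages` above), and `truncated` plus `nextCursor` report a capped listing. The option values here are illustrative.

    const result = await listAllCodexAppServerModels({
      limit: 100,
      timeoutMs: 2500,
      maxPages: 5,
    });
    if (result.truncated) {
      // The page cap was hit; resume from result.nextCursor if needed.
      console.warn(`model listing truncated at cursor ${result.nextCursor}`);
    }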

View File

@@ -45,10 +45,14 @@ export function formatModels(result: CodexAppServerModelListResult): string {
if (result.models.length === 0) {
return "No Codex app-server models returned.";
}
- return [
+ const lines = [
"Codex models:",
...result.models.map((model) => `- ${model.id}${model.isDefault ? " (default)" : ""}`),
- ].join("\n");
+ ];
+ if (result.truncated) {
+ lines.push("- More models available; output truncated.");
+ }
+ return lines.join("\n");
}
export function formatThreads(response: JsonValue | undefined): string {
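
For reference, a truncated result renders with the marker line appended, matching the command test later in this diff (model fields trimmed to the ones that test supplies):

    const text = formatModels({
      models: [
        { id: "gpt-5.4", model: "gpt-5.4", inputModalities: ["text"], supportedReasoningEfforts: ["medium"] },
      ],
      nextCursor: "page-2",
      truncated: true,
    });
    // "Codex models:\n- gpt-5.4\n- More models available; output truncated."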

View File

@@ -1,6 +1,6 @@
import type { PluginCommandContext, PluginCommandResult } from "openclaw/plugin-sdk/plugin-entry";
import { CODEX_CONTROL_METHODS, type CodexControlMethod } from "./app-server/capabilities.js";
- import { listCodexAppServerModels } from "./app-server/models.js";
+ import { listAllCodexAppServerModels } from "./app-server/models.js";
import { isJsonObject, type JsonValue } from "./app-server/protocol.js";
import {
clearCodexAppServerBinding,
@@ -42,7 +42,7 @@ import {
export type CodexCommandDeps = {
codexControlRequest: CodexControlRequestFn;
- listCodexAppServerModels: typeof listCodexAppServerModels;
+ listCodexAppServerModels: typeof listAllCodexAppServerModels;
readCodexStatusProbes: typeof readCodexStatusProbes;
readCodexAppServerBinding: typeof readCodexAppServerBinding;
requestOptions: typeof requestOptions;
@@ -73,7 +73,7 @@ type SafeCodexControlRequestFn = (
const defaultCodexCommandDeps: CodexCommandDeps = {
codexControlRequest,
- listCodexAppServerModels,
+ listCodexAppServerModels: listAllCodexAppServerModels,
readCodexStatusProbes,
readCodexAppServerBinding,
requestOptions,

View File

@@ -112,6 +112,27 @@ describe("codex command", () => {
});
});
it("shows when Codex app-server model output is truncated", async () => {
const deps = createDeps({
listCodexAppServerModels: vi.fn(async () => ({
models: [
{
id: "gpt-5.4",
model: "gpt-5.4",
inputModalities: ["text"],
supportedReasoningEfforts: ["medium"],
},
],
nextCursor: "page-2",
truncated: true,
})),
});
await expect(handleCodexCommand(createContext("models"), { deps })).resolves.toEqual({
text: "Codex models:\n- gpt-5.4\n- More models available; output truncated.",
});
});
it("reports status unavailable when every Codex probe fails", async () => {
const offline = { ok: false as const, error: "offline" };
const deps = createDeps({

View File

@@ -5,7 +5,7 @@ import {
} from "openclaw/plugin-sdk/provider-onboard";
export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai/gpt-5.5";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5";
export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2";
export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
export const OPENAI_DEFAULT_TTS_VOICE = "alloy";

View File

@@ -224,7 +224,7 @@ describe("openai codex provider", () => {
},
},
],
defaultModel: "openai/gpt-5.5",
defaultModel: "openai-codex/gpt-5.5",
});
expect(result?.profiles[0]?.credential).not.toHaveProperty("idToken");
expect(result?.profiles[0]?.credential).not.toHaveProperty("accountId");

View File

@@ -258,7 +258,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
return undefined;
}
- return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai/gpt-5.5 with the Codex OAuth profile, or set OPENAI_API_KEY for direct OpenAI API access.';
+ return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5, or set OPENAI_API_KEY for direct OpenAI API access.';
},
suppressBuiltInModel: (ctx) => {
if (

View File

@@ -147,7 +147,7 @@ describe("monitorZaloProvider lifecycle", () => {
settled = true;
});
- await vi.waitFor(() => expect(setWebhookMock).toHaveBeenCalledTimes(1));
+ await vi.waitFor(() => expect(setWebhookMock).toHaveBeenCalledTimes(1), { timeout: 5_000 });
expect(registry.httpRoutes).toHaveLength(2);
abort.abort();

View File

@@ -1669,7 +1669,7 @@ describe("runAgentTurnWithFallback", () => {
it("surfaces direct provider auth guidance for missing API keys", async () => {
state.runEmbeddedPiAgentMock.mockRejectedValueOnce(
new Error(
'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai/gpt-5.5 with the Codex OAuth profile, or set OPENAI_API_KEY for direct OpenAI API access. | No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai/gpt-5.5 with the Codex OAuth profile, or set OPENAI_API_KEY for direct OpenAI API access.',
'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5, or set OPENAI_API_KEY for direct OpenAI API access. | No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5, or set OPENAI_API_KEY for direct OpenAI API access.',
),
);
@@ -1701,7 +1701,7 @@ describe("runAgentTurnWithFallback", () => {
expect(result.kind).toBe("final");
if (result.kind === "final") {
expect(result.payload.text).toBe(
"⚠️ Missing API key for OpenAI on the gateway. Use `openai/gpt-5.5` with the Codex OAuth profile, or set `OPENAI_API_KEY`, then try again.",
"⚠️ Missing API key for OpenAI on the gateway. Use `openai-codex/gpt-5.5`, or set `OPENAI_API_KEY`, then try again.",
);
}
});

View File

@@ -357,7 +357,7 @@ function buildMissingApiKeyFailureText(message: string): string | null {
return null;
}
if (provider === "openai" && normalizedMessage.includes("OpenAI Codex OAuth")) {
return "⚠️ Missing API key for OpenAI on the gateway. Use `openai/gpt-5.5` with the Codex OAuth profile, or set `OPENAI_API_KEY`, then try again.";
return "⚠️ Missing API key for OpenAI on the gateway. Use `openai-codex/gpt-5.5`, or set `OPENAI_API_KEY`, then try again.";
}
if (SAFE_MISSING_API_KEY_PROVIDERS.has(provider)) {
return `⚠️ Missing API key for provider "${provider}". Configure the gateway auth for that provider, then try again.`;

View File

@@ -298,7 +298,7 @@ describe("modelsAuthLoginCommand", () => {
},
},
],
defaultModel: "openai/gpt-5.5",
defaultModel: "openai-codex/gpt-5.5",
});
mocks.resolvePluginProviders.mockReturnValue([
createProvider({
@@ -365,7 +365,7 @@ describe("modelsAuthLoginCommand", () => {
"Auth profile: openai-codex:user@example.com (openai-codex/oauth)",
);
expect(runtime.log).toHaveBeenCalledWith(
"Default model available: openai/gpt-5.5 (use --set-default to apply)",
"Default model available: openai-codex/gpt-5.5 (use --set-default to apply)",
);
expect(runtime.log).toHaveBeenCalledWith(
"Tip: Codex-capable models can use native Codex web search. Enable it with openclaw configure --section web (recommended mode: cached). Docs: https://docs.openclaw.ai/tools/web",
@@ -602,13 +602,16 @@ describe("modelsAuthLoginCommand", () => {
},
},
],
configPatch: { agents: { defaults: { models: { "openai/gpt-5.5": {} } } } },
defaultModel: "openai/gpt-5.5",
configPatch: { agents: { defaults: { models: { "openai-codex/gpt-5.5": {} } } } },
defaultModel: "openai-codex/gpt-5.5",
});
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
- expect(lastUpdatedConfig?.agents?.defaults?.models).toEqual(existingModels);
+ expect(lastUpdatedConfig?.agents?.defaults?.models).toEqual({
+ ...existingModels,
+ "openai-codex/gpt-5.5": {},
+ });
});
it("survives lockout clearing failure without blocking login", async () => {

View File

@@ -7,7 +7,7 @@ import { ensureModelAllowlistEntry } from "./provider-model-allowlist.js";
import { applyAgentDefaultPrimaryModel } from "./provider-model-primary.js";
export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai/gpt-5.5";
export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5";
export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2";
export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
export const OPENAI_DEFAULT_TTS_VOICE = "alloy";

View File

@@ -119,12 +119,12 @@ function buildOpenAICodexOAuthResult(params: {
agents: {
defaults: {
models: {
"openai/gpt-5.5": {},
"openai-codex/gpt-5.5": {},
},
},
},
},
defaultModel: "openai/gpt-5.5",
defaultModel: "openai-codex/gpt-5.5",
notes: undefined,
};
}