fix(models): restore codex mini oauth route

Peter Steinberger
2026-04-30 14:43:33 +01:00
parent 0f120c09ba
commit 3766bbb674
11 changed files with 132 additions and 49 deletions
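For context, configs select the restored route by model ref, as the docs table below lays out. A minimal json5 sketch follows, assuming hypothetical `agents`/`heartbeat` wrapper keys; only the model ref and the `runtime` value come from the docs touched by this commit:

```json5
{
  // The `agents` and `heartbeat` keys are assumed for illustration; the
  // surrounding config schema is not shown in this commit.
  agents: {
    heartbeat: {
      // Restored ChatGPT/Codex OAuth route through PI (Fixes #74451).
      model: "openai-codex/gpt-5.4-mini",
      // Omitting this or setting "pi" keeps the request on the Codex OAuth route.
      runtime: "pi",
    },
  },
}
```

Leaving `runtime` out entirely resolves the same way, per the routing table in the docs change below.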

View File

@@ -8,6 +8,7 @@ Docs: https://docs.openclaw.ai
- Agents/subagents: bound automatic orphan recovery with persisted recovery attempts and a wedged-session tombstone, and teach task maintenance/doctor to reconcile those sessions so restart loops no longer require manual `sessions.json` surgery. Fixes #74864. Thanks @solosage1.
- CLI/progress: suppress nested progress spinners and line clears while TUI input owns raw stdin, so Crestodian `/status` no longer disturbs the active input row. (#75003) Thanks @velvet-shark.
- Models/OpenAI Codex: restore `openai-codex/gpt-5.4-mini` for ChatGPT/Codex OAuth PI runs now that the route has been proven against live OAuth, and align the manifest, forward-compat metadata, docs, and regression tests so stale cron and heartbeat configs resolve again. Fixes #74451. Thanks @0xCyda, @hclsys, and @Marvae.
- Telegram: use durable message edits for streaming previews instead of native draft state, so generated replies no longer flicker through draft-to-message transitions that look like duplicates. (#75073) Thanks @obviyus.
## 2026.4.29

View File

@@ -208,6 +208,7 @@ Choose your preferred auth method and follow the setup steps.
| Model ref | Runtime config | Route | Auth |
|-----------|----------------|-------|------|
| `openai-codex/gpt-5.5` | omitted / `runtime: "pi"` | ChatGPT/Codex OAuth through PI | Codex sign-in |
| `openai-codex/gpt-5.4-mini` | omitted / `runtime: "pi"` | ChatGPT/Codex OAuth through PI | Codex sign-in |
| `openai-codex/gpt-5.5` | `runtime: "auto"` | Still PI unless a plugin explicitly claims `openai-codex` | Codex sign-in |
| `openai/gpt-5.5` | `agentRuntime.id: "codex"` | Codex app-server harness | Codex app-server auth |
@@ -217,12 +218,6 @@ Choose your preferred auth method and follow the setup steps.
It does not select or auto-enable the bundled Codex app-server harness.
</Note>
<Warning>
`openai-codex/gpt-5.4-mini` is not a supported Codex OAuth route. Use
`openai/gpt-5.4-mini` with an OpenAI API key, or use
`openai-codex/gpt-5.5` with Codex OAuth.
</Warning>
### Config example
```json5

View File

@@ -439,7 +439,7 @@ describe("openai codex provider", () => {
});
});
it("does not resolve gpt-5.4-mini through the Codex OAuth route", () => {
it("resolves gpt-5.4-mini through the Codex OAuth route", () => {
const provider = buildOpenAICodexProviderPlugin();
const model = provider.resolveDynamicModel?.({
@@ -447,14 +447,25 @@ describe("openai codex provider", () => {
modelId: "gpt-5.4-mini",
modelRegistry: createSingleModelRegistry(
createCodexTemplate({
id: "gpt-5.1-codex-mini",
cost: { input: 0.25, output: 2, cacheRead: 0.025, cacheWrite: 0 },
id: "gpt-5.4",
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
contextWindow: 1_050_000,
contextTokens: 272_000,
}),
null,
) as never,
} as never);
expect(model).toBeUndefined();
expect(model).toMatchObject({
id: "gpt-5.4-mini",
name: "gpt-5.4-mini",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
contextWindow: 400_000,
contextTokens: 272_000,
maxTokens: 128_000,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
});
});
it("augments catalog with gpt-5.5-pro and gpt-5.4 native metadata", () => {
@@ -503,9 +514,12 @@ describe("openai codex provider", () => {
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
}),
);
expect(entries).not.toContainEqual(
expect(entries).toContainEqual(
expect.objectContaining({
id: "gpt-5.4-mini",
contextWindow: 400_000,
contextTokens: 272_000,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
}),
);
});

View File

@@ -52,6 +52,7 @@ const OPENAI_CODEX_GPT_55_MODEL_ID = "gpt-5.5";
const OPENAI_CODEX_GPT_55_PRO_MODEL_ID = "gpt-5.5-pro";
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
const OPENAI_CODEX_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_CODEX_GPT_55_CODEX_CONTEXT_TOKENS = 400_000;
const OPENAI_CODEX_GPT_55_DEFAULT_RUNTIME_CONTEXT_TOKENS = 272_000;
@@ -59,6 +60,7 @@ const OPENAI_CODEX_GPT_55_PRO_NATIVE_CONTEXT_TOKENS = 1_000_000;
const OPENAI_CODEX_GPT_55_PRO_DEFAULT_CONTEXT_TOKENS = 272_000;
const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
const OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS = 272_000;
const OPENAI_CODEX_GPT_54_MINI_NATIVE_CONTEXT_TOKENS = 400_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_55_PRO_COST = {
input: 30,
@@ -78,6 +80,12 @@ const OPENAI_CODEX_GPT_54_PRO_COST = {
cacheRead: 0,
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_MINI_COST = {
input: 0.75,
output: 4.5,
cacheRead: 0.075,
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
/** Legacy codex rows first; fall back to catalog `gpt-5.4` when the API omits 5.3/5.2. */
const OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS = [
@@ -105,6 +113,7 @@ const OPENAI_CODEX_MODERN_MODEL_IDS = [
OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
"gpt-5.2",
"gpt-5.2-codex",
OPENAI_CODEX_GPT_53_MODEL_ID,
@@ -227,6 +236,14 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
cost: OPENAI_CODEX_GPT_54_PRO_COST,
};
} else if (lower === OPENAI_CODEX_GPT_54_MINI_MODEL_ID) {
templateIds = OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS;
patch = {
contextWindow: OPENAI_CODEX_GPT_54_MINI_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
cost: OPENAI_CODEX_GPT_54_MINI_COST,
};
} else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) {
templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS;
} else {
@@ -495,6 +512,7 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
].includes(id);
},
...buildOpenAIResponsesProviderHooks(),
@@ -555,6 +573,14 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
cost: OPENAI_CODEX_GPT_54_PRO_COST,
}),
buildOpenAISyntheticCatalogEntry(gpt54Template, {
id: OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_CODEX_GPT_54_MINI_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
cost: OPENAI_CODEX_GPT_54_MINI_COST,
}),
].filter((entry): entry is NonNullable<typeof entry> => entry !== undefined);
},
};

View File

@@ -645,6 +645,21 @@
"cacheWrite": 0
}
},
{
"id": "gpt-5.4-mini",
"name": "gpt-5.4-mini",
"reasoning": true,
"input": ["text", "image"],
"contextWindow": 400000,
"contextTokens": 272000,
"maxTokens": 128000,
"cost": {
"input": 0.75,
"output": 4.5,
"cacheRead": 0.075,
"cacheWrite": 0
}
},
{
"id": "gpt-5.5-pro",
"name": "gpt-5.5-pro",
@@ -688,11 +703,6 @@
"provider": "openai-codex",
"model": "gpt-5.3-codex-spark",
"reason": "gpt-5.3-codex-spark is no longer exposed by the OpenAI or Codex catalogs. Use openai/gpt-5.5."
},
{
"provider": "openai-codex",
"model": "gpt-5.4-mini",
"reason": "gpt-5.4-mini is not supported by the OpenAI Codex OAuth route. Use openai/gpt-5.4-mini with an OpenAI API key or openai-codex/gpt-5.5 with Codex OAuth."
}
]
},

View File

@@ -266,7 +266,10 @@ function buildDynamicModel(
const template =
lower === "gpt-5.5-pro"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4-pro", "gpt-5.3-codex"])
: lower === "gpt-5.4" || isLegacyGpt54Alias || lower === "gpt-5.4-pro"
: lower === "gpt-5.4" ||
isLegacyGpt54Alias ||
lower === "gpt-5.4-pro" ||
lower === "gpt-5.4-mini"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
: lower === "gpt-5.3-codex-spark"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
@@ -329,6 +332,22 @@ function buildDynamicModel(
fallback,
);
}
if (lower === "gpt-5.4-mini") {
return cloneTemplate(
template,
modelId,
{
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: OPENAI_CODEX_BASE_URL,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
contextWindow: 400_000,
contextTokens: 272_000,
maxTokens: 128_000,
},
fallback,
);
}
if (lower === "gpt-5.3-codex-spark") {
return cloneTemplate(
template,

View File

@@ -75,8 +75,14 @@ export function buildOpenAICodexForwardCompatExpectation(
: isGpt54Mini
? { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 }
: OPENAI_CODEX_TEMPLATE_MODEL.cost,
contextWindow: isGpt54 ? 1_050_000 : isGpt55 ? 400_000 : isSpark ? 128_000 : 272000,
...(isGpt54 || isGpt55 ? { contextTokens: 272_000 } : {}),
contextWindow: isGpt54
? 1_050_000
: isGpt55 || isGpt54Mini
? 400_000
: isSpark
? 128_000
: 272000,
...(isGpt54 || isGpt55 || isGpt54Mini ? { contextTokens: 272_000 } : {}),
maxTokens: 128000,
};
}

View File

@@ -60,9 +60,6 @@ vi.mock("../model-suppression.js", () => {
) {
return true;
}
if (provider === "openai-codex" && id?.trim().toLowerCase() === "gpt-5.4-mini") {
return true;
}
return (
(provider === "qwen" || provider === "modelstudio") &&
id?.trim().toLowerCase() === "qwen3.6-plus" &&
@@ -78,9 +75,6 @@ vi.mock("../model-suppression.js", () => {
) {
return true;
}
if (provider === "openai-codex" && id?.trim().toLowerCase() === "gpt-5.4-mini") {
return true;
}
return false;
},
buildSuppressedBuiltInModelError: ({
@@ -99,9 +93,6 @@ vi.mock("../model-suppression.js", () => {
) {
return "Unknown model: qwen/qwen3.6-plus. qwen3.6-plus is not supported on the Qwen Coding Plan endpoint; use a Standard pay-as-you-go Qwen endpoint or choose qwen/qwen3.5-plus.";
}
if (provider === "openai-codex" && id?.trim().toLowerCase() === "gpt-5.4-mini") {
return "Unknown model: openai-codex/gpt-5.4-mini. gpt-5.4-mini is not supported by the OpenAI Codex OAuth route. Use openai/gpt-5.4-mini with an OpenAI API key or openai-codex/gpt-5.5 with Codex OAuth.";
}
if (
(provider === "openai" ||
provider === "azure-openai-responses" ||
@@ -369,7 +360,7 @@ describe("resolveModel", () => {
);
});
it("#74451: suppresses explicitly configured openai-codex/gpt-5.4-mini despite inline entry", () => {
it("#74451: resolves explicitly configured openai-codex/gpt-5.4-mini inline entries", () => {
const cfg = {
models: {
providers: {
@@ -391,10 +382,14 @@ describe("resolveModel", () => {
const result = resolveModelForTest("openai-codex", "gpt-5.4-mini", "/tmp/agent", cfg);
expect(result.model).toBeUndefined();
expect(result.error).toBe(
"Unknown model: openai-codex/gpt-5.4-mini. gpt-5.4-mini is not supported by the OpenAI Codex OAuth route. Use openai/gpt-5.4-mini with an OpenAI API key or openai-codex/gpt-5.5 with Codex OAuth.",
);
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "openai-codex",
id: "gpt-5.4-mini",
api: "openai-codex-responses",
contextWindow: 400_000,
maxTokens: 128_000,
});
});
it("normalizes Google fallback baseUrls for custom providers", () => {
@@ -1542,15 +1537,17 @@ describe("resolveModel", () => {
});
});
it("does not build an openai-codex fallback for unsupported gpt-5.4-mini", () => {
it("builds an openai-codex fallback for gpt-5.4-mini", () => {
mockOpenAICodexTemplateModel(discoverModels);
const result = resolveModelForTest("openai-codex", "gpt-5.4-mini", "/tmp/agent");
expect(result.model).toBeUndefined();
expect(result.error).toBe(
"Unknown model: openai-codex/gpt-5.4-mini. gpt-5.4-mini is not supported by the OpenAI Codex OAuth route. Use openai/gpt-5.4-mini with an OpenAI API key or openai-codex/gpt-5.5 with Codex OAuth.",
);
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
...buildOpenAICodexForwardCompatExpectation("gpt-5.4-mini"),
contextWindow: 400_000,
contextTokens: 272_000,
});
});
it("does not build an openai-codex fallback for removed gpt-5.3-codex-spark", () => {
@@ -1944,7 +1941,7 @@ describe("resolveModel", () => {
});
});
it("rejects stale discovered openai-codex gpt-5.4-mini rows", () => {
it("resolves discovered openai-codex gpt-5.4-mini rows", () => {
mockDiscoveredModel(discoverModels, {
provider: "openai-codex",
modelId: "gpt-5.4-mini",
@@ -1958,10 +1955,14 @@ describe("resolveModel", () => {
const result = resolveModelForTest("openai-codex", "gpt-5.4-mini", "/tmp/agent");
expect(result.model).toBeUndefined();
expect(result.error).toBe(
"Unknown model: openai-codex/gpt-5.4-mini. gpt-5.4-mini is not supported by the OpenAI Codex OAuth route. Use openai/gpt-5.4-mini with an OpenAI API key or openai-codex/gpt-5.5 with Codex OAuth.",
);
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "openai-codex",
id: "gpt-5.4-mini",
name: "GPT-5.4 Mini",
contextWindow: 64_000,
input: ["text"],
});
});
it("rejects stale direct openai gpt-5.3-codex-spark discovery rows", () => {

View File

@@ -605,25 +605,36 @@ export function describeOpenAIProviderRuntimeContract(load: ProviderRuntimeContr
});
});
it("does not claim unsupported codex mini models", () => {
it("claims codex mini models through the Codex OAuth route", () => {
const provider = requireProviderContractProvider("openai-codex");
const model = provider.resolveDynamicModel?.({
provider: "openai-codex",
modelId: "gpt-5.4-mini",
modelRegistry: {
find: (_provider: string, id: string) =>
id === "gpt-5.1-codex-mini"
id === "gpt-5.4"
? createModel({
id,
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
cost: { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 },
contextWindow: 272_000,
maxTokens: 128_000,
})
: null,
} as never,
});
expect(model).toBeUndefined();
expect(model).toMatchObject({
id: "gpt-5.4-mini",
provider: "openai-codex",
api: "openai-codex-responses",
contextWindow: 400_000,
contextTokens: 272_000,
maxTokens: 128_000,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
});
});
it("owns codex transport defaults", () => {

View File

@@ -15,6 +15,7 @@ export const expectedAugmentedOpenaiCodexCatalogEntries = [
{ provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
{ provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "openai-codex", id: "gpt-5.4-pro", name: "gpt-5.4-pro" },
{ provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
];
export const expectedAugmentedOpenaiCodexCatalogEntriesWithGpt55 = [
@@ -25,9 +26,7 @@ export const expectedAugmentedOpenaiCodexCatalogEntriesWithGpt55 = [
];
export const expectedOpenaiPluginCodexCatalogEntriesWithGpt55 =
expectedAugmentedOpenaiCodexCatalogEntriesWithGpt55.filter(
(entry) => !(entry.provider === "openai-codex" && entry.id === "gpt-5.4-mini"),
);
expectedAugmentedOpenaiCodexCatalogEntriesWithGpt55;
export function expectCodexMissingAuthHint(
buildProviderMissingAuthMessageWithPlugin: (params: {

View File

@@ -148,6 +148,7 @@ function createOpenAiCatalogProviderPlugin(
{ provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
{ provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "openai-codex", id: "gpt-5.4-pro", name: "gpt-5.4-pro" },
{ provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
],
...overrides,
};