fix(cli): reject empty model run prompts

Peter Steinberger
2026-04-28 06:50:39 +01:00
parent ee75a8ec2c
commit 76a07b9a07
4 changed files with 46 additions and 1 deletion


@@ -12,6 +12,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 - Cron/Telegram: preserve explicit `:topic:` delivery targets over stale session-derived thread IDs when isolated cron announces to Telegram forum topics. Carries forward #59069; refs #49704 and #43808. Thanks @roytong9.
+- CLI/model probes: reject empty or whitespace-only `infer model run --prompt` values before calling local providers or the Gateway, so smoke checks do not spend provider calls on invalid turns. Fixes #73185. Thanks @iot2edge.
 - Gateway/media: route text-only `chat.send` image offloads through media-understanding fields so `agents.defaults.imageModel` can describe WebChat attachments instead of leaving only an opaque `media://inbound` marker. Fixes #72968. Thanks @vorajeeah.
 - CLI/onboarding: infer image input for common custom-provider vision model IDs, ask only for unknown models, and keep `--custom-image-input`/`--custom-text-input` overrides so vision-capable proxies do not get saved as text-only configs. Fixes #51869. Thanks @Antsoldier1974.
 - Models/OpenAI Codex: stop listing or resolving unsupported `openai-codex/gpt-5.4-mini` rows through Codex OAuth, keep stale discovery rows suppressed with a clear API-key-route hint, and leave direct `openai/gpt-5.4-mini` available. Fixes #73242. Thanks @0xCyda.


@@ -159,6 +159,7 @@ openclaw infer model run --local --model openai/gpt-4.1 --prompt "Reply with exa
 Notes:
 - Local `model run` is the narrowest CLI smoke test for provider/model/auth health because it sends only the supplied prompt to the selected model.
+- `model run --prompt` must contain non-whitespace text; empty prompts are rejected before local providers or the Gateway are called (a scripted sketch follows these notes).
 - Local `model run` exits non-zero when the provider returns no text output, so unreachable local providers and empty completions do not look like successful probes.
 - Use `model run --gateway` when you need to test Gateway routing, agent-runtime setup, or Gateway-managed provider state instead of the lean local completion path.
 - `model auth login`, `model auth logout`, and `model auth status` manage saved provider auth state.
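
The same guard matters for scripted smoke checks: a harness can mirror it before shelling out, so a misconfigured caller fails fast instead of spending a provider call. A minimal sketch in TypeScript — the `probeModel` helper is illustrative and not part of OpenClaw; only the `openclaw infer model run` flags shown above are taken from the docs:

```ts
import { execFileSync } from "node:child_process";

// Hypothetical harness helper: mirrors the CLI's own guard so a blank
// prompt fails here, before `openclaw` is even spawned.
function probeModel(model: string, prompt: string, gateway = false): string {
  if (prompt.trim().length === 0) {
    throw new Error("--prompt cannot be empty or whitespace-only.");
  }
  const args = [
    "infer", "model", "run",
    gateway ? "--gateway" : "--local",
    "--model", model,
    "--prompt", prompt,
  ];
  // execFileSync throws on a non-zero exit, which also surfaces the
  // empty-completion case the notes describe.
  return execFileSync("openclaw", args, { encoding: "utf8" });
}

probeModel("openai/gpt-4.1", "Reply with exactly: ok");
```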


@@ -437,6 +437,26 @@ describe("capability cli", () => {
     expect(mocks.runtime.writeJson).not.toHaveBeenCalled();
   });
+
+  it.each(["", " ", "\n\t"])(
+    "rejects empty model run prompts before local dispatch (%j)",
+    async (prompt) => {
+      await expect(
+        runRegisteredCli({
+          register: registerCapabilityCli as (program: Command) => void,
+          argv: ["capability", "model", "run", "--prompt", prompt, "--json"],
+        }),
+      ).rejects.toThrow("exit 1");
+      expect(mocks.runtime.error).toHaveBeenCalledWith(
+        expect.stringContaining("--prompt cannot be empty or whitespace-only."),
+      );
+      expect(mocks.prepareSimpleCompletionModelForAgent).not.toHaveBeenCalled();
+      expect(mocks.completeWithPreparedSimpleCompletionModel).not.toHaveBeenCalled();
+      expect(mocks.callGateway).not.toHaveBeenCalled();
+      expect(mocks.runtime.writeJson).not.toHaveBeenCalled();
+    },
+  );
+
   it("runs gateway model probes without chat-agent prompt policy or tools", async () => {
     await runRegisteredCli({
       register: registerCapabilityCli as (program: Command) => void,
@@ -455,6 +475,21 @@ describe("capability cli", () => {
     );
   });
+
+  it("rejects empty model run prompts before gateway dispatch", async () => {
+    await expect(
+      runRegisteredCli({
+        register: registerCapabilityCli as (program: Command) => void,
+        argv: ["capability", "model", "run", "--prompt", " ", "--gateway", "--json"],
+      }),
+    ).rejects.toThrow("exit 1");
+    expect(mocks.runtime.error).toHaveBeenCalledWith(
+      expect.stringContaining("--prompt cannot be empty or whitespace-only."),
+    );
+    expect(mocks.callGateway).not.toHaveBeenCalled();
+    expect(mocks.runtime.writeJson).not.toHaveBeenCalled();
+  });
+
   it("defaults tts status to gateway transport", async () => {
     await runRegisteredCli({
       register: registerCapabilityCli as (program: Command) => void,
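
The new tests lean on the runner's table syntax. As a standalone illustration (assuming a vitest-style runner, with a local `isBlank` stand-in for the real CLI guard), `it.each` registers one test per case and `%j` JSON-encodes the case into the test name so whitespace-only inputs stay readable in reports:

```ts
import { describe, expect, it } from "vitest";

// Stand-in for the guard under test; the real suite exercises the CLI itself.
const isBlank = (value: string) => value.trim().length === 0;

describe("prompt guard", () => {
  // Expands to three tests, one per case, each named via %j,
  // e.g. `treats "\n\t" as blank`.
  it.each(["", " ", "\n\t"])("treats %j as blank", (prompt) => {
    expect(isBlank(prompt)).toBe(true);
  });
});
```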


@@ -577,6 +577,13 @@ function collectModelRunText(content: Array<{ type: string; text?: string }>): s
     .trim();
 }
+
+function requireModelRunPrompt(value: unknown): string {
+  if (typeof value !== "string" || normalizeOptionalString(value) === undefined) {
+    throw new Error("--prompt cannot be empty or whitespace-only.");
+  }
+  return value;
+}
 
 async function runModelRun(params: {
   prompt: string;
   model?: string;
@@ -1487,6 +1494,7 @@ export function registerCapabilityCli(program: Command) {
     .option("--json", "Output JSON", false)
     .action(async (opts) => {
       await runCommandWithRuntime(defaultRuntime, async () => {
+        const prompt = requireModelRunPrompt(opts.prompt);
         const transport = resolveTransport({
           local: Boolean(opts.local),
           gateway: Boolean(opts.gateway),
@@ -1494,7 +1502,7 @@ export function registerCapabilityCli(program: Command) {
           defaultTransport: "local",
         });
         const result = await runModelRun({
-          prompt: String(opts.prompt),
+          prompt,
           model: opts.model as string | undefined,
           transport,
         });
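
`normalizeOptionalString` is not part of this diff; the guard only relies on it returning `undefined` for blank input. A minimal sketch of a compatible helper — an assumption about its behavior, not the repository's implementation — shown together with the guard from the hunk above:

```ts
// Assumed shape: trims the input and maps empty results to undefined, so
// the guard treats "", "   ", and "\n\t" identically.
function normalizeOptionalString(value: string | undefined): string | undefined {
  const trimmed = value?.trim();
  return trimmed ? trimmed : undefined;
}

function requireModelRunPrompt(value: unknown): string {
  if (typeof value !== "string" || normalizeOptionalString(value) === undefined) {
    throw new Error("--prompt cannot be empty or whitespace-only.");
  }
  // The original value is returned untouched; normalization is used only
  // for validation, so intentional whitespace inside a prompt survives.
  return value;
}

requireModelRunPrompt("Reply with exactly: ok"); // passes through unchanged
// requireModelRunPrompt("  ") would throw before any provider is called.
```

Returning `value` rather than the trimmed form keeps the prompt byte-for-byte what the caller passed, which is what the `prompt,` shorthand in the last hunk forwards to `runModelRun`.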