fix: stabilize skills prompt ordering (#64198) (thanks @Bartok9)

This commit is contained in:
Peter Steinberger
2026-04-16 17:58:20 +01:00
parent c4488d5ef5
commit b31d243c57
4 changed files with 79 additions and 29 deletions

View File

@@ -49,6 +49,7 @@ Docs: https://docs.openclaw.ai
- Sessions/Codex: skip redundant `delivery-mirror` transcript appends only when the latest assistant message has the same visible text, preventing duplicate visible replies on Codex-backed turns without suppressing repeated answers across turns. (#67185) Thanks @andyylin.
- Auto-reply/prompt-cache: keep volatile inbound chat IDs out of the stable system prompt so task-scoped adapters can reuse prompt caches across runs, while preserving conversation metadata for the user turn and media-only messages. (#65071) Thanks @MonkeyLeeT.
- BlueBubbles/inbound: restore inbound image attachment downloads on Node 22+ by stripping incompatible bundled-undici dispatchers from the non-SSRF fetch path, accept `updated-message` webhooks carrying attachments, use event-type-aware dedup keys so attachment follow-ups are not rejected as duplicates, and retry attachment fetch from the BB API when the initial webhook arrives with an empty array. (#64105, #61861, #65430, #67510) Thanks @omarshahine.
- Agents/skills: sort prompt-facing `available_skills` entries by skill name after merging sources so `skills.load.extraDirs` order no longer changes prompt-cache prefixes. (#64198) Thanks @Bartok9.
## 2026.4.15-beta.1

View File

@@ -2,6 +2,7 @@ import { Type } from "@sinclair/typebox";
import type { ChannelMessageActionContext } from "openclaw/plugin-sdk/channel-contract";
import type { OpenClawConfig } from "openclaw/plugin-sdk/config-runtime";
import { describe, expect, it, vi } from "vitest";
import { withEnv } from "../../../src/test-utils/env.js";
const handleDiscordMessageActionMock = vi.hoisted(() =>
vi.fn(async () => ({ content: [], details: { ok: true } })),
@@ -15,20 +16,22 @@ const { discordMessageActions } = await import("./channel-actions.js");
describe("discordMessageActions", () => {
it("returns no tool actions when no token-sourced Discord accounts are enabled", () => {
const discovery = discordMessageActions.describeMessageTool?.({
cfg: {
channels: {
discord: {
enabled: true,
withEnv({ DISCORD_BOT_TOKEN: undefined }, () => {
const discovery = discordMessageActions.describeMessageTool?.({
cfg: {
channels: {
discord: {
enabled: true,
},
},
},
} as OpenClawConfig,
});
} as OpenClawConfig,
});
expect(discovery).toEqual({
actions: [],
capabilities: [],
schema: null,
expect(discovery).toEqual({
actions: [],
capabilities: [],
schema: null,
});
});
});

View File

@@ -372,23 +372,41 @@ describe("gateway-status command", () => {
it("surfaces unresolved SecretRef auth diagnostics when probe fails", async () => {
const { runtime, runtimeLogs, runtimeErrors } = createRuntimeCapture();
await withEnvAsync({ MISSING_GATEWAY_TOKEN: undefined }, async () => {
mockLocalTokenEnvRefConfig();
probeGateway.mockResolvedValueOnce({
ok: false,
url: "ws://127.0.0.1:18789",
connectLatencyMs: null,
error: "connection refused",
close: null,
health: null,
status: null,
presence: null,
configSnapshot: null,
const defaultReadBestEffortConfig = readBestEffortConfig.getMockImplementation();
const defaultProbeGateway = probeGateway.getMockImplementation();
try {
await withEnvAsync({ MISSING_GATEWAY_TOKEN: undefined }, async () => {
readBestEffortConfig.mockReset();
probeGateway.mockReset();
mockLocalTokenEnvRefConfig();
probeGateway.mockImplementation(async (opts: { url: string }) => {
const { url } = opts;
return {
ok: false,
url,
connectLatencyMs: null,
error: "connection refused",
close: null,
health: null,
status: null,
presence: null,
configSnapshot: null,
};
});
await expect(runGatewayStatus(runtime, { timeout: "1000", json: true })).rejects.toThrow(
"__exit__:1",
);
});
await expect(runGatewayStatus(runtime, { timeout: "1000", json: true })).rejects.toThrow(
"__exit__:1",
);
});
} finally {
readBestEffortConfig.mockReset();
if (defaultReadBestEffortConfig) {
readBestEffortConfig.mockImplementation(defaultReadBestEffortConfig);
}
probeGateway.mockReset();
if (defaultProbeGateway) {
probeGateway.mockImplementation(defaultProbeGateway);
}
}
expect(runtimeErrors).toHaveLength(0);
const unresolvedWarning = findUnresolvedSecretRefWarning(runtimeLogs);

View File

@@ -108,6 +108,21 @@ function loadVitestMusicGenerationFallbackEntries(
});
}
/**
 * Loads fallback speech-provider contract entries for the given plugin ids,
 * resolving the plugin SDK from the "src" tree rather than the default
 * resolution. Delegates to the shared capability-contract loader and keeps
 * only the (pluginId, provider) pair from each registry entry.
 */
function loadVitestSpeechFallbackEntries(
  pluginIds: readonly string[],
): SpeechProviderContractEntry[] {
  return loadVitestCapabilityContractEntries({
    contract: "speechProviders",
    pluginSdkResolution: "src",
    pluginIds,
    // Project each registry entry down to the contract shape.
    pickEntries: (registry) =>
      registry.speechProviders.map(({ pluginId, provider }) => ({
        pluginId,
        provider,
      })),
  });
}
function hasExplicitVideoGenerationModes(provider: VideoGenerationProviderPlugin): boolean {
return Boolean(
provider.capabilities.generate &&
@@ -156,7 +171,7 @@ function loadVitestCapabilityContractEntries<T>(params: {
}
export function loadVitestSpeechProviderContractRegistry(): SpeechProviderContractEntry[] {
return loadVitestCapabilityContractEntries({
const entries = loadVitestCapabilityContractEntries({
contract: "speechProviders",
pickEntries: (registry) =>
registry.speechProviders.map((entry) => ({
@@ -164,6 +179,19 @@ export function loadVitestSpeechProviderContractRegistry(): SpeechProviderContra
provider: entry.provider,
})),
});
const coveredPluginIds = new Set(entries.map((entry) => entry.pluginId));
const missingPluginIds = VITEST_CONTRACT_PLUGIN_IDS.speechProviders.filter(
(pluginId) => !coveredPluginIds.has(pluginId),
);
if (missingPluginIds.length === 0) {
return entries;
}
const replacementEntries = loadVitestSpeechFallbackEntries(missingPluginIds);
const replacedPluginIds = new Set(replacementEntries.map((entry) => entry.pluginId));
return [
...entries.filter((entry) => !replacedPluginIds.has(entry.pluginId)),
...replacementEntries,
];
}
export function loadVitestMediaUnderstandingProviderContractRegistry(): MediaUnderstandingProviderContractEntry[] {