fix: normalize minimal ollama provider config (#69370) (thanks @PratikRai0101)

This commit is contained in:
Peter Steinberger
2026-04-21 02:36:45 +01:00
parent 8edf705238
commit 76d72d48f3
4 changed files with 70 additions and 48 deletions

View File

@@ -18,6 +18,7 @@ Docs: https://docs.openclaw.ai
- OpenAI Codex: route ChatGPT/Codex OAuth Responses requests through the `/backend-api/codex` endpoint so `openai-codex/gpt-5.4` no longer hits the removed `/backend-api/responses` alias. (#69336) Thanks @mzogithub.
- Gateway/pairing: treat loopback shared-secret node-host, TUI, and gateway clients as local for pairing decisions, so trusted local tools no longer reconnect as remote clients and fail with `pairing required`. (#69431) Thanks @SARAMALI15792.
- Active Memory: degrade gracefully when memory recall fails during prompt building, logging a warning and letting the reply continue without memory context instead of failing the whole turn. (#69485) Thanks @Magicray1217.
- Ollama: add provider-policy defaults for `baseUrl` and `models` so implicit local discovery can run instead of config validation rejecting a minimal Ollama provider config. (#69370) Thanks @PratikRai0101. (A usage sketch follows this list.)
- Telegram/status reactions: honor `messages.removeAckAfterReply` when lifecycle status reactions are enabled, clearing or restoring the reaction after success/error using the configured hold timings. (#68067) Thanks @poiskgit.
- Web search/plugins: resolve plugin-scoped SecretRef API keys for bundled Exa, Firecrawl, Gemini, Kimi, Perplexity, Tavily, and Grok web-search providers when they are selected through the shared web-search config. (#68424) Thanks @afurm.
- Telegram/polling: raise the default polling watchdog threshold from 90s to 120s and add configurable `channels.telegram.pollingStallThresholdMs` (also per-account) so long-running Telegram work gets more room before polling is treated as stalled. (#57737) Thanks @Vitalcheffe.
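
For the Ollama entry above, a minimal usage sketch of the new defaulting behavior. It mirrors the tests added in this commit, and the import paths are those of the files below; the surrounding config-loading code is not shown here.

import { normalizeConfig } from "./provider-policy-api.js";
import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";

// A bare `{}` Ollama provider entry now picks up defaults before validation:
const normalized = normalizeConfig({ provider: "ollama", providerConfig: {} });
// normalized.baseUrl is OLLAMA_DEFAULT_BASE_URL ("http://127.0.0.1:11434");
// normalized.models is [], which signals implicit local discovery to populate
// the model list.

// Explicitly set values are preserved, and other providers pass through untouched:
normalizeConfig({ provider: "openai", providerConfig: {} }); // -> {}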

View File

@@ -1,40 +0,0 @@
import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";

/**
 * Provider policy surface for Ollama: normalize provider configs used by
 * core defaults/normalizers. This runs during config defaults application and
 * normalization paths (not Zod validation). It ensures the Ollama provider
 * config uses the native Ollama default base URL when baseUrl is omitted.
 *
 * Keep this intentionally small: do not change types or try to sidestep core
 * schema validation. This helper makes runtime normalization and defaults
 * consistent for Ollama-only paths.
 */
export function normalizeConfig({ provider, providerConfig }) {
  if (!providerConfig || typeof providerConfig !== "object") {
    return providerConfig;
  }
  // Only normalize the Ollama provider; be tolerant of provider aliasing/case.
  const normalizedProviderId = String(provider ?? "")
    .trim()
    .toLowerCase();
  if (normalizedProviderId !== "ollama") {
    return providerConfig;
  }
  const next = { ...providerConfig };
  // If baseUrl is missing/empty, default to local Ollama host. Do not override
  // a deliberately-set empty string or non-string value beyond normalization.
  if (typeof next.baseUrl !== "string" || !next.baseUrl.trim()) {
    next.baseUrl = OLLAMA_DEFAULT_BASE_URL;
  }
  // If models is missing/not an array, default to empty array to signal
  // that discovery should run to populate models.
  if (!Array.isArray(next.models)) {
    next.models = [];
  }
  return next;
}

View File

@@ -0,0 +1,61 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-model-types";
import { describe, expect, it } from "vitest";
import { normalizeConfig } from "./provider-policy-api.js";
import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";

function createModel(id: string, name: string): ModelDefinitionConfig {
  return {
    id,
    name,
    reasoning: false,
    input: ["text"],
    cost: {
      input: 0,
      output: 0,
      cacheRead: 0,
      cacheWrite: 0,
    },
    contextWindow: 128_000,
    maxTokens: 8_192,
  };
}

describe("ollama provider policy public artifact", () => {
  it("injects defaults so implicit discovery can run before validation", () => {
    expect(
      normalizeConfig({
        provider: "ollama",
        providerConfig: {},
      }),
    ).toMatchObject({
      baseUrl: OLLAMA_DEFAULT_BASE_URL,
      models: [],
    });
  });

  it("preserves explicit Ollama config values", () => {
    const models = [createModel("llama3.2", "Llama 3.2")];
    expect(
      normalizeConfig({
        provider: "ollama",
        providerConfig: {
          baseUrl: "http://ollama.internal:11434",
          models,
        },
      }),
    ).toMatchObject({
      baseUrl: "http://ollama.internal:11434",
      models,
    });
  });

  it("ignores other providers", () => {
    expect(
      normalizeConfig({
        provider: "openai",
        providerConfig: {},
      }),
    ).toEqual({});
  });
});

View File

@@ -1,5 +1,7 @@
// Inlined to avoid custom source-loader resolution issues
const OLLAMA_DEFAULT_BASE_URL = "http://127.0.0.1:11434";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-types";
import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";
type OllamaProviderConfigDraft = Partial<ModelProviderConfig>;
/**
* Provider policy surface for Ollama: normalize provider configs used by
@@ -11,27 +13,25 @@ export function normalizeConfig({
  providerConfig,
}: {
  provider: string;
  providerConfig: unknown;
}): unknown {
  providerConfig: OllamaProviderConfigDraft;
}): OllamaProviderConfigDraft {
  if (!providerConfig || typeof providerConfig !== "object") {
    return providerConfig;
  }
  // provider is already a string, no need for String() cast
  const normalizedProviderId = (provider ?? "").trim().toLowerCase();
  if (normalizedProviderId !== "ollama") {
    return providerConfig;
  }
  // Safely cast to Record to allow mutations without 'any'
  const next: Record<string, unknown> = { ...(providerConfig as Record<string, unknown>) };
  const next: OllamaProviderConfigDraft = { ...providerConfig };
  // If baseUrl is missing, empty, or whitespace-only, default to local Ollama host.
  if (typeof next.baseUrl !== "string" || !next.baseUrl.trim()) {
    next.baseUrl = OLLAMA_DEFAULT_BASE_URL;
  }
  // If models is missing/not an array, default to empty array to signal discovery
  // If models is missing/not an array, default to empty array to signal discovery.
  if (!Array.isArray(next.models)) {
    next.models = [];
  }