mirror of
https://fastgit.cc/github.com/openclaw/openclaw
synced 2026-04-30 22:12:32 +08:00
fix: add OpenCode Go DeepSeek V4 models
This commit is contained in:
@@ -38,6 +38,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Control UI/Quick Settings: persist the assistant avatar override to browser local storage (mirroring the user avatar) so uploaded image data URLs no longer fail config validation with "Too big: expected string to have <=200 characters". Also lift the gateway-side `ui.assistant.avatar` length cap to match the user avatar size budget for non-UI clients writing the field directly. Thanks @BunsDev.
|
||||
- Browser/CDP: make readiness diagnostics use the same discovery-first fallback as reachability for bare `ws://` Browserless and Browserbase CDP URLs. Fixes #69532.
|
||||
- ACP/OpenCode: update the bundled acpx runtime to 0.6.0 and cover the OpenCode ACP bind path in Docker live tests.
|
||||
- Providers/OpenCode Go: add DeepSeek V4 Pro and DeepSeek V4 Flash to the Go catalog while the bundled Pi registry catches up. Fixes #71587.
|
||||
- Browser/existing-session: support per-profile Chrome MCP command/args, map `cdpUrl` to `--browserUrl` or `--wsEndpoint`, and avoid combining endpoint flags with `--userDataDir`. Fixes #47879, #48037, and #62706. Thanks @puneet1409, @zhehao, and @madkow1001.
|
||||
- Media/plugins: bound MIME sniffing and ZIP archive preflight before handing
|
||||
untrusted files to `file-type` or `jszip`, reducing parser CPU and memory usage.
|
||||
|
||||
@@ -18,23 +18,26 @@ provider id `opencode-go` so upstream per-model routing stays correct.
|
||||
|
||||
## Built-in catalog
|
||||
|
||||
OpenClaw sources the Go catalog from the bundled pi model registry. Run
|
||||
OpenClaw sources most Go catalog rows from the bundled pi model registry and
|
||||
adds current upstream rows while the registry catches up. Run
|
||||
`openclaw models list --provider opencode-go` for the current model list.
|
||||
|
||||
As of the bundled pi catalog, the provider includes:
|
||||
The provider includes:
|
||||
|
||||
| Model ref | Name |
|
||||
| -------------------------- | --------------------- |
|
||||
| `opencode-go/glm-5` | GLM-5 |
|
||||
| `opencode-go/glm-5.1` | GLM-5.1 |
|
||||
| `opencode-go/kimi-k2.5` | Kimi K2.5 |
|
||||
| `opencode-go/kimi-k2.6` | Kimi K2.6 (3x limits) |
|
||||
| `opencode-go/mimo-v2-omni` | MiMo V2 Omni |
|
||||
| `opencode-go/mimo-v2-pro` | MiMo V2 Pro |
|
||||
| `opencode-go/minimax-m2.5` | MiniMax M2.5 |
|
||||
| `opencode-go/minimax-m2.7` | MiniMax M2.7 |
|
||||
| `opencode-go/qwen3.5-plus` | Qwen3.5 Plus |
|
||||
| `opencode-go/qwen3.6-plus` | Qwen3.6 Plus |
|
||||
| Model ref | Name |
|
||||
| ------------------------------- | --------------------- |
|
||||
| `opencode-go/glm-5` | GLM-5 |
|
||||
| `opencode-go/glm-5.1` | GLM-5.1 |
|
||||
| `opencode-go/kimi-k2.5` | Kimi K2.5 |
|
||||
| `opencode-go/kimi-k2.6` | Kimi K2.6 (3x limits) |
|
||||
| `opencode-go/deepseek-v4-pro` | DeepSeek V4 Pro |
|
||||
| `opencode-go/deepseek-v4-flash` | DeepSeek V4 Flash |
|
||||
| `opencode-go/mimo-v2-omni` | MiMo V2 Omni |
|
||||
| `opencode-go/mimo-v2-pro` | MiMo V2 Pro |
|
||||
| `opencode-go/minimax-m2.5` | MiniMax M2.5 |
|
||||
| `opencode-go/minimax-m2.7` | MiniMax M2.7 |
|
||||
| `opencode-go/qwen3.5-plus` | Qwen3.5 Plus |
|
||||
| `opencode-go/qwen3.6-plus` | Qwen3.6 Plus |
|
||||
|
||||
## Getting started
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ describe("opencode-go provider plugin", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("leaves OpenCode Go models to Pi's built-in registry", async () => {
|
||||
it("keeps OpenCode Go catalog coverage aligned with upstream", async () => {
|
||||
const provider = await registerSingleProviderPlugin(plugin);
|
||||
expect(provider.catalog).toBeUndefined();
|
||||
|
||||
@@ -62,6 +62,27 @@ describe("opencode-go provider plugin", () => {
|
||||
"qwen3.5-plus",
|
||||
"qwen3.6-plus",
|
||||
]);
|
||||
const supplemental = await provider.augmentModelCatalog?.({
|
||||
entries: [...models.values()].map((model) => ({
|
||||
provider: model.provider,
|
||||
id: model.id,
|
||||
name: model.name,
|
||||
})),
|
||||
} as never);
|
||||
expect(supplemental).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
provider: "opencode-go",
|
||||
id: "deepseek-v4-pro",
|
||||
name: "DeepSeek V4 Pro",
|
||||
}),
|
||||
expect.objectContaining({
|
||||
provider: "opencode-go",
|
||||
id: "deepseek-v4-flash",
|
||||
name: "DeepSeek V4 Flash",
|
||||
}),
|
||||
]),
|
||||
);
|
||||
|
||||
expect(models.get("kimi-k2.6")).toMatchObject({
|
||||
api: "openai-completions",
|
||||
@@ -92,6 +113,19 @@ describe("opencode-go provider plugin", () => {
|
||||
contextWindow: 262_144,
|
||||
maxTokens: 128_000,
|
||||
});
|
||||
expect(
|
||||
provider.resolveDynamicModel?.({
|
||||
modelId: "deepseek-v4-pro",
|
||||
} as never),
|
||||
).toMatchObject({
|
||||
id: "deepseek-v4-pro",
|
||||
api: "anthropic-messages",
|
||||
provider: "opencode-go",
|
||||
baseUrl: "https://opencode.ai/zen/go",
|
||||
reasoning: true,
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
});
|
||||
});
|
||||
|
||||
it("canonicalizes stale OpenCode Go base URLs", async () => {
|
||||
|
||||
@@ -3,7 +3,11 @@ import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { PASSTHROUGH_GEMINI_REPLAY_HOOKS } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
import { applyOpencodeGoConfig, OPENCODE_GO_DEFAULT_MODEL_REF } from "./api.js";
|
||||
import { opencodeGoMediaUnderstandingProvider } from "./media-understanding-provider.js";
|
||||
import { normalizeOpencodeGoBaseUrl } from "./provider-catalog.js";
|
||||
import {
|
||||
listOpencodeGoSupplementalModelCatalogEntries,
|
||||
normalizeOpencodeGoBaseUrl,
|
||||
resolveOpencodeGoSupplementalModel,
|
||||
} from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "opencode-go";
|
||||
export default definePluginEntry({
|
||||
@@ -60,6 +64,8 @@ export default definePluginEntry({
|
||||
}
|
||||
: undefined;
|
||||
},
|
||||
resolveDynamicModel: ({ modelId }) => resolveOpencodeGoSupplementalModel(modelId),
|
||||
augmentModelCatalog: () => listOpencodeGoSupplementalModelCatalogEntries(),
|
||||
...PASSTHROUGH_GEMINI_REPLAY_HOOKS,
|
||||
isModernModelRef: () => true,
|
||||
});
|
||||
|
||||
@@ -1,6 +1,69 @@
|
||||
import type { ModelCatalogEntry } from "openclaw/plugin-sdk/agent-runtime";
|
||||
import type { ProviderRuntimeModel } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { normalizeModelCompat } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
|
||||
const PROVIDER_ID = "opencode-go";
|
||||
|
||||
export const OPENCODE_GO_OPENAI_BASE_URL = "https://opencode.ai/zen/go/v1";
|
||||
export const OPENCODE_GO_ANTHROPIC_BASE_URL = "https://opencode.ai/zen/go";
|
||||
|
||||
const OPENCODE_GO_SUPPLEMENTAL_MODELS = (
|
||||
[
|
||||
{
|
||||
id: "deepseek-v4-pro",
|
||||
name: "DeepSeek V4 Pro",
|
||||
api: "anthropic-messages",
|
||||
provider: PROVIDER_ID,
|
||||
baseUrl: OPENCODE_GO_ANTHROPIC_BASE_URL,
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: {
|
||||
input: 1.74,
|
||||
output: 3.48,
|
||||
cacheRead: 0.145,
|
||||
cacheWrite: 0,
|
||||
},
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
},
|
||||
{
|
||||
id: "deepseek-v4-flash",
|
||||
name: "DeepSeek V4 Flash",
|
||||
api: "anthropic-messages",
|
||||
provider: PROVIDER_ID,
|
||||
baseUrl: OPENCODE_GO_ANTHROPIC_BASE_URL,
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: {
|
||||
input: 0.14,
|
||||
output: 0.28,
|
||||
cacheRead: 0.028,
|
||||
cacheWrite: 0,
|
||||
},
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
},
|
||||
] satisfies ProviderRuntimeModel[]
|
||||
).map((model) => normalizeModelCompat(model));
|
||||
|
||||
export function listOpencodeGoSupplementalModelCatalogEntries(): ModelCatalogEntry[] {
|
||||
return OPENCODE_GO_SUPPLEMENTAL_MODELS.map((model) => ({
|
||||
provider: model.provider,
|
||||
id: model.id,
|
||||
name: model.name,
|
||||
reasoning: model.reasoning,
|
||||
input: model.input,
|
||||
contextWindow: model.contextWindow,
|
||||
}));
|
||||
}
|
||||
|
||||
export function resolveOpencodeGoSupplementalModel(
|
||||
modelId: string,
|
||||
): ProviderRuntimeModel | undefined {
|
||||
const normalizedModelId = modelId.trim().toLowerCase();
|
||||
return OPENCODE_GO_SUPPLEMENTAL_MODELS.find((model) => model.id === normalizedModelId);
|
||||
}
|
||||
|
||||
function normalizeBaseUrl(baseUrl: string | undefined): string {
|
||||
return (baseUrl ?? "").trim().replace(/\/+$/, "");
|
||||
}
|
||||
|
||||
@@ -528,6 +528,51 @@ describe("modelsListCommand forward-compat", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("includes provider-owned supplemental catalog rows with provider filters", async () => {
|
||||
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
|
||||
mocks.loadModelRegistry.mockResolvedValueOnce({
|
||||
models: [],
|
||||
availableKeys: new Set(["opencode-go/deepseek-v4-pro"]),
|
||||
registry: {
|
||||
getAll: () => [],
|
||||
},
|
||||
});
|
||||
mocks.loadModelCatalog.mockResolvedValueOnce([
|
||||
{
|
||||
provider: "opencode-go",
|
||||
id: "deepseek-v4-pro",
|
||||
name: "DeepSeek V4 Pro",
|
||||
input: ["text"],
|
||||
contextWindow: 1_000_000,
|
||||
},
|
||||
]);
|
||||
mocks.resolveModelWithRegistry.mockImplementation(
|
||||
({ provider, modelId }: { provider: string; modelId: string }) =>
|
||||
provider === "opencode-go" && modelId === "deepseek-v4-pro"
|
||||
? {
|
||||
provider,
|
||||
id: modelId,
|
||||
name: "DeepSeek V4 Pro",
|
||||
api: "anthropic-messages",
|
||||
baseUrl: "https://opencode.ai/zen/go",
|
||||
input: ["text"],
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
}
|
||||
: undefined,
|
||||
);
|
||||
const runtime = createRuntime();
|
||||
|
||||
await modelsListCommand({ all: true, provider: "opencode-go", json: true }, runtime as never);
|
||||
|
||||
expect(lastPrintedRows<{ key: string }>()).toEqual([
|
||||
expect.objectContaining({
|
||||
key: "opencode-go/deepseek-v4-pro",
|
||||
}),
|
||||
]);
|
||||
});
|
||||
|
||||
it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => {
|
||||
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
|
||||
mocks.loadModelRegistry.mockResolvedValueOnce({
|
||||
|
||||
@@ -77,7 +77,7 @@ export async function appendAllModelRowSources(
|
||||
seenKeys,
|
||||
});
|
||||
|
||||
if (params.modelRegistry && !params.context.filter.provider) {
|
||||
if (params.modelRegistry) {
|
||||
await appendCatalogSupplementRows({
|
||||
rows: params.rows,
|
||||
modelRegistry: params.modelRegistry,
|
||||
|
||||
Reference in New Issue
Block a user