mirror of https://fastgit.cc/github.com/openclaw/openclaw
synced 2026-04-30 22:12:32 +08:00
chore: bump version to 2026.4.24
@@ -65,8 +65,8 @@ android {
         applicationId = "ai.openclaw.app"
         minSdk = 31
         targetSdk = 36
-        versionCode = 2026042300
-        versionName = "2026.4.23"
+        versionCode = 2026042400
+        versionName = "2026.4.24"
         ndk {
             // Support all major ABIs — native libs are tiny (~47 KB per ABI)
             abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64")
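The numeric versionCode follows a date-based scheme derived from the CalVer string (2026.4.24 maps to 2026042400: year, zero-padded month and day, plus a two-digit counter). A minimal sketch of that derivation, assuming this inferred scheme; the helper name and counter handling are hypothetical, not the repo's actual tooling:

```typescript
// Hypothetical helper: derive the date-based build number from a CalVer string.
// Inferred scheme (2026.4.23 -> 2026042300): YYYY, zero-padded MM and DD, 2-digit counter.
function buildNumber(version: string, counter = 0): number {
  const [year, month, day] = version.split(".").map(Number);
  const pad2 = (n: number) => String(n).padStart(2, "0");
  return Number(`${year}${pad2(month)}${pad2(day)}${pad2(counter)}`);
}

console.log(buildNumber("2026.4.24")); // 2026042400
```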
@@ -1,5 +1,9 @@
 # OpenClaw iOS Changelog
 
+## 2026.4.24 - 2026-04-24
+
+Maintenance update for the current OpenClaw development release.
+
 ## 2026.4.23 - 2026-04-23
 
 Maintenance update for the current OpenClaw development release.
@@ -2,8 +2,8 @@
 // Source of truth: apps/ios/version.json
 // Generated by scripts/ios-sync-versioning.ts.
 
-OPENCLAW_IOS_VERSION = 2026.4.23
-OPENCLAW_MARKETING_VERSION = 2026.4.23
+OPENCLAW_IOS_VERSION = 2026.4.24
+OPENCLAW_MARKETING_VERSION = 2026.4.24
 OPENCLAW_BUILD_VERSION = 1
 
 #include? "../build/Version.xcconfig"
@@ -1,3 +1,3 @@
 {
-  "version": "2026.4.23"
+  "version": "2026.4.24"
 }
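The xcconfig hunk above notes that it is generated from apps/ios/version.json by scripts/ios-sync-versioning.ts. A minimal sketch of what such a sync step could look like; the output path and the exact emitted fields are assumptions based on the values in this diff, not the real script:

```typescript
import { readFileSync, writeFileSync } from "node:fs";

// Read the single source of truth for the iOS version.
const { version } = JSON.parse(
  readFileSync("apps/ios/version.json", "utf8"),
) as { version: string };

// Emit the xcconfig consumed by the Xcode project (output path is a guess).
writeFileSync(
  "apps/ios/Version.xcconfig",
  [
    "// Source of truth: apps/ios/version.json",
    "// Generated by scripts/ios-sync-versioning.ts.",
    "",
    `OPENCLAW_IOS_VERSION = ${version}`,
    `OPENCLAW_MARKETING_VERSION = ${version}`,
    "OPENCLAW_BUILD_VERSION = 1",
    "",
    '#include? "../build/Version.xcconfig"',
    "",
  ].join("\n"),
);
```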
@@ -15,9 +15,9 @@
 	<key>CFBundlePackageType</key>
 	<string>APPL</string>
 	<key>CFBundleShortVersionString</key>
-	<string>2026.4.23</string>
+	<string>2026.4.24</string>
 	<key>CFBundleVersion</key>
-	<string>2026042300</string>
+	<string>2026042400</string>
 	<key>CFBundleIconFile</key>
 	<string>OpenClaw</string>
 	<key>CFBundleURLTypes</key>
@@ -1,4 +1,4 @@
-6b142e6a8aa513ccd8f9cfbf7e95fa4919fb6fca7aeaa841f57ad9e39e8901a9 config-baseline.json
-a4e167f169db58d71c385a31fa2b980772f9fee963e70dd9553f63536cae5aed config-baseline.core.json
+d3b5638e205a94e40d07aa1830c8d57135df18ff9388fb7d72ee84c791ac293f config-baseline.json
+bf00f7910d8f0d8e12592e8a1c6bd0397f8e62fef2c11eb0cbd3b3a3e2a78ffe config-baseline.core.json
 22d7cd6d8279146b2d79c9531a55b80b52a2c99c81338c508104729154fdd02d config-baseline.channel.json
 a91304e3566ecc8906f199b88a2e38eaee86130aad799bf4d62921e2f0ddc1b5 config-baseline.plugin.json
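The baseline hashes above are SHA-256 digests of the regenerated config snapshots. A minimal sketch of recomputing the checksum lines with Node's crypto module; the file names come from the diff, everything else is assumed:

```typescript
import { createHash } from "node:crypto";
import { readFileSync } from "node:fs";

// Recompute one "<sha256> <file>" line per baseline snapshot.
for (const file of [
  "config-baseline.json",
  "config-baseline.core.json",
  "config-baseline.channel.json",
  "config-baseline.plugin.json",
]) {
  const digest = createHash("sha256").update(readFileSync(file)).digest("hex");
  console.log(`${digest} ${file}`);
}
```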
@@ -16,7 +16,7 @@ Feishu/Lark is an all-in-one collaboration platform where teams chat, share docu
 
 ## Quick start
 
-> **Requires OpenClaw 2026.4.23 or above.** Run `openclaw --version` to check. Upgrade with `openclaw update`.
+> **Requires OpenClaw 2026.4.24 or above.** Run `openclaw --version` to check. Upgrade with `openclaw update`.
 
 <Steps>
   <Step title="Run the channel setup wizard">
@@ -12,7 +12,7 @@
     "openclaw": "workspace:*"
   },
   "peerDependencies": {
-    "openclaw": ">=2026.4.23"
+    "openclaw": ">=2026.4.24"
   },
   "peerDependenciesMeta": {
     "openclaw": {
@@ -40,10 +40,10 @@
   "install": {
     "npmSpec": "@openclaw/feishu",
     "defaultChoice": "npm",
-    "minHostVersion": ">=2026.4.23"
+    "minHostVersion": ">=2026.4.24"
   },
   "compat": {
-    "pluginApi": ">=2026.4.23"
+    "pluginApi": ">=2026.4.24"
   },
   "build": {
     "openclawVersion": "2026.4.20"
@@ -14,7 +14,7 @@
     "openclaw": "workspace:*"
   },
   "peerDependencies": {
-    "openclaw": ">=2026.4.23"
+    "openclaw": ">=2026.4.24"
   },
   "peerDependenciesMeta": {
     "openclaw": {
@@ -53,10 +53,10 @@
   "install": {
     "npmSpec": "@openclaw/whatsapp",
     "defaultChoice": "npm",
-    "minHostVersion": ">=2026.4.23"
+    "minHostVersion": ">=2026.4.24"
   },
   "compat": {
-    "pluginApi": ">=2026.4.23"
+    "pluginApi": ">=2026.4.24"
   },
   "bundle": {
     "stageRuntimeDependencies": true
@@ -1,6 +1,6 @@
 {
   "name": "openclaw",
-  "version": "2026.4.23",
+  "version": "2026.4.24",
   "description": "Multi-channel AI gateway with extensible messaging integrations",
   "keywords": [],
   "homepage": "https://github.com/openclaw/openclaw#readme",
@@ -4176,12 +4176,19 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
         },
         contextSize: {
           anyOf: [
-            { type: "integer", exclusiveMinimum: 0, maximum: 9007199254740991 },
-            { type: "string", const: "auto" },
+            {
+              type: "integer",
+              exclusiveMinimum: 0,
+              maximum: 9007199254740991,
+            },
+            {
+              type: "string",
+              const: "auto",
+            },
           ],
           title: "Local Embedding Context Size",
           description:
-            'Context window size passed to node-llama-cpp when creating the embedding context (default: 4096). 4096 safely covers typical memory-search chunks (128\u2013512 tokens) while keeping non-weight VRAM bounded. Lower to 1024\u20132048 on resource-constrained hosts. Set to "auto" to let node-llama-cpp use the model\'s trained maximum \u2014 not recommended for large models (e.g. Qwen3-Embedding-8B trained on 40\u202f960 tokens can push VRAM from ~8.8\u202fGB to ~32\u202fGB).',
+            'Context window size passed to node-llama-cpp when creating the embedding context (default: 4096). 4096 safely covers typical memory-search chunks (128–512 tokens) while keeping non-weight VRAM bounded. Lower to 1024–2048 on resource-constrained hosts. Set to "auto" to let node-llama-cpp use the model\'s trained maximum — not recommended for large models (e.g. Qwen3-Embedding-8B trained on 40 960 tokens can push VRAM from ~8.8 GB to ~32 GB).',
         },
       },
       additionalProperties: false,
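Per the anyOf above, contextSize accepts either a positive integer or the literal string "auto". A hypothetical config excerpt showing both accepted shapes; the key path is taken from the help-map entry later in this diff, and the surrounding object is illustrative only:

```typescript
// agents.defaults.memorySearch.local.contextSize accepts two shapes:
const bounded: { contextSize: number | "auto" } = {
  contextSize: 2048, // positive integer: caps non-weight VRAM on constrained hosts
};

const trainedMax: { contextSize: number | "auto" } = {
  contextSize: "auto", // model's trained maximum; VRAM can balloon on large models
};
```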
@@ -6071,8 +6078,15 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
         },
         contextSize: {
           anyOf: [
-            { type: "integer", exclusiveMinimum: 0, maximum: 9007199254740991 },
-            { type: "string", const: "auto" },
+            {
+              type: "integer",
+              exclusiveMinimum: 0,
+              maximum: 9007199254740991,
+            },
+            {
+              type: "string",
+              const: "auto",
+            },
           ],
         },
       },
@@ -25171,7 +25185,7 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
   },
   "agents.defaults.memorySearch.local.contextSize": {
     label: "Local Embedding Context Size",
-    help: 'Context window size passed to node-llama-cpp when creating the embedding context (default: 4096). 4096 safely covers typical memory-search chunks (128\u2013512 tokens) while keeping non-weight VRAM bounded. Lower to 1024\u20132048 on resource-constrained hosts. Set to "auto" to let node-llama-cpp use the model\'s trained maximum \u2014 not recommended for large models (e.g. Qwen3-Embedding-8B trained on 40\u202f960 tokens can push VRAM from ~8.8\u202fGB to ~32\u202fGB).',
+    help: 'Context window size passed to node-llama-cpp when creating the embedding context (default: 4096). 4096 safely covers typical memory-search chunks (128–512 tokens) while keeping non-weight VRAM bounded. Lower to 1024–2048 on resource-constrained hosts. Set to "auto" to let node-llama-cpp use the model\'s trained maximum — not recommended for large models (e.g. Qwen3-Embedding-8B trained on 40 960 tokens can push VRAM from ~8.8 GB to ~32 GB).',
     tags: ["advanced"],
   },
   "agents.defaults.memorySearch.store.path": {
@@ -27755,6 +27769,6 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
       tags: ["advanced", "url-secret"],
     },
   },
-  version: "2026.4.23",
+  version: "2026.4.24",
   generatedAt: "2026-03-22T21:17:33.302Z",
 };