Compare commits

..

162 Commits

Author SHA1 Message Date
opencode-agent[bot]
7ed6a69300 Apply PR #25034: feat: default HTTP API backend to on for dev/beta channels 2026-05-02 05:20:28 +00:00
opencode-agent[bot]
96e046005a Apply PR #24512: Refactor v2 session events as schemas 2026-05-02 05:20:28 +00:00
opencode-agent[bot]
56f6f28492 Apply PR #24229: fix: lazy session error schema 2026-05-02 05:19:28 +00:00
opencode-agent[bot]
f92425722a Apply PR #24174: feat(core): add background subagent support 2026-05-02 05:19:28 +00:00
opencode-agent[bot]
a9b70028a3 Apply PR #24149: feat(core): add scout agent for repo research 2026-05-02 05:19:27 +00:00
opencode-agent[bot]
243497740b Apply PR #22753: core: move plugin initialisation to config layer override 2026-05-02 05:18:00 +00:00
opencode-agent[bot]
1abbeaf70c Apply PR #21537: fix(app): remove pierre diff virtualization 2026-05-02 05:16:32 +00:00
opencode-agent[bot]
098b06b27c Apply PR #20039: feat: bash->shell tool + pwsh/powershell/cmd/bash specific tool definitions so agents work better 2026-05-02 05:15:58 +00:00
opencode-agent[bot]
1c8090e417 Apply PR #19067: ci: only build electron desktop 2026-05-02 05:14:24 +00:00
opencode-agent[bot]
5d5484db6e Apply PR #12633: feat(tui): add auto-accept mode for permission requests 2026-05-02 05:14:23 +00:00
opencode-agent[bot]
c1da6adbbd Apply PR #11710: feat: Add the ability to include cleared prompts in the history, toggled by a KV-persisted command palette item (resolves #11489) 2026-05-02 05:10:02 +00:00
opencode-agent[bot]
becf57ee6a chore: generate 2026-05-02 04:03:59 +00:00
Dax Raad
cf9e798e48 tui: close open dialogs when navigating to a new session to prevent UI state from lingering 2026-05-01 19:56:39 -04:00
Dax Raad
c36703b4fa core: add unique IDs to all events for reliable tracking and debugging
Events now include unique identifiers at the payload level, making it easier to trace event flow through the system and debug issues. Session events have been restructured so IDs are consistent across the event bus, database projections, and API responses.

Sessions now persist which agent and model were used, preserving this context in session history. Agent and model switches are now tracked as dedicated message types in sessions, providing a clearer timeline of how the conversation evolved.
2026-05-01 18:47:55 -04:00
Dax Raad
cc8431e21f core: track agent and model used in each session
Store the active agent and model in the session table so users can see which configuration was used when browsing session history. This helps identify sessions that used specific agents or models for easier filtering and organization.
2026-05-01 18:47:55 -04:00
Dax Raad
cd2535fb59 core: simplify Session.Info schema to empty struct for flexible event handling
This change removes the predefined fields from Session.Info to allow more
dynamic event-driven session data. Instead of fixed schema fields, session
information will be populated through the event system, enabling better
support for evolving session states without schema migrations.

The empty struct serves as a base that can be extended through the event
model, making it easier to add new session attributes without modifying
core schema definitions.
2026-05-01 18:47:55 -04:00
Dax Raad
f81c00647f core: expose complete session metadata schema for agent session introspection 2026-05-01 18:47:55 -04:00
Dax Raad
2b75c10b60 core: add session listing API with filtering and improved pagination
Users can now browse sessions through the new /api/session endpoint with filters for directory, workspace, date range, and title search. Pagination cursors are now labeled 'previous' and 'next' instead of 'before' and 'after' to make navigation direction clearer. Both session lists and message history now support explicit 'asc' or 'desc' ordering so users can choose between newest-first or oldest-first views. The TUI session view now displays messages with the newest at the bottom, matching standard chat interfaces.
2026-05-01 18:47:55 -04:00
Dax Raad
175821b782 core: add pagination support for session messages with cursor-based navigation
Enables loading messages in chunks for better performance with long conversations.
Users can now navigate through large session histories without loading all messages at once.
Includes before/after cursors for bi-directional pagination.
2026-05-01 18:47:55 -04:00
Dax Raad
af96af716f core: simplify message history pagination with unified cursor API
Replace separate before/after query parameters with a single cursor that
carries direction info. Chat clients can now use 'start' or 'end' keywords
to jump to the beginning or newest messages, and navigate history with a
single cursor parameter instead of managing multiple pagination states.
2026-05-01 18:47:55 -04:00
Dax Raad
11ef239dbe refactor(session): define v2 session event schemas 2026-05-01 18:47:55 -04:00
Shoubhit Dash
5015536050 chore: remove reference examples 2026-05-02 00:47:00 +05:30
Shoubhit Dash
347f4c9af2 chore: add reference examples 2026-05-02 00:44:57 +05:30
Shoubhit Dash
96a6256258 feat: add scout references 2026-05-02 00:44:41 +05:30
Shoubhit Dash
a84edc224f fix(tui): show background task progress 2026-05-01 19:05:16 +05:30
Shoubhit Dash
2f919b8bc7 refactor(task): use background jobs 2026-05-01 19:05:06 +05:30
Shoubhit Dash
085fac7c2c feat(background): add job service 2026-05-01 19:04:56 +05:30
Shoubhit Dash
b26fe0d357 Merge branch 'dev' into nxl/background-subagents 2026-05-01 15:29:32 +05:30
Shoubhit Dash
365386fac0 Merge remote-tracking branch 'origin/dev' into nxl/scout-repo-tools
# Conflicts:
#	packages/opencode/src/agent/agent.ts
#	packages/opencode/src/cli/cmd/github.ts
#	packages/opencode/src/config/config.ts
#	packages/opencode/src/config/permission.ts
#	packages/opencode/src/global/index.ts
#	packages/opencode/src/tool/registry.ts
2026-05-01 14:59:01 +05:30
Brendan Allan
7423b4872c rename desktop-darwin to desktop-mac 2026-05-01 11:54:56 +08:00
Brendan Allan
9b85d2cbb4 Merge branch 'dev' into brendan/lazy-init-plugins 2026-05-01 11:48:58 +08:00
Brendan Allan
4a86c2b77a Merge branch 'dev' into brendan/lazy-init-plugins 2026-05-01 11:47:50 +08:00
Kit Langton
1e5cc6da19 feat: default HTTP API backend to on for dev/beta channels
Turn on the experimental effect-httpapi server backend by default for
dev, beta, and local installations so internal users exercise the new
backend. Stable (prod/latest) installs remain on the legacy hono backend.

OPENCODE_EXPERIMENTAL_HTTPAPI=true/1 still force-enables on stable, and
OPENCODE_EXPERIMENTAL_HTTPAPI=false/0 disables it as an escape hatch for
dev/beta users.
2026-04-29 21:35:48 -04:00
Brendan Allan
92d44ce72e create git token after downloading artifacts 2026-04-29 14:25:36 +08:00
LukeParkerDev
529a6ed10f . 2026-04-29 13:00:39 +10:00
Brendan Allan
a3cb00a1ab try improve 2026-04-29 10:17:54 +08:00
LukeParkerDev
20c3461a80 f 2026-04-29 10:10:58 +10:00
LukeParkerDev
f8687190f2 . 2026-04-29 09:47:58 +10:00
LukeParkerDev
d5ebfad838 . 2026-04-29 09:34:28 +10:00
LukeParkerDev
c16a0e08ae Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-29 09:25:48 +10:00
LukeParkerDev
70ca5727a6 noooooooooo breaking changes 2026-04-29 09:06:05 +10:00
LukeParkerDev
9d9830b7df no breaking changes 2026-04-29 08:51:06 +10:00
Brendan Allan
6a7b415894 read signature from sig file 2026-04-28 19:29:39 +08:00
Brendan Allan
682c4eecd6 don't do tag lookup istg 2026-04-28 15:41:05 +08:00
Brendan Allan
9fbff1bc7d Merge branch 'dev' into brendan/desktop-electron-only 2026-04-28 14:05:53 +08:00
Brendan Allan
dced9c9baa lookup release by id not name 2026-04-28 14:03:15 +08:00
Brendan Allan
55e7bb08d0 remove build-tauri 2026-04-28 12:31:53 +08:00
Brendan Allan
6620054fe1 fix error 2026-04-28 12:28:09 +08:00
Brendan Allan
db830c636b rename opencode-election-* to opencode-desktop-* 2026-04-28 12:28:09 +08:00
Brendan Allan
04c03fa612 ci: only build electron desktop 2026-04-28 12:28:09 +08:00
LukeParkerDev
6ac33ddc4d test: update experimental api shell assertions 2026-04-27 14:30:41 +10:00
LukeParkerDev
ea277baeb7 css 2026-04-27 14:23:37 +10:00
LukeParkerDev
b1d9c57655 Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-27 14:15:07 +10:00
LukeParkerDev
5a7e69b325 Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-27 08:59:39 +10:00
LukeParkerDev
344dab3839 Update next.test.ts 2026-04-26 09:59:46 +10:00
LukeParkerDev
9dde86acbe Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-26 09:56:21 +10:00
Luke Parker
73ee7ae702 Merge branch 'dev' into refactor-shells 2026-04-25 15:23:49 +10:00
LukeParkerDev
2051cadcb8 Update prompt.ts 2026-04-25 15:18:30 +10:00
LukeParkerDev
790d181d8a slight accuracy 2026-04-25 11:37:32 +10:00
LukeParkerDev
ecac4c4e2a split prompt/definition from logic 2026-04-25 11:32:18 +10:00
LukeParkerDev
f89955a4e3 Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-25 11:18:01 +10:00
LukeParkerDev
428b0c46a7 cmd 2026-04-25 11:15:31 +10:00
LukeParkerDev
341b8e78c9 perms 2026-04-25 11:11:42 +10:00
LukeParkerDev
d704110e52 fix: lazy session error schema 2026-04-25 10:04:49 +10:00
Shoubhit Dash
80aeb78b38 Merge branch 'dev' into nxl/background-subagents 2026-04-24 20:32:04 +05:30
Shoubhit Dash
601fe03a3a refactor(task): simplify effect wrappers 2026-04-24 20:30:53 +05:30
Shoubhit Dash
3f4b9d9ef4 test(task): use branded session id in schema test 2026-04-24 20:21:02 +05:30
Shoubhit Dash
1357bb984f style: fix background task formatting 2026-04-24 20:20:04 +05:30
Shoubhit Dash
ecde8ab363 test(task): update parameter schema snapshot 2026-04-24 20:20:04 +05:30
Shoubhit Dash
7970130720 fix(ui): label background task cards 2026-04-24 20:08:32 +05:30
Shoubhit Dash
971c837ad4 feat(task): add background subagent support 2026-04-24 20:08:24 +05:30
Shoubhit Dash
b633a8b1c8 refactor(scout): fold github remote parsing into repository 2026-04-24 19:03:56 +05:30
Shoubhit Dash
c750df3e86 fix(scout): use effect schema tool params 2026-04-24 18:58:28 +05:30
Shoubhit Dash
3bf0c79396 Merge branch 'dev' into nxl/scout-repo-tools 2026-04-24 16:39:56 +05:30
Shoubhit Dash
35a19df57d fix(scout): widen repo tool schema types 2026-04-24 16:38:37 +05:30
Shoubhit Dash
343e68853c fix(scout): type repo tool definitions 2026-04-24 16:34:45 +05:30
Shoubhit Dash
0db04ef69f docs: add scout agent docs 2026-04-24 16:29:31 +05:30
Shoubhit Dash
1e0246cdc8 feat(scout): add repo research tools 2026-04-24 16:29:19 +05:30
Ariane Emory
09e4e5a184 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-04-23 21:55:13 -04:00
LukeParkerDev
4f8ff6ab53 . 2026-04-24 08:23:18 +10:00
LukeParkerDev
7266b48ca0 Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-24 08:11:09 +10:00
LukeParkerDev
26d77add77 edges 2026-04-24 08:03:16 +10:00
LukeParkerDev
cffb8eb1e3 . 2026-04-24 07:54:08 +10:00
LukeParkerDev
0d500a735f Create todo.spec.ts 2026-04-24 07:44:06 +10:00
LukeParkerDev
6d66973fd5 clean 2026-04-24 07:39:19 +10:00
LukeParkerDev
3e30068907 refactor: make shell the canonical tool internals 2026-04-23 19:46:00 +10:00
LukeParkerDev
b75f831eaa . 2026-04-23 17:34:57 +10:00
LukeParkerDev
f9a633bd0b Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-23 17:31:07 +10:00
Brendan Allan
e041605b40 Merge branch 'dev' into brendan/lazy-init-plugins 2026-04-21 12:33:02 +08:00
LukeParkerDev
f280e7e69c fix: defer MessageV2.Assistant.shape access to break circular dep in compiled binary 2026-04-20 16:08:42 +10:00
Brendan Allan
b265742fd0 Merge branch 'dev' into brendan/lazy-init-plugins 2026-04-19 21:15:45 +08:00
Brendan Allan
b1db69fdf7 fix other commands 2026-04-17 17:03:53 +08:00
Brendan Allan
031766efa0 fix tui 2026-04-17 15:44:01 +08:00
Brendan Allan
dc6d39551c address feedback 2026-04-17 15:44:01 +08:00
Brendan Allan
e287569f82 rename layer 2026-04-17 15:44:01 +08:00
Brendan Allan
14eacb4019 core: move plugin initialisation to config layer override 2026-04-17 15:44:01 +08:00
Ariane Emory
731c1e58f2 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-04-16 20:22:02 -04:00
Ariane Emory
c411d37484 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-04-12 04:22:06 -04:00
Adam
cb29742b57 fix(app): remove pierre diff virtualization 2026-04-08 13:16:45 -05:00
LukeParkerDev
ee0884ad31 fix(shell): preserve legacy bash compatibility
Keep mixed shell/bash permission configs ordered correctly and treat --tools bash as the legacy alias during agent creation.
2026-04-08 15:14:45 +10:00
LukeParkerDev
f1547de528 ok 2026-04-08 14:35:16 +10:00
LukeParkerDev
39088e1a1e Merge remote-tracking branch 'upstream/dev' into refactor-shells
# Conflicts:
#	packages/app/e2e/prompt/prompt-shell.spec.ts
#	packages/opencode/src/tool/bash.ts
#	packages/opencode/src/tool/registry.ts
2026-04-08 13:11:43 +10:00
Ariane Emory
97a94571a4 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-04-03 09:19:12 -04:00
LukeParkerDev
25551172c9 fix(shell): avoid abort hangs and utf8 corruption
Attach shell process listeners before handling already-aborted tool signals so canceled runs always settle, and decode shell output as UTF-8 to preserve multibyte characters across chunk boundaries. Also lazy-load shell-specific parsers and hoist command sets so parsing work stays focused on the active shell.
2026-04-03 16:04:41 +10:00
LukeParkerDev
32ec3666b7 fix(shell): keep shell config consistent
Treat shell access as one logical toggle during agent creation and apply bash compatibility rules before explicit per-shell overrides. This avoids disabling the active Windows shell unexpectedly and keeps pwsh and powershell overrides deterministic.
2026-04-03 15:08:30 +10:00
LukeParkerDev
2eb9ae4d34 refactor(shell): centralize shell tool identity
Move shell tool ID checks behind shared helpers so runtime code and tests stop duplicating bash, pwsh, and powershell branches. This keeps shell-specific behavior aligned across consumers and makes follow-on shell changes less error-prone.
2026-04-03 14:56:40 +10:00
LukeParkerDev
baf476f431 test(shell): handle nullable exit metadata
Make the shell exit assertions typecheck cleanly while keeping the PowerShell regression coverage. Remove the accidentally committed .opencode package-lock so generated state does not ship in the branch.
2026-04-03 14:29:04 +10:00
LukeParkerDev
23e77fd9bc fix(shell): preserve powershell exit codes
Use a multiline PowerShell trailer so native Windows commands keep their actual exit status without masking cmdlet failures, and add focused regression coverage. Remove the accidentally committed .opencode package-lock to keep generated state out of the branch.
2026-04-03 14:27:03 +10:00
LukeParkerDev
6ad6358eb1 fix: render pwsh and powershell tools correctly in UI
This fixes regressions from splitting the shell tools where powershell commands were missing their native exit codes and their correct UI rendering.
2026-04-03 14:01:13 +10:00
LukeParkerDev
95577c75a3 fix(config): preserve bash permission compatibility
Keep legacy tools.bash migration mapped to the single bash permission since the permission layer already expands it to pwsh and powershell. This preserves the backward-compatible config shape while retaining shell compatibility.
2026-04-03 13:43:37 +10:00
LukeParkerDev
f21bf4a62a Merge remote-tracking branch 'upstream/dev' into refactor-shells 2026-04-03 13:34:16 +10:00
LukeParkerDev
676519d79d refactor: apply positive guidance and parameterize shell commands in prompt template 2026-03-30 20:42:42 +10:00
LukeParkerDev
48f9082d0a refactor: use positive tone in shell guidance prompts 2026-03-30 20:24:49 +10:00
LukeParkerDev
51ebba2975 refactor: add shell-specific guidance to each tool prompt 2026-03-30 20:18:50 +10:00
LukeParkerDev
3e26c3ae83 refactor: extract shell tool factory to eliminate duplication 2026-03-30 20:15:58 +10:00
LukeParkerDev
67dfbcbcfd fix: use dynamic imports for tree-sitter and shell-aware metadata tags 2026-03-30 20:12:36 +10:00
LukeParkerDev
048ac63abd refactor: split monolithic bash tool into separate bash/pwsh/powershell tools 2026-03-30 20:08:27 +10:00
Ariane Emory
6652585a7f Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-24 11:17:40 -04:00
Ariane Emory
532b64c0d5 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-24 07:43:03 -04:00
Ariane Emory
eec4c775a7 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-23 21:10:21 -04:00
Ariane Emory
01e350449c Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-20 19:12:18 -04:00
Dax
5792a80a8c Merge branch 'dev' into feat/auto-accept-permissions 2026-03-20 10:46:31 -04:00
Dax Raad
db039db7f5 regen js sdk 2026-03-20 10:21:10 -04:00
Dax Raad
c1a3936b61 Merge remote-tracking branch 'origin/dev' into feat/auto-accept-permissions
# Conflicts:
#	packages/sdk/js/src/v2/gen/types.gen.ts
2026-03-20 10:20:26 -04:00
Ariane Emory
a9d9e4d9c4 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-20 03:35:16 -04:00
Ariane Emory
2531b2d3a9 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-13 11:47:39 -04:00
Ariane Emory
a718f86e0f Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-08 19:28:41 -04:00
Ariane Emory
f3efdff861 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-08 08:36:02 -04:00
Ariane Emory
955d8591df Merge branch 'dev' into feat/canceled-prompts-in-history 2026-03-05 18:24:19 -05:00
Ariane Emory
33b3388bf4 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-26 17:50:11 -05:00
Ariane Emory
716f40b128 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-26 01:36:39 -05:00
Ariane Emory
0b06ff1407 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-20 21:24:12 -05:00
Ariane Emory
01ff5b5390 Merge branch 'dev' into feat/canceled-prompts-in-history
# Conflicts:
#	packages/opencode/src/cli/cmd/tui/component/prompt/history.tsx
2026-02-20 02:16:02 -05:00
Ariane Emory
3d1b121e70 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-19 19:18:48 -05:00
Ariane Emory
b70629af27 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-18 19:10:26 -05:00
Ariane Emory
b7b016fa28 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-17 00:09:51 -05:00
Ariane Emory
5ba2d7e5f0 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-15 12:27:51 -05:00
Ariane Emory
459b22b83d Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-14 19:21:47 -05:00
Ariane Emory
377812b98a Merge dev into feat/canceled-prompts-in-history 2026-02-14 06:28:48 -05:00
Ariane Emory
5cc0901e38 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-13 09:37:11 -05:00
Ariane Emory
7fb6b589d1 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-12 18:29:23 -05:00
Ariane Emory
3f37b43e7d Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-11 12:46:47 -05:00
Ariane Emory
8805dfc849 fix: deduplicate prompt history entries
Avoid adding duplicate entries to prompt history when the same input
is appended multiple times (e.g., clearing with ctrl+c then restoring
via history navigation and clearing again).
2026-02-10 22:21:39 -05:00
Ariane Emory
ac5a5d8b16 Merge branch 'feat/canceled-prompts-in-history' of github.com:ariane-emory/opencode into feat/canceled-prompts-in-history 2026-02-10 16:37:55 -05:00
Ariane Emory
eaf94ed047 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-10 16:29:05 -05:00
Ariane Emory
b8031c5ae8 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-10 16:10:35 -05:00
Dax Raad
a531f3f36d core: run command build agent now auto-accepts file edits to reduce workflow interruptions while still requiring confirmation for bash commands 2026-02-07 20:00:09 -05:00
Dax Raad
bb3382311d tui: standardize autoedit indicator text styling to match other status labels 2026-02-07 19:57:45 -05:00
Dax Raad
ad545d0cc9 tui: allow auto-accepting only edit permissions instead of all permissions 2026-02-07 19:52:53 -05:00
Dax Raad
ac244b1458 tui: add searchable 'toggle' keywords to command palette and show current state in toggle titles 2026-02-07 17:03:34 -05:00
Dax Raad
f202536b65 tui: show enable/disable state in permission toggle and make it searchable by 'toggle permissions' 2026-02-07 16:57:48 -05:00
Dax Raad
405cc3f610 tui: streamline permission toggle command naming and add keyboard shortcut support
Rename 'Toggle autoaccept permissions' to 'Toggle permissions' for clarity
and move the command to the Agent category for better discoverability.
Add permission_auto_accept_toggle keybind to enable keyboard shortcut
toggling of auto-accept mode for permission requests.
2026-02-07 16:51:55 -05:00
Dax Raad
878c1b8c2d feat(tui): add auto-accept mode for permission requests
Add a toggleable auto-accept mode that automatically accepts all incoming
permission requests with a 'once' reply. This is useful for users who want
to streamline their workflow when they trust the agent's actions.

Changes:
- Add permission_auto_accept keybind (default: shift+tab) to config
- Remove default for agent_cycle_reverse (was shift+tab)
- Add auto-accept logic in sync.tsx to auto-reply when enabled
- Add command bar action to toggle auto-accept mode (copy: "Toggle autoaccept permissions")
- Add visual indicator showing 'auto-accept' when active
- Store auto-accept state in KV for persistence across sessions
2026-02-07 16:44:39 -05:00
Ariane Emory
d5dcadc000 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-07 13:34:42 -05:00
Ariane Emory
0c154e6a2f Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-06 15:59:50 -05:00
Ariane Emory
4f96975148 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-05 18:17:01 -05:00
Ariane Emory
eaba99711b Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-04 19:33:59 -05:00
Ariane Emory
f762125775 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-03 18:36:44 -05:00
Ariane Emory
ded6bb6513 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-02 21:23:28 -05:00
Ariane Emory
39332f5be6 Merge branch 'dev' into feat/canceled-prompts-in-history 2026-02-01 22:33:29 -05:00
Ariane Emory
2c6ff35400 feat: add toggle to control whether cleared prompts are saved to history
Adds a toggle command in the System category that allows users to enable
or disable saving cleared prompts to history. The feature is disabled by
default to preserve existing behavior.

When enabled via the command palette ("Include cleared prompts in history"),
pressing Ctrl+C will save the current prompt to history before clearing it,
allowing users to navigate back with arrow keys.

The setting persists in kv.json.
2026-02-01 21:12:48 -05:00
Ariane Emory
738d6c8899 feat: save prompt to history when cleared with Ctrl+C
When users press Ctrl+C to clear the input field, the current prompt
is now saved to history before clearing. This allows users to navigate
back to cleared prompts using arrow keys, preventing loss of work.

Addresses #11489
2026-02-01 21:01:15 -05:00
139 changed files with 12417 additions and 3222 deletions

View File

@@ -209,182 +209,6 @@ jobs:
packages/opencode/dist/opencode-windows-x64
packages/opencode/dist/opencode-windows-x64-baseline
build-tauri:
needs:
- build-cli
- version
continue-on-error: false
env:
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
AZURE_TRUSTED_SIGNING_ACCOUNT_NAME: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT_NAME }}
AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE: ${{ secrets.AZURE_TRUSTED_SIGNING_CERTIFICATE_PROFILE }}
AZURE_TRUSTED_SIGNING_ENDPOINT: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
strategy:
fail-fast: false
matrix:
settings:
- host: macos-latest
target: x86_64-apple-darwin
- host: macos-latest
target: aarch64-apple-darwin
# github-hosted: blacksmith lacks ARM64 MSVC cross-compilation toolchain
- host: windows-2025
target: aarch64-pc-windows-msvc
- host: blacksmith-4vcpu-windows-2025
target: x86_64-pc-windows-msvc
- host: blacksmith-4vcpu-ubuntu-2404
target: x86_64-unknown-linux-gnu
- host: blacksmith-8vcpu-ubuntu-2404-arm
target: aarch64-unknown-linux-gnu
runs-on: ${{ matrix.settings.host }}
steps:
- uses: actions/checkout@v3
with:
fetch-tags: true
- uses: apple-actions/import-codesign-certs@v2
if: ${{ runner.os == 'macOS' }}
with:
keychain: build
p12-file-base64: ${{ secrets.APPLE_CERTIFICATE }}
p12-password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
- name: Verify Certificate
if: ${{ runner.os == 'macOS' }}
run: |
CERT_INFO=$(security find-identity -v -p codesigning build.keychain | grep "Developer ID Application")
CERT_ID=$(echo "$CERT_INFO" | awk -F'"' '{print $2}')
echo "CERT_ID=$CERT_ID" >> $GITHUB_ENV
echo "Certificate imported."
- name: Setup Apple API Key
if: ${{ runner.os == 'macOS' }}
run: |
echo "${{ secrets.APPLE_API_KEY_PATH }}" > $RUNNER_TEMP/apple-api-key.p8
- uses: ./.github/actions/setup-bun
- name: Azure login
if: runner.os == 'Windows'
uses: azure/login@v2
with:
client-id: ${{ env.AZURE_CLIENT_ID }}
tenant-id: ${{ env.AZURE_TENANT_ID }}
subscription-id: ${{ env.AZURE_SUBSCRIPTION_ID }}
- uses: actions/setup-node@v4
with:
node-version: "24"
- name: Cache apt packages
if: contains(matrix.settings.host, 'ubuntu')
uses: actions/cache@v4
with:
path: ~/apt-cache
key: ${{ runner.os }}-${{ matrix.settings.target }}-apt-${{ hashFiles('.github/workflows/publish.yml') }}
restore-keys: |
${{ runner.os }}-${{ matrix.settings.target }}-apt-
- name: install dependencies (ubuntu only)
if: contains(matrix.settings.host, 'ubuntu')
run: |
mkdir -p ~/apt-cache && chmod -R a+rw ~/apt-cache
sudo apt-get update
sudo apt-get install -y --no-install-recommends -o dir::cache::archives="$HOME/apt-cache" libwebkit2gtk-4.1-dev libappindicator3-dev librsvg2-dev patchelf
sudo chmod -R a+rw ~/apt-cache
- name: install Rust stable
uses: dtolnay/rust-toolchain@stable
with:
targets: ${{ matrix.settings.target }}
- uses: Swatinem/rust-cache@v2
with:
workspaces: packages/desktop/src-tauri
shared-key: ${{ matrix.settings.target }}
- name: Prepare
run: |
cd packages/desktop
bun ./scripts/prepare.ts
env:
OPENCODE_VERSION: ${{ needs.version.outputs.version }}
GITHUB_TOKEN: ${{ steps.committer.outputs.token }}
OPENCODE_CLI_ARTIFACT: ${{ (runner.os == 'Windows' && 'opencode-cli-windows') || 'opencode-cli' }}
RUST_TARGET: ${{ matrix.settings.target }}
GH_TOKEN: ${{ github.token }}
GITHUB_RUN_ID: ${{ github.run_id }}
- name: Resolve tauri portable SHA
if: contains(matrix.settings.host, 'ubuntu')
run: echo "TAURI_PORTABLE_SHA=$(git ls-remote https://github.com/tauri-apps/tauri.git refs/heads/feat/truly-portable-appimage | cut -f1)" >> "$GITHUB_ENV"
# Fixes AppImage build issues, can be removed when https://github.com/tauri-apps/tauri/pull/12491 is released
- name: Install tauri-cli from portable appimage branch
uses: taiki-e/cache-cargo-install-action@v3
if: contains(matrix.settings.host, 'ubuntu')
with:
tool: tauri-cli
git: https://github.com/tauri-apps/tauri
# branch: feat/truly-portable-appimage
rev: ${{ env.TAURI_PORTABLE_SHA }}
- name: Show tauri-cli version
if: contains(matrix.settings.host, 'ubuntu')
run: cargo tauri --version
- name: Setup git committer
id: committer
uses: ./.github/actions/setup-git-committer
with:
opencode-app-id: ${{ vars.OPENCODE_APP_ID }}
opencode-app-secret: ${{ secrets.OPENCODE_APP_SECRET }}
- name: Build and upload artifacts
uses: tauri-apps/tauri-action@390cbe447412ced1303d35abe75287949e43437a
timeout-minutes: 60
with:
projectPath: packages/desktop
uploadWorkflowArtifacts: true
tauriScript: ${{ (contains(matrix.settings.host, 'ubuntu') && 'cargo tauri') || '' }}
args: --target ${{ matrix.settings.target }} --config ${{ (github.ref_name == 'beta' && './src-tauri/tauri.beta.conf.json') || './src-tauri/tauri.prod.conf.json' }} --verbose
updaterJsonPreferNsis: true
releaseId: ${{ needs.version.outputs.release }}
tagName: ${{ needs.version.outputs.tag }}
releaseDraft: true
releaseAssetNamePattern: opencode-desktop-[platform]-[arch][ext]
repo: ${{ (github.ref_name == 'beta' && 'opencode-beta') || '' }}
releaseCommitish: ${{ github.sha }}
env:
GITHUB_TOKEN: ${{ steps.committer.outputs.token }}
TAURI_BUNDLER_NEW_APPIMAGE_FORMAT: true
TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }}
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }}
APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }}
APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }}
APPLE_SIGNING_IDENTITY: ${{ env.CERT_ID }}
APPLE_API_ISSUER: ${{ secrets.APPLE_API_ISSUER }}
APPLE_API_KEY: ${{ secrets.APPLE_API_KEY }}
APPLE_API_KEY_PATH: ${{ runner.temp }}/apple-api-key.p8
- name: Verify signed Windows desktop artifacts
if: runner.os == 'Windows'
shell: pwsh
run: |
$files = @(
"${{ github.workspace }}\packages\desktop\src-tauri\sidecars\opencode-cli-${{ matrix.settings.target }}.exe"
)
$files += Get-ChildItem "${{ github.workspace }}\packages\desktop\src-tauri\target\${{ matrix.settings.target }}\release\bundle\nsis\*.exe" | Select-Object -ExpandProperty FullName
foreach ($file in $files) {
$sig = Get-AuthenticodeSignature $file
if ($sig.Status -ne "Valid") {
throw "Invalid signature for ${file}: $($sig.Status)"
}
}
build-electron:
needs:
- build-cli
@@ -524,6 +348,30 @@ jobs:
env:
OPENCODE_CHANNEL: ${{ (github.ref_name == 'beta' && 'beta') || 'prod' }}
- name: Create and upload macOS .app.tar.gz
if: runner.os == 'macOS' && needs.version.outputs.release
working-directory: packages/desktop-electron/dist
env:
GH_TOKEN: ${{ steps.committer.outputs.token }}
run: |
if [[ "${{ matrix.settings.target }}" == "x86_64-apple-darwin" ]]; then
APP_DIR="mac"
OUT_NAME="opencode-desktop-mac-x64.app.tar.gz"
elif [[ "${{ matrix.settings.target }}" == "aarch64-apple-darwin" ]]; then
APP_DIR="mac-arm64"
OUT_NAME="opencode-desktop-mac-arm64.app.tar.gz"
else
echo "Unknown macOS target: ${{ matrix.settings.target }}"
exit 1
fi
APP_PATH=$(find "$APP_DIR" -maxdepth 1 -name "*.app" -type d | head -1)
if [ -z "$APP_PATH" ]; then
echo "No .app bundle found in $APP_DIR"
exit 1
fi
tar -czf "$OUT_NAME" -C "$(dirname "$APP_PATH")" "$(basename "$APP_PATH")"
gh release upload "v${{ needs.version.outputs.version }}" "$OUT_NAME" --clobber --repo "${{ needs.version.outputs.repo }}"
- name: Verify signed Windows Electron artifacts
if: runner.os == 'Windows'
shell: pwsh
@@ -542,7 +390,7 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: opencode-electron-${{ matrix.settings.target }}
name: opencode-desktop-${{ matrix.settings.target }}
path: packages/desktop-electron/dist/*
- uses: actions/upload-artifact@v4
@@ -556,7 +404,6 @@ jobs:
- version
- build-cli
- sign-cli-windows
- build-tauri
- build-electron
if: always() && !failure() && !cancelled()
runs-on: blacksmith-4vcpu-ubuntu-2404
@@ -583,13 +430,6 @@ jobs:
node-version: "24"
registry-url: "https://registry.npmjs.org"
- name: Setup git committer
id: committer
uses: ./.github/actions/setup-git-committer
with:
opencode-app-id: ${{ vars.OPENCODE_APP_ID }}
opencode-app-secret: ${{ secrets.OPENCODE_APP_SECRET }}
- uses: actions/download-artifact@v4
with:
name: opencode-cli
@@ -611,6 +451,13 @@ jobs:
pattern: latest-yml-*
path: /tmp/latest-yml
- name: Setup git committer
id: committer
uses: ./.github/actions/setup-git-committer
with:
opencode-app-id: ${{ vars.OPENCODE_APP_ID }}
opencode-app-secret: ${{ secrets.OPENCODE_APP_SECRET }}
- name: Cache apt packages (AUR)
uses: actions/cache@v4
with:
@@ -639,3 +486,5 @@ jobs:
GH_REPO: ${{ needs.version.outputs.repo }}
NPM_CONFIG_PROVENANCE: false
LATEST_YML_DIR: /tmp/latest-yml
TAURI_SIGNING_PRIVATE_KEY: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY }}
TAURI_SIGNING_PRIVATE_KEY_PASSWORD: ${{ secrets.TAURI_SIGNING_PRIVATE_KEY_PASSWORD }}

View File

@@ -1,4 +1,5 @@
import { Config } from "effect"
import { InstallationChannel } from "../installation/version"
function truthy(key: string) {
const value = process.env[key]?.toLowerCase()
@@ -10,6 +11,10 @@ function falsy(key: string) {
return value === "false" || value === "0"
}
// Channels that default to the new effect-httpapi server backend. The legacy
// hono backend remains the default for stable (`prod`/`latest`) installs.
const HTTPAPI_DEFAULT_ON_CHANNELS = new Set(["dev", "beta", "local"])
function number(key: string) {
const value = process.env[key]
if (!value) return undefined
@@ -81,7 +86,14 @@ export const Flag = {
OPENCODE_STRICT_CONFIG_DEPS: truthy("OPENCODE_STRICT_CONFIG_DEPS"),
OPENCODE_WORKSPACE_ID: process.env["OPENCODE_WORKSPACE_ID"],
OPENCODE_EXPERIMENTAL_HTTPAPI: truthy("OPENCODE_EXPERIMENTAL_HTTPAPI"),
// Defaults to true on dev/beta/local channels so internal users exercise the
// new effect-httpapi server backend. Stable (`prod`/`latest`) installs stay
// on the legacy hono backend until the rollout is complete. An explicit env
// var ("true"/"1" or "false"/"0") always wins, providing an opt-in for
// stable users and an escape hatch for dev/beta users.
OPENCODE_EXPERIMENTAL_HTTPAPI:
truthy("OPENCODE_EXPERIMENTAL_HTTPAPI") ||
(!falsy("OPENCODE_EXPERIMENTAL_HTTPAPI") && HTTPAPI_DEFAULT_ON_CHANNELS.has(InstallationChannel)),
OPENCODE_EXPERIMENTAL_WORKSPACES: OPENCODE_EXPERIMENTAL || truthy("OPENCODE_EXPERIMENTAL_WORKSPACES"),
// Evaluated at access time (not module load) because tests, the CLI, and

View File

@@ -20,6 +20,7 @@ const paths = {
data,
bin: path.join(cache, "bin"),
log: path.join(data, "log"),
repos: path.join(data, "repos"),
cache,
config,
state,
@@ -37,6 +38,7 @@ await Promise.all([
fs.mkdir(Path.tmp, { recursive: true }),
fs.mkdir(Path.log, { recursive: true }),
fs.mkdir(Path.bin, { recursive: true }),
fs.mkdir(Path.repos, { recursive: true }),
])
export class Service extends Context.Service<Service, Interface>()("@opencode/Global") {}
@@ -50,6 +52,7 @@ export interface Interface {
readonly tmp: string
readonly bin: string
readonly log: string
readonly repos: string
}
export function make(input: Partial<Interface> = {}): Interface {
@@ -62,6 +65,7 @@ export function make(input: Partial<Interface> = {}): Interface {
tmp: Path.tmp,
bin: Path.bin,
log: Path.log,
repos: Path.repos,
...input,
}
}

View File

@@ -1,3 +1,5 @@
export * as Log from "./log"
import path from "path"
import fs from "fs/promises"
import { createWriteStream } from "fs"

View File

@@ -27,7 +27,7 @@ const channel = (() => {
})()
const getBase = (): Configuration => ({
artifactName: "opencode-electron-${os}-${arch}.${ext}",
artifactName: "opencode-desktop-${os}-${arch}.${ext}",
directories: {
output: "dist",
buildResources: "resources",

View File

@@ -1,7 +1,8 @@
#!/usr/bin/env bun
import { Buffer } from "node:buffer"
import { $ } from "bun"
import path from "node:path"
import { parseArgs } from "node:util"
const { values } = parseArgs({
args: Bun.argv.slice(2),
@@ -12,8 +13,6 @@ const { values } = parseArgs({
const dryRun = values["dry-run"]
import { parseArgs } from "node:util"
const repo = process.env.GH_REPO
if (!repo) throw new Error("GH_REPO is required")
@@ -23,20 +22,22 @@ if (!releaseId) throw new Error("OPENCODE_RELEASE is required")
const version = process.env.OPENCODE_VERSION
if (!version) throw new Error("OPENCODE_VERSION is required")
const dir = process.env.LATEST_YML_DIR
if (!dir) throw new Error("LATEST_YML_DIR is required")
const root = dir
const token = process.env.GH_TOKEN ?? process.env.GITHUB_TOKEN
if (!token) throw new Error("GH_TOKEN or GITHUB_TOKEN is required")
const apiHeaders = {
Authorization: `token ${token}`,
Accept: "application/vnd.github+json",
}
const releaseRes = await fetch(`https://api.github.com/repos/${repo}/releases/${releaseId}`, {
headers: apiHeaders,
const rel = await fetch(`https://api.github.com/repos/${repo}/releases/${releaseId}`, {
headers: {
Authorization: `token ${token}`,
Accept: "application/vnd.github+json",
},
})
if (!releaseRes.ok) {
throw new Error(`Failed to fetch release: ${releaseRes.status} ${releaseRes.statusText}`)
if (!rel.ok) {
throw new Error(`Failed to fetch release: ${rel.status} ${rel.statusText}`)
}
type Asset = {
@@ -45,115 +46,169 @@ type Asset = {
}
type Release = {
tag_name?: string
assets?: Asset[]
}
const release = (await releaseRes.json()) as Release
const assets = release.assets ?? []
const assetByName = new Map(assets.map((asset) => [asset.name, asset]))
const assets = ((await rel.json()) as Release).assets ?? []
const amap = new Map(assets.map((item) => [item.name, item]))
const latestAsset = assetByName.get("latest.json")
if (!latestAsset) {
console.log("latest.json not found, skipping tauri finalization")
process.exit(0)
type Item = {
url: string
}
const latestRes = await fetch(latestAsset.url, {
headers: {
Authorization: `token ${token}`,
Accept: "application/octet-stream",
},
})
if (!latestRes.ok) {
throw new Error(`Failed to fetch latest.json: ${latestRes.status} ${latestRes.statusText}`)
type Yml = {
version: string
files: Item[]
}
const latestText = new TextDecoder().decode(await latestRes.arrayBuffer())
const latest = JSON.parse(latestText)
const base = { ...latest }
delete base.platforms
function parse(text: string): Yml {
const lines = text.split("\n")
let version = ""
const files: Item[] = []
let url = ""
const fetchSignature = async (asset: Asset) => {
const res = await fetch(asset.url, {
const flush = () => {
if (!url) return
files.push({ url })
url = ""
}
for (const line of lines) {
const trim = line.trim()
if (line.startsWith("version:")) {
version = line.slice("version:".length).trim()
continue
}
if (trim.startsWith("- url:")) {
flush()
url = trim.slice("- url:".length).trim()
continue
}
const indented = line.startsWith(" ") || line.startsWith("\t")
if (!indented) flush()
}
flush()
return { version, files }
}
async function read(sub: string, file: string) {
const item = Bun.file(path.join(root, sub, file))
if (!(await item.exists())) return undefined
return parse(await item.text())
}
function pick(list: Item[], exts: string[]) {
for (const ext of exts) {
const found = list.find((item) => item.url.split("?")[0]?.toLowerCase().endsWith(ext))
if (found) return found.url
}
}
function link(raw: string) {
if (raw.startsWith("https://") || raw.startsWith("http://")) return raw
return `https://github.com/${repo}/releases/download/v${version}/${raw}`
}
async function sign(url: string, key: string) {
const name = decodeURIComponent(new URL(url).pathname.split("/").pop() ?? key)
const asset = amap.get(name)
const res = await fetch(asset?.url ?? url, {
headers: {
Authorization: `token ${token}`,
Accept: "application/octet-stream",
...(asset ? { Accept: "application/octet-stream" } : {}),
},
})
if (!res.ok) {
throw new Error(`Failed to fetch signature: ${res.status} ${res.statusText}`)
throw new Error(`Failed to fetch file ${name}: ${res.status} ${res.statusText} (${asset?.url ?? url})`)
}
return Buffer.from(await res.arrayBuffer()).toString()
const tmp = process.env.RUNNER_TEMP ?? "/tmp"
const file = path.join(tmp, name)
await Bun.write(file, await res.arrayBuffer())
await $`bunx @tauri-apps/cli signer sign ${file}`
const sigFile = Bun.file(`${file}.sig`)
if (!(await sigFile.exists())) throw new Error(`Signature file not found for ${name}`)
return (await sigFile.text()).trim()
}
const entries: Record<string, { url: string; signature: string }> = {}
const add = (key: string, asset: Asset, signature: string) => {
if (entries[key]) return
entries[key] = {
url: `https://github.com/${repo}/releases/download/v${version}/${asset.name}`,
signature,
}
const add = async (data: Record<string, { url: string; signature: string }>, key: string, raw: string | undefined) => {
if (!raw) return
if (data[key]) return
const url = link(raw)
data[key] = { url, signature: await sign(url, key) }
}
const targets = [
{ key: "linux-x86_64-deb", asset: "opencode-desktop-linux-amd64.deb" },
{ key: "linux-x86_64-rpm", asset: "opencode-desktop-linux-x86_64.rpm" },
{ key: "linux-aarch64-deb", asset: "opencode-desktop-linux-arm64.deb" },
{ key: "linux-aarch64-rpm", asset: "opencode-desktop-linux-aarch64.rpm" },
{ key: "windows-aarch64-nsis", asset: "opencode-desktop-windows-arm64.exe" },
{ key: "windows-x86_64-nsis", asset: "opencode-desktop-windows-x64.exe" },
{ key: "darwin-x86_64-app", asset: "opencode-desktop-darwin-x64.app.tar.gz" },
{
key: "darwin-aarch64-app",
asset: "opencode-desktop-darwin-aarch64.app.tar.gz",
},
]
for (const target of targets) {
const asset = assetByName.get(target.asset)
if (!asset) continue
const sig = assetByName.get(`${target.asset}.sig`)
if (!sig) continue
const signature = await fetchSignature(sig)
add(target.key, asset, signature)
const alias = (data: Record<string, { url: string; signature: string }>, key: string, src: string) => {
if (data[key]) return
if (!data[src]) return
data[key] = data[src]
}
const alias = (key: string, source: string) => {
if (entries[key]) return
const entry = entries[source]
if (!entry) return
entries[key] = entry
}
const winx = await read("latest-yml-x86_64-pc-windows-msvc", "latest.yml")
const wina = await read("latest-yml-aarch64-pc-windows-msvc", "latest.yml")
const macx = await read("latest-yml-x86_64-apple-darwin", "latest-mac.yml")
const maca = await read("latest-yml-aarch64-apple-darwin", "latest-mac.yml")
const linx = await read("latest-yml-x86_64-unknown-linux-gnu", "latest-linux.yml")
const lina = await read("latest-yml-aarch64-unknown-linux-gnu", "latest-linux-arm64.yml")
alias("linux-x86_64", "linux-x86_64-deb")
alias("linux-aarch64", "linux-aarch64-deb")
alias("windows-aarch64", "windows-aarch64-nsis")
alias("windows-x86_64", "windows-x86_64-nsis")
alias("darwin-x86_64", "darwin-x86_64-app")
alias("darwin-aarch64", "darwin-aarch64-app")
const yver = winx?.version ?? wina?.version ?? macx?.version ?? maca?.version ?? linx?.version ?? lina?.version
if (yver && yver !== version) throw new Error(`latest.yml version mismatch: expected ${version}, got ${yver}`)
const out: Record<string, { url: string; signature: string }> = {}
const winxexe = pick(winx?.files ?? [], [".exe"])
const winaexe = pick(wina?.files ?? [], [".exe"])
const macxTarGz = "opencode-desktop-mac-x64.app.tar.gz"
const macaTarGz = "opencode-desktop-mac-arm64.app.tar.gz"
const linxDeb = pick(linx?.files ?? [], [".deb"])
const linxRpm = pick(linx?.files ?? [], [".rpm"])
const linxAppImage = pick(linx?.files ?? [], [".appimage"])
const linaDeb = pick(lina?.files ?? [], [".deb"])
const linaRpm = pick(lina?.files ?? [], [".rpm"])
const linaAppImage = pick(lina?.files ?? [], [".appimage"])
await add(out, "windows-x86_64-nsis", winxexe)
await add(out, "windows-aarch64-nsis", winaexe)
await add(out, "darwin-x86_64-app", macxTarGz)
await add(out, "darwin-aarch64-app", macaTarGz)
await add(out, "linux-x86_64-deb", linxDeb)
await add(out, "linux-x86_64-rpm", linxRpm)
await add(out, "linux-x86_64-appimage", linxAppImage)
await add(out, "linux-aarch64-deb", linaDeb)
await add(out, "linux-aarch64-rpm", linaRpm)
await add(out, "linux-aarch64-appimage", linaAppImage)
alias(out, "windows-x86_64", "windows-x86_64-nsis")
alias(out, "windows-aarch64", "windows-aarch64-nsis")
alias(out, "darwin-x86_64", "darwin-x86_64-app")
alias(out, "darwin-aarch64", "darwin-aarch64-app")
alias(out, "linux-x86_64", "linux-x86_64-deb")
alias(out, "linux-aarch64", "linux-aarch64-deb")
const platforms = Object.fromEntries(
Object.keys(entries)
Object.keys(out)
.sort()
.map((key) => [key, entries[key]]),
.map((key) => [key, out[key]]),
)
const output = {
...base,
if (!Object.keys(platforms).length) throw new Error("No updater files found in latest.yml artifacts")
const data = {
version,
notes: "",
pub_date: new Date().toISOString(),
platforms,
}
const dir = process.env.RUNNER_TEMP ?? "/tmp"
const file = `${dir}/latest.json`
await Bun.write(file, JSON.stringify(output, null, 2))
const tmp = process.env.RUNNER_TEMP ?? "/tmp"
const file = path.join(tmp, "latest.json")
await Bun.write(file, JSON.stringify(data, null, 2))
const tag = release.tag_name
if (!tag) throw new Error("Release tag not found")
const tag = `v${version}`
if (dryRun) {
console.log(`dry-run: wrote latest.json for ${tag} to ${file}`)

View File

@@ -0,0 +1,17 @@
CREATE TABLE `session_message` (
`id` text PRIMARY KEY,
`session_id` text NOT NULL,
`type` text NOT NULL,
`time_created` integer NOT NULL,
`time_updated` integer NOT NULL,
`data` text NOT NULL,
CONSTRAINT `fk_session_message_session_id_session_id_fk` FOREIGN KEY (`session_id`) REFERENCES `session`(`id`) ON DELETE CASCADE
);
--> statement-breakpoint
DROP INDEX IF EXISTS `session_entry_session_idx`;--> statement-breakpoint
DROP INDEX IF EXISTS `session_entry_session_type_idx`;--> statement-breakpoint
DROP INDEX IF EXISTS `session_entry_time_created_idx`;--> statement-breakpoint
CREATE INDEX `session_message_session_idx` ON `session_message` (`session_id`);--> statement-breakpoint
CREATE INDEX `session_message_session_type_idx` ON `session_message` (`session_id`,`type`);--> statement-breakpoint
CREATE INDEX `session_message_time_created_idx` ON `session_message` (`time_created`);--> statement-breakpoint
DROP TABLE `session_entry`;

File diff suppressed because it is too large Load Diff

View File

@@ -2,7 +2,9 @@
"version": "7",
"dialect": "sqlite",
"id": "aaa2ebeb-caa4-478d-8365-4fc595d16856",
"prevIds": ["66cbe0d7-def0-451b-b88a-7608513a9b44"],
"prevIds": [
"61f807f9-6398-4067-be05-804acc2561bc"
],
"ddl": [
{
"name": "account_state",
@@ -37,7 +39,7 @@
"entityType": "tables"
},
{
"name": "session_entry",
"name": "session_message",
"entityType": "tables"
},
{
@@ -598,7 +600,7 @@
"generated": null,
"name": "id",
"entityType": "columns",
"table": "session_entry"
"table": "session_message"
},
{
"type": "text",
@@ -608,7 +610,7 @@
"generated": null,
"name": "session_id",
"entityType": "columns",
"table": "session_entry"
"table": "session_message"
},
{
"type": "text",
@@ -618,7 +620,7 @@
"generated": null,
"name": "type",
"entityType": "columns",
"table": "session_entry"
"table": "session_message"
},
{
"type": "integer",
@@ -628,7 +630,7 @@
"generated": null,
"name": "time_created",
"entityType": "columns",
"table": "session_entry"
"table": "session_message"
},
{
"type": "integer",
@@ -638,7 +640,7 @@
"generated": null,
"name": "time_updated",
"entityType": "columns",
"table": "session_entry"
"table": "session_message"
},
{
"type": "text",
@@ -648,7 +650,7 @@
"generated": null,
"name": "data",
"entityType": "columns",
"table": "session_entry"
"table": "session_message"
},
{
"type": "text",
@@ -1051,9 +1053,13 @@
"table": "event"
},
{
"columns": ["active_account_id"],
"columns": [
"active_account_id"
],
"tableTo": "account",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "SET NULL",
"nameExplicit": false,
@@ -1062,9 +1068,13 @@
"table": "account_state"
},
{
"columns": ["project_id"],
"columns": [
"project_id"
],
"tableTo": "project",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1073,9 +1083,13 @@
"table": "workspace"
},
{
"columns": ["session_id"],
"columns": [
"session_id"
],
"tableTo": "session",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1084,9 +1098,13 @@
"table": "message"
},
{
"columns": ["message_id"],
"columns": [
"message_id"
],
"tableTo": "message",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1095,9 +1113,13 @@
"table": "part"
},
{
"columns": ["project_id"],
"columns": [
"project_id"
],
"tableTo": "project",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1106,20 +1128,28 @@
"table": "permission"
},
{
"columns": ["session_id"],
"columns": [
"session_id"
],
"tableTo": "session",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
"name": "fk_session_entry_session_id_session_id_fk",
"name": "fk_session_message_session_id_session_id_fk",
"entityType": "fks",
"table": "session_entry"
"table": "session_message"
},
{
"columns": ["project_id"],
"columns": [
"project_id"
],
"tableTo": "project",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1128,9 +1158,13 @@
"table": "session"
},
{
"columns": ["session_id"],
"columns": [
"session_id"
],
"tableTo": "session",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1139,9 +1173,13 @@
"table": "todo"
},
{
"columns": ["session_id"],
"columns": [
"session_id"
],
"tableTo": "session",
"columnsTo": ["id"],
"columnsTo": [
"id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1150,9 +1188,13 @@
"table": "session_share"
},
{
"columns": ["aggregate_id"],
"columns": [
"aggregate_id"
],
"tableTo": "event_sequence",
"columnsTo": ["aggregate_id"],
"columnsTo": [
"aggregate_id"
],
"onUpdate": "NO ACTION",
"onDelete": "CASCADE",
"nameExplicit": false,
@@ -1161,98 +1203,128 @@
"table": "event"
},
{
"columns": ["email", "url"],
"columns": [
"email",
"url"
],
"nameExplicit": false,
"name": "control_account_pk",
"entityType": "pks",
"table": "control_account"
},
{
"columns": ["session_id", "position"],
"columns": [
"session_id",
"position"
],
"nameExplicit": false,
"name": "todo_pk",
"entityType": "pks",
"table": "todo"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "account_state_pk",
"table": "account_state",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "account_pk",
"table": "account",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "workspace_pk",
"table": "workspace",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "project_pk",
"table": "project",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "message_pk",
"table": "message",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "part_pk",
"table": "part",
"entityType": "pks"
},
{
"columns": ["project_id"],
"columns": [
"project_id"
],
"nameExplicit": false,
"name": "permission_pk",
"table": "permission",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "session_entry_pk",
"table": "session_entry",
"name": "session_message_pk",
"table": "session_message",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "session_pk",
"table": "session",
"entityType": "pks"
},
{
"columns": ["session_id"],
"columns": [
"session_id"
],
"nameExplicit": false,
"name": "session_share_pk",
"table": "session_share",
"entityType": "pks"
},
{
"columns": ["aggregate_id"],
"columns": [
"aggregate_id"
],
"nameExplicit": false,
"name": "event_sequence_pk",
"table": "event_sequence",
"entityType": "pks"
},
{
"columns": ["id"],
"columns": [
"id"
],
"nameExplicit": false,
"name": "event_pk",
"table": "event",
@@ -1322,9 +1394,9 @@
"isUnique": false,
"where": null,
"origin": "manual",
"name": "session_entry_session_idx",
"name": "session_message_session_idx",
"entityType": "indexes",
"table": "session_entry"
"table": "session_message"
},
{
"columns": [
@@ -1340,9 +1412,9 @@
"isUnique": false,
"where": null,
"origin": "manual",
"name": "session_entry_session_type_idx",
"name": "session_message_session_type_idx",
"entityType": "indexes",
"table": "session_entry"
"table": "session_message"
},
{
"columns": [
@@ -1354,9 +1426,9 @@
"isUnique": false,
"where": null,
"origin": "manual",
"name": "session_entry_time_created_idx",
"name": "session_message_time_created_idx",
"entityType": "indexes",
"table": "session_entry"
"table": "session_message"
},
{
"columns": [

View File

@@ -0,0 +1,2 @@
ALTER TABLE `session` ADD `agent` text;--> statement-breakpoint
ALTER TABLE `session` ADD `model` text;

File diff suppressed because it is too large Load Diff

View File

@@ -51,6 +51,7 @@ import { LoadAPIKeyError } from "ai"
import type { AssistantMessage, Event, OpencodeClient, SessionMessageResponse, ToolPart } from "@opencode-ai/sdk/v2"
import { applyPatch } from "diff"
import { InstallationVersion } from "@opencode-ai/core/installation/version"
import { ShellToolID } from "@/tool/shell/id"
type ModeOption = { id: string; name: string; description?: string }
type ModelOption = { modelId: string; name: string }
@@ -144,7 +145,7 @@ export class Agent implements ACPAgent {
private sessionManager: ACPSessionManager
private eventAbort = new AbortController()
private eventStarted = false
private bashSnapshots = new Map<string, string>()
private shellSnapshots = new Map<string, string>()
private toolStarts = new Set<string>()
private permissionQueues = new Map<string, Promise<void>>()
private permissionOptions: PermissionOption[] = [
@@ -283,16 +284,16 @@ export class Agent implements ACPAgent {
switch (part.state.status) {
case "pending":
this.bashSnapshots.delete(part.callID)
this.shellSnapshots.delete(part.callID)
return
case "running":
const output = this.bashOutput(part)
const output = this.shellOutput(part)
const content: ToolCallContent[] = []
if (output) {
const hash = Hash.fast(output)
if (part.tool === "bash") {
if (this.bashSnapshots.get(part.callID) === hash) {
if (part.tool === ShellToolID.id) {
if (this.shellSnapshots.get(part.callID) === hash) {
await this.connection
.sessionUpdate({
sessionId,
@@ -311,7 +312,7 @@ export class Agent implements ACPAgent {
})
return
}
this.bashSnapshots.set(part.callID, hash)
this.shellSnapshots.set(part.callID, hash)
}
content.push({
type: "content",
@@ -342,7 +343,7 @@ export class Agent implements ACPAgent {
case "completed": {
this.toolStarts.delete(part.callID)
this.bashSnapshots.delete(part.callID)
this.shellSnapshots.delete(part.callID)
const kind = toToolKind(part.tool)
const content: ToolCallContent[] = [
{
@@ -423,7 +424,7 @@ export class Agent implements ACPAgent {
}
case "error":
this.toolStarts.delete(part.callID)
this.bashSnapshots.delete(part.callID)
this.shellSnapshots.delete(part.callID)
await this.connection
.sessionUpdate({
sessionId,
@@ -837,10 +838,10 @@ export class Agent implements ACPAgent {
await this.toolStart(sessionId, part)
switch (part.state.status) {
case "pending":
this.bashSnapshots.delete(part.callID)
this.shellSnapshots.delete(part.callID)
break
case "running":
const output = this.bashOutput(part)
const output = this.shellOutput(part)
const runningContent: ToolCallContent[] = []
if (output) {
runningContent.push({
@@ -871,7 +872,7 @@ export class Agent implements ACPAgent {
break
case "completed":
this.toolStarts.delete(part.callID)
this.bashSnapshots.delete(part.callID)
this.shellSnapshots.delete(part.callID)
const kind = toToolKind(part.tool)
const content: ToolCallContent[] = [
{
@@ -951,7 +952,7 @@ export class Agent implements ACPAgent {
break
case "error":
this.toolStarts.delete(part.callID)
this.bashSnapshots.delete(part.callID)
this.shellSnapshots.delete(part.callID)
await this.connection
.sessionUpdate({
sessionId,
@@ -1105,8 +1106,8 @@ export class Agent implements ACPAgent {
}
}
private bashOutput(part: ToolPart) {
if (part.tool !== "bash") return
private shellOutput(part: ToolPart) {
if (part.tool !== ShellToolID.id) return
if (!("metadata" in part.state) || !part.state.metadata || typeof part.state.metadata !== "object") return
const output = part.state.metadata["output"]
if (typeof output !== "string") return
@@ -1549,9 +1550,11 @@ export class Agent implements ACPAgent {
function toToolKind(toolName: string): ToolKind {
const tool = toolName.toLocaleLowerCase()
switch (tool) {
case "bash":
case ShellToolID.id:
return "execute"
case "webfetch":
return "fetch"
@@ -1562,6 +1565,8 @@ function toToolKind(toolName: string): ToolKind {
case "grep":
case "glob":
case "repo_clone":
case "repo_overview":
case "context7_resolve_library_id":
case "context7_get_library_docs":
return "search"
@@ -1576,6 +1581,7 @@ function toToolKind(toolName: string): ToolKind {
function toLocations(toolName: string, input: Record<string, any>): { path: string }[] {
const tool = toolName.toLocaleLowerCase()
switch (tool) {
case "read":
case "edit":
@@ -1584,7 +1590,11 @@ function toLocations(toolName: string, input: Record<string, any>): { path: stri
case "glob":
case "grep":
return input["path"] ? [{ path: input["path"] }] : []
case "bash":
case "repo_clone":
return input["path"] ? [{ path: input["path"] }] : []
case "repo_overview":
return input["path"] ? [{ path: input["path"] }] : []
case ShellToolID.id:
return []
default:
return []

View File

@@ -10,6 +10,7 @@ import { ProviderTransform } from "@/provider/transform"
import PROMPT_GENERATE from "./generate.txt"
import PROMPT_COMPACTION from "./prompt/compaction.txt"
import PROMPT_EXPLORE from "./prompt/explore.txt"
import PROMPT_SCOUT from "./prompt/scout.txt"
import PROMPT_SUMMARY from "./prompt/summary.txt"
import PROMPT_TITLE from "./prompt/title.txt"
import { Permission } from "@/permission"
@@ -25,6 +26,9 @@ import * as OtelTracer from "@effect/opentelemetry/Tracer"
import { zod } from "@/util/effect-zod"
import { withStatics, type DeepMutable } from "@/util/schema"
type ReferenceEntry = NonNullable<Config.Info["reference"]>[string]
type ResolvedReference = { kind: "git"; repository: string; branch?: string } | { kind: "local"; path: string }
export const Info = Schema.Struct({
name: Schema.String,
description: Schema.optional(Schema.String),
@@ -86,6 +90,10 @@ export const layer = Layer.effect(
path.join(Global.Path.tmp, "*"),
...skillDirs.map((dir) => path.join(dir, "*")),
]
const readonlyExternalDirectory = {
"*": "ask",
...Object.fromEntries(whitelistedDirs.map((dir) => [dir, "allow"])),
} satisfies Record<string, "allow" | "ask" | "deny">
const defaults = Permission.fromConfig({
"*": "allow",
@@ -97,6 +105,8 @@ export const layer = Layer.effect(
question: "deny",
plan_enter: "deny",
plan_exit: "deny",
repo_clone: "deny",
repo_overview: "deny",
// mirrors github.com/github/gitignore Node.gitignore pattern for .env files
read: {
"*": "allow",
@@ -174,10 +184,7 @@ export const layer = Layer.effect(
webfetch: "allow",
websearch: "allow",
read: "allow",
external_directory: {
"*": "ask",
...Object.fromEntries(whitelistedDirs.map((dir) => [dir, "allow"])),
},
external_directory: readonlyExternalDirectory,
}),
user,
),
@@ -187,6 +194,33 @@ export const layer = Layer.effect(
mode: "subagent",
native: true,
},
scout: {
name: "scout",
permission: Permission.merge(
defaults,
Permission.fromConfig({
"*": "deny",
grep: "allow",
glob: "allow",
webfetch: "allow",
websearch: "allow",
codesearch: "allow",
read: "allow",
repo_clone: "allow",
repo_overview: "allow",
external_directory: {
...readonlyExternalDirectory,
[path.join(Global.Path.repos, "*")]: "allow",
},
}),
user,
),
description: `Docs and dependency-source specialist. Use this when you need to inspect external documentation, clone dependency repositories into the managed cache, and research library implementation details without modifying the user's workspace.`,
prompt: PROMPT_SCOUT,
options: {},
mode: "subagent",
native: true,
},
compaction: {
name: "compaction",
mode: "primary",
@@ -264,6 +298,73 @@ export const layer = Layer.effect(
item.permission = Permission.merge(item.permission, Permission.fromConfig(value.permission ?? {}))
}
function referencePath(value: string) {
if (value.startsWith("~/")) return path.join(Global.Path.home, value.slice(2))
return path.isAbsolute(value)
? value
: path.resolve(ctx.worktree === "/" ? ctx.directory : ctx.worktree, value)
}
function resolveReference(reference: ReferenceEntry): ResolvedReference {
if (typeof reference === "string") {
if (reference.startsWith(".") || reference.startsWith("/") || reference.startsWith("~")) {
return { kind: "local", path: referencePath(reference) }
}
return { kind: "git", repository: reference }
}
if ("path" in reference) return { kind: "local", path: referencePath(reference.path) }
return { kind: "git", repository: reference.repository, branch: reference.branch }
}
function referencePrompt(name: string, reference: ResolvedReference) {
if (reference.kind === "local") {
return [
PROMPT_SCOUT,
`You are Scout reference @${name}. This reference points to a local directory outside or alongside the current workspace.`,
`Local directory: ${reference.path}`,
`When invoked, inspect this directory as the primary reference source. Prefer repo_overview with path ${JSON.stringify(reference.path)} before broader searches. Do not edit files.`,
].join("\n\n")
}
return [
PROMPT_SCOUT,
`You are Scout reference @${name}. This reference points to a git repository.`,
`Repository: ${reference.repository}`,
...(reference.branch ? [`Branch/ref: ${reference.branch}`] : []),
`When invoked, clone or refresh this repository with repo_clone, then inspect the cached repository as the primary reference source. Do not edit files.`,
].join("\n\n")
}
for (const [name, reference] of Object.entries(cfg.reference ?? {})) {
if (agents[name]) continue
const resolved = resolveReference(reference)
const localPath = resolved.kind === "local" ? resolved.path : undefined
agents[name] = {
name,
description:
resolved.kind === "local"
? `Scout reference for local directory ${resolved.path}`
: `Scout reference for repository ${resolved.repository}`,
permission: Permission.merge(
agents.scout.permission,
Permission.fromConfig(
localPath
? {
external_directory: {
[localPath]: "allow",
[path.join(localPath, "*")]: "allow",
},
}
: {},
),
),
prompt: referencePrompt(name, resolved),
options: { reference },
mode: "subagent",
native: false,
}
}
// Ensure Truncate.GLOB is allowed unless explicitly configured
for (const name in agents) {
const agent = agents[name]

View File

@@ -0,0 +1,36 @@
You are `scout`, a read-only research agent for external libraries, dependency source, and documentation.
Your purpose is to investigate code outside the local workspace and return evidence-backed findings without modifying the user's workspace.
Use this agent when asked to:
- inspect dependency repositories or library source
- compare local code against upstream implementations
- research public GitHub repositories the environment can clone
- explain how a library or framework works by reading its source and docs
- investigate third-party APIs, workflows, or behavior outside the current workspace
Working style:
1. When the task involves a GitHub repository or dependency source, use `repo_clone` first.
2. After cloning, use `Glob`, `Grep`, and `Read` to inspect the cloned repository.
3. Use `WebFetch` for official documentation pages when source alone is not enough.
4. Prefer direct code and documentation evidence over assumptions.
5. If multiple external repositories are relevant, inspect each one before drawing conclusions.
Research standards:
- cite exact absolute file paths and line references whenever possible
- separate what is verified from what is inferred
- if the answer depends on branch state, note that you are reading the repository's current default clone state unless the caller specifies otherwise
- if a repository cannot be cloned or accessed, say so explicitly and continue with whatever evidence is still available
- call out uncertainty clearly instead of smoothing over gaps
Output expectations:
- start with the direct answer
- then explain the evidence repository by repository or source by source
- include file references when relevant
- keep the explanation organized and easy to scan
Constraints:
- do not modify files or run tools that change the user's workspace
- return absolute file paths for cloned-repo findings in your final response
Complete the user's research request efficiently and report your findings clearly.

View File

@@ -0,0 +1,173 @@
import { InstanceState } from "@/effect/instance-state"
import { Identifier } from "@/id/id"
import { Cause, Deferred, Effect, Fiber, Layer, Scope, Context } from "effect"
// Lifecycle states for a background job. "running" is the only non-terminal state.
export type Status = "running" | "completed" | "error" | "cancelled"
// Public, serializable view of a job. Timestamps are epoch milliseconds
// (this module produces them via Date.now()).
export type Info = {
  id: string
  type: string
  title?: string
  status: Status
  started_at: number
  completed_at?: number
  output?: string
  error?: string
  metadata?: Record<string, unknown>
}
// Internal bookkeeping for a tracked job: the mutable Info, a Deferred that
// finish() resolves with the final Info, and the forked fiber (cleared once
// the job reaches a terminal state).
type Active = {
  info: Info
  done: Deferred.Deferred<Info>
  fiber?: Fiber.Fiber<void, unknown>
}
// Per-instance state: all known jobs keyed by id, plus the scope job fibers
// are forked into (so they are torn down with the instance).
type State = {
  jobs: Map<string, Active>
  scope: Scope.Scope
}
// Parameters for starting a job. When `id` is omitted one is generated via
// Identifier.ascending("job"); `run` produces the job's string output and
// its failure cause becomes the job's error text.
export type StartInput = {
  id?: string
  type: string
  title?: string
  metadata?: Record<string, unknown>
  run: Effect.Effect<string, unknown>
}
// Parameters for waiting on a job. `timeout` is passed to Effect.sleep;
// when the sleep wins the race the wait resolves with timedOut: true.
export type WaitInput = {
  id: string
  timeout?: number
}
// Result of a wait. `info` is absent when the job id is unknown.
export type WaitResult = {
  info?: Info
  timedOut: boolean
}
// Service surface for managing background jobs.
export interface Interface {
  readonly list: () => Effect.Effect<Info[]>
  readonly get: (id: string) => Effect.Effect<Info | undefined>
  readonly start: (input: StartInput) => Effect.Effect<Info>
  readonly wait: (input: WaitInput) => Effect.Effect<WaitResult>
  readonly cancel: (id: string) => Effect.Effect<Info | undefined>
}
// Effect service tag for the background-job service.
export class Service extends Context.Service<Service, Interface>()("@opencode/BackgroundJob") {}
// Return a defensive copy of the job's public info so callers cannot mutate
// internal state. Metadata, when present, is shallow-copied as well.
function snapshot(job: Active): Info {
  const copy: Info = { ...job.info }
  if (copy.metadata) copy.metadata = { ...copy.metadata }
  return copy
}
// Convert an arbitrary failure value into a human-readable string.
// - Error instances yield their message.
// - Other objects are JSON-stringified so a failed job's error does not
//   collapse to the useless "[object Object]".
// - Everything else (strings, numbers, ...) goes through String().
function errorText(error: unknown): string {
  if (error instanceof Error) return error.message
  if (typeof error === "object" && error !== null) {
    try {
      const json = JSON.stringify(error)
      // JSON.stringify can return undefined (e.g. toJSON returning undefined).
      if (json !== undefined) return json
    } catch {
      // circular or otherwise unserializable — fall through to String()
    }
  }
  return String(error)
}
// Live implementation of the background-job service. State is held per
// instance via InstanceState; job fibers are forked into the instance scope
// so they are interrupted when the instance is closed.
export const layer = Layer.effect(
  Service,
  Effect.gen(function* () {
    const state = yield* InstanceState.make<State>(
      Effect.fn("BackgroundJob.state")(function* () {
        return {
          jobs: new Map(),
          scope: yield* Scope.Scope,
        }
      }),
    )
    // Transition a job into a terminal state exactly once: stamp completion
    // time, record output/error, drop the fiber reference, and resolve the
    // job's Deferred with the final snapshot. Idempotent — a job that is no
    // longer running is returned unchanged.
    const finish = Effect.fn("BackgroundJob.finish")(function* (
      job: Active,
      status: Exclude<Status, "running">,
      data?: { output?: string; error?: string },
    ) {
      if (job.info.status !== "running") return snapshot(job)
      job.info.status = status
      job.info.completed_at = Date.now()
      if (data?.output !== undefined) job.info.output = data.output
      if (data?.error !== undefined) job.info.error = data.error
      job.fiber = undefined
      const info = snapshot(job)
      // Effect.ignore: the Deferred may already be resolved; that is fine.
      yield* Deferred.succeed(job.done, info).pipe(Effect.ignore)
      return info
    })
    // All known jobs (running and terminal), oldest first.
    const list: Interface["list"] = Effect.fn("BackgroundJob.list")(function* () {
      const s = yield* InstanceState.get(state)
      return Array.from(s.jobs.values())
        .map(snapshot)
        .toSorted((a, b) => a.started_at - b.started_at)
    })
    // Look up a single job by id; undefined when unknown.
    const get: Interface["get"] = Effect.fn("BackgroundJob.get")(function* (id) {
      const s = yield* InstanceState.get(state)
      const job = s.jobs.get(id)
      if (!job) return
      return snapshot(job)
    })
    // Start a job. A still-running job with the same id is reused (no
    // duplicate work); a finished job under the same id is replaced. The run
    // effect is forked into the instance scope and its outcome is routed into
    // finish(): success -> "completed", interrupt-only cause -> "cancelled",
    // anything else -> "error" with errorText() of the squashed cause.
    const start: Interface["start"] = Effect.fn("BackgroundJob.start")(function* (input) {
      const s = yield* InstanceState.get(state)
      const id = input.id ?? Identifier.ascending("job")
      const existing = s.jobs.get(id)
      if (existing?.info.status === "running") return snapshot(existing)
      const job: Active = {
        info: {
          id,
          type: input.type,
          title: input.title,
          status: "running",
          started_at: Date.now(),
          metadata: input.metadata,
        },
        done: yield* Deferred.make<Info>(),
      }
      s.jobs.set(id, job)
      job.fiber = yield* input.run.pipe(
        Effect.matchCauseEffect({
          onSuccess: (output) => finish(job, "completed", { output }),
          onFailure: (cause) =>
            finish(job, Cause.hasInterruptsOnly(cause) ? "cancelled" : "error", {
              error: errorText(Cause.squash(cause)),
            }),
        }),
        Effect.asVoid,
        Effect.forkIn(s.scope),
      )
      return snapshot(job)
    })
    // Wait for a job to finish. Unknown id -> { timedOut: false } with no
    // info; already-terminal job -> its current snapshot; otherwise await the
    // job's Deferred, optionally racing a timeout that resolves with the
    // (still-running) snapshot and timedOut: true.
    const wait: Interface["wait"] = Effect.fn("BackgroundJob.wait")(function* (input) {
      const s = yield* InstanceState.get(state)
      const job = s.jobs.get(input.id)
      if (!job) return { timedOut: false }
      if (job.info.status !== "running") return { info: snapshot(job), timedOut: false }
      if (!input.timeout) return { info: yield* Deferred.await(job.done), timedOut: false }
      // NOTE(review): Effect.as evaluates its argument eagerly, so the
      // timeout arm's snapshot is taken when the race is built, not when the
      // timer fires — confirm this "still running" snapshot is the intent.
      return yield* Effect.raceAll([
        Deferred.await(job.done).pipe(Effect.map((info) => ({ info, timedOut: false }))),
        Effect.sleep(input.timeout).pipe(Effect.as({ info: snapshot(job), timedOut: true })),
      ])
    })
    // Cancel a running job: mark it "cancelled" first (so the fiber's own
    // onFailure path sees a terminal status and finish() becomes a no-op),
    // then interrupt the fiber. Non-running jobs are returned as-is.
    const cancel: Interface["cancel"] = Effect.fn("BackgroundJob.cancel")(function* (id) {
      const s = yield* InstanceState.get(state)
      const job = s.jobs.get(id)
      if (!job) return
      if (job.info.status !== "running") return snapshot(job)
      const fiber = job.fiber
      const info = yield* finish(job, "cancelled")
      if (fiber) yield* Fiber.interrupt(fiber).pipe(Effect.ignore)
      return info
    })
    return Service.of({ list, get, start, wait, cancel })
  }),
)
// Alias of `layer`; the default wiring for the background-job service.
export const defaultLayer = layer
// NOTE(review): namespace re-export from "./job" — presumably this module's
// own path, exposing it to consumers as `BackgroundJob`; verify against the
// package's module layout.
export * as BackgroundJob from "./job"

View File

@@ -24,6 +24,7 @@ export function payloads() {
.map(([type, def]) => {
return z
.object({
id: z.string(),
type: z.literal(type),
properties: zodObject(def.properties),
})
@@ -39,6 +40,7 @@ export function effectPayloads() {
.entries()
.map(([type, def]) =>
Schema.Struct({
id: Schema.String,
type: Schema.Literal(type),
properties: def.properties,
}).annotate({ identifier: `Event.${type}` }),

View File

@@ -1,4 +1,5 @@
import { EventEmitter } from "events"
import { Identifier } from "@/id/id"
export type GlobalEvent = {
directory?: string
@@ -7,6 +8,15 @@ export type GlobalEvent = {
payload: any
}
export const GlobalBus = new EventEmitter<{
class GlobalBusEmitter extends EventEmitter<{
event: [GlobalEvent]
}>()
}> {
override emit(eventName: "event", event: GlobalEvent): boolean {
if (event.payload && typeof event.payload === "object" && !("id" in event.payload)) {
event.payload.id = event.payload.syncEvent?.id ?? Identifier.create("evt", "ascending")
}
return super.emit(eventName, event)
}
}
export const GlobalBus = new GlobalBusEmitter()

View File

@@ -5,6 +5,7 @@ import { BusEvent } from "./bus-event"
import { GlobalBus } from "./global"
import { InstanceState } from "@/effect/instance-state"
import { makeRuntime } from "@/effect/run-service"
import { Identifier } from "@/id/id"
const log = Log.create({ service: "bus" })
@@ -18,6 +19,7 @@ export const InstanceDisposed = BusEvent.define(
)
type Payload<D extends BusEvent.Definition = BusEvent.Definition> = {
id: string
type: D["type"]
properties: BusProperties<D>
}
@@ -28,7 +30,11 @@ type State = {
}
export interface Interface {
readonly publish: <D extends BusEvent.Definition>(def: D, properties: BusProperties<D>) => Effect.Effect<void>
readonly publish: <D extends BusEvent.Definition>(
def: D,
properties: BusProperties<D>,
options?: { id?: string },
) => Effect.Effect<void>
readonly subscribe: <D extends BusEvent.Definition>(def: D) => Stream.Stream<Payload<D>>
readonly subscribeAll: () => Stream.Stream<Payload>
readonly subscribeCallback: <D extends BusEvent.Definition>(
@@ -53,6 +59,7 @@ export const layer = Layer.effect(
// Publish InstanceDisposed before shutting down so subscribers see it
yield* PubSub.publish(wildcard, {
type: InstanceDisposed.type,
id: createID(),
properties: { directory: ctx.directory },
})
yield* PubSub.shutdown(wildcard)
@@ -77,10 +84,10 @@ export const layer = Layer.effect(
})
}
function publish<D extends BusEvent.Definition>(def: D, properties: BusProperties<D>) {
function publish<D extends BusEvent.Definition>(def: D, properties: BusProperties<D>, options?: { id?: string }) {
return Effect.gen(function* () {
const s = yield* InstanceState.get(state)
const payload: Payload = { type: def.type, properties }
const payload: Payload = { id: options?.id ?? createID(), type: def.type, properties }
log.info("publishing", { type: def.type })
const ps = s.typed.get(def.type)
@@ -173,8 +180,16 @@ const { runPromise, runSync } = makeRuntime(Service, layer)
// runSync is safe here because the subscribe chain (InstanceState.get, PubSub.subscribe,
// Scope.make, Effect.forkScoped) is entirely synchronous. If any step becomes async, this will throw.
export async function publish<D extends BusEvent.Definition>(def: D, properties: BusProperties<D>) {
return runPromise((svc) => svc.publish(def, properties))
export function createID() {
return Identifier.create("evt", "ascending")
}
export async function publish<D extends BusEvent.Definition>(
def: D,
properties: BusProperties<D>,
options?: { id?: string },
) {
return runPromise((svc) => svc.publish(def, properties, options))
}
export function subscribe<D extends BusEvent.Definition>(def: D, callback: (event: Payload<D>) => unknown) {

View File

@@ -33,6 +33,7 @@ import { AppRuntime } from "@/effect/app-runtime"
import { Git } from "@/git"
import { setTimeout as sleep } from "node:timers/promises"
import { Process } from "@/util/process"
import { parseGitHubRemote } from "@/util/repository"
import { Effect } from "effect"
type GitHubAuthor = {
@@ -152,18 +153,7 @@ const SUPPORTED_EVENTS = [...USER_EVENTS, ...REPO_EVENTS] as const
type UserEvent = (typeof USER_EVENTS)[number]
type RepoEvent = (typeof REPO_EVENTS)[number]
// Parses GitHub remote URLs in various formats:
// - https://github.com/owner/repo.git
// - https://github.com/owner/repo
// - git@github.com:owner/repo.git
// - git@github.com:owner/repo
// - ssh://git@github.com/owner/repo.git
// - ssh://git@github.com/owner/repo
export function parseGitHubRemote(url: string): { owner: string; repo: string } | null {
const match = url.match(/^(?:(?:https?|ssh):\/\/)?(?:git@)?github\.com[:/]([^/]+)\/([^/]+?)(?:\.git)?$/)
if (!match) return null
return { owner: match[1], repo: match[2] }
}
export { parseGitHubRemote }
/**
* Extracts displayable text from assistant response parts.
@@ -879,7 +869,7 @@ export const GithubRunCommand = cmd({
function subscribeSessionEvents() {
const TOOL: Record<string, [string, string]> = {
todowrite: ["Todo", UI.Style.TEXT_WARNING_BOLD],
bash: ["Bash", UI.Style.TEXT_DANGER_BOLD],
bash: ["Shell", UI.Style.TEXT_DANGER_BOLD],
edit: ["Edit", UI.Style.TEXT_SUCCESS_BOLD],
glob: ["Glob", UI.Style.TEXT_INFO_BOLD],
grep: ["Grep", UI.Style.TEXT_INFO_BOLD],

View File

@@ -22,7 +22,8 @@ import { WriteTool } from "../../tool/write"
import { WebSearchTool } from "../../tool/websearch"
import { TaskTool } from "../../tool/task"
import { SkillTool } from "../../tool/skill"
import { BashTool } from "../../tool/bash"
import { ShellTool } from "../../tool/shell"
import { ShellToolID } from "../../tool/shell/id"
import { TodoWriteTool } from "../../tool/todo"
import { Locale } from "@/util/locale"
import { AppRuntime } from "@/effect/app-runtime"
@@ -175,7 +176,7 @@ function skill(info: ToolProps<typeof SkillTool>) {
})
}
function bash(info: ToolProps<typeof BashTool>) {
function shell(info: ToolProps<typeof ShellTool>) {
const output = info.part.state.status === "completed" ? info.part.state.output?.trim() : undefined
block(
{
@@ -359,6 +360,11 @@ export const RunCommand = cmd({
action: "deny",
pattern: "*",
},
{
permission: "edit",
action: "allow",
pattern: "*",
},
]
function title() {
@@ -400,7 +406,7 @@ export const RunCommand = cmd({
async function execute(sdk: OpencodeClient) {
function tool(part: ToolPart) {
try {
if (part.tool === "bash") return bash(props<typeof BashTool>(part))
if (part.tool === ShellToolID.id) return shell(props<typeof ShellTool>(part))
if (part.tool === "glob") return glob(props<typeof GlobTool>(part))
if (part.tool === "grep") return grep(props<typeof GrepTool>(part))
if (part.tool === "read") return read(props<typeof ReadTool>(part))

View File

@@ -1,6 +1,7 @@
import { Server } from "../../server/server"
import { cmd } from "./cmd"
import { withNetworkOptions, resolveNetworkOptions } from "../network"
import { bootstrap } from "../bootstrap"
import { Flag } from "@opencode-ai/core/flag/flag"
export const ServeCommand = cmd({
@@ -11,7 +12,8 @@ export const ServeCommand = cmd({
if (!Flag.OPENCODE_SERVER_PASSWORD) {
console.log("Warning: OPENCODE_SERVER_PASSWORD is not set; server is unsecured.")
}
const opts = await resolveNetworkOptions(args)
const opts = await bootstrap(process.cwd(), () => resolveNetworkOptions(args))
const server = await Server.listen(opts)
console.log(`opencode server listening on http://${server.hostname}:${server.port}`)

View File

@@ -28,6 +28,7 @@ import { useEvent } from "@tui/context/event"
import { SDKProvider, useSDK } from "@tui/context/sdk"
import { StartupLoading } from "@tui/component/startup-loading"
import { SyncProvider, useSync } from "@tui/context/sync"
import { SyncProviderV2 } from "@tui/context/sync-v2"
import { LocalProvider, useLocal } from "@tui/context/local"
import { DialogModel } from "@tui/component/dialog-model"
import { useConnected } from "@tui/component/use-connected"
@@ -166,27 +167,29 @@ export function tui(input: {
>
<ProjectProvider>
<SyncProvider>
<ThemeProvider mode={mode}>
<LocalProvider>
<KeybindProvider>
<PromptStashProvider>
<DialogProvider>
<CommandProvider>
<FrecencyProvider>
<PromptHistoryProvider>
<PromptRefProvider>
<EditorContextProvider>
<App onSnapshot={input.onSnapshot} />
</EditorContextProvider>
</PromptRefProvider>
</PromptHistoryProvider>
</FrecencyProvider>
</CommandProvider>
</DialogProvider>
</PromptStashProvider>
</KeybindProvider>
</LocalProvider>
</ThemeProvider>
<SyncProviderV2>
<ThemeProvider mode={mode}>
<LocalProvider>
<KeybindProvider>
<PromptStashProvider>
<DialogProvider>
<CommandProvider>
<FrecencyProvider>
<PromptHistoryProvider>
<PromptRefProvider>
<EditorContextProvider>
<App onSnapshot={input.onSnapshot} />
</EditorContextProvider>
</PromptRefProvider>
</PromptHistoryProvider>
</FrecencyProvider>
</CommandProvider>
</DialogProvider>
</PromptStashProvider>
</KeybindProvider>
</LocalProvider>
</ThemeProvider>
</SyncProviderV2>
</SyncProvider>
</ProjectProvider>
</SDKProvider>
@@ -505,6 +508,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
{
title: "Toggle MCPs",
value: "mcp.list",
search: "toggle mcps",
category: "Agent",
slash: {
name: "mcps",
@@ -611,6 +615,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
{
title: mode() === "dark" ? "Switch to light mode" : "Switch to dark mode",
value: "theme.switch_mode",
search: "toggle appearance",
onSelect: (dialog) => {
setMode(mode() === "dark" ? "light" : "dark")
dialog.clear()
@@ -659,6 +664,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
},
{
title: "Toggle debug panel",
search: "toggle debug",
category: "System",
value: "app.debug",
onSelect: (dialog) => {
@@ -668,6 +674,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
},
{
title: "Toggle console",
search: "toggle console",
category: "System",
value: "app.console",
onSelect: (dialog) => {
@@ -709,6 +716,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
{
title: terminalTitleEnabled() ? "Disable terminal title" : "Enable terminal title",
value: "terminal.title.toggle",
search: "toggle terminal title",
keybind: "terminal_title_toggle",
category: "System",
onSelect: (dialog) => {
@@ -724,6 +732,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
{
title: kv.get("animations_enabled", true) ? "Disable animations" : "Enable animations",
value: "app.toggle.animations",
search: "toggle animations",
category: "System",
onSelect: (dialog) => {
kv.set("animations_enabled", !kv.get("animations_enabled", true))
@@ -767,6 +776,7 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
{
title: kv.get("diff_wrap_mode", "word") === "word" ? "Disable diff wrapping" : "Enable diff wrapping",
value: "app.toggle.diffwrap",
search: "toggle diff wrapping",
category: "System",
onSelect: (dialog) => {
const current = kv.get("diff_wrap_mode", "word")
@@ -774,6 +784,15 @@ function App(props: { onSnapshot?: () => Promise<string[]> }) {
dialog.clear()
},
},
{
title: kv.get("clear_prompt_save_history", false) ? "Don't include cleared prompts in history" : "Include cleared prompts in history",
value: "app.toggle.clear_prompt_history",
category: "System",
onSelect: (dialog) => {
kv.set("clear_prompt_save_history", !kv.get("clear_prompt_save_history", false))
dialog.clear()
},
},
])
event.on(TuiEvent.CommandExecute.type, (evt) => {

View File

@@ -82,6 +82,7 @@ export const { use: usePromptHistory, provider: PromptHistoryProvider } = create
return store.history.at(store.index)
},
append(item: PromptInfo) {
if (store.history.at(-1)?.input === item.input) return
const entry = structuredClone(unwrap(item))
let trimmed = false
setStore(

View File

@@ -174,6 +174,7 @@ export function Prompt(props: PromptProps) {
const [auto, setAuto] = createSignal<AutocompleteRef>()
const currentProviderLabel = createMemo(() => local.model.parsed().provider)
const hasRightContent = createMemo(() => Boolean(props.right))
const [autoaccept, setAutoaccept] = kv.signal<"none" | "edit">("permission_auto_accept", "edit")
function promptModelWarning() {
toast.show({
@@ -295,6 +296,17 @@ export function Prompt(props: PromptProps) {
command.register(() => {
return [
{
title: autoaccept() === "none" ? "Enable autoedit" : "Disable autoedit",
value: "permission.auto_accept.toggle",
search: "toggle permissions",
keybind: "permission_auto_accept_toggle",
category: "Agent",
onSelect: (dialog) => {
setAutoaccept(() => (autoaccept() === "none" ? "edit" : "none"))
dialog.clear()
},
},
{
title: "Clear prompt",
value: "prompt.clear",
@@ -755,9 +767,18 @@ export function Prompt(props: PromptProps) {
return false
}
const variant = local.model.variant.current()
let sessionID = props.sessionID
if (sessionID == null) {
const res = await sdk.client.session.create({ workspace: props.workspaceID })
const res = await sdk.client.session.create({
workspace: props.workspaceID,
agent: agent.name,
model: {
providerID: selectedModel.providerID,
id: selectedModel.modelID,
variant,
},
})
if (res.error) {
console.log("Creating a session failed:", res.error)
@@ -797,7 +818,6 @@ export function Prompt(props: PromptProps) {
// Capture mode before it gets reset
const currentMode = store.mode
const variant = local.model.variant.current()
const editorSelection = editorContext()
const currentEditorSelectionKey = editorSelectionKey(editorSelection)
const editorParts =
@@ -1121,6 +1141,12 @@ export function Prompt(props: PromptProps) {
// If no image, let the default paste behavior continue
}
if (keybind.match("input_clear", e) && store.prompt.input !== "") {
if (kv.get("clear_prompt_save_history", false)) {
history.append({
...store.prompt,
mode: store.mode,
})
}
input.clear()
input.extmarks.clear()
setStore("prompt", {
@@ -1313,9 +1339,14 @@ export function Prompt(props: PromptProps) {
)}
</Show>
</box>
<Show when={hasRightContent()}>
<Show when={hasRightContent() || autoaccept() === "edit"}>
<box flexDirection="row" gap={1} alignItems="center">
{props.right}
<Show when={autoaccept() === "edit"}>
<text>
<span style={{ fg: theme.warning }}>autoedit</span>
</text>
</Show>
</box>
</Show>
</box>

View File

@@ -0,0 +1,271 @@
import { useEvent } from "@tui/context/event"
import type {
SessionMessage,
SessionMessageAssistant,
SessionMessageAssistantReasoning,
SessionMessageAssistantText,
SessionMessageAssistantTool,
} from "@opencode-ai/sdk/v2"
import { createStore, produce, reconcile } from "solid-js/store"
import { createSimpleContext } from "./helper"
import { useSDK } from "./sdk"
// Most recent assistant message that has not yet completed, if any.
function activeAssistant(messages: SessionMessage[]) {
  for (let i = messages.length - 1; i >= 0; i--) {
    const candidate = messages[i]
    if (candidate?.type === "assistant" && !candidate.time.completed) return candidate
  }
  return undefined
}
// Most recent compaction message, if any.
function activeCompaction(messages: SessionMessage[]) {
  for (let i = messages.length - 1; i >= 0; i--) {
    const candidate = messages[i]
    if (candidate?.type === "compaction") return candidate
  }
  return undefined
}
// Last tool entry in the assistant's content. When callID is given, only a
// tool whose id matches it counts; otherwise any tool entry matches.
function latestTool(assistant: SessionMessageAssistant | undefined, callID?: string) {
  if (!assistant) return undefined
  return assistant.content.findLast((item): item is SessionMessageAssistantTool => {
    if (item.type !== "tool") return false
    return callID === undefined || item.id === callID
  })
}
// Last text entry in the assistant's content, if any.
function latestText(assistant: SessionMessageAssistant | undefined) {
  const content = assistant?.content
  if (!content) return undefined
  for (let i = content.length - 1; i >= 0; i--) {
    const item = content[i]
    if (item?.type === "text") return item
  }
  return undefined
}
// Last reasoning entry with the given reasoningID, if any.
function latestReasoning(assistant: SessionMessageAssistant | undefined, reasoningID: string) {
  const content = assistant?.content
  if (!content) return undefined
  for (let i = content.length - 1; i >= 0; i--) {
    const item = content[i]
    if (item?.type === "reasoning" && item.id === reasoningID) return item
  }
  return undefined
}
// SyncV2 context: projects the v2 "session.next.*" event stream into a
// per-session SessionMessage[] store, and exposes a sync() fallback that
// replaces a session's messages from the HTTP API.
export const { use: useSyncV2, provider: SyncProviderV2 } = createSimpleContext({
  name: "SyncV2",
  init: () => {
    // messages[sessionID] is the ordered message list for that session.
    const [store, setStore] = createStore<{
      messages: {
        [sessionID: string]: SessionMessage[]
      }
    }>({
      messages: {},
    })
    const event = useEvent()
    const sdk = useSDK()
    // Run a mutation against a session's message array inside a solid-js
    // `produce`, creating the array on first use.
    function update(sessionID: string, fn: (messages: SessionMessage[]) => void) {
      setStore(
        "messages",
        produce((draft) => {
          fn((draft[sessionID] ??= []))
        }),
      )
    }
    // Project each incoming event into the store. Delta events mutate the
    // latest matching entry (via activeAssistant/latestTool/etc.); "started"
    // events push new entries; "ended" events finalize with the full payload.
    event.subscribe((event) => {
      switch (event.type) {
        case "session.next.prompted": {
          update(event.properties.sessionID, (draft) => {
            draft.push({
              id: event.id,
              type: "user",
              text: event.properties.prompt.text,
              files: event.properties.prompt.files,
              agents: event.properties.prompt.agents,
              time: { created: event.properties.timestamp },
            })
          })
          break
        }
        case "session.next.synthetic":
          update(event.properties.sessionID, (draft) => {
            draft.push({
              id: event.id,
              type: "synthetic",
              sessionID: event.properties.sessionID,
              text: event.properties.text,
              time: { created: event.properties.timestamp },
            })
          })
          break
        case "session.next.step.started":
          update(event.properties.sessionID, (draft) => {
            // A new step implicitly completes any still-open assistant message.
            const currentAssistant = activeAssistant(draft)
            if (currentAssistant) currentAssistant.time.completed = event.properties.timestamp
            draft.push({
              id: event.id,
              type: "assistant",
              agent: event.properties.agent,
              model: event.properties.model,
              content: [],
              snapshot: event.properties.snapshot ? { start: event.properties.snapshot } : undefined,
              time: { created: event.properties.timestamp },
            })
          })
          break
        case "session.next.step.ended":
          update(event.properties.sessionID, (draft) => {
            const currentAssistant = activeAssistant(draft)
            if (!currentAssistant) return
            currentAssistant.time.completed = event.properties.timestamp
            currentAssistant.finish = event.properties.finish
            currentAssistant.cost = event.properties.cost
            currentAssistant.tokens = event.properties.tokens
            if (event.properties.snapshot)
              currentAssistant.snapshot = { ...currentAssistant.snapshot, end: event.properties.snapshot }
          })
          break
        case "session.next.text.started":
          update(event.properties.sessionID, (draft) => {
            activeAssistant(draft)?.content.push({ type: "text", text: "" })
          })
          break
        case "session.next.text.delta":
          update(event.properties.sessionID, (draft) => {
            const match = latestText(activeAssistant(draft))
            if (match) match.text += event.properties.delta
          })
          break
        case "session.next.text.ended":
          update(event.properties.sessionID, (draft) => {
            // The final event carries the authoritative full text.
            const match = latestText(activeAssistant(draft))
            if (match) match.text = event.properties.text
          })
          break
        case "session.next.tool.input.started":
          update(event.properties.sessionID, (draft) => {
            activeAssistant(draft)?.content.push({
              type: "tool",
              id: event.properties.callID,
              name: event.properties.name,
              time: { created: event.properties.timestamp },
              state: { status: "pending", input: "" },
            })
          })
          break
        case "session.next.tool.input.delta":
          update(event.properties.sessionID, (draft) => {
            const match = latestTool(activeAssistant(draft), event.properties.callID)
            if (match?.state.status === "pending") match.state.input += event.properties.delta
          })
          break
        case "session.next.tool.input.ended":
          break
        case "session.next.tool.called":
          update(event.properties.sessionID, (draft) => {
            const match = latestTool(activeAssistant(draft), event.properties.callID)
            if (!match) return
            match.time.ran = event.properties.timestamp
            match.provider = event.properties.provider
            match.state = { status: "running", input: event.properties.input, structured: {}, content: [] }
          })
          break
        case "session.next.tool.progress":
          update(event.properties.sessionID, (draft) => {
            const match = latestTool(activeAssistant(draft), event.properties.callID)
            if (match?.state.status !== "running") return
            match.state.structured = event.properties.structured
            match.state.content = [...event.properties.content]
          })
          break
        case "session.next.tool.success":
          update(event.properties.sessionID, (draft) => {
            const match = latestTool(activeAssistant(draft), event.properties.callID)
            if (match?.state.status !== "running") return
            match.state = {
              status: "completed",
              input: match.state.input,
              structured: event.properties.structured,
              content: [...event.properties.content],
            }
            match.provider = event.properties.provider
            match.time.completed = event.properties.timestamp
          })
          break
        case "session.next.tool.error":
          update(event.properties.sessionID, (draft) => {
            const match = latestTool(activeAssistant(draft), event.properties.callID)
            if (match?.state.status !== "running") return
            match.state = {
              status: "error",
              error: event.properties.error,
              input: match.state.input,
              structured: match.state.structured,
              content: match.state.content,
            }
            match.provider = event.properties.provider
            match.time.completed = event.properties.timestamp
          })
          break
        case "session.next.reasoning.started":
          update(event.properties.sessionID, (draft) => {
            activeAssistant(draft)?.content.push({
              type: "reasoning",
              id: event.properties.reasoningID,
              text: "",
            })
          })
          break
        case "session.next.reasoning.delta":
          update(event.properties.sessionID, (draft) => {
            const match = latestReasoning(activeAssistant(draft), event.properties.reasoningID)
            if (match) match.text += event.properties.delta
          })
          break
        case "session.next.reasoning.ended":
          update(event.properties.sessionID, (draft) => {
            const match = latestReasoning(activeAssistant(draft), event.properties.reasoningID)
            if (match) match.text = event.properties.text
          })
          break
        case "session.next.retried":
          // Intentionally ignored by this projection.
          break
        case "session.next.compaction.started":
          update(event.properties.sessionID, (draft) => {
            draft.push({
              id: event.id,
              type: "compaction",
              reason: event.properties.reason,
              summary: "",
              time: { created: event.properties.timestamp },
            })
          })
          break
        case "session.next.compaction.delta":
          update(event.properties.sessionID, (draft) => {
            const match = activeCompaction(draft)
            if (match) match.summary += event.properties.text
          })
          break
        case "session.next.compaction.ended":
          update(event.properties.sessionID, (draft) => {
            const match = activeCompaction(draft)
            if (!match) return
            match.summary = event.properties.text
            match.include = event.properties.include
          })
          break
      }
    })
    const result = {
      data: store,
      session: {
        message: {
          // Replace a session's messages wholesale from the v2 API;
          // reconcile() keeps referential stability for unchanged entries.
          async sync(sessionID: string) {
            const response = await sdk.client.v2.session.messages({ sessionID })
            setStore("messages", sessionID, reconcile(response.data?.items ?? []))
          },
          // Read-only accessor; returns [] for sessions with no messages yet.
          fromSession(sessionID: string) {
            const messages = store.messages[sessionID]
            if (!messages) return []
            return messages
          },
        },
      },
    }
    return result
  },
})

View File

@@ -27,11 +27,11 @@ import { createSimpleContext } from "./helper"
import type { Snapshot } from "@/snapshot"
import { useExit } from "./exit"
import { useArgs } from "./args"
import { useKV } from "./kv"
import { batch, onMount } from "solid-js"
import * as Log from "@opencode-ai/core/util/log"
import { emptyConsoleState, type ConsoleState } from "@/config/console-state"
import path from "path"
import { useKV } from "./kv"
export const { use: useSync, provider: SyncProvider } = createSimpleContext({
name: "Sync",
@@ -110,6 +110,7 @@ export const { use: useSync, provider: SyncProvider } = createSimpleContext({
const project = useProject()
const sdk = useSDK()
const kv = useKV()
const [autoaccept] = kv.signal<"none" | "edit">("permission_auto_accept", "edit")
const fullSyncedSessions = new Set<string>()
let syncedWorkspace = project.workspace.current()
@@ -152,6 +153,13 @@ export const { use: useSync, provider: SyncProvider } = createSimpleContext({
case "permission.asked": {
const request = event.properties
if (autoaccept() === "edit" && request.permission === "edit") {
sdk.client.permission.reply({
reply: "once",
requestID: request.id,
})
break
}
const requests = store.permission[request.sessionID]
if (!requests) {
setStore("permission", request.sessionID, [request])

File diff suppressed because it is too large Load Diff

View File

@@ -7,6 +7,7 @@ import SidebarTodo from "../feature-plugins/sidebar/todo"
import SidebarFiles from "../feature-plugins/sidebar/files"
import SidebarFooter from "../feature-plugins/sidebar/footer"
import PluginManager from "../feature-plugins/system/plugins"
import SessionV2Debug from "../feature-plugins/system/session-v2"
import type { TuiPlugin, TuiPluginModule } from "@opencode-ai/plugin/tui"
export type InternalTuiPlugin = TuiPluginModule & {
@@ -24,4 +25,5 @@ export const INTERNAL_TUI_PLUGINS: InternalTuiPlugin[] = [
SidebarFiles,
SidebarFooter,
PluginManager,
SessionV2Debug,
]

View File

@@ -37,7 +37,8 @@ import { Locale } from "@/util/locale"
import type { Tool } from "@/tool/tool"
import type { ReadTool } from "@/tool/read"
import type { WriteTool } from "@/tool/write"
import { BashTool } from "@/tool/bash"
import { ShellTool } from "@/tool/shell"
import { ShellToolID } from "@/tool/shell/id"
import type { GlobTool } from "@/tool/glob"
import { TodoWriteTool } from "@/tool/todo"
import type { GrepTool } from "@/tool/grep"
@@ -607,6 +608,7 @@ export function Session() {
{
title: sidebarVisible() ? "Hide sidebar" : "Show sidebar",
value: "session.sidebar.toggle",
search: "toggle sidebar",
keybind: "sidebar_toggle",
category: "Session",
onSelect: (dialog) => {
@@ -631,6 +633,7 @@ export function Session() {
{
title: showTimestamps() ? "Hide timestamps" : "Show timestamps",
value: "session.toggle.timestamps",
search: "toggle timestamps",
category: "Session",
slash: {
name: "timestamps",
@@ -644,6 +647,7 @@ export function Session() {
{
title: showThinking() ? "Hide thinking" : "Show thinking",
value: "session.toggle.thinking",
search: "toggle thinking",
keybind: "display_thinking",
category: "Session",
slash: {
@@ -658,6 +662,7 @@ export function Session() {
{
title: showDetails() ? "Hide tool details" : "Show tool details",
value: "session.toggle.actions",
search: "toggle tool details",
keybind: "tool_details",
category: "Session",
onSelect: (dialog) => {
@@ -666,8 +671,9 @@ export function Session() {
},
},
{
title: "Toggle session scrollbar",
title: showScrollbar() ? "Hide session scrollbar" : "Show session scrollbar",
value: "session.toggle.scrollbar",
search: "toggle session scrollbar",
keybind: "scrollbar_toggle",
category: "Session",
onSelect: (dialog) => {
@@ -1552,8 +1558,8 @@ function ToolPart(props: { last: boolean; part: ToolPart; message: AssistantMess
return (
<Show when={!shouldHide()}>
<Switch>
<Match when={props.part.tool === "bash"}>
<Bash {...toolprops} />
<Match when={props.part.tool === ShellToolID.id}>
<Shell {...toolprops} />
</Match>
<Match when={props.part.tool === "glob"}>
<Glob {...toolprops} />
@@ -1784,7 +1790,7 @@ function BlockTool(props: {
)
}
function Bash(props: ToolProps<typeof BashTool>) {
function Shell(props: ToolProps<typeof ShellTool>) {
const { theme } = useTheme()
const sync = useSync()
const isRunning = createMemo(() => props.part.state.status === "running")
@@ -1960,12 +1966,15 @@ function Task(props: ToolProps<typeof TaskTool>) {
const { navigate } = useRoute()
const sync = useSync()
onMount(() => {
if (props.metadata.sessionId && !sync.data.message[props.metadata.sessionId]?.length)
void sync.session.sync(props.metadata.sessionId)
createEffect(() => {
const sessionID = props.metadata.sessionId
if (!sessionID) return
if (sync.data.message[sessionID]?.length) return
void sync.session.sync(sessionID)
})
const messages = createMemo(() => sync.data.message[props.metadata.sessionId ?? ""] ?? [])
const childSessionID = createMemo(() => props.metadata.sessionId)
const messages = createMemo(() => sync.data.message[childSessionID() ?? ""] ?? [])
const tools = createMemo(() => {
return messages().flatMap((msg) =>
@@ -1979,7 +1988,16 @@ function Task(props: ToolProps<typeof TaskTool>) {
tools().findLast((x) => (x.state.status === "running" || x.state.status === "completed") && x.state.title),
)
const isRunning = createMemo(() => props.part.state.status === "running")
const isBackground = createMemo(() => props.metadata.background === true)
const isBackgroundRunning = createMemo(() => {
const sessionID = childSessionID()
if (!isBackground() || !sessionID) return false
const status = sync.data.session_status[sessionID]?.type
if (status === "busy" || status === "retry") return true
if (status === "idle") return false
return !messages().some((x) => x.role === "assistant" && x.time.completed)
})
const isRunning = createMemo(() => props.part.state.status === "running" || isBackgroundRunning())
const duration = createMemo(() => {
const first = messages().find((x) => x.role === "user")?.time.created
@@ -1990,7 +2008,8 @@ function Task(props: ToolProps<typeof TaskTool>) {
const content = createMemo(() => {
if (!props.input.description) return ""
let content = [`${Locale.titlecase(props.input.subagent_type ?? "General")} Task — ${props.input.description}`]
const description = isBackground() ? `${props.input.description} (background)` : props.input.description
let content = [`${Locale.titlecase(props.input.subagent_type ?? "General")} Task — ${description}`]
if (isRunning() && tools().length > 0) {
// content[0] += ` · ${tools().length} toolcalls`
@@ -2001,7 +2020,7 @@ function Task(props: ToolProps<typeof TaskTool>) {
} else content.push(`${tools().length} toolcalls`)
}
if (props.part.state.status === "completed") {
if (!isRunning() && props.part.state.status === "completed") {
content.push(`${tools().length} toolcalls · ${Locale.duration(duration())}`)
}
@@ -2016,8 +2035,9 @@ function Task(props: ToolProps<typeof TaskTool>) {
pending="Delegating..."
part={props.part}
onClick={() => {
if (props.metadata.sessionId) {
navigate({ type: "session", sessionID: props.metadata.sessionId })
const sessionID = childSessionID()
if (sessionID) {
navigate({ type: "session", sessionID })
}
}}
>

View File

@@ -15,6 +15,7 @@ import { LANGUAGE_EXTENSIONS } from "@/lsp/language"
import { Keybind } from "@/util/keybind"
import { Locale } from "@/util/locale"
import { Global } from "@opencode-ai/core/global"
import { ShellToolID } from "@/tool/shell/id"
import { useDialog } from "../../ui/dialog"
import { getScrollAcceleration } from "../../util/scroll"
import { useTuiConfig } from "../../context/tui-config"
@@ -287,7 +288,7 @@ export function PermissionPrompt(props: { request: PermissionRequest }) {
}
}
if (permission === "bash") {
if (permission === ShellToolID.id) {
const title =
typeof data.description === "string" && data.description ? data.description : "Shell command"
const command = typeof data.command === "string" ? data.command : ""

View File

@@ -8,6 +8,7 @@ import { UI } from "@/cli/ui"
import * as Log from "@opencode-ai/core/util/log"
import { errorMessage } from "@/util/error"
import { withTimeout } from "@/util/timeout"
import { Instance } from "@/project/instance"
import { withNetworkOptions, resolveNetworkOptionsNoConfig } from "@/cli/network"
import { Filesystem } from "@/util/filesystem"
import type { GlobalEvent } from "@opencode-ai/sdk/v2"
@@ -190,7 +191,11 @@ export const TuiThreadCommand = cmd({
const prompt = await input(args.prompt)
const config = await TuiConfig.get()
const network = resolveNetworkOptionsNoConfig(args)
const network = await Instance.provide({
directory: cwd,
fn: () => resolveNetworkOptionsNoConfig(args),
})
const external =
process.argv.includes("--port") ||
process.argv.includes("--hostname") ||

View File

@@ -37,6 +37,7 @@ export interface DialogSelectOption<T = any> {
title: string
value: T
description?: string
search?: string
footer?: JSX.Element | string
category?: string
categoryView?: JSX.Element
@@ -93,8 +94,8 @@ export function DialogSelect<T>(props: DialogSelectProps<T>) {
// users typically search by the item name, and not its category.
const result = fuzzysort
.go(needle, options, {
keys: ["title", "category"],
scoreFn: (r) => r[0].score * 2 + r[1].score,
keys: ["title", "category", "search"],
scoreFn: (r) => r[0].score * 2 + r[1].score + r[2].score,
})
.map((x) => x.obj)

View File

@@ -5,6 +5,7 @@ import { withNetworkOptions, resolveNetworkOptions } from "../network"
import { Flag } from "@opencode-ai/core/flag/flag"
import open from "open"
import { networkInterfaces } from "os"
import { bootstrap } from "../bootstrap"
function getNetworkIPs() {
const nets = networkInterfaces()
@@ -36,7 +37,7 @@ export const WebCommand = cmd({
if (!Flag.OPENCODE_SERVER_PASSWORD) {
UI.println(UI.Style.TEXT_WARNING_BOLD + "! OPENCODE_SERVER_PASSWORD is not set; server is unsecured.")
}
const opts = await resolveNetworkOptions(args)
const opts = await bootstrap(process.cwd(), () => resolveNetworkOptions(args))
const server = await Server.listen(opts)
UI.empty()
UI.println(UI.logo(" "))

View File

@@ -39,6 +39,7 @@ import { ConfigPaths } from "./paths"
import { ConfigPermission } from "./permission"
import { ConfigPlugin } from "./plugin"
import { ConfigProvider } from "./provider"
import { ConfigReference } from "./reference"
import { ConfigServer } from "./server"
import { ConfigSkills } from "./skills"
import { ConfigVariable } from "./variable"
@@ -114,6 +115,9 @@ export const Info = Schema.Struct({
description: "Command configuration, see https://opencode.ai/docs/commands",
}),
skills: Schema.optional(ConfigSkills.Info).annotate({ description: "Additional skill folder paths" }),
reference: Schema.optional(ConfigReference.Info).annotate({
description: "Named git or local directory references that can be @ mentioned as Scout-backed subagents",
}),
watcher: Schema.optional(
Schema.Struct({
ignore: Schema.optional(Schema.mutable(Schema.Array(Schema.String))),
@@ -173,6 +177,7 @@ export const Info = Schema.Struct({
// subagent
general: Schema.optional(ConfigAgent.Info),
explore: Schema.optional(ConfigAgent.Info),
scout: Schema.optional(ConfigAgent.Info),
// specialized
title: Schema.optional(ConfigAgent.Info),
summary: Schema.optional(ConfigAgent.Info),

View File

@@ -35,6 +35,9 @@ const InputObject = Schema.StructWithRest(
question: Schema.optional(Action),
webfetch: Schema.optional(Action),
websearch: Schema.optional(Action),
codesearch: Schema.optional(Action),
repo_clone: Schema.optional(Rule),
repo_overview: Schema.optional(Rule),
lsp: Schema.optional(Rule),
doom_loop: Schema.optional(Action),
skill: Schema.optional(Rule),

View File

@@ -0,0 +1,27 @@
// Configuration schema for named "references": external git repositories or local
// directories that can be @ mentioned and inspected by a Scout-backed subagent.
export * as ConfigReference from "./reference"
import { Schema } from "effect"
import { zod } from "@/util/effect-zod"
import { withStatics } from "@/util/schema"

// Git-backed reference: a repository plus an optional branch/ref to clone.
const Git = Schema.Struct({
  repository: Schema.String.annotate({
    description: "Git repository URL, host/path reference, or GitHub owner/repo shorthand",
  }),
  branch: Schema.optional(Schema.String).annotate({
    description: "Branch or ref Scout should clone and inspect",
  }),
})

// Local reference: a directory path on disk.
const Local = Schema.Struct({
  path: Schema.String.annotate({
    description: "Absolute path, ~/ path, or workspace-relative path to a local reference directory",
  }),
})

// A single reference entry: a bare string shorthand, or an explicit Git/Local object.
export const Entry = Schema.Union([Schema.String, Git, Local]).annotate({ identifier: "ReferenceConfigEntry" })

// Map of reference name -> entry; withStatics attaches a zod projection of the schema.
export const Info = Schema.Record(Schema.String, Entry)
  .annotate({ identifier: "ReferenceConfig" })
  .pipe(withStatics((s) => ({ zod: zod(s) })))
export type Info = Schema.Schema.Type<typeof Info>

View File

@@ -45,11 +45,30 @@ import { Workspace } from "@/control-plane/workspace"
import { Worktree } from "@/worktree"
import { Pty } from "@/pty"
import { Installation } from "@/installation"
import * as Effect from "effect/Effect"
import { ShareNext } from "@/share/share-next"
import { SessionShare } from "@/share/session"
import { SyncEvent } from "@/sync"
import { Npm } from "@opencode-ai/core/npm"
import { memoMap } from "@opencode-ai/core/effect/memo-map"
import { BackgroundJob } from "@/background/job"
// Adjusts the default Config layer to ensure that plugins are always initialised before
// any other layers read the current config.
// Every config accessor is wrapped so plugin.init() runs (plugins may mutate config)
// before the underlying getter evaluates. Presumably plugin.init() is idempotent /
// memoised after the first call — TODO confirm against Plugin.Service.
const ConfigWithPluginPriority = Layer.effect(
  Config.Service,
  Effect.gen(function* () {
    // Resolve the real services, then return a Config facade delegating to them.
    const config = yield* Config.Service
    const plugin = yield* Plugin.Service
    return {
      ...config,
      // Each read forces plugin initialisation first, then delegates to the original getter.
      get: () => Effect.andThen(plugin.init(), config.get),
      getGlobal: () => Effect.andThen(plugin.init(), config.getGlobal),
      getConsoleState: () => Effect.andThen(plugin.init(), config.getConsoleState),
    }
  }),
).pipe(Layer.provide(Layer.merge(Plugin.defaultLayer, Config.defaultLayer)))
export const AppLayer = Layer.mergeAll(
Npm.defaultLayer,
@@ -57,7 +76,7 @@ export const AppLayer = Layer.mergeAll(
Bus.defaultLayer,
Auth.defaultLayer,
Account.defaultLayer,
Config.defaultLayer,
ConfigWithPluginPriority,
Git.defaultLayer,
Ripgrep.defaultLayer,
File.defaultLayer,
@@ -75,6 +94,7 @@ export const AppLayer = Layer.mergeAll(
Todo.defaultLayer,
Session.defaultLayer,
SessionStatus.defaultLayer,
BackgroundJob.defaultLayer,
SessionRunState.defaultLayer,
SessionProcessor.defaultLayer,
SessionCompaction.defaultLayer,

View File

@@ -2,6 +2,7 @@ import z from "zod"
import { randomBytes } from "crypto"
const prefixes = {
job: "job",
event: "evt",
session: "ses",
message: "msg",

View File

@@ -1,4 +1,3 @@
import { Plugin } from "../plugin"
import { Format } from "../format"
import { LSP } from "@/lsp/lsp"
import { File } from "../file"
@@ -7,6 +6,7 @@ import * as Project from "./project"
import * as Vcs from "./vcs"
import { Bus } from "../bus"
import { Command } from "../command"
import { Plugin } from "../plugin"
import { InstanceState } from "@/effect/instance-state"
import { FileWatcher } from "@/file/watcher"
import { ShareNext } from "@/share/share-next"
@@ -17,6 +17,21 @@ export interface Interface {
readonly run: Effect.Effect<void>
}
// Wraps the default Config layer so plugin initialisation always runs before any
// config read; plugins can mutate config, so every accessor defers to plugin.init()
// first. NOTE(review): this duplicates ConfigWithPluginPriority in the app layer —
// consider extracting a shared helper.
const configWithPluginPriority = Layer.effect(
  Config.Service,
  Effect.gen(function* () {
    const config = yield* Config.Service
    const plugin = yield* Plugin.Service
    return {
      ...config,
      // Force plugin initialisation before delegating each read to the real config.
      get: () => Effect.andThen(plugin.init(), config.get),
      getGlobal: () => Effect.andThen(plugin.init(), config.getGlobal),
      getConsoleState: () => Effect.andThen(plugin.init(), config.getConsoleState),
    }
  }),
).pipe(Layer.provide(Layer.merge(Plugin.defaultLayer, Config.defaultLayer)))
export class Service extends Context.Service<Service, Interface>()("@opencode/InstanceBootstrap") {}
export const layer = Layer.effect(
@@ -31,7 +46,6 @@ export const layer = Layer.effect(
const fileWatcher = yield* FileWatcher.Service
const format = yield* Format.Service
const lsp = yield* LSP.Service
const plugin = yield* Plugin.Service
const shareNext = yield* ShareNext.Service
const snapshot = yield* Snapshot.Service
const vcs = yield* Vcs.Service
@@ -41,8 +55,6 @@ export const layer = Layer.effect(
yield* Effect.logInfo("bootstrapping", { directory: ctx.directory })
// everything depends on config so eager load it for nice traces
yield* config.get()
// Plugin can mutate config so it has to be initialized before anything else.
yield* plugin.init()
yield* Effect.all(
[lsp, shareNext, format, file, fileWatcher, vcs, snapshot].map((s) => Effect.forkDetach(s.init())),
).pipe(Effect.withSpan("InstanceBootstrap.init"))
@@ -62,7 +74,7 @@ export const layer = Layer.effect(
export const defaultLayer: Layer.Layer<Service> = layer.pipe(
Layer.provide([
Bus.layer,
Config.defaultLayer,
configWithPluginPriority,
File.defaultLayer,
FileWatcher.defaultLayer,
Format.defaultLayer,

View File

@@ -178,10 +178,7 @@ export const layer: Layer.Layer<Service, never, Project.Service> = Layer.effect(
return yield* cachedDisposeAll
})
const provide = <A, E, R, R2>(
input: LoadInput<R2>,
effect: Effect.Effect<A, E, R>,
): Effect.Effect<A, E, R | R2> =>
const provide = <A, E, R, R2>(input: LoadInput<R2>, effect: Effect.Effect<A, E, R>): Effect.Effect<A, E, R | R2> =>
load(input).pipe(Effect.flatMap((ctx) => effect.pipe(Effect.provideService(InstanceRef, ctx))))
yield* Effect.addFinalizer(() => disposeAll().pipe(Effect.ignore))

View File

@@ -9,13 +9,15 @@ export type { LoadInput } from "./instance-store"
type LegacyLoadInput = {
directory: string
init?: Effect.Effect<void>
init?: () => Promise<unknown>
project?: Project.Info
worktree?: string
}
// Bind ALS around init so legacy code reachable through it (Instance.directory reads, etc.)
// stays bound. The Effect-typed init also gets InstanceRef provided by the store.
// Promise-style legacy inits often read Instance.directory etc. from the ALS context.
// The new Effect-typed init path doesn't bind ALS — it provides InstanceRef. To keep
// legacy inits working without forcing every test to convert, bind ALS around the
// Promise call here using the instance ctx that the store provides via InstanceRef.
const liftLegacyInput = (input: LegacyLoadInput): InstanceStore.LoadInput => {
const { init, ...rest } = input
if (!init) return rest
@@ -23,15 +25,7 @@ const liftLegacyInput = (input: LegacyLoadInput): InstanceStore.LoadInput => {
...rest,
init: Effect.gen(function* () {
const ctx = yield* InstanceRef
if (!ctx) return yield* init
yield* Effect.callback<void>((resume) => {
context.provide(ctx, () => {
Effect.runPromise(init).then(
() => resume(Effect.void),
(err) => resume(Effect.die(err)),
)
})
})
yield* Effect.promise(() => (ctx ? context.provide(ctx, init) : init()))
}),
}
}
@@ -40,10 +34,9 @@ export const Instance = {
load(input: LegacyLoadInput): Promise<InstanceContext> {
return InstanceStore.runtime.runPromise((store) => store.load(liftLegacyInput(input)))
},
async provide<R>(input: { directory: string; init?: Effect.Effect<void>; fn: () => R }): Promise<R> {
return context.provide(
await Instance.load({ directory: input.directory, init: input.init }),
async () => input.fn(),
async provide<R>(input: { directory: string; init?: () => Promise<unknown>; fn: () => R }): Promise<R> {
return context.provide(await Instance.load({ directory: input.directory, init: input.init }), async () =>
input.fn(),
)
},
get current() {

View File

@@ -6,6 +6,7 @@ import z from "zod"
import { BusEvent } from "@/bus/bus-event"
import { SyncEvent } from "@/sync"
import { GlobalBus } from "@/bus/global"
import { Bus } from "@/bus"
import { AppRuntime } from "@/effect/app-runtime"
import { AsyncQueue } from "@/util/queue"
import { Instance } from "../../project/instance"
@@ -28,6 +29,7 @@ async function streamEvents(c: Context, subscribe: (q: AsyncQueue<string | null>
q.push(
JSON.stringify({
payload: {
id: Bus.createID(),
type: "server.connected",
properties: {},
},
@@ -39,6 +41,7 @@ async function streamEvents(c: Context, subscribe: (q: AsyncQueue<string | null>
q.push(
JSON.stringify({
payload: {
id: Bus.createID(),
type: "server.heartbeat",
properties: {},
},

View File

@@ -42,6 +42,7 @@ export const EventRoutes = () =>
q.push(
JSON.stringify({
id: Bus.createID(),
type: "server.connected",
properties: {},
}),
@@ -50,9 +51,10 @@ export const EventRoutes = () =>
// Send heartbeat every 10s to prevent stalled proxy streams.
const heartbeat = setInterval(() => {
q.push(
JSON.stringify({
type: "server.heartbeat",
properties: {},
JSON.stringify({
id: Bus.createID(),
type: "server.heartbeat",
properties: {},
}),
)
}, 10_000)

View File

@@ -19,6 +19,7 @@ import { SessionApi } from "./groups/session"
import { SyncApi } from "./groups/sync"
import { TuiApi } from "./groups/tui"
import { WorkspaceApi } from "./groups/workspace"
import { V2Api } from "./groups/v2"
// SSE event schemas built from the same BusEvent/SyncEvent registries that
// the Hono spec uses, so both specs emit identical Event/SyncEvent components.
@@ -40,6 +41,7 @@ export const InstanceHttpApi = HttpApi.make("opencode-instance")
.addHttpApi(ProviderApi)
.addHttpApi(SessionApi)
.addHttpApi(SyncApi)
.addHttpApi(V2Api)
.addHttpApi(TuiApi)
.addHttpApi(WorkspaceApi)

View File

@@ -41,12 +41,12 @@ function eventResponse(bus: Bus.Interface) {
const events = bus.subscribeAll().pipe(Stream.takeUntil((event) => event.type === Bus.InstanceDisposed.type))
const heartbeat = Stream.tick("10 seconds").pipe(
Stream.drop(1),
Stream.map(() => ({ type: "server.heartbeat", properties: {} })),
Stream.map(() => ({ id: Bus.createID(), type: "server.heartbeat", properties: {} })),
)
log.info("event connected")
return HttpServerResponse.stream(
Stream.make({ type: "server.connected", properties: {} }).pipe(
Stream.make({ id: Bus.createID(), type: "server.connected", properties: {} }).pipe(
Stream.concat(events.pipe(Stream.merge(heartbeat, { haltStrategy: "left" }))),
Stream.map(eventData),
Stream.pipeThroughChannel(Sse.encode()),

View File

@@ -0,0 +1,14 @@
import { HttpApi, OpenApi } from "effect/unstable/httpapi"
import { MessageGroup } from "./v2/message"
import { SessionGroup } from "./v2/session"

// Experimental v2 HttpApi: aggregates the session and message route groups and
// tags the combined surface with OpenAPI metadata.
export const V2Api = HttpApi.make("v2")
  .add(SessionGroup)
  .add(MessageGroup)
  .annotateMerge(
    OpenApi.annotations({
      title: "opencode experimental HttpApi",
      version: "0.0.1",
      description: "Experimental HttpApi surface for selected instance routes.",
    }),
  )

View File

@@ -0,0 +1,69 @@
import { SessionID } from "@/session/schema"
import { SessionMessage } from "@/v2/session-message"
import { Schema } from "effect"
import { HttpApiEndpoint, HttpApiError, HttpApiGroup, OpenApi } from "effect/unstable/httpapi"
import { Authorization } from "../../middleware/authorization"

// Experimental v2 message routes. Currently a single cursor-paginated listing
// endpoint; all routes require Authorization.
export const MessageGroup = HttpApiGroup.make("v2.message")
  .add(
    HttpApiEndpoint.get("messages", "/api/session/:sessionID/message", {
      params: { sessionID: SessionID },
      // Two mutually exclusive query shapes: first-page (optional order, no cursor)
      // vs. follow-up page (cursor required, order forbidden — the cursor embeds it).
      // Schema.Never on the excluded key makes each branch reject the other's params.
      query: Schema.Union([
        Schema.Struct({
          limit: Schema.optional(
            Schema.NumberFromString.check(
              Schema.isInt(),
              Schema.isGreaterThanOrEqualTo(1),
              Schema.isLessThanOrEqualTo(200),
            ),
          ).annotate({
            description:
              "Maximum number of messages to return. When omitted, the endpoint returns its default page size.",
          }),
          order: Schema.optional(Schema.Union([Schema.Literal("asc"), Schema.Literal("desc")])).annotate({
            description: "Message order for the first page. Use desc for newest first or asc for oldest first.",
          }),
          cursor: Schema.optional(Schema.Never),
        }),
        Schema.Struct({
          limit: Schema.optional(
            Schema.NumberFromString.check(
              Schema.isInt(),
              Schema.isGreaterThanOrEqualTo(1),
              Schema.isLessThanOrEqualTo(200),
            ),
          ).annotate({
            description:
              "Maximum number of messages to return. When omitted, the endpoint returns its default page size.",
          }),
          cursor: Schema.String.annotate({
            description:
              "Opaque pagination cursor returned as cursor.previous or cursor.next in the previous response. Do not combine with order.",
          }),
          order: Schema.optional(Schema.Never),
        }),
      ]).annotate({ identifier: "V2SessionMessagesQuery" }),
      // Page of messages plus opaque cursors for moving backwards/forwards.
      success: Schema.Struct({
        items: Schema.Array(SessionMessage.Message),
        cursor: Schema.Struct({
          previous: Schema.String.pipe(Schema.optional),
          next: Schema.String.pipe(Schema.optional),
        }),
      }).annotate({ identifier: "V2SessionMessagesResponse" }),
      // Raised by the handler when the supplied cursor fails to decode.
      error: HttpApiError.BadRequest,
    }).annotateMerge(
      OpenApi.annotations({
        identifier: "v2.session.messages",
        summary: "Get v2 session messages",
        description:
          "Retrieve projected v2 messages for a session. Items keep the requested order across pages; use cursor.next or cursor.previous to move through the ordered timeline.",
      }),
    ),
  )
  .annotateMerge(
    OpenApi.annotations({
      title: "v2 messages",
      description: "Experimental v2 message routes.",
    }),
  )
  .middleware(Authorization)

View File

@@ -0,0 +1,128 @@
import { WorkspaceID } from "@/control-plane/schema"
import { SessionID } from "@/session/schema"
import { SessionMessage } from "@/v2/session-message"
import { Prompt } from "@/v2/session-prompt"
import { SessionV2 } from "@/v2/session"
import { Schema, SchemaGetter } from "effect"
import { HttpApiEndpoint, HttpApiError, HttpApiGroup, HttpApiSchema, OpenApi } from "effect/unstable/httpapi"
import { Authorization } from "../../middleware/authorization"

// Experimental v2 session routes: cursor-paginated listing plus prompt/compact/wait
// actions on a single session. All routes require Authorization.
export const SessionGroup = HttpApiGroup.make("v2.session")
  .add(
    HttpApiEndpoint.get("sessions", "/api/session", {
      // Two mutually exclusive query shapes: first-page (order + filters, no cursor)
      // vs. follow-up page (cursor only — it embeds the order and filters).
      // Schema.Never on the excluded keys makes each branch reject the other's params.
      query: Schema.Union([
        Schema.Struct({
          limit: Schema.optional(
            Schema.NumberFromString.check(
              Schema.isInt(),
              Schema.isGreaterThanOrEqualTo(1),
              Schema.isLessThanOrEqualTo(200),
            ),
          ).annotate({
            description: "Maximum number of sessions to return. Defaults to the newest 50 sessions.",
          }),
          order: Schema.optional(Schema.Union([Schema.Literal("asc"), Schema.Literal("desc")])).annotate({
            description: "Session order for the first page. Use desc for newest first or asc for oldest first.",
          }),
          directory: Schema.String.pipe(Schema.optional),
          path: Schema.String.pipe(Schema.optional),
          workspace: WorkspaceID.pipe(Schema.optional),
          // Query strings carry booleans as "true"/"false"; decode to a real boolean.
          roots: Schema.Literals(["true", "false"])
            .pipe(
              Schema.decodeTo(Schema.Boolean, {
                decode: SchemaGetter.transform((value) => value === "true"),
                encode: SchemaGetter.transform((value) => (value ? "true" : "false")),
              }),
            )
            .pipe(Schema.optional),
          start: Schema.NumberFromString.pipe(Schema.optional),
          search: Schema.String.pipe(Schema.optional),
          cursor: Schema.optional(Schema.Never),
        }),
        Schema.Struct({
          limit: Schema.optional(
            Schema.NumberFromString.check(
              Schema.isInt(),
              Schema.isGreaterThanOrEqualTo(1),
              Schema.isLessThanOrEqualTo(200),
            ),
          ).annotate({
            description: "Maximum number of sessions to return. Defaults to the newest 50 sessions.",
          }),
          cursor: Schema.String.annotate({
            description:
              "Opaque pagination cursor returned as cursor.previous or cursor.next in the previous response. Do not combine with order.",
          }),
          order: Schema.optional(Schema.Never),
          directory: Schema.optional(Schema.Never),
          path: Schema.optional(Schema.Never),
          workspace: Schema.optional(Schema.Never),
          roots: Schema.optional(Schema.Never),
          start: Schema.optional(Schema.Never),
          search: Schema.optional(Schema.Never),
        }),
      ]).annotate({ identifier: "V2SessionsQuery" }),
      // Page of sessions plus opaque cursors for moving backwards/forwards.
      success: Schema.Struct({
        items: Schema.Array(SessionV2.Info),
        cursor: Schema.Struct({
          previous: Schema.String.pipe(Schema.optional),
          next: Schema.String.pipe(Schema.optional),
        }),
      }).annotate({ identifier: "V2SessionsResponse" }),
      // Raised by the handler when the supplied cursor fails to decode.
      error: HttpApiError.BadRequest,
    }).annotateMerge(
      OpenApi.annotations({
        identifier: "v2.session.list",
        summary: "List v2 sessions",
        description:
          "Retrieve sessions in the requested order. Items keep that order across pages; use cursor.next or cursor.previous to move through the ordered list.",
      }),
    ),
  )
  .add(
    // Create a message and queue it for the agent loop; returns the created message.
    HttpApiEndpoint.post("prompt", "/api/session/:sessionID/prompt", {
      params: { sessionID: SessionID },
      payload: Schema.Struct({
        prompt: Prompt,
        delivery: SessionV2.Delivery.pipe(Schema.optional),
      }),
      success: SessionMessage.Message,
    }).annotateMerge(
      OpenApi.annotations({
        identifier: "v2.session.prompt",
        summary: "Send v2 message",
        description: "Create a v2 session message and queue it for the agent loop.",
      }),
    ),
  )
  .add(
    // Fire-and-forget compaction; responds 204.
    HttpApiEndpoint.post("compact", "/api/session/:sessionID/compact", {
      params: { sessionID: SessionID },
      success: HttpApiSchema.NoContent,
    }).annotateMerge(
      OpenApi.annotations({
        identifier: "v2.session.compact",
        summary: "Compact v2 session",
        description: "Compact a v2 session conversation.",
      }),
    ),
  )
  .add(
    // Block until the session's agent loop goes idle; responds 204.
    HttpApiEndpoint.post("wait", "/api/session/:sessionID/wait", {
      params: { sessionID: SessionID },
      success: HttpApiSchema.NoContent,
    }).annotateMerge(
      OpenApi.annotations({
        identifier: "v2.session.wait",
        summary: "Wait for v2 session",
        description: "Wait for a v2 session agent loop to become idle.",
      }),
    ),
  )
  .annotateMerge(
    OpenApi.annotations({
      title: "v2",
      description: "Experimental v2 routes.",
    }),
  )
  .middleware(Authorization)

View File

@@ -1,5 +1,6 @@
import { Config } from "@/config/config"
import { GlobalBus, type GlobalEvent as GlobalBusEvent } from "@/bus/global"
import { Bus } from "@/bus"
import { Installation } from "@/installation"
import { InstanceStore } from "@/project/instance-store"
import { InstallationVersion } from "@opencode-ai/core/installation/version"
@@ -42,11 +43,11 @@ function eventResponse() {
})
const heartbeat = Stream.tick("10 seconds").pipe(
Stream.drop(1),
Stream.map(() => ({ payload: { type: "server.heartbeat", properties: {} } })),
Stream.map(() => ({ payload: { id: Bus.createID(), type: "server.heartbeat", properties: {} } })),
)
return HttpServerResponse.stream(
Stream.make({ payload: { type: "server.connected", properties: {} } }).pipe(
Stream.make({ payload: { id: Bus.createID(), type: "server.connected", properties: {} } }).pipe(
Stream.concat(events.pipe(Stream.merge(heartbeat, { haltStrategy: "left" }))),
Stream.map(eventData),
Stream.pipeThroughChannel(Sse.encode()),

View File

@@ -0,0 +1,6 @@
import { SessionV2 } from "@/v2/session"
import { Layer } from "effect"
import { messageHandlers } from "./v2/message"
import { sessionHandlers } from "./v2/session"

// Combined handler layer for the experimental v2 API groups, with the SessionV2
// service provided once so both groups share the same implementation.
export const v2Handlers = Layer.mergeAll(sessionHandlers, messageHandlers).pipe(Layer.provide(SessionV2.defaultLayer))

View File

@@ -0,0 +1,60 @@
import { SessionMessage } from "@/v2/session-message"
import { SessionV2 } from "@/v2/session"
import { Effect, Schema } from "effect"
import * as DateTime from "effect/DateTime"
import { HttpApiBuilder, HttpApiError } from "effect/unstable/httpapi"
import { InstanceHttpApi } from "../../api"

// Default page size for GET /api/session/:sessionID/message when ?limit is omitted.
const DefaultMessagesLimit = 50

// Opaque pagination cursor: keyset position (message id + created-at millis) plus
// the order and paging direction so follow-up pages reproduce the original query.
const Cursor = Schema.Struct({
  id: SessionMessage.ID,
  time: Schema.Number,
  order: Schema.Union([Schema.Literal("asc"), Schema.Literal("desc")]),
  direction: Schema.Union([Schema.Literal("previous"), Schema.Literal("next")]),
})
const decodeCursor = Schema.decodeUnknownSync(Cursor)

// base64url <-> Cursor codec. decode throws on malformed input; the handler maps
// that to BadRequest.
const cursor = {
  encode(message: SessionMessage.Message, order: "asc" | "desc", direction: "previous" | "next") {
    return Buffer.from(
      JSON.stringify({ id: message.id, time: DateTime.toEpochMillis(message.time.created), order, direction }),
    ).toString("base64url")
  },
  decode(input: string) {
    return decodeCursor(JSON.parse(Buffer.from(input, "base64url").toString("utf8")))
  },
}

export const messageHandlers = HttpApiBuilder.group(InstanceHttpApi, "v2.message", (handlers) =>
  Effect.gen(function* () {
    const session = yield* SessionV2.Service
    return handlers.handle(
      "messages",
      Effect.fn(function* (ctx) {
        // A malformed cursor (bad base64/JSON/schema) is a client error, not a crash.
        const decoded = yield* Effect.try({
          try: () => (ctx.query.cursor ? cursor.decode(ctx.query.cursor) : undefined),
          catch: () => new HttpApiError.BadRequest({}),
        })
        // The cursor's embedded order wins over the query param so pages stay consistent.
        const order = decoded?.order ?? ctx.query.order ?? "desc"
        const messages = yield* session.messages({
          sessionID: ctx.params.sessionID,
          limit: ctx.query.limit ?? DefaultMessagesLimit,
          order,
          cursor: decoded ? { id: decoded.id, time: decoded.time, direction: decoded.direction } : undefined,
        })
        // Keyset endpoints: previous points before the first item, next past the last.
        const first = messages[0]
        const last = messages.at(-1)
        return {
          items: messages,
          cursor: {
            previous: first ? cursor.encode(first, order, "previous") : undefined,
            next: last ? cursor.encode(last, order, "next") : undefined,
          },
        }
      }),
    )
  }),
)

View File

@@ -0,0 +1,109 @@
import { WorkspaceID } from "@/control-plane/schema"
import { SessionV2 } from "@/v2/session"
import { Effect, Schema } from "effect"
import { HttpApiBuilder, HttpApiError, HttpApiSchema } from "effect/unstable/httpapi"
import { InstanceHttpApi } from "../../api"

// Default page size for GET /api/session when ?limit is omitted.
const DefaultSessionsLimit = 50

// Opaque pagination cursor for the v2 session list: keyset position (id + created
// time) plus the order, paging direction, and every list filter, so follow-up pages
// reproduce the original query exactly.
const SessionCursor = Schema.Struct({
  id: SessionV2.Info.fields.id,
  time: Schema.Number,
  order: Schema.Union([Schema.Literal("asc"), Schema.Literal("desc")]),
  direction: Schema.Union([Schema.Literal("previous"), Schema.Literal("next")]),
  directory: Schema.String.pipe(Schema.optional),
  path: Schema.String.pipe(Schema.optional),
  workspaceID: WorkspaceID.pipe(Schema.optional),
  roots: Schema.Boolean.pipe(Schema.optional),
  start: Schema.Number.pipe(Schema.optional),
  search: Schema.String.pipe(Schema.optional),
})
type SessionCursor = typeof SessionCursor.Type
const decodeCursor = Schema.decodeUnknownSync(SessionCursor)

// base64url <-> SessionCursor codec. decode throws on malformed input; the handler
// maps that to BadRequest.
const sessionCursor = {
  encode(
    session: SessionV2.Info,
    order: "asc" | "desc",
    direction: "previous" | "next",
    filters: Pick<SessionCursor, "directory" | "path" | "workspaceID" | "roots" | "start" | "search">,
  ) {
    // BUGFIX: spread `filters` FIRST. The handler passes a previously decoded cursor
    // here on follow-up pages, and at runtime it also carries id/time/order/direction.
    // Spreading it last clobbered the fresh keyset position and direction, so the
    // returned cursors never advanced past the first cursor-based page.
    return Buffer.from(
      JSON.stringify({ ...filters, id: session.id, time: session.time.created, order, direction }),
    ).toString("base64url")
  },
  decode(input: string) {
    return decodeCursor(JSON.parse(Buffer.from(input, "base64url").toString("utf8")))
  },
}

export const sessionHandlers = HttpApiBuilder.group(InstanceHttpApi, "v2.session", (handlers) =>
  Effect.gen(function* () {
    const session = yield* SessionV2.Service
    return handlers
      .handle(
        "sessions",
        Effect.fn(function* (ctx) {
          // A malformed cursor (bad base64/JSON/schema) is a client error.
          const decoded = yield* Effect.try({
            try: () => (ctx.query.cursor ? sessionCursor.decode(ctx.query.cursor) : undefined),
            catch: () => new HttpApiError.BadRequest({}),
          })
          // The cursor's embedded order/filters win over query params so pages stay consistent.
          const order = decoded?.order ?? ctx.query.order ?? "desc"
          const filters = decoded ?? {
            directory: ctx.query.directory,
            path: ctx.query.path,
            workspaceID: ctx.query.workspace ? WorkspaceID.make(ctx.query.workspace) : undefined,
            roots: ctx.query.roots,
            start: ctx.query.start,
            search: ctx.query.search,
          }
          const sessions = yield* session.list({
            limit: ctx.query.limit ?? DefaultSessionsLimit,
            order,
            directory: filters.directory,
            path: filters.path,
            workspaceID: filters.workspaceID,
            roots: filters.roots,
            start: filters.start,
            search: filters.search,
            cursor: decoded ? { id: decoded.id, time: decoded.time, direction: decoded.direction } : undefined,
          })
          // Keyset endpoints: previous points before the first item, next past the last.
          const first = sessions[0]
          const last = sessions.at(-1)
          return {
            items: sessions,
            cursor: {
              previous: first ? sessionCursor.encode(first, order, "previous", filters) : undefined,
              next: last ? sessionCursor.encode(last, order, "next", filters) : undefined,
            },
          }
        }),
      )
      .handle(
        "prompt",
        Effect.fn(function* (ctx) {
          // Queue the prompt for the agent loop; returns the created message.
          return yield* session.prompt({
            sessionID: ctx.params.sessionID,
            prompt: ctx.payload.prompt,
            delivery: ctx.payload.delivery ?? SessionV2.DefaultDelivery,
          })
        }),
      )
      .handle(
        "compact",
        Effect.fn(function* (ctx) {
          yield* session.compact(ctx.params.sessionID)
          return HttpApiSchema.NoContent.make()
        }),
      )
      .handle(
        "wait",
        Effect.fn(function* (ctx) {
          // Blocks until the session's agent loop is idle.
          yield* session.wait(ctx.params.sessionID)
          return HttpApiSchema.NoContent.make()
        }),
      )
  }),
)

View File

@@ -64,6 +64,7 @@ import { questionHandlers } from "./handlers/question"
import { sessionHandlers } from "./handlers/session"
import { syncHandlers } from "./handlers/sync"
import { tuiHandlers } from "./handlers/tui"
import { v2Handlers } from "./handlers/v2"
import { workspaceHandlers } from "./handlers/workspace"
import { instanceContextLayer, instanceRouterMiddleware } from "./middleware/instance-context"
import { workspaceRouterMiddleware, workspaceRoutingLayer } from "./middleware/workspace-routing"
@@ -115,6 +116,7 @@ const instanceApiRoutes = HttpApiBuilder.layer(InstanceHttpApi).pipe(
providerHandlers,
sessionHandlers,
syncHandlers,
v2Handlers,
tuiHandlers,
workspaceHandlers,
]),

View File

@@ -1,7 +1,8 @@
import { describeRoute, resolver, validator } from "hono-openapi"
import { Hono } from "hono"
import type { UpgradeWebSocket } from "hono/ws"
import { Effect } from "effect"
import { Context, Effect } from "effect"
import { Flag } from "@opencode-ai/core/flag/flag"
import z from "zod"
import { Format } from "@/format"
import { TuiRoutes } from "./tui"
@@ -24,12 +25,136 @@ import { ExperimentalRoutes } from "./experimental"
import { ProviderRoutes } from "./provider"
import { EventRoutes } from "./event"
import { SyncRoutes } from "./sync"
import { V2Routes } from "./v2"
import { InstanceMiddleware } from "./middleware"
import { jsonRequest } from "./trace"
import { ExperimentalHttpApiServer } from "./httpapi/server"
import { EventPaths } from "./httpapi/event"
import { ExperimentalPaths } from "./httpapi/groups/experimental"
import { FilePaths } from "./httpapi/groups/file"
import { InstancePaths } from "./httpapi/groups/instance"
import { McpPaths } from "./httpapi/groups/mcp"
import { PtyPaths } from "./httpapi/groups/pty"
import { SessionPaths } from "./httpapi/groups/session"
import { SyncPaths } from "./httpapi/groups/sync"
import { TuiPaths } from "./httpapi/groups/tui"
import { WorkspacePaths } from "./httpapi/groups/workspace"
export const InstanceRoutes = (upgrade: UpgradeWebSocket): Hono => {
const app = new Hono()
if (Flag.OPENCODE_EXPERIMENTAL_HTTPAPI) {
const handler = ExperimentalHttpApiServer.webHandler().handler
const context = Context.empty() as Context.Context<unknown>
app.all("/api/*", (c) => handler(c.req.raw, context))
app.get(EventPaths.event, (c) => handler(c.req.raw, context))
app.get("/question", (c) => handler(c.req.raw, context))
app.post("/question/:requestID/reply", (c) => handler(c.req.raw, context))
app.post("/question/:requestID/reject", (c) => handler(c.req.raw, context))
app.get("/permission", (c) => handler(c.req.raw, context))
app.post("/permission/:requestID/reply", (c) => handler(c.req.raw, context))
app.get("/config", (c) => handler(c.req.raw, context))
app.patch("/config", (c) => handler(c.req.raw, context))
app.get("/config/providers", (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.console, (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.consoleOrgs, (c) => handler(c.req.raw, context))
app.post(ExperimentalPaths.consoleSwitch, (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.tool, (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.toolIDs, (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.worktree, (c) => handler(c.req.raw, context))
app.post(ExperimentalPaths.worktree, (c) => handler(c.req.raw, context))
app.delete(ExperimentalPaths.worktree, (c) => handler(c.req.raw, context))
app.post(ExperimentalPaths.worktreeReset, (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.session, (c) => handler(c.req.raw, context))
app.get(ExperimentalPaths.resource, (c) => handler(c.req.raw, context))
app.get("/provider", (c) => handler(c.req.raw, context))
app.get("/provider/auth", (c) => handler(c.req.raw, context))
app.post("/provider/:providerID/oauth/authorize", (c) => handler(c.req.raw, context))
app.post("/provider/:providerID/oauth/callback", (c) => handler(c.req.raw, context))
app.get("/project", (c) => handler(c.req.raw, context))
app.get("/project/current", (c) => handler(c.req.raw, context))
app.post("/project/git/init", (c) => handler(c.req.raw, context))
app.patch("/project/:projectID", (c) => handler(c.req.raw, context))
app.get(FilePaths.findText, (c) => handler(c.req.raw, context))
app.get(FilePaths.findFile, (c) => handler(c.req.raw, context))
app.get(FilePaths.findSymbol, (c) => handler(c.req.raw, context))
app.get(FilePaths.list, (c) => handler(c.req.raw, context))
app.get(FilePaths.content, (c) => handler(c.req.raw, context))
app.get(FilePaths.status, (c) => handler(c.req.raw, context))
app.get(InstancePaths.path, (c) => handler(c.req.raw, context))
app.post(InstancePaths.dispose, (c) => handler(c.req.raw, context))
app.get(InstancePaths.vcs, (c) => handler(c.req.raw, context))
app.get(InstancePaths.vcsDiff, (c) => handler(c.req.raw, context))
app.get(InstancePaths.command, (c) => handler(c.req.raw, context))
app.get(InstancePaths.agent, (c) => handler(c.req.raw, context))
app.get(InstancePaths.skill, (c) => handler(c.req.raw, context))
app.get(InstancePaths.lsp, (c) => handler(c.req.raw, context))
app.get(InstancePaths.formatter, (c) => handler(c.req.raw, context))
app.get(McpPaths.status, (c) => handler(c.req.raw, context))
app.post(McpPaths.status, (c) => handler(c.req.raw, context))
app.post(McpPaths.auth, (c) => handler(c.req.raw, context))
app.post(McpPaths.authCallback, (c) => handler(c.req.raw, context))
app.post(McpPaths.authAuthenticate, (c) => handler(c.req.raw, context))
app.delete(McpPaths.auth, (c) => handler(c.req.raw, context))
app.post(McpPaths.connect, (c) => handler(c.req.raw, context))
app.post(McpPaths.disconnect, (c) => handler(c.req.raw, context))
app.post(SyncPaths.start, (c) => handler(c.req.raw, context))
app.post(SyncPaths.replay, (c) => handler(c.req.raw, context))
app.post(SyncPaths.history, (c) => handler(c.req.raw, context))
app.get(PtyPaths.list, (c) => handler(c.req.raw, context))
app.post(PtyPaths.create, (c) => handler(c.req.raw, context))
app.get(PtyPaths.get, (c) => handler(c.req.raw, context))
app.put(PtyPaths.update, (c) => handler(c.req.raw, context))
app.delete(PtyPaths.remove, (c) => handler(c.req.raw, context))
app.get(PtyPaths.connect, (c) => handler(c.req.raw, context))
app.get(SessionPaths.list, (c) => handler(c.req.raw, context))
app.get(SessionPaths.status, (c) => handler(c.req.raw, context))
app.get(SessionPaths.get, (c) => handler(c.req.raw, context))
app.get(SessionPaths.children, (c) => handler(c.req.raw, context))
app.get(SessionPaths.todo, (c) => handler(c.req.raw, context))
app.get(SessionPaths.diff, (c) => handler(c.req.raw, context))
app.get(SessionPaths.messages, (c) => handler(c.req.raw, context))
app.get(SessionPaths.message, (c) => handler(c.req.raw, context))
app.post(SessionPaths.create, (c) => handler(c.req.raw, context))
app.delete(SessionPaths.remove, (c) => handler(c.req.raw, context))
app.patch(SessionPaths.update, (c) => handler(c.req.raw, context))
app.post(SessionPaths.init, (c) => handler(c.req.raw, context))
app.post(SessionPaths.fork, (c) => handler(c.req.raw, context))
app.post(SessionPaths.abort, (c) => handler(c.req.raw, context))
app.post(SessionPaths.share, (c) => handler(c.req.raw, context))
app.delete(SessionPaths.share, (c) => handler(c.req.raw, context))
app.post(SessionPaths.summarize, (c) => handler(c.req.raw, context))
app.post(SessionPaths.prompt, (c) => handler(c.req.raw, context))
app.post(SessionPaths.promptAsync, (c) => handler(c.req.raw, context))
app.post(SessionPaths.command, (c) => handler(c.req.raw, context))
app.post(SessionPaths.shell, (c) => handler(c.req.raw, context))
app.post(SessionPaths.revert, (c) => handler(c.req.raw, context))
app.post(SessionPaths.unrevert, (c) => handler(c.req.raw, context))
app.post(SessionPaths.permissions, (c) => handler(c.req.raw, context))
app.delete(SessionPaths.deleteMessage, (c) => handler(c.req.raw, context))
app.delete(SessionPaths.deletePart, (c) => handler(c.req.raw, context))
app.patch(SessionPaths.updatePart, (c) => handler(c.req.raw, context))
app.post(TuiPaths.appendPrompt, (c) => handler(c.req.raw, context))
app.post(TuiPaths.openHelp, (c) => handler(c.req.raw, context))
app.post(TuiPaths.openSessions, (c) => handler(c.req.raw, context))
app.post(TuiPaths.openThemes, (c) => handler(c.req.raw, context))
app.post(TuiPaths.openModels, (c) => handler(c.req.raw, context))
app.post(TuiPaths.submitPrompt, (c) => handler(c.req.raw, context))
app.post(TuiPaths.clearPrompt, (c) => handler(c.req.raw, context))
app.post(TuiPaths.executeCommand, (c) => handler(c.req.raw, context))
app.post(TuiPaths.showToast, (c) => handler(c.req.raw, context))
app.post(TuiPaths.publish, (c) => handler(c.req.raw, context))
app.post(TuiPaths.selectSession, (c) => handler(c.req.raw, context))
app.get(TuiPaths.controlNext, (c) => handler(c.req.raw, context))
app.post(TuiPaths.controlResponse, (c) => handler(c.req.raw, context))
app.get(WorkspacePaths.adapters, (c) => handler(c.req.raw, context))
app.post(WorkspacePaths.list, (c) => handler(c.req.raw, context))
app.get(WorkspacePaths.list, (c) => handler(c.req.raw, context))
app.get(WorkspacePaths.status, (c) => handler(c.req.raw, context))
app.delete(WorkspacePaths.remove, (c) => handler(c.req.raw, context))
app.post(WorkspacePaths.sessionRestore, (c) => handler(c.req.raw, context))
}
return app
.route("/project", ProjectRoutes())
.route("/pty", PtyRoutes(upgrade))
@@ -40,6 +165,7 @@ export const InstanceRoutes = (upgrade: UpgradeWebSocket): Hono => {
.route("/question", QuestionRoutes())
.route("/provider", ProviderRoutes())
.route("/sync", SyncRoutes())
.route("/api", V2Routes())
.route("/", FileRoutes())
.route("/", EventRoutes())
.route("/mcp", McpRoutes())

View File

@@ -0,0 +1,229 @@
import { WorkspaceID } from "@/control-plane/schema"
import { SessionID } from "@/session/schema"
import { SessionMessage } from "@/v2/session-message"
import { SessionV2 } from "@/v2/session"
import { zod } from "@/util/effect-zod"
import { lazy } from "@/util/lazy"
import { Effect, Schema } from "effect"
import * as DateTime from "effect/DateTime"
import { Hono } from "hono"
import { describeRoute, resolver, validator } from "hono-openapi"
import { HTTPException } from "hono/http-exception"
import z from "zod"
import { errors } from "../../error"
import { jsonRequest } from "./trace"
// Default page sizes for the v2 listing endpoints when the client omits `limit`.
const DefaultMessagesLimit = 50
const DefaultSessionsLimit = 50
// Opaque pagination cursor for GET /session. Besides the page position
// (id/time) it carries the requested order, the paging direction, and the
// original list filters so follow-up page requests stay consistent with the
// request that produced the first page.
const SessionCursor = Schema.Struct({
id: SessionID,
time: Schema.Number,
order: Schema.Union([Schema.Literal("asc"), Schema.Literal("desc")]),
direction: Schema.Union([Schema.Literal("previous"), Schema.Literal("next")]),
directory: Schema.String.pipe(Schema.optional),
path: Schema.String.pipe(Schema.optional),
workspaceID: WorkspaceID.pipe(Schema.optional),
roots: Schema.Boolean.pipe(Schema.optional),
start: Schema.Number.pipe(Schema.optional),
search: Schema.String.pipe(Schema.optional),
})
type SessionCursor = typeof SessionCursor.Type
// Response envelope for GET /session: one page of sessions plus the encoded
// cursors for moving backward/forward through the ordered list.
const SessionsResponse = Schema.Struct({
items: Schema.Array(SessionV2.Info),
cursor: Schema.Struct({
previous: Schema.String.pipe(Schema.optional),
next: Schema.String.pipe(Schema.optional),
}),
}).annotate({ identifier: "V2SessionsResponse" })
// Pagination cursor for GET /session/:sessionID/message. Unlike SessionCursor
// it carries no filters — messages are only scoped by the session path param.
const Cursor = Schema.Struct({
id: SessionMessage.ID,
time: Schema.Number,
order: Schema.Union([Schema.Literal("asc"), Schema.Literal("desc")]),
direction: Schema.Union([Schema.Literal("previous"), Schema.Literal("next")]),
})
// Response envelope for the message listing endpoint.
const MessagesResponse = Schema.Struct({
items: Schema.Array(SessionMessage.Message),
cursor: Schema.Struct({
previous: Schema.String.pipe(Schema.optional),
next: Schema.String.pipe(Schema.optional),
}),
}).annotate({ identifier: "V2SessionMessagesResponse" })
// Strict (throwing) decoders; callers translate decode failures to HTTP 400.
const decodeCursor = Schema.decodeUnknownSync(Cursor)
const decodeSessionCursor = Schema.decodeUnknownSync(SessionCursor)
/**
 * Codec for the opaque GET /session pagination cursor.
 * The cursor is base64url(JSON) carrying the page boundary (id/time), the
 * requested order, the paging direction, and the original list filters.
 */
const sessionCursor = {
  /**
   * Build the cursor string for a page boundary.
   *
   * `filters` is spread FIRST so that the positional fields (`id`, `time`,
   * `order`, `direction`) always win. The route handler passes a previously
   * decoded cursor here (it structurally satisfies the Pick but also carries
   * the stale positional fields of the incoming cursor); spreading it last
   * would silently overwrite the new boundary and pagination would never
   * advance past the first page.
   */
  encode(
    session: SessionV2.Info,
    order: "asc" | "desc",
    direction: "previous" | "next",
    filters: Pick<SessionCursor, "directory" | "path" | "workspaceID" | "roots" | "start" | "search">,
  ) {
    return Buffer.from(
      JSON.stringify({ ...filters, id: session.id, time: session.time.created, order, direction }),
    ).toString("base64url")
  },
  /** Parse and schema-validate a cursor string; throws on malformed input. */
  decode(input: string) {
    return decodeSessionCursor(JSON.parse(Buffer.from(input, "base64url").toString("utf8")))
  },
}
/**
 * Codec for the message-list pagination cursor: a base64url-encoded JSON
 * payload recording the page boundary plus the order and paging direction.
 */
const cursor = {
  /** Serialize a page boundary (message id + creation time) into a cursor string. */
  encode(message: SessionMessage.Message, order: "asc" | "desc", direction: "previous" | "next") {
    const payload = {
      id: message.id,
      time: DateTime.toEpochMillis(message.time.created),
      order,
      direction,
    }
    return Buffer.from(JSON.stringify(payload)).toString("base64url")
  },
  /** Inverse of `encode`; throws if the payload fails schema validation. */
  decode(input: string) {
    const json = Buffer.from(input, "base64url").toString("utf8")
    return decodeCursor(JSON.parse(json))
  },
}
// Hono sub-router for the v2 (projection-backed) session API.
// Exposes cursor-paginated session and message listings read from the v2
// SQLite projections. Constructed lazily so route setup cost is paid once.
export const V2Routes = lazy(() =>
new Hono()
.get(
"/session",
describeRoute({
summary: "List v2 sessions",
description:
"Retrieve sessions in the requested order. Items keep that order across pages; use cursor.next or cursor.previous to move through the ordered list.",
operationId: "v2.session.list",
responses: {
200: {
description: "List of v2 sessions",
content: {
"application/json": {
schema: resolver(zod(SessionsResponse)),
},
},
},
...errors(400),
},
}),
validator(
"query",
z.object({
limit: z.coerce.number().int().min(1).max(200).optional(),
cursor: z.string().optional(),
order: z.enum(["asc", "desc"]).optional(),
directory: z.string().optional(),
path: z.string().optional(),
workspace: WorkspaceID.zod.optional(),
// Query strings are text; "true"/"false" are coerced to a boolean here.
roots: z
.enum(["true", "false"])
.transform((value) => value === "true")
.optional(),
start: z.coerce.number().optional(),
search: z.string().optional(),
}),
),
async (c) => {
const query = c.req.valid("query")
// A malformed cursor is a client error: surface it as 400, not a 500.
const decoded = (() => {
try {
return query.cursor ? sessionCursor.decode(query.cursor) : undefined
} catch {
throw new HTTPException(400)
}
})()
// Cursor-embedded settings win over query params so the ordering and
// filters stay fixed across every page of one logical listing.
const order = decoded?.order ?? query.order ?? "desc"
// NOTE(review): when a cursor is present, `filters` is the full decoded
// cursor and therefore also carries id/time/order/direction; it is later
// passed to sessionCursor.encode as the filter set — verify the encode
// spread order keeps the new boundary fields authoritative.
const filters = decoded ?? {
directory: query.directory,
path: query.path,
workspaceID: query.workspace,
roots: query.roots,
start: query.start,
search: query.search,
}
return jsonRequest("V2Routes.sessions", c, function* () {
return yield* Effect.gen(function* () {
const session = yield* SessionV2.Service
const sessions = yield* session.list({
limit: query.limit ?? DefaultSessionsLimit,
order,
directory: filters.directory,
path: filters.path,
workspaceID: filters.workspaceID,
roots: filters.roots,
start: filters.start,
search: filters.search,
cursor: decoded ? { id: decoded.id, time: decoded.time, direction: decoded.direction } : undefined,
})
// First/last rows of this page become the previous/next page boundaries.
const first = sessions[0]
const last = sessions.at(-1)
return {
items: sessions,
cursor: {
previous: first ? sessionCursor.encode(first, order, "previous", filters) : undefined,
next: last ? sessionCursor.encode(last, order, "next", filters) : undefined,
},
}
}).pipe(Effect.provide(SessionV2.defaultLayer))
})
},
)
.get(
"/session/:sessionID/message",
describeRoute({
summary: "Get v2 session messages",
description: "Retrieve projected v2 messages for a session directly from the message database.",
operationId: "v2.session.messages",
responses: {
200: {
description: "List of v2 session messages",
content: {
"application/json": {
schema: resolver(zod(MessagesResponse)),
},
},
},
...errors(400, 404),
},
}),
validator("param", z.object({ sessionID: SessionID.zod })),
validator(
"query",
z.object({
limit: z.coerce.number().int().min(1).max(200).optional(),
cursor: z.string().optional(),
order: z.enum(["asc", "desc"]).optional(),
}),
),
async (c) => {
const sessionID = c.req.valid("param").sessionID
const query = c.req.valid("query")
// As above: reject undecodable cursors with 400 rather than crashing.
const decoded = (() => {
try {
return query.cursor ? cursor.decode(query.cursor) : undefined
} catch {
throw new HTTPException(400)
}
})()
// The cursor's order overrides the query so pages never change direction.
const order = decoded?.order ?? query.order ?? "desc"
return jsonRequest("V2Routes.messages", c, function* () {
return yield* Effect.gen(function* () {
const session = yield* SessionV2.Service
const messages = yield* session.messages({
sessionID,
limit: query.limit ?? DefaultMessagesLimit,
order,
cursor: decoded ? { id: decoded.id, time: decoded.time, direction: decoded.direction } : undefined,
})
// Page boundaries: first row anchors "previous", last anchors "next".
const first = messages[0]
const last = messages.at(-1)
return {
items: messages,
cursor: {
previous: first ? cursor.encode(first, order, "previous") : undefined,
next: last ? cursor.encode(last, order, "next") : undefined,
},
}
}).pipe(Effect.provide(SessionV2.defaultLayer))
})
},
),
)

View File

@@ -14,10 +14,13 @@ import { Config } from "@/config/config"
import { NotFoundError } from "@/storage/storage"
import { ModelID, ProviderID } from "@/provider/schema"
import { Effect, Layer, Context, Schema } from "effect"
import * as DateTime from "effect/DateTime"
import { InstanceState } from "@/effect/instance-state"
import { isOverflow as overflow, usable } from "./overflow"
import { makeRuntime } from "@/effect/run-service"
import { fn } from "@/util/fn"
import { SyncEvent } from "@/sync"
import { SessionEvent } from "@/v2/session-event"
const log = Log.create({ service: "session.compaction" })
@@ -556,7 +559,21 @@ export const layer: Layer.Layer<
}
if (processor.message.error) return "stop"
if (result === "continue") yield* bus.publish(Event.Compacted, { sessionID: input.sessionID })
if (result === "continue") {
const summary = summaryText(
(yield* session.messages({ sessionID: input.sessionID })).find((item) => item.info.id === msg.id) ?? {
info: msg,
parts: [],
},
)
SyncEvent.run(SessionEvent.Compaction.Ended.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(Date.now()),
text: summary ?? "",
include: selected.tail_start_id,
})
yield* bus.publish(Event.Compacted, { sessionID: input.sessionID })
}
return result
})
@@ -583,6 +600,11 @@ export const layer: Layer.Layer<
auto: input.auto,
overflow: input.overflow,
})
SyncEvent.run(SessionEvent.Compaction.Started.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(Date.now()),
reason: input.auto ? "auto" : "manual",
})
})
return Service.of({

View File

@@ -20,6 +20,9 @@ import { Question } from "@/question"
import { errorMessage } from "@/util/error"
import * as Log from "@opencode-ai/core/util/log"
import { isRecord } from "@/util/record"
import { SyncEvent } from "@/sync"
import { SessionEvent } from "@/v2/session-event"
import * as DateTime from "effect/DateTime"
const DOOM_LOOP_THRESHOLD = 3
const log = Log.create({ service: "session.processor" })
@@ -221,6 +224,12 @@ export const layer: Layer.Layer<
case "reasoning-start":
if (value.id in ctx.reasoningMap) return
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Reasoning.Started.Sync, {
sessionID: ctx.sessionID,
reasoningID: value.id,
timestamp: DateTime.makeUnsafe(Date.now()),
})
ctx.reasoningMap[value.id] = {
id: PartID.ascending(),
messageID: ctx.assistantMessage.id,
@@ -235,6 +244,13 @@ export const layer: Layer.Layer<
case "reasoning-delta":
if (!(value.id in ctx.reasoningMap)) return
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Reasoning.Delta.Sync, {
sessionID: ctx.sessionID,
reasoningID: value.id,
delta: value.text,
timestamp: DateTime.makeUnsafe(Date.now()),
})
ctx.reasoningMap[value.id].text += value.text
if (value.providerMetadata) ctx.reasoningMap[value.id].metadata = value.providerMetadata
yield* session.updatePartDelta({
@@ -248,6 +264,13 @@ export const layer: Layer.Layer<
case "reasoning-end":
if (!(value.id in ctx.reasoningMap)) return
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Reasoning.Ended.Sync, {
sessionID: ctx.sessionID,
reasoningID: value.id,
text: ctx.reasoningMap[value.id].text,
timestamp: DateTime.makeUnsafe(Date.now()),
})
// oxlint-disable-next-line no-self-assign -- reactivity trigger
ctx.reasoningMap[value.id].text = ctx.reasoningMap[value.id].text
ctx.reasoningMap[value.id].time = { ...ctx.reasoningMap[value.id].time, end: Date.now() }
@@ -260,6 +283,13 @@ export const layer: Layer.Layer<
if (ctx.assistantMessage.summary) {
throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
}
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Tool.Input.Started.Sync, {
sessionID: ctx.sessionID,
callID: value.id,
name: value.toolName,
timestamp: DateTime.makeUnsafe(Date.now()),
})
const part = yield* session.updatePart({
id: ctx.toolcalls[value.id]?.partID ?? PartID.ascending(),
messageID: ctx.assistantMessage.id,
@@ -281,13 +311,34 @@ export const layer: Layer.Layer<
case "tool-input-delta":
return
case "tool-input-end":
case "tool-input-end": {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Tool.Input.Ended.Sync, {
sessionID: ctx.sessionID,
callID: value.id,
text: "",
timestamp: DateTime.makeUnsafe(Date.now()),
})
return
}
case "tool-call": {
if (ctx.assistantMessage.summary) {
throw new Error(`Tool call not allowed while generating summary: ${value.toolName}`)
}
const toolCall = yield* readToolCall(value.toolCallId)
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Tool.Called.Sync, {
sessionID: ctx.sessionID,
callID: value.toolCallId,
tool: value.toolName,
input: value.input,
provider: {
executed: toolCall?.part.metadata?.providerExecuted === true,
...(value.providerMetadata ? { metadata: value.providerMetadata } : {}),
},
timestamp: DateTime.makeUnsafe(Date.now()),
})
yield* updateToolCall(value.toolCallId, (match) => ({
...match,
tool: value.toolName,
@@ -331,11 +382,48 @@ export const layer: Layer.Layer<
}
case "tool-result": {
const toolCall = yield* readToolCall(value.toolCallId)
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Tool.Success.Sync, {
sessionID: ctx.sessionID,
callID: value.toolCallId,
structured: value.output.metadata,
content: [
{
type: "text",
text: value.output.output,
},
...(value.output.attachments?.map((item: MessageV2.FilePart) => ({
type: "file",
uri: item.url,
mime: item.mime,
name: item.filename,
})) ?? []),
],
provider: {
executed: toolCall?.part.metadata?.providerExecuted === true,
},
timestamp: DateTime.makeUnsafe(Date.now()),
})
yield* completeToolCall(value.toolCallId, value.output)
return
}
case "tool-error": {
const toolCall = yield* readToolCall(value.toolCallId)
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Tool.Error.Sync, {
sessionID: ctx.sessionID,
callID: value.toolCallId,
error: {
type: "unknown",
message: errorMessage(value.error),
},
provider: {
executed: toolCall?.part.metadata?.providerExecuted === true,
},
timestamp: DateTime.makeUnsafe(Date.now()),
})
yield* failToolCall(value.toolCallId, value.error)
return
}
@@ -345,6 +433,20 @@ export const layer: Layer.Layer<
case "start-step":
if (!ctx.snapshot) ctx.snapshot = yield* snapshot.track()
if (!ctx.assistantMessage.summary) {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Step.Started.Sync, {
sessionID: ctx.sessionID,
agent: input.assistantMessage.agent,
model: {
id: ctx.model.id,
providerID: ctx.model.providerID,
variant: input.assistantMessage.variant,
},
snapshot: ctx.snapshot,
timestamp: DateTime.makeUnsafe(Date.now()),
})
}
yield* session.updatePart({
id: PartID.ascending(),
messageID: ctx.assistantMessage.id,
@@ -355,18 +457,30 @@ export const layer: Layer.Layer<
return
case "finish-step": {
const completedSnapshot = yield* snapshot.track()
const usage = Session.getUsage({
model: ctx.model,
usage: value.usage,
metadata: value.providerMetadata,
})
if (!ctx.assistantMessage.summary) {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Step.Ended.Sync, {
sessionID: ctx.sessionID,
finish: value.finishReason,
cost: usage.cost,
tokens: usage.tokens,
snapshot: completedSnapshot,
timestamp: DateTime.makeUnsafe(Date.now()),
})
}
ctx.assistantMessage.finish = value.finishReason
ctx.assistantMessage.cost += usage.cost
ctx.assistantMessage.tokens = usage.tokens
yield* session.updatePart({
id: PartID.ascending(),
reason: value.finishReason,
snapshot: yield* snapshot.track(),
snapshot: completedSnapshot,
messageID: ctx.assistantMessage.id,
sessionID: ctx.assistantMessage.sessionID,
type: "step-finish",
@@ -404,6 +518,13 @@ export const layer: Layer.Layer<
}
case "text-start":
if (!ctx.assistantMessage.summary) {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Text.Started.Sync, {
sessionID: ctx.sessionID,
timestamp: DateTime.makeUnsafe(Date.now()),
})
}
ctx.currentText = {
id: PartID.ascending(),
messageID: ctx.assistantMessage.id,
@@ -418,6 +539,13 @@ export const layer: Layer.Layer<
case "text-delta":
if (!ctx.currentText) return
if (ctx.assistantMessage.summary) {
SyncEvent.run(SessionEvent.Compaction.Delta.Sync, {
sessionID: ctx.sessionID,
text: value.text,
timestamp: DateTime.makeUnsafe(Date.now()),
})
}
ctx.currentText.text += value.text
if (value.providerMetadata) ctx.currentText.metadata = value.providerMetadata
yield* session.updatePartDelta({
@@ -442,6 +570,14 @@ export const layer: Layer.Layer<
},
{ text: ctx.currentText.text },
)).text
if (!ctx.assistantMessage.summary) {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Text.Ended.Sync, {
sessionID: ctx.sessionID,
text: ctx.currentText.text,
timestamp: DateTime.makeUnsafe(Date.now()),
})
}
{
const end = Date.now()
ctx.currentText.time = { start: ctx.currentText.time?.start ?? end, end }
@@ -568,13 +704,24 @@ export const layer: Layer.Layer<
Effect.retry(
SessionRetry.policy({
parse,
set: (info) =>
status.set(ctx.sessionID, {
set: (info) => {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Retried.Sync, {
sessionID: ctx.sessionID,
attempt: info.attempt,
error: {
message: info.message,
isRetryable: true,
},
timestamp: DateTime.makeUnsafe(Date.now()),
})
return status.set(ctx.sessionID, {
type: "retry",
attempt: info.attempt,
message: info.message,
next: info.next,
}),
})
},
}),
),
Effect.catch(halt),

View File

@@ -0,0 +1,177 @@
import { and, desc, eq } from "@/storage/db"
import type { Database } from "@/storage/db"
import { SessionMessage } from "@/v2/session-message"
import { SessionMessageUpdater } from "@/v2/session-message-updater"
import { SessionEvent } from "@/v2/session-event"
import * as DateTime from "effect/DateTime"
import { SyncEvent } from "@/sync"
import { SessionMessageTable, SessionTable } from "./session.sql"
import type { SessionID } from "./schema"
import { Schema } from "effect"
// Strict decoder for rows read back from the message table; throws if the
// stored JSON no longer matches the SessionMessage schema.
const decodeMessage = Schema.decodeUnknownSync(SessionMessage.Message)
// Shape of the JSON `data` column on SessionMessageTable (drizzle insert type).
type SessionMessageData = NonNullable<(typeof SessionMessageTable.$inferInsert)["data"]>
/**
 * Recursively replace every effect DateTime inside `value` with its epoch
 * millisecond number so the structure is JSON-serializable for SQLite.
 * Arrays and non-null objects are rebuilt; every other value passes through.
 */
function encodeDateTimes(value: unknown): unknown {
  // Leaf case: a DateTime collapses to a plain number.
  if (DateTime.isDateTime(value)) return DateTime.toEpochMillis(value)
  // Recurse element-by-element through arrays.
  if (Array.isArray(value)) {
    return value.map((item) => encodeDateTimes(item))
  }
  // Recurse entry-by-entry through (non-null) objects.
  if (value !== null && typeof value === "object") {
    const converted = Object.entries(value).map(([key, item]): [string, unknown] => [key, encodeDateTimes(item)])
    return Object.fromEntries(converted)
  }
  // Primitives are already serializable as-is.
  return value
}
// Normalize DateTimes to epoch millis and assert the result matches the
// message table's JSON column type.
// NOTE(review): the `as` cast is unchecked — assumes `value` originates from
// a SessionMessage payload; verify at call sites.
function encodeMessageData(value: unknown): SessionMessageData {
return encodeDateTimes(value) as SessionMessageData
}
/**
 * SessionMessageUpdater adapter backed by the SQLite message table.
 * Every operation is scoped to the given session. `finish` is a no-op:
 * writes go straight through the supplied transaction/connection.
 */
function sqlite(db: Database.TxOrDb, sessionID: SessionID): SessionMessageUpdater.Adapter<void> {
  // Read this session's rows of one message type, newest id first, decoded
  // back into SessionMessage values.
  const readByType = (type: "assistant" | "compaction") =>
    db
      .select()
      .from(SessionMessageTable)
      .where(and(eq(SessionMessageTable.session_id, sessionID), eq(SessionMessageTable.type, type)))
      .orderBy(desc(SessionMessageTable.id))
      .all()
      .map((row) => decodeMessage({ ...row.data, id: row.id, type: row.type }))
  // Overwrite the `data` payload of an existing row identified by id + type
  // (both update methods share this exact write shape).
  const write = (id: SessionMessage.Message["id"], type: SessionMessage.Message["type"], data: unknown) => {
    db.update(SessionMessageTable)
      .set({ data: encodeMessageData(data) })
      .where(
        and(
          eq(SessionMessageTable.id, id),
          eq(SessionMessageTable.session_id, sessionID),
          eq(SessionMessageTable.type, type),
        ),
      )
      .run()
  }
  return {
    getCurrentAssistant() {
      // Most recent assistant message that has not completed yet, if any.
      return readByType("assistant").find(
        (message): message is SessionMessage.Assistant => message.type === "assistant" && !message.time.completed,
      )
    },
    getCurrentCompaction() {
      // Most recent compaction message, if any.
      return readByType("compaction").find(
        (message): message is SessionMessage.Compaction => message.type === "compaction",
      )
    },
    updateAssistant(assistant) {
      const { id, type, ...data } = assistant
      write(id, type, data)
    },
    updateCompaction(compaction) {
      const { id, type, ...data } = compaction
      write(id, type, data)
    },
    appendMessage(message) {
      // id/type live in dedicated columns; the remainder goes into `data`.
      const { id, type, ...data } = message
      db.insert(SessionMessageTable)
        .values([
          {
            id,
            session_id: sessionID,
            type,
            time_created: DateTime.toEpochMillis(message.time.created),
            data: encodeMessageData(data),
          },
        ])
        .run()
    },
    finish() {},
  }
}
// Apply a single v2 session event to the message projection for its session.
function update(db: Database.TxOrDb, event: SessionEvent.Event) {
  const adapter = sqlite(db, event.data.sessionID)
  SessionMessageUpdater.update(adapter, event)
}
// Projectors that fold v2 session sync events into SQLite state. Each entry
// pairs an event schema with a handler that (a) optionally updates the
// session row itself and (b) replays the event into the message projection
// via `update`, keyed by the event's unique id.
// Text/reasoning delta events are intentional no-ops: deltas are streamed
// live and only the terminal *.ended events are persisted.
export default [
SyncEvent.project(SessionEvent.AgentSwitched.Sync, (db, data, event) => {
// Keep the session row's active agent current before projecting the event.
db.update(SessionTable)
.set({
agent: data.agent,
time_updated: DateTime.toEpochMillis(data.timestamp),
})
.where(eq(SessionTable.id, data.sessionID))
.run()
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.agent.switched", data })
}),
SyncEvent.project(SessionEvent.ModelSwitched.Sync, (db, data, event) => {
// Persist the newly selected model (id/provider/variant) on the session row.
db.update(SessionTable)
.set({
model: {
id: data.id,
providerID: data.providerID,
variant: data.variant,
},
time_updated: DateTime.toEpochMillis(data.timestamp),
})
.where(eq(SessionTable.id, data.sessionID))
.run()
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.model.switched", data })
}),
SyncEvent.project(SessionEvent.Prompted.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.prompted", data })
}),
SyncEvent.project(SessionEvent.Synthetic.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.synthetic", data })
}),
SyncEvent.project(SessionEvent.Step.Started.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.step.started", data })
}),
SyncEvent.project(SessionEvent.Step.Ended.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.step.ended", data })
}),
SyncEvent.project(SessionEvent.Text.Started.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.text.started", data })
}),
// Streaming-only: text deltas are not written to the projection.
SyncEvent.project(SessionEvent.Text.Delta.Sync, () => {}),
SyncEvent.project(SessionEvent.Text.Ended.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.text.ended", data })
}),
SyncEvent.project(SessionEvent.Tool.Input.Started.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.tool.input.started", data })
}),
// Streaming-only: tool input deltas are not written to the projection.
SyncEvent.project(SessionEvent.Tool.Input.Delta.Sync, () => {}),
SyncEvent.project(SessionEvent.Tool.Input.Ended.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.tool.input.ended", data })
}),
SyncEvent.project(SessionEvent.Tool.Called.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.tool.called", data })
}),
SyncEvent.project(SessionEvent.Tool.Success.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.tool.success", data })
}),
SyncEvent.project(SessionEvent.Tool.Error.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.tool.error", data })
}),
SyncEvent.project(SessionEvent.Reasoning.Started.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.reasoning.started", data })
}),
// Streaming-only: reasoning deltas are not written to the projection.
SyncEvent.project(SessionEvent.Reasoning.Delta.Sync, () => {}),
SyncEvent.project(SessionEvent.Reasoning.Ended.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.reasoning.ended", data })
}),
SyncEvent.project(SessionEvent.Retried.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.retried", data })
}),
SyncEvent.project(SessionEvent.Compaction.Started.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.compaction.started", data })
}),
// NOTE(review): compaction deltas ARE projected, unlike text/reasoning
// deltas — presumably to rebuild the summary text incrementally; confirm
// this asymmetry is intentional.
SyncEvent.project(SessionEvent.Compaction.Delta.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.compaction.delta", data })
}),
SyncEvent.project(SessionEvent.Compaction.Ended.Sync, (db, data, event) => {
update(db, { id: SessionMessage.ID.make(event.id), type: "session.next.compaction.ended", data })
}),
]

View File

@@ -5,7 +5,8 @@ import { SyncEvent } from "@/sync"
import * as Session from "./session"
import { MessageV2 } from "./message-v2"
import { SessionTable, MessageTable, PartTable } from "./session.sql"
import * as Log from "@opencode-ai/core/util/log"
import { Log } from "@opencode-ai/core/util/log"
import nextProjectors from "./projectors-next"
const log = Log.create({ service: "session.projector" })
@@ -136,4 +137,6 @@ export default [
log.warn("ignored late part update", { partID: id, messageID, sessionID })
}
}),
...nextProjectors,
]

View File

@@ -41,6 +41,7 @@ import { Permission } from "@/permission"
import { SessionStatus } from "./status"
import { LLM } from "./llm"
import { Shell } from "@/shell/shell"
import { ShellToolID } from "@/tool/shell/id"
import { AppFileSystem } from "@opencode-ai/core/filesystem"
import { Truncate } from "@/tool/truncate"
import { decodeDataUrl } from "@/util/data-url"
@@ -53,6 +54,13 @@ import { InstanceState } from "@/effect/instance-state"
import { TaskTool, type TaskPromptOps } from "@/tool/task"
import { SessionRunState } from "./run-state"
import { EffectBridge } from "@/effect/bridge"
import { SessionEvent } from "@/v2/session-event"
import { AgentAttachment, FileAttachment, Source } from "@/v2/session-prompt"
import { SyncEvent } from "@/sync"
import * as DateTime from "effect/DateTime"
import { eq } from "@/storage/db"
import * as Database from "@/storage/db"
import { SessionTable } from "./session.sql"
// @ts-ignore
globalThis.AI_SDK_LOG_WARNINGS = false
@@ -117,6 +125,7 @@ export const layer = Layer.effect(
cancel: (sessionID: SessionID) => run.fork(cancel(sessionID)),
resolvePromptParts: (template: string) => resolvePromptParts(template),
prompt: (input: PromptInput) => prompt(input),
loop: (input: LoopInput) => loop(input),
} satisfies TaskPromptOps
})
@@ -780,7 +789,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the
id: PartID.ascending(),
messageID: msg.id,
sessionID: input.sessionID,
tool: "bash",
tool: ShellToolID.id,
callID: ulid(),
state: {
status: "running",
@@ -924,6 +933,34 @@ NOTE: At any point in time through this workflow you should feel free to ask the
format: input.format,
}
const current = Database.use((db) =>
db
.select({ agent: SessionTable.agent, model: SessionTable.model })
.from(SessionTable)
.where(eq(SessionTable.id, input.sessionID))
.get(),
)
if (current?.agent !== info.agent) {
SyncEvent.run(SessionEvent.AgentSwitched.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(info.time.created),
agent: info.agent,
})
}
if (
current?.model?.providerID !== info.model.providerID ||
current.model.id !== info.model.modelID ||
current.model.variant !== info.model.variant
) {
SyncEvent.run(SessionEvent.ModelSwitched.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(info.time.created),
id: info.model.modelID,
providerID: info.model.providerID,
variant: info.model.variant,
})
}
yield* Effect.addFinalizer(() => instruction.clear(info.id))
type Draft<T> = T extends MessageV2.Part ? Omit<T, "id"> & { id?: string } : never
@@ -1240,6 +1277,69 @@ NOTE: At any point in time through this workflow you should feel free to ask the
yield* sessions.updateMessage(info)
for (const part of parts) yield* sessions.updatePart(part)
const nextPrompt = parts.reduce(
(result, part) => {
if (part.type === "text") {
if (part.synthetic) result.synthetic.push(part.text)
else result.text.push(part.text)
}
if (part.type === "file") {
result.files.push(
new FileAttachment({
uri: part.url,
mime: part.mime,
name: part.filename,
source: part.source
? new Source({
start: part.source.text.start,
end: part.source.text.end,
text: part.source.text.value,
})
: undefined,
}),
)
}
if (part.type === "agent") {
result.agents.push(
new AgentAttachment({
name: part.name,
source: part.source
? new Source({
start: part.source.start,
end: part.source.end,
text: part.source.value,
})
: undefined,
}),
)
}
return result
},
{
text: [] as string[],
files: [] as FileAttachment[],
agents: [] as AgentAttachment[],
synthetic: [] as string[],
},
)
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Prompted.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(info.time.created),
prompt: {
text: nextPrompt.text.join("\n"),
files: nextPrompt.files,
agents: nextPrompt.agents,
},
})
for (const text of nextPrompt.synthetic) {
// TODO(v2): Temporary dual-write while migrating session messages to v2 events.
SyncEvent.run(SessionEvent.Synthetic.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(info.time.created),
text,
})
}
return { info, parts }
}, Effect.scoped)

View File

@@ -1,7 +1,7 @@
import { sqliteTable, text, integer, index, primaryKey } from "drizzle-orm/sqlite-core"
import { ProjectTable } from "../project/project.sql"
import type { MessageV2 } from "./message-v2"
import type { SessionEntry } from "../v2/session-entry"
import type { SessionMessage } from "../v2/session-message"
import type { Snapshot } from "../snapshot"
import type { Permission } from "../permission"
import type { ProjectID } from "../project/schema"
@@ -11,6 +11,7 @@ import { Timestamps } from "../storage/schema.sql"
type PartData = Omit<MessageV2.Part, "id" | "sessionID" | "messageID">
type InfoData = Omit<MessageV2.Info, "id" | "sessionID">
type SessionMessageData = Omit<(typeof SessionMessage.Message)["Encoded"], "type" | "id">
export const SessionTable = sqliteTable(
"session",
@@ -34,6 +35,12 @@ export const SessionTable = sqliteTable(
summary_diffs: text({ mode: "json" }).$type<Snapshot.FileDiff[]>(),
revert: text({ mode: "json" }).$type<{ messageID: MessageID; partID?: PartID; snapshot?: string; diff?: string }>(),
permission: text({ mode: "json" }).$type<Permission.Ruleset>(),
agent: text(),
model: text({ mode: "json" }).$type<{
id: string
providerID: string
variant?: string
}>(),
...Timestamps,
time_compacting: integer(),
time_archived: integer(),
@@ -96,22 +103,22 @@ export const TodoTable = sqliteTable(
],
)
export const SessionEntryTable = sqliteTable(
"session_entry",
export const SessionMessageTable = sqliteTable(
"session_message",
{
id: text().$type<SessionEntry.ID>().primaryKey(),
id: text().$type<SessionMessage.ID>().primaryKey(),
session_id: text()
.$type<SessionID>()
.notNull()
.references(() => SessionTable.id, { onDelete: "cascade" }),
type: text().$type<SessionEntry.Type>().notNull(),
type: text().$type<SessionMessage.Type>().notNull(),
...Timestamps,
data: text({ mode: "json" }).notNull().$type<Omit<SessionEntry.Entry, "type" | "id">>(),
data: text({ mode: "json" }).notNull().$type<SessionMessageData>(),
},
(table) => [
index("session_entry_session_idx").on(table.session_id),
index("session_entry_session_type_idx").on(table.session_id, table.type),
index("session_entry_time_created_idx").on(table.time_created),
index("session_message_session_idx").on(table.session_id),
index("session_message_session_type_idx").on(table.session_id, table.type),
index("session_message_time_created_idx").on(table.time_created),
],
)

View File

@@ -32,6 +32,7 @@ import { Snapshot } from "@/snapshot"
import { ProjectID } from "../project/schema"
import { WorkspaceID } from "../control-plane/schema"
import { SessionID, MessageID, PartID } from "./schema"
import { ModelID, ProviderID } from "@/provider/schema"
import type { Provider } from "@/provider/provider"
import { Permission } from "@/permission"
@@ -78,6 +79,10 @@ export function fromRow(row: SessionRow): Info {
path: row.path ?? undefined,
parentID: row.parent_id ?? undefined,
title: row.title,
agent: row.agent ?? undefined,
model: row.model
? { id: ModelID.make(row.model.id), providerID: ProviderID.make(row.model.providerID), variant: row.model.variant }
: undefined,
version: row.version,
summary,
share,
@@ -102,6 +107,8 @@ export function toRow(info: Info) {
directory: info.directory,
path: info.path,
title: info.title,
agent: info.agent,
model: info.model,
version: info.version,
share_url: info.share?.url,
summary_additions: info.summary?.additions,
@@ -160,6 +167,12 @@ const Revert = Schema.Struct({
diff: optionalOmitUndefined(Schema.String),
})
const Model = Schema.Struct({
id: ModelID,
providerID: ProviderID,
variant: optionalOmitUndefined(Schema.String),
})
export const Info = Schema.Struct({
id: SessionID,
slug: Schema.String,
@@ -171,6 +184,8 @@ export const Info = Schema.Struct({
summary: optionalOmitUndefined(Summary),
share: optionalOmitUndefined(Share),
title: Schema.String,
agent: optionalOmitUndefined(Schema.String),
model: optionalOmitUndefined(Model),
version: Schema.String,
time: Time,
permission: optionalOmitUndefined(Permission.Ruleset),
@@ -201,6 +216,8 @@ export const CreateInput = Schema.optional(
Schema.Struct({
parentID: Schema.optional(SessionID),
title: Schema.optional(Schema.String),
agent: Schema.optional(Schema.String),
model: Schema.optional(Model),
permission: Schema.optional(Permission.Ruleset),
workspaceID: Schema.optional(WorkspaceID),
}),
@@ -272,6 +289,8 @@ const UpdatedInfo = Schema.Struct({
summary: Schema.optional(Schema.NullOr(Summary)),
share: Schema.optional(UpdatedShare),
title: Schema.optional(Schema.NullOr(Schema.String)),
agent: Schema.optional(Schema.NullOr(Schema.String)),
model: Schema.optional(Schema.NullOr(Model)),
version: Schema.optional(Schema.NullOr(Schema.String)),
time: Schema.optional(UpdatedTime),
permission: Schema.optional(Schema.NullOr(Permission.Ruleset)),
@@ -316,7 +335,8 @@ export const Event = {
sessionID: Schema.optional(SessionID),
// Reuses MessageV2.Assistant.fields.error (already Schema.optional) so
// the derived zod keeps the same discriminated-union shape on the bus.
error: MessageV2.Assistant.fields.error,
// Schema.suspend defers access to break circular init in compiled binaries.
error: Schema.suspend(() => MessageV2.Assistant.fields.error),
}),
),
}
@@ -404,6 +424,8 @@ export interface Interface {
readonly create: (input?: {
parentID?: SessionID
title?: string
agent?: string
model?: Schema.Schema.Type<typeof Model>
permission?: Permission.Ruleset
workspaceID?: WorkspaceID
}) => Effect.Effect<Info>
@@ -464,6 +486,8 @@ export const layer: Layer.Layer<Service, never, Bus.Service | Storage.Service |
const createNext = Effect.fn("Session.createNext")(function* (input: {
id?: SessionID
title?: string
agent?: string
model?: Schema.Schema.Type<typeof Model>
parentID?: SessionID
workspaceID?: WorkspaceID
directory: string
@@ -481,6 +505,8 @@ export const layer: Layer.Layer<Service, never, Bus.Service | Storage.Service |
workspaceID: input.workspaceID,
parentID: input.parentID,
title: input.title ?? createDefaultTitle(!!input.parentID),
agent: input.agent,
model: input.model,
permission: input.permission,
time: {
created: Date.now(),
@@ -591,6 +617,8 @@ export const layer: Layer.Layer<Service, never, Bus.Service | Storage.Service |
const create = Effect.fn("Session.create")(function* (input?: {
parentID?: SessionID
title?: string
agent?: string
model?: Schema.Schema.Type<typeof Model>
permission?: Permission.Ruleset
workspaceID?: WorkspaceID
}) {
@@ -601,6 +629,8 @@ export const layer: Layer.Layer<Service, never, Bus.Service | Storage.Service |
directory: ctx.directory,
path: sessionPath(ctx.worktree, ctx.directory),
title: input?.title,
agent: input?.agent,
model: input?.model,
permission: input?.permission,
workspaceID: input?.workspaceID ?? workspace,
})

View File

@@ -46,7 +46,7 @@ export type Properties<Def extends Definition = Definition> = EffectSchema.Schem
export type SerializedEvent<Def extends Definition = Definition> = Event<Def> & { type: string }
type ProjectorFunc = (db: Database.TxOrDb, data: unknown) => void
type ProjectorFunc = (db: Database.TxOrDb, data: unknown, event: Event) => void
type ConvertEvent = (type: string, data: Event["data"]) => unknown | Promise<unknown>
type PublishContext = {
instance?: InstanceContext
@@ -255,7 +255,7 @@ export function define<
export function project<Def extends Definition>(
def: Def,
func: (db: Database.TxOrDb, data: Event<Def>["data"]) => void,
func: (db: Database.TxOrDb, data: Event<Def>["data"], event: Event<Def>) => void,
): [Definition, ProjectorFunc] {
return [def, func as ProjectorFunc]
}
@@ -277,7 +277,7 @@ function process<Def extends Definition>(
// idempotent: need to ignore any events already logged
Database.transaction((tx) => {
projector(tx, event.data)
projector(tx, event.data, event)
if (Flag.OPENCODE_EXPERIMENTAL_WORKSPACES) {
tx.insert(EventSequenceTable)
@@ -308,7 +308,7 @@ function process<Def extends Definition>(
}
const result = convertEvent(def.type, event.data)
const publish = (data: unknown) => ProjectBus.publish(def, data as Properties<Def>)
const publish = (data: unknown) => ProjectBus.publish(def, data as Properties<Def>, { id: event.id })
if (result instanceof Promise) {
void result.then(publish)
} else {

View File

@@ -0,0 +1,62 @@
import { Effect, Schema } from "effect"
import { HttpClient } from "effect/unstable/http"
import * as Tool from "./tool"
import * as McpExa from "./mcp-exa"
import DESCRIPTION from "./codesearch.txt"
// Input schema for the codesearch tool; decoded by the tool runtime before execute runs.
export const Parameters = Schema.Struct({
query: Schema.String.annotate({
description:
"Search query to find relevant context for APIs, Libraries, and SDKs. For example, 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware', 'Next js partial prerendering configuration'",
}),
// Clamped to [1000, 50000]; when omitted, decoding defaults the value to 5000.
tokensNum: Schema.Number.check(Schema.isGreaterThanOrEqualTo(1000))
.check(Schema.isLessThanOrEqualTo(50000))
.pipe(Schema.optional, Schema.withDecodingDefault(Effect.succeed(5000)))
.annotate({
description:
"Number of tokens to return (1000-50000). Default is 5000 tokens. Adjust this value based on how much context you need - use lower values for focused queries and higher values for comprehensive documentation.",
}),
})
// Tool that fetches programming context (code examples, docs) from the Exa
// code-search MCP endpoint for a free-text query.
export const CodeSearchTool = Tool.define(
"codesearch",
Effect.gen(function* () {
const http = yield* HttpClient.HttpClient
return {
description: DESCRIPTION,
parameters: Parameters,
execute: (params: { query: string; tokensNum: number }, ctx: Tool.Context) =>
Effect.gen(function* () {
// Permission gate before any network call; `always: ["*"]` lets the
// user approve all future codesearch queries at once.
yield* ctx.ask({
permission: "codesearch",
patterns: [params.query],
always: ["*"],
metadata: {
query: params.query,
tokensNum: params.tokensNum,
},
})
// Proxy the query to the Exa MCP tool with a 30-second budget.
const result = yield* McpExa.call(
http,
"get_code_context_exa",
McpExa.CodeArgs,
{
query: params.query,
tokensNum: params.tokensNum,
},
"30 seconds",
)
return {
// Fall back to a guidance message when the API returns nothing.
output:
result ??
"No code snippets or documentation found. Please try a different query, be more specific about the library or programming concept, or check the spelling of framework names.",
title: `Code search: ${params.query}`,
metadata: {},
}
}).pipe(Effect.orDie),
}
}),
)

View File

@@ -0,0 +1,12 @@
- Search and get relevant context for any programming task using Exa Code API
- Provides the highest quality and freshest context for libraries, SDKs, and APIs
- Use this tool for ANY question or task related to programming
- Returns comprehensive code examples, documentation, and API references
- Optimized for finding specific programming patterns and solutions
Usage notes:
- Adjustable token count (1000-50000) for focused or comprehensive results
- Default 5000 tokens provides balanced context for most queries
- Use lower values for specific questions, higher values for comprehensive documentation
- Supports queries about frameworks, libraries, APIs, and programming concepts
- Examples: 'React useState hook examples', 'Python pandas dataframe filtering', 'Express.js middleware'

View File

@@ -35,6 +35,11 @@ export const SearchArgs = Schema.Struct({
contextMaxCharacters: Schema.optional(Schema.Number),
})
// Argument shape for the Exa `get_code_context_exa` MCP tool call.
export const CodeArgs = Schema.Struct({
query: Schema.String,
tokensNum: Schema.Number,
})
const McpRequest = <F extends Schema.Struct.Fields>(args: Schema.Struct<F>) =>
Schema.Struct({
jsonrpc: Schema.Literal("2.0"),

View File

@@ -1,12 +1,13 @@
import { PlanExitTool } from "./plan"
import { Session } from "@/session/session"
import { QuestionTool } from "./question"
import { BashTool } from "./bash"
import { ShellTool } from "./shell"
import { EditTool } from "./edit"
import { GlobTool } from "./glob"
import { GrepTool } from "./grep"
import { ReadTool } from "./read"
import { TaskTool } from "./task"
import { TaskStatusTool } from "./task_status"
import { TodoWriteTool } from "./todo"
import { WebFetchTool } from "./webfetch"
import { WriteTool } from "./write"
@@ -22,6 +23,9 @@ import { Plugin } from "../plugin"
import { Provider } from "@/provider/provider"
import { ProviderID, type ModelID } from "../provider/schema"
import { WebSearchTool } from "./websearch"
import { CodeSearchTool } from "./codesearch"
import { RepoCloneTool } from "./repo_clone"
import { RepoOverviewTool } from "./repo_overview"
import { Flag } from "@opencode-ai/core/flag/flag"
import * as Log from "@opencode-ai/core/util/log"
import { LspTool } from "./lsp"
@@ -44,8 +48,11 @@ import { Instruction } from "../session/instruction"
import { AppFileSystem } from "@opencode-ai/core/filesystem"
import { Bus } from "../bus"
import { Agent } from "../agent/agent"
import { Git } from "@/git"
import { Skill } from "../skill"
import { Permission } from "@/permission"
import { SessionStatus } from "@/session/status"
import { BackgroundJob } from "@/background/job"
const log = Log.create({ service: "tool.registry" })
@@ -78,11 +85,14 @@ export const layer: Layer.Layer<
| Agent.Service
| Skill.Service
| Session.Service
| SessionStatus.Service
| Provider.Service
| Git.Service
| LSP.Service
| Instruction.Service
| AppFileSystem.Service
| Bus.Service
| BackgroundJob.Service
| HttpClient.HttpClient
| ChildProcessSpawner
| Ripgrep.Service
@@ -106,13 +116,17 @@ export const layer: Layer.Layer<
const plan = yield* PlanExitTool
const webfetch = yield* WebFetchTool
const websearch = yield* WebSearchTool
const bash = yield* BashTool
const shell = yield* ShellTool
const codesearch = yield* CodeSearchTool
const repoClone = yield* RepoCloneTool
const repoOverview = yield* RepoOverviewTool
const globtool = yield* GlobTool
const writetool = yield* WriteTool
const edit = yield* EditTool
const greptool = yield* GrepTool
const patchtool = yield* ApplyPatchTool
const skilltool = yield* SkillTool
const taskstatus = yield* TaskStatusTool
const agent = yield* Agent.Service
const state = yield* InstanceState.make<State>(
@@ -186,16 +200,20 @@ export const layer: Layer.Layer<
const tool = yield* Effect.all({
invalid: Tool.init(invalid),
bash: Tool.init(bash),
shell: Tool.init(shell),
read: Tool.init(read),
glob: Tool.init(globtool),
grep: Tool.init(greptool),
edit: Tool.init(edit),
write: Tool.init(writetool),
task: Tool.init(task),
taskstatus: Tool.init(taskstatus),
fetch: Tool.init(webfetch),
todo: Tool.init(todo),
search: Tool.init(websearch),
code: Tool.init(codesearch),
repo_clone: Tool.init(repoClone),
repo_overview: Tool.init(repoOverview),
skill: Tool.init(skilltool),
patch: Tool.init(patchtool),
question: Tool.init(question),
@@ -208,16 +226,20 @@ export const layer: Layer.Layer<
builtin: [
tool.invalid,
...(questionEnabled ? [tool.question] : []),
tool.bash,
tool.shell,
tool.read,
tool.glob,
tool.grep,
tool.edit,
tool.write,
tool.task,
tool.taskstatus,
tool.fetch,
tool.todo,
tool.search,
tool.code,
tool.repo_clone,
tool.repo_overview,
tool.skill,
tool.patch,
...(Flag.OPENCODE_EXPERIMENTAL_LSP_TOOL ? [tool.lsp] : []),
@@ -331,11 +353,14 @@ export const defaultLayer = Layer.suspend(() =>
Layer.provide(Skill.defaultLayer),
Layer.provide(Agent.defaultLayer),
Layer.provide(Session.defaultLayer),
Layer.provide(SessionStatus.defaultLayer),
Layer.provide(Provider.defaultLayer),
Layer.provide(Git.defaultLayer),
Layer.provide(LSP.defaultLayer),
Layer.provide(Instruction.defaultLayer),
Layer.provide(AppFileSystem.defaultLayer),
Layer.provide(Bus.layer),
Layer.provide(BackgroundJob.defaultLayer),
Layer.provide(FetchHttpClient.layer),
Layer.provide(Format.defaultLayer),
Layer.provide(CrossSpawnSpawner.defaultLayer),

View File

@@ -0,0 +1,207 @@
import path from "path"
import { Effect, Schema } from "effect"
import { AppFileSystem } from "@opencode-ai/core/filesystem"
import { Flock } from "@opencode-ai/core/util/flock"
import { Git } from "@/git"
import DESCRIPTION from "./repo_clone.txt"
import * as Tool from "./tool"
import { parseRepositoryReference, repositoryCachePath, sameRepositoryReference } from "@/util/repository"
// Input schema for the repo_clone tool.
export const Parameters = Schema.Struct({
repository: Schema.String.annotate({
description: "Repository to clone, as a git URL, host/path reference, or GitHub owner/repo shorthand",
}),
// Optional: force a fetch/reset of an already-cached clone.
refresh: Schema.optional(Schema.Boolean).annotate({
description: "When true, fetches the latest remote state into the managed cache",
}),
branch: Schema.optional(Schema.String).annotate({
description: "Branch or ref to clone and inspect",
}),
})
// Structured result returned alongside the tool's text output.
type Metadata = {
repository: string
host: string
remote: string
localPath: string
// "cached": reused as-is; "cloned": fresh clone; "refreshed": fetched + reset.
status: "cached" | "cloned" | "refreshed"
head?: string
branch?: string
}
/**
 * Decide how the cached clone must be brought up to date.
 * A cache miss always means a fresh clone; an existing clone is re-synced
 * ("refreshed") when the checked-out branch differs from the requested one or
 * the caller explicitly asked for a refresh; otherwise it is reused as-is.
 */
function statusForRepository(input: { reuse: boolean; refresh?: boolean; branchMatches?: boolean }) {
  if (!input.reuse) return "cloned" as const
  // branchMatches === undefined means no specific branch was requested.
  const needsSync = input.branchMatches === false || input.refresh === true
  return needsSync ? ("refreshed" as const) : ("cached" as const)
}
/**
 * Pick the ref to `git reset --hard` to after a refresh.
 * Preference order: explicitly requested branch, the remote's default branch
 * (from `symbolic-ref refs/remotes/origin/HEAD`), the current local branch,
 * and finally bare HEAD.
 */
function resetTarget(input: {
  requestedBranch?: string
  remoteHead: { code: number; stdout: string }
  branch: { code: number; stdout: string }
}) {
  const { requestedBranch, remoteHead, branch } = input
  // An explicitly requested branch always wins.
  if (requestedBranch) return `origin/${requestedBranch}`
  // "refs/remotes/origin/main" is trimmed down to "origin/main".
  const hasRemoteHead = remoteHead.code === 0 && remoteHead.stdout !== ""
  if (hasRemoteHead) return remoteHead.stdout.replace(/^refs\/remotes\//, "")
  const hasLocalBranch = branch.code === 0 && branch.stdout !== ""
  return hasLocalBranch ? `origin/${branch.stdout}` : "HEAD"
}
/**
 * Reject branch names that could be abused when spliced into git commands.
 * Throws unless the name uses only the safe character set, does not start
 * with "-" (could be parsed as a CLI flag), and contains no ".." (ref-range
 * syntax / path traversal).
 */
function validateBranch(branch: string) {
  const allowedCharacters = /^[A-Za-z0-9/_.-]+$/.test(branch)
  const safeShape = !branch.startsWith("-") && !branch.includes("..")
  if (allowedCharacters && safeShape) return
  throw new Error(
    "Branch must contain only alphanumeric characters, /, _, ., and -, and cannot start with - or contain ..",
  )
}
// Tool that clones (or refreshes) a repository into the managed cache and
// returns its local path so other tools can explore the source.
export const RepoCloneTool = Tool.define<typeof Parameters, Metadata, AppFileSystem.Service | Git.Service>(
"repo_clone",
Effect.gen(function* () {
const fs = yield* AppFileSystem.Service
const git = yield* Git.Service
return {
description: DESCRIPTION,
parameters: Parameters,
execute: (params: Schema.Schema.Type<typeof Parameters>, ctx: Tool.Context<Metadata>) =>
Effect.gen(function* () {
// Normalize the user-supplied reference (URL / host path / owner/repo).
const reference = parseRepositoryReference(params.repository)
if (!reference)
throw new Error("Repository must be a git URL, host/path reference, or GitHub owner/repo shorthand")
// Reject branch names that could act as CLI flags or ref tricks.
if (params.branch) validateBranch(params.branch)
const repository = reference.label
const remote = reference.remote
const localPath = repositoryCachePath(reference)
const cloneTarget = parseRepositoryReference(remote) ?? reference
// Permission gate before touching the filesystem or network.
yield* ctx.ask({
permission: "repo_clone",
patterns: [repository],
always: [repository],
metadata: {
repository,
remote,
path: localPath,
refresh: Boolean(params.refresh),
branch: params.branch,
},
})
// Serialize concurrent clones of the same cache path via a file lock.
return yield* Effect.acquireUseRelease(
Effect.promise((signal) => Flock.acquire(`repo-clone:${localPath}`, { signal })),
() =>
Effect.gen(function* () {
yield* fs.ensureDir(path.dirname(localPath)).pipe(Effect.orDie)
const exists = yield* fs.existsSafe(localPath)
const hasGitDir = yield* fs.existsSafe(path.join(localPath, ".git"))
// Only reuse the cached checkout when its origin matches the target.
const origin = hasGitDir
? yield* git.run(["config", "--get", "remote.origin.url"], { cwd: localPath })
: undefined
const originReference =
origin?.exitCode === 0 ? parseRepositoryReference(origin.text().trim()) : undefined
const reuse =
hasGitDir && Boolean(originReference && sameRepositoryReference(originReference, cloneTarget))
// A stale/mismatched directory is deleted before re-cloning.
if (exists && !reuse) {
yield* fs.remove(localPath, { recursive: true }).pipe(Effect.orDie)
}
const currentBranch = hasGitDir ? yield* git.branch(localPath) : undefined
const status = statusForRepository({
reuse,
refresh: params.refresh,
branchMatches: params.branch ? currentBranch === params.branch : undefined,
})
if (status === "cloned") {
// Shallow clone (depth 100) to keep the cache small but useful.
const clone = yield* git.run(
[
"clone",
"--depth",
"100",
...(params.branch ? ["--branch", params.branch] : []),
remote,
localPath,
],
{ cwd: path.dirname(localPath) },
)
if (clone.exitCode !== 0) {
throw new Error(
clone.stderr.toString().trim() || clone.text().trim() || `Failed to clone ${repository}`,
)
}
}
if (status === "refreshed") {
// Bring the cached clone up to date: fetch, optionally switch branch,
// then hard-reset to the chosen upstream ref.
const fetch = yield* git.run(["fetch", "--all", "--prune"], { cwd: localPath })
if (fetch.exitCode !== 0) {
throw new Error(
fetch.stderr.toString().trim() || fetch.text().trim() || `Failed to refresh ${repository}`,
)
}
if (params.branch) {
const checkout = yield* git.run(["checkout", "-B", params.branch, `origin/${params.branch}`], {
cwd: localPath,
})
if (checkout.exitCode !== 0) {
throw new Error(
checkout.stderr.toString().trim() ||
checkout.text().trim() ||
`Failed to checkout ${params.branch}`,
)
}
}
const remoteHead = yield* git.run(["symbolic-ref", "refs/remotes/origin/HEAD"], { cwd: localPath })
const branch = yield* git.run(["symbolic-ref", "--quiet", "--short", "HEAD"], { cwd: localPath })
const target = resetTarget({
requestedBranch: params.branch,
remoteHead: { code: remoteHead.exitCode, stdout: remoteHead.text().trim() },
branch: { code: branch.exitCode, stdout: branch.text().trim() },
})
const reset = yield* git.run(["reset", "--hard", target], { cwd: localPath })
if (reset.exitCode !== 0) {
throw new Error(
reset.stderr.toString().trim() || reset.text().trim() || `Failed to reset ${repository}`,
)
}
}
// Report final state (HEAD sha + branch) regardless of status.
const head = yield* git.run(["rev-parse", "HEAD"], { cwd: localPath })
const branch = yield* git.branch(localPath)
const headText = head.exitCode === 0 ? head.text().trim() : undefined
return {
title: repository,
metadata: {
repository,
host: reference.host,
remote,
localPath,
status,
head: headText,
branch,
},
output: [
`Repository ready: ${repository}`,
`Status: ${status}`,
`Local path: ${localPath}`,
...(branch ? [`Branch: ${branch}`] : []),
...(headText ? [`HEAD: ${headText}`] : []),
].join("\n"),
}
}),
// Always release the lock, even on failure.
(lock) => Effect.promise(() => lock.release()).pipe(Effect.ignore),
)
}).pipe(Effect.orDie),
} satisfies Tool.DefWithoutID<typeof Parameters, Metadata>
}),
)

View File

@@ -0,0 +1,5 @@
- Clone or refresh a repository into OpenCode's managed cache under the data directory
- Accepts git URLs, forge host/path references, or GitHub owner/repo shorthand
- Returns the cached absolute local path so other tools can explore the cloned source
- Use this before Read, Glob, or Grep when the code you need lives outside the current workspace
- This tool is intended for dependency and documentation research workflows, not for modifying the user's workspace

View File

@@ -0,0 +1,238 @@
import path from "path"
import { Effect, Schema } from "effect"
import { AppFileSystem } from "@opencode-ai/core/filesystem"
import { Git } from "@/git"
import { assertExternalDirectoryEffect } from "./external-directory"
import DESCRIPTION from "./repo_overview.txt"
import * as Tool from "./tool"
import { parseRepositoryReference, repositoryCachePath } from "@/util/repository"
import { Instance } from "@/project/instance"
// Input schema for the repo_overview tool. Either `repository` (a cached
// clone) or `path` (an arbitrary directory) must be supplied.
export const Parameters = Schema.Struct({
repository: Schema.optional(Schema.String).annotate({
description: "Cached repository to inspect, as a git URL, host/path reference, or GitHub owner/repo shorthand",
}),
path: Schema.optional(Schema.String).annotate({
description: "Directory path to inspect instead of a cached repository",
}),
depth: Schema.optional(Schema.Number).annotate({
description: "Maximum structure depth to include. Defaults to 3.",
})
})
// Structured summary of the inspected repository/directory.
type Metadata = {
path: string
repository?: string
branch?: string
head?: string
package_manager?: string
ecosystems: string[]
dependency_files: string[]
entrypoints: string[]
depth: number
// True when the structure listing was cut off at STRUCTURE_LIMIT lines.
truncated: boolean
}
// Directories skipped when rendering the structure tree (VCS data, build
// output, dependency caches).
const IGNORED_DIRS = new Set([".git", "node_modules", "__pycache__", ".venv", "dist", "build", ".next", "target", "vendor"])
// Hard cap on structure lines so tool output stays bounded.
const STRUCTURE_LIMIT = 200
// Well-known dependency manifests/lockfiles probed at the repository root.
const DEPENDENCY_FILES = [
"package.json",
"package-lock.json",
"bun.lock",
"bun.lockb",
"pnpm-lock.yaml",
"yarn.lock",
"requirements.txt",
"pyproject.toml",
"go.mod",
"Cargo.toml",
"Gemfile",
"build.gradle",
"build.gradle.kts",
"pom.xml",
"composer.json",
]
/**
 * Infer the Node package manager from which lockfile exists at the root.
 * Precedence: bun > pnpm > yarn > npm; returns undefined when none match.
 */
function packageManager(files: Set<string>) {
  const lockfiles: [string, "bun" | "pnpm" | "yarn" | "npm"][] = [
    ["bun.lock", "bun"],
    ["bun.lockb", "bun"],
    ["pnpm-lock.yaml", "pnpm"],
    ["yarn.lock", "yarn"],
    ["package-lock.json", "npm"],
  ]
  for (const [lockfile, manager] of lockfiles) {
    if (files.has(lockfile)) return manager
  }
}
/**
 * List language ecosystems detected from root-level manifest files,
 * in a fixed display order.
 */
function ecosystems(files: Set<string>) {
  const detected: string[] = []
  if (files.has("package.json")) detected.push("Node.js")
  if (files.has("pyproject.toml") || files.has("requirements.txt")) detected.push("Python")
  if (files.has("go.mod")) detected.push("Go")
  if (files.has("Cargo.toml")) detected.push("Rust")
  if (files.has("Gemfile")) detected.push("Ruby")
  if (files.has("build.gradle") || files.has("build.gradle.kts") || files.has("pom.xml")) detected.push("Java/Kotlin")
  if (files.has("composer.json")) detected.push("PHP")
  return detected
}
/** Pick out conventional JS/TS entrypoint files that actually exist, in a fixed order. */
function commonEntrypoints(files: Set<string>) {
  const candidates = [
    "index.ts",
    "index.tsx",
    "index.js",
    "index.mjs",
    "main.ts",
    "main.js",
    "src/index.ts",
    "src/index.tsx",
    "src/index.js",
    "src/main.ts",
    "src/main.js",
  ]
  const present: string[] = []
  for (const candidate of candidates) {
    if (files.has(candidate)) present.push(candidate)
  }
  return present
}
export const RepoOverviewTool = Tool.define<typeof Parameters, Metadata, AppFileSystem.Service | Git.Service>(
"repo_overview",
Effect.gen(function* () {
const fs = yield* AppFileSystem.Service
const git = yield* Git.Service
const resolveTarget = Effect.fn("RepoOverviewTool.resolveTarget")(function* (params: Schema.Schema.Type<typeof Parameters>) {
if (params.path) {
const full = path.isAbsolute(params.path) ? params.path : path.resolve(Instance.directory, params.path)
return { path: full, repository: params.repository }
}
if (!params.repository) throw new Error("Either repository or path is required")
const parsed = parseRepositoryReference(params.repository)
if (!parsed) throw new Error("Repository must be a git URL, host/path reference, or GitHub owner/repo shorthand")
const repository = parsed.label
return {
repository,
path: repositoryCachePath(parsed),
}
})
const structure = Effect.fn("RepoOverviewTool.structure")(function* (root: string, depth: number) {
let truncated = false
const lines: string[] = []
const visit: (dir: string, level: number) => Effect.Effect<void> = Effect.fnUntraced(function* (dir: string, level: number) {
if (level >= depth || lines.length >= STRUCTURE_LIMIT) {
truncated = truncated || lines.length >= STRUCTURE_LIMIT
return
}
const entries = yield* fs.readDirectoryEntries(dir).pipe(Effect.orElseSucceed(() => []))
const sorted = yield* Effect.forEach(
entries,
Effect.fnUntraced(function* (entry) {
if (IGNORED_DIRS.has(entry.name)) return undefined
const full = path.join(dir, entry.name)
const info = yield* fs.stat(full).pipe(Effect.catch(() => Effect.succeed(undefined)))
if (!info) return undefined
return { name: entry.name, full, directory: info.type === "Directory" }
}),
{ concurrency: 16 },
).pipe(
Effect.map((items) =>
items
.filter((item): item is { name: string; full: string; directory: boolean } => Boolean(item))
.sort((a, b) => Number(b.directory) - Number(a.directory) || a.name.localeCompare(b.name)),
),
)
for (const entry of sorted) {
if (lines.length >= STRUCTURE_LIMIT) {
truncated = true
return
}
lines.push(`${" ".repeat(level)}${entry.name}${entry.directory ? "/" : ""}`)
if (entry.directory) yield* visit(entry.full, level + 1)
}
})
yield* visit(root, 0)
return { lines, truncated }
})
return {
description: DESCRIPTION,
parameters: Parameters,
execute: (params: Schema.Schema.Type<typeof Parameters>, ctx: Tool.Context<Metadata>) =>
Effect.gen(function* () {
const target = yield* resolveTarget(params)
const depth = !params.depth || !Number.isInteger(params.depth) || params.depth < 1 || params.depth > 6 ? 3 : params.depth
yield* assertExternalDirectoryEffect(ctx, target.path, { kind: "directory" })
yield* ctx.ask({
permission: "repo_overview",
patterns: [target.repository ?? target.path],
always: [target.repository ?? target.path],
metadata: {
repository: target.repository,
path: target.path,
depth,
},
})
const info = yield* fs.stat(target.path).pipe(Effect.catch(() => Effect.succeed(undefined)))
if (!info) {
if (target.repository) throw new Error(`Repository is not cloned: ${target.repository}. Use repo_clone first.`)
throw new Error(`Directory not found: ${target.path}`)
}
if (info.type !== "Directory") throw new Error(`Path is not a directory: ${target.path}`)
const entries = yield* fs.readDirectoryEntries(target.path).pipe(Effect.orElseSucceed(() => []))
const topLevel = new Set(entries.map((entry) => entry.name))
const dependencyFiles = DEPENDENCY_FILES.filter((file) => topLevel.has(file))
const packageJson = topLevel.has("package.json")
? (yield* fs.readJson(path.join(target.path, "package.json")).pipe(Effect.orElseSucceed(() => ({})))) as Record<string, unknown>
: {}
const entrypoints = [
...(typeof packageJson.main === "string" ? [`main: ${packageJson.main}`] : []),
...(typeof packageJson.module === "string" ? [`module: ${packageJson.module}`] : []),
...(typeof packageJson.types === "string" ? [`types: ${packageJson.types}`] : []),
...(typeof packageJson.bin === "string" ? [`bin: ${packageJson.bin}`] : []),
...(packageJson.bin && typeof packageJson.bin === "object" && !Array.isArray(packageJson.bin)
? Object.keys(packageJson.bin as Record<string, unknown>).map((name) => `bin: ${name}`)
: []),
...(packageJson.exports && typeof packageJson.exports === "object" && !Array.isArray(packageJson.exports)
? Object.keys(packageJson.exports as Record<string, unknown>).slice(0, 10).map((name) => `exports: ${name}`)
: []),
]
const common = commonEntrypoints(new Set([
...topLevel,
...entries
.filter((entry) => entry.name === "src")
.flatMap(() => ["src/index.ts", "src/index.tsx", "src/index.js", "src/main.ts", "src/main.js"]),
]))
const structureResult = yield* structure(target.path, depth)
const branch = yield* git.branch(target.path)
const head = yield* git.run(["rev-parse", "HEAD"], { cwd: target.path })
const headText = head.exitCode === 0 ? head.text().trim() : undefined
const metadata: Metadata = {
path: target.path,
repository: target.repository,
branch,
head: headText,
package_manager: packageManager(topLevel),
ecosystems: ecosystems(topLevel),
dependency_files: dependencyFiles,
entrypoints: [...entrypoints, ...common.map((file) => `file: ${file}`)],
depth,
truncated: structureResult.truncated,
}
return {
title: target.repository ?? path.basename(target.path),
metadata,
output: [
`Path: ${target.path}`,
...(target.repository ? [`Repository: ${target.repository}`] : []),
...(branch ? [`Branch: ${branch}`] : []),
...(headText ? [`HEAD: ${headText}`] : []),
...(metadata.ecosystems.length ? [`Ecosystems: ${metadata.ecosystems.join(", ")}`] : []),
...(metadata.package_manager ? [`Package manager: ${metadata.package_manager}`] : []),
...(metadata.dependency_files.length ? [`Dependency files: ${metadata.dependency_files.join(", ")}`] : []),
...(metadata.entrypoints.length ? ["Likely entrypoints:", ...metadata.entrypoints.map((entry) => `- ${entry}`)] : []),
"Top-level structure:",
...structureResult.lines,
...(structureResult.truncated ? ["(Structure truncated)"] : []),
].join("\n"),
}
}).pipe(Effect.orDie),
} satisfies Tool.DefWithoutID<typeof Parameters, Metadata>
}),
)

View File

@@ -0,0 +1,4 @@
- Summarize the structure and likely entrypoints of a cloned repository or local directory
- Accepts either a cached repository reference or a directory path
- Reports detected ecosystems, dependency files, package manager, likely entrypoints, and a compact structure tree
- Use this after repo_clone to orient quickly before deeper Read, Glob, or Grep investigation

View File

@@ -1,12 +1,11 @@
import { Schema } from "effect"
import { PositiveInt } from "@/util/schema"
import { Effect, Stream } from "effect"
import os from "os"
import { createWriteStream } from "node:fs"
import * as Tool from "./tool"
import path from "path"
import DESCRIPTION from "./bash.txt"
import * as Log from "@opencode-ai/core/util/log"
import { containsPath, type InstanceContext } from "../project/instance-context"
import { InstanceState } from "@/effect/instance-state"
import { lazy } from "@/util/lazy"
import { Language, type Node } from "web-tree-sitter"
@@ -16,18 +15,20 @@ import { Config } from "@/config/config"
import { Flag } from "@opencode-ai/core/flag/flag"
import { Global } from "@opencode-ai/core/global"
import { Shell } from "@/shell/shell"
import { ShellKind, ShellToolID } from "./shell/id"
import { BashArity } from "@/permission/arity"
import * as Truncate from "./truncate"
import { Plugin } from "@/plugin"
import { Effect, Stream } from "effect"
import { ChildProcess } from "effect/unstable/process"
import { ChildProcessSpawner } from "effect/unstable/process/ChildProcessSpawner"
import { InstanceState } from "@/effect/instance-state"
import { ShellPrompt, type Parameters } from "./shell/prompt"
import { BashArity } from "@/permission/arity"
export { Parameters } from "./shell/prompt"
const MAX_METADATA_LENGTH = 30_000
const DEFAULT_TIMEOUT = Flag.OPENCODE_EXPERIMENTAL_BASH_DEFAULT_TIMEOUT_MS || 2 * 60 * 1000
const CWD = new Set(["cd", "push-location", "set-location"])
const CWD = new Set(["cd", "chdir", "popd", "pushd", "push-location", "set-location"])
const FILES = new Set([
...CWD,
"rm",
@@ -50,21 +51,10 @@ const FILES = new Set([
"new-item",
"rename-item",
])
const CMD_FILES = new Set(["copy", "del", "dir", "erase", "md", "mkdir", "move", "rd", "ren", "rename", "rmdir", "type"])
const FLAGS = new Set(["-destination", "-literalpath", "-path"])
const SWITCHES = new Set(["-confirm", "-debug", "-force", "-nonewline", "-recurse", "-verbose", "-whatif"])
export const Parameters = Schema.Struct({
command: Schema.String.annotate({ description: "The command to execute" }),
timeout: Schema.optional(PositiveInt).annotate({ description: "Optional timeout in milliseconds" }),
workdir: Schema.optional(Schema.String).annotate({
description: `The working directory to run the command in. Defaults to the current directory. Use this instead of 'cd' commands.`,
}),
description: Schema.String.annotate({
description:
"Clear, concise description of what this command does in 5-10 words. Examples:\nInput: ls\nOutput: Lists files in current directory\n\nInput: git status\nOutput: Shows working tree status\n\nInput: npm install\nOutput: Installs package dependencies\n\nInput: mkdir foo\nOutput: Creates directory 'foo'",
}),
})
type Part = {
type: string
text: string
@@ -81,7 +71,7 @@ type Chunk = {
size: number
}
export const log = Log.create({ service: "bash-tool" })
export const log = Log.create({ service: "shell-tool" })
const resolveWasm = (asset: string) => {
if (asset.startsWith("file://")) return fileURLToPath(asset)
@@ -187,11 +177,16 @@ function prefix(text: string) {
return text.slice(0, match.index)
}
function pathArgs(list: Part[], ps: boolean) {
function pathArgs(list: Part[], ps: boolean, cmd = false) {
if (!ps) {
return list
.slice(1)
.filter((item) => !item.text.startsWith("-") && !(list[0]?.text === "chmod" && item.text.startsWith("+")))
.filter(
(item) =>
!item.text.startsWith("-") &&
!(cmd && item.text.startsWith("/")) &&
!(list[0]?.text === "chmod" && item.text.startsWith("+")),
)
.map((item) => item.text)
}
@@ -251,13 +246,13 @@ function tail(text: string, maxLines: number, maxBytes: number) {
}
}
const parse = Effect.fn("BashTool.parse")(function* (command: string, ps: boolean) {
const parse = Effect.fn("ShellTool.parse")(function* (command: string, ps: boolean) {
const tree = yield* Effect.promise(() => parser().then((p) => (ps ? p.ps : p.bash).parse(command)))
if (!tree) throw new Error("Failed to parse command")
return tree
})
const ask = Effect.fn("BashTool.ask")(function* (ctx: Tool.Context, scan: Scan) {
const ask = Effect.fn("ShellTool.ask")(function* (ctx: Tool.Context, scan: Scan) {
if (scan.dirs.size > 0) {
const globs = Array.from(scan.dirs).map((dir) => {
if (process.platform === "win32") return AppFileSystem.normalizePathPattern(path.join(dir, "*"))
@@ -273,7 +268,7 @@ const ask = Effect.fn("BashTool.ask")(function* (ctx: Tool.Context, scan: Scan)
if (scan.patterns.size === 0) return
yield* ctx.ask({
permission: "bash",
permission: ShellToolID.id,
patterns: Array.from(scan.patterns),
always: Array.from(scan.always),
metadata: {},
@@ -325,9 +320,8 @@ const parser = lazy(async () => {
return { bash, ps }
})
// TODO: we may wanna rename this tool so it works better on other shells
export const BashTool = Tool.define(
"bash",
export const ShellTool = Tool.define(
ShellToolID.id,
Effect.gen(function* () {
const config = yield* Config.Service
const spawner = yield* ChildProcessSpawner
@@ -335,7 +329,7 @@ export const BashTool = Tool.define(
const trunc = yield* Truncate.Service
const plugin = yield* Plugin.Service
const cygpath = Effect.fn("BashTool.cygpath")(function* (shell: string, text: string) {
const cygpath = Effect.fn("ShellTool.cygpath")(function* (shell: string, text: string) {
const lines = yield* spawner
.lines(ChildProcess.make(shell, ["-lc", 'cygpath -w -- "$1"', "_", text]))
.pipe(Effect.catch(() => Effect.succeed([] as string[])))
@@ -344,7 +338,7 @@ export const BashTool = Tool.define(
return AppFileSystem.normalizePath(file)
})
const resolvePath = Effect.fn("BashTool.resolvePath")(function* (text: string, root: string, shell: string) {
const resolvePath = Effect.fn("ShellTool.resolvePath")(function* (text: string, root: string, shell: string) {
if (process.platform === "win32") {
if (Shell.posix(shell) && text.startsWith("/") && AppFileSystem.windowsPath(text) === text) {
const file = yield* cygpath(shell, text)
@@ -355,7 +349,7 @@ export const BashTool = Tool.define(
return path.resolve(root, text)
})
const argPath = Effect.fn("BashTool.argPath")(function* (arg: string, cwd: string, ps: boolean, shell: string) {
const argPath = Effect.fn("ShellTool.argPath")(function* (arg: string, cwd: string, ps: boolean, shell: string) {
const text = ps ? expand(arg, cwd, shell) : home(unquote(arg))
const file = text && prefix(text)
if (!file || dynamic(file, ps)) return
@@ -364,7 +358,7 @@ export const BashTool = Tool.define(
return yield* resolvePath(next, cwd, shell)
})
const collect = Effect.fn("BashTool.collect")(function* (
const collect = Effect.fn("ShellTool.collect")(function* (
root: Node,
cwd: string,
ps: boolean,
@@ -376,14 +370,15 @@ export const BashTool = Tool.define(
patterns: new Set<string>(),
always: new Set<string>(),
}
const shellKind = ShellKind.from(Shell.name(shell))
for (const node of commands(root)) {
const command = parts(node)
const tokens = command.map((item) => item.text)
const cmd = ps ? tokens[0]?.toLowerCase() : tokens[0]
if (cmd && FILES.has(cmd)) {
for (const arg of pathArgs(command, ps)) {
if (cmd && (FILES.has(cmd) || (shellKind === "cmd" && CMD_FILES.has(cmd)))) {
for (const arg of pathArgs(command, ps, shellKind === "cmd")) {
const resolved = yield* argPath(arg, cwd, ps, shell)
log.info("resolved path", { arg, resolved })
if (!resolved || containsPath(resolved, instance)) continue
@@ -401,7 +396,7 @@ export const BashTool = Tool.define(
return scan
})
const shellEnv = Effect.fn("BashTool.shellEnv")(function* (ctx: Tool.Context, cwd: string) {
const shellEnv = Effect.fn("ShellTool.shellEnv")(function* (ctx: Tool.Context, cwd: string) {
const extra = yield* plugin.trigger(
"shell.env",
{ cwd, sessionID: ctx.sessionID, callID: ctx.callID },
@@ -413,7 +408,7 @@ export const BashTool = Tool.define(
}
})
const run = Effect.fn("BashTool.run")(function* (
const run = Effect.fn("ShellTool.run")(function* (
input: {
shell: string
command: string
@@ -527,7 +522,7 @@ export const BashTool = Tool.define(
const meta: string[] = []
if (expired) {
meta.push(
`bash tool terminated command after exceeding timeout ${input.timeout} ms. If this command is expected to take longer and is not waiting for interactive input, retry with a larger timeout value in milliseconds.`,
`shell tool terminated command after exceeding timeout ${input.timeout} ms. If this command is expected to take longer and is not waiting for interactive input, retry with a larger timeout value in milliseconds.`,
)
}
if (aborted) meta.push("User aborted the command")
@@ -546,7 +541,7 @@ export const BashTool = Tool.define(
}
if (meta.length > 0) {
output += "\n\n<bash_metadata>\n" + meta.join("\n") + "\n</bash_metadata>"
output += "\n\n<shell_metadata>\n" + meta.join("\n") + "\n</shell_metadata>"
}
if (sink) {
const stream = sink
@@ -577,25 +572,14 @@ export const BashTool = Tool.define(
const cfg = yield* config.get()
const shell = Shell.acceptable(cfg.shell)
const name = Shell.name(shell)
const chain =
name === "powershell"
? "If the commands depend on each other and must run sequentially, avoid '&&' in this shell because Windows PowerShell 5.1 does not support it. Use PowerShell conditionals such as `cmd1; if ($?) { cmd2 }` when later commands must depend on earlier success."
: "If the commands depend on each other and must run sequentially, use a single Bash call with '&&' to chain them together (e.g., `git add . && git commit -m \"message\" && git push`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before Bash for git operations, or git add before git commit), run these operations sequentially instead."
log.info("bash tool using shell", { shell })
const limits = yield* trunc.limits()
const instance = yield* InstanceState.context
const prompt = ShellPrompt.render(name, process.platform, limits)
log.info("shell tool using shell", { shell })
return {
description: DESCRIPTION.replaceAll("${directory}", instance.directory)
.replaceAll("${tmp}", Global.Path.tmp)
.replaceAll("${os}", process.platform)
.replaceAll("${shell}", name)
.replaceAll("${chaining}", chain)
.replaceAll("${maxLines}", String(limits.maxLines))
.replaceAll("${maxBytes}", String(limits.maxBytes)),
parameters: Parameters,
execute: (params: Schema.Schema.Type<typeof Parameters>, ctx: Tool.Context) =>
description: prompt.description,
parameters: prompt.parameters,
execute: (params: Parameters, ctx: Tool.Context) =>
Effect.gen(function* () {
const executeInstance = yield* InstanceState.context
const cwd = params.workdir

View File

@@ -0,0 +1,28 @@
/** Classifies a shell executable name into one of the supported shell families. */
export namespace ShellKind {
  /** Every shell kind this tool distinguishes between. */
  export const ids = ["bash", "pwsh", "powershell", "cmd"] as const
  export type ID = (typeof ids)[number]
  // Widened views of the tuples so string lookups type-check.
  const known: readonly string[] = ids
  const powershellKinds: readonly string[] = ["pwsh", "powershell"]
  /** Type guard: true when `value` names a known shell kind. */
  export function has(value: string): value is ID {
    return known.includes(value)
  }
  /** Maps an arbitrary shell name to a known kind, falling back to "bash". */
  export function from(value: string): ID {
    if (has(value)) return value
    return "bash"
  }
  /** True for either PowerShell flavor ("pwsh" or "powershell"). */
  export function powershell(value: string) {
    return powershellKinds.includes(value)
  }
}
/**
 * Canonical identifier under which the shell tool is registered.
 * NOTE(review): the id remains "bash" even though the tool is shell-generic —
 * presumably for backward compatibility with existing permissions/config; confirm.
 */
export namespace ShellToolID {
  export const id = "bash"
  export type ID = typeof id
  /** Type guard: true when `value` equals the shell tool id. */
  export function has(value: string): value is ID {
    return id === value
  }
}

View File

@@ -0,0 +1,296 @@
import { Schema } from "effect"
import DESCRIPTION from "./shell.txt"
// Shell names that use PowerShell semantics vs. cmd.exe semantics; anything
// else falls through to the bash defaults below.
const PS = new Set(["powershell", "pwsh"])
const CMD = new Set(["cmd"])
// Per-shell guidance text for the tool's `description` parameter, each with
// examples written in that shell's own syntax.
const descriptions = {
bash:
"Clear, concise description of what this command does in 5-10 words. Examples:\nInput: ls\nOutput: Lists files in current directory\n\nInput: git status\nOutput: Shows working tree status\n\nInput: npm install\nOutput: Installs package dependencies\n\nInput: mkdir foo\nOutput: Creates directory 'foo'",
powershell:
'Clear, concise description of what this command does in 5-10 words. Examples:\nInput: Get-ChildItem -LiteralPath "."\nOutput: Lists current directory\n\nInput: git status\nOutput: Shows working tree status\n\nInput: npm install\nOutput: Installs package dependencies\n\nInput: New-Item -ItemType Directory -Path "tmp"\nOutput: Creates directory tmp',
cmd:
'Clear, concise description of what this command does in 5-10 words. Examples:\nInput: dir\nOutput: Lists current directory\n\nInput: if exist "package.json" type "package.json"\nOutput: Prints package.json when it exists\n\nInput: mkdir tmp\nOutput: Creates directory tmp',
}
// Output-truncation thresholds that are interpolated into the rendered prompt.
export type Limits = {
maxLines: number
maxBytes: number
}
// Builds the tool's parameter schema. Only the guidance text on the
// `description` field varies between shells; all other fields are shared.
export function parameterSchema(description: string) {
return Schema.Struct({
command: Schema.String.annotate({ description: "The command to execute" }),
// NOTE(review): the previous bash tool constrained this with PositiveInt;
// plain Schema.Number also admits zero/negative values — confirm intended.
timeout: Schema.optional(Schema.Number).annotate({ description: "Optional timeout in milliseconds" }),
workdir: Schema.optional(Schema.String).annotate({
description: `The working directory to run the command in. Defaults to the current directory. Use this instead of 'cd' commands.`,
}),
description: Schema.String.annotate({ description }),
})
}
// Default (bash-flavored) parameter schema and its inferred value type.
export const Parameters = parameterSchema(descriptions.bash)
export type Parameters = Schema.Schema.Type<typeof Parameters>
/**
 * Substitutes every `${key}` placeholder in `template` with the matching
 * entry from `values`. Throws when a placeholder has no entry, so a template
 * and its value set can never silently drift apart.
 */
function renderPrompt(template: string, values: Record<string, string>) {
  const placeholder = /\$\{(\w+)\}/g
  return template.replace(placeholder, (_match, name: string) => {
    const replacement = values[name]
    if (replacement !== undefined) return replacement
    throw new Error(`Missing shell prompt value: ${name}`)
  })
}
/** Human-readable label for a shell executable name; unknown names pass through. */
function shellDisplayName(name: string) {
  switch (name) {
    case "pwsh":
      return "PowerShell (7+)"
    case "powershell":
      return "Windows PowerShell (5.1)"
    case "cmd":
      return "cmd.exe"
    default:
      return name
  }
}
// Shell-notes block prepended to the PowerShell command section. Returns an
// empty string for non-PowerShell shells so interpolation is a no-op.
function powershellNotes(name: string) {
// pwsh (7+) is cross-platform and supports && / || pipeline chain operators.
if (name === "pwsh") {
return `# PowerShell (7+) shell notes
- This cross-platform shell supports pipeline chain operators (\`&&\` and \`||\`).
- Use double quotes for interpolated strings (\`"Hello $name"\`), single quotes for verbatim strings.
- Prefer full cmdlet names like \`Get-ChildItem\`, \`Set-Content\`, \`Remove-Item\`, and \`New-Item\` over aliases.
- Use \`$(...)\` for subexpressions. Use \`@(...)\` for array expressions.
- To call a native executable whose path contains spaces, use the call operator: \`& "path/to/exe" args\`.
- Escape special characters with the PowerShell backtick character.`
}
// Windows PowerShell 5.1 lacks && — chaining advice differs accordingly.
if (name === "powershell") {
return `# Windows PowerShell (5.1) shell notes
- Use \`cmd1; if ($?) { cmd2 }\` to chain dependent commands.
- Use double quotes for interpolated strings (\`"Hello $name"\`), single quotes for verbatim strings.
- Prefer full cmdlet names like \`Get-ChildItem\`, \`Set-Content\`, \`Remove-Item\`, and \`New-Item\` over aliases.
- Use \`$(...)\` for subexpressions. Use \`@(...)\` for array expressions.
- To call a native executable whose path contains spaces, use the call operator: \`& "path/to/exe" args\`.
- Escape special characters with the PowerShell backtick character.`
}
return ""
}
// Per-shell advice on chaining dependent commands, interpolated into each
// command section via ${chain}.
function chainGuidance(name: string) {
// "powershell" (5.1) must be matched before the PS set check below, since PS
// contains it — 5.1 has no && operator while pwsh does.
if (name === "powershell") {
return "If the commands depend on each other and must run sequentially, avoid '&&' in this shell because Windows PowerShell (5.1) does not support it. Use PowerShell conditionals such as `cmd1; if ($?) { cmd2 }` when later commands must depend on earlier success."
}
// Remaining PowerShell flavor here is pwsh (7+), which supports &&.
if (PS.has(name)) {
return "If the commands depend on each other and must run sequentially, use a single Shell call with '&&' to chain them together (e.g., `git add . && git commit -m \"message\" && git push`). For instance, if one operation must complete before another starts (like New-Item before Copy-Item, Write before Shell for git operations, or git add before git commit), run these operations sequentially instead."
}
if (CMD.has(name)) {
return "If the commands depend on each other and must run sequentially, use a single Shell call with `&&` to chain them together (e.g., `mkdir out && dir out`). For instance, if one operation must complete before another starts, run these operations sequentially instead."
}
// Default: bash guidance.
return "If the commands depend on each other and must run sequentially, use a single Bash call with '&&' to chain them together (e.g., `git add . && git commit -m \"message\" && git push`). For instance, if one operation must complete before another starts (like mkdir before cp, Write before Bash for git operations, or git add before git commit), run these operations sequentially instead."
}
// Command-execution guidance for bash-family shells. The whole body is one
// template literal; `chain` and `limits` are interpolated into it.
function bashCommandSection(chain: string, limits: Limits) {
return `Before executing the command, please follow these steps:
1. Directory Verification:
- If the command will create new directories or files, first use \`ls\` to verify the parent directory exists and is the correct location
- For example, before running "mkdir foo/bar", first use \`ls foo\` to check that "foo" exists and is the intended parent directory
2. Command Execution:
- Always quote file paths that contain spaces with double quotes (e.g., rm "path with spaces/file.txt")
- Examples of proper quoting:
- mkdir "/Users/name/My Documents" (correct)
- mkdir /Users/name/My Documents (incorrect - will fail)
- python "/path/with spaces/script.py" (correct)
- python /path/with spaces/script.py (incorrect - will fail)
- After ensuring proper quoting, execute the command.
- Capture the output of the command.
Usage notes:
- The command argument is required.
- You can specify an optional timeout in milliseconds. If not specified, commands will time out after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- If the output exceeds ${limits.maxLines} lines or ${limits.maxBytes} bytes, it will be truncated and the full output will be written to a file. You can use Read with offset/limit to read specific sections or Grep to search the full content. Do NOT use \`head\`, \`tail\`, or other truncation commands to limit output; the full output will already be captured to a file for more precise searching.
- Avoid using Bash with the \`find\`, \`grep\`, \`cat\`, \`head\`, \`tail\`, \`sed\`, \`awk\`, or \`echo\` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use Glob (NOT find or ls)
- Content search: Use Grep (NOT grep or rg)
- Read files: Use Read (NOT cat/head/tail)
- Edit files: Use Edit (NOT sed/awk)
- Write files: Use Write (NOT echo >/cat <<EOF)
- Communication: Output text directly (NOT echo/printf)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple Shell tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two Shell tool calls in parallel.
- ${chain}
- Use ';' only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- AVOID using \`cd <directory> && <command>\`. Use the \`workdir\` parameter to change directories instead.
<good-example>
Use workdir="/foo/bar" with command: pytest tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>`
}
// Command-execution guidance for PowerShell shells. Prepends the per-flavor
// notes and interpolates `chain`, the platform path separator, and `limits`.
function powershellCommandSection(name: string, chain: string, pathSep: string, limits: Limits) {
return `${powershellNotes(name)}
Before executing the command, please follow these steps:
1. Directory Verification:
- If the command will create new directories or files, first use \`Test-Path -LiteralPath <parent>\` to verify the parent directory exists and is the correct location
- For example, before creating \`foo${pathSep}bar\`, first use \`Test-Path -LiteralPath "foo"\` to check that \`foo\` exists and is the intended parent directory
2. Command Execution:
- Always quote file paths that contain spaces with double quotes (e.g., Remove-Item -LiteralPath "path with spaces${pathSep}file.txt")
- Examples of proper quoting:
- New-Item -ItemType Directory -Path "My Documents" (correct)
- New-Item -ItemType Directory -Path My Documents (incorrect - path is split)
- & "path with spaces${pathSep}script.ps1" (correct)
- path with spaces${pathSep}script.ps1 (incorrect - path is split and not invoked)
- After ensuring proper quoting, execute the command.
- Capture the output of the command.
Usage notes:
- The command argument is required.
- You can specify an optional timeout in milliseconds. If not specified, commands will time out after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- If the output exceeds ${limits.maxLines} lines or ${limits.maxBytes} bytes, it will be truncated and the full output will be written to a file. You can use Read with offset/limit to read specific sections or Grep to search the full content. Do NOT use \`Select-Object -First\`, \`Select-Object -Last\`, or other truncation commands to limit output; the full output will already be captured to a file for more precise searching.
- Avoid using Shell with PowerShell file/content cmdlets unless explicitly instructed or when these cmdlets are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use Glob (NOT Get-ChildItem)
- Content search: Use Grep (NOT Select-String)
- Read files: Use Read (NOT Get-Content)
- Edit files: Use Edit (NOT Set-Content)
- Write files: Use Write (NOT Set-Content/Out-File or here-strings)
- Communication: Output text directly (NOT Write-Output/Write-Host)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple Shell tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two Shell tool calls in parallel.
- ${chain}
- Use \`;\` only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- AVOID changing directories inside the command. Use the \`workdir\` parameter to change directories instead.
<good-example>
Use workdir="project${pathSep}subdir" with command: pytest tests
</good-example>
<bad-example>
${name === "powershell" ? `Set-Location -LiteralPath "project${pathSep}subdir"; if ($?) { pytest tests }` : `Set-Location -LiteralPath "project${pathSep}subdir" && pytest tests`}
</bad-example>`
}
// Command-execution guidance for cmd.exe. One template literal with `chain`
// and `limits` interpolated; paths use escaped backslashes throughout.
function cmdCommandSection(chain: string, limits: Limits) {
return `# cmd.exe shell notes
- Use double quotes for paths with spaces.
- Use %VAR% for environment variables.
- Use \`if exist\` for existence checks.
- Use \`call\` when invoking batch files from another batch-style command.
Before executing the command, please follow these steps:
1. Directory Verification:
- If the command will create new directories or files, first use \`if exist\` to verify the parent directory exists and is the correct location
- For example, before creating \`foo\\bar\`, first use \`if exist "foo\\" dir "foo"\` to check that \`foo\` exists and is the intended parent directory
2. Command Execution:
- Always quote file paths that contain spaces with double quotes (e.g., del "path with spaces\\file.txt")
- Examples of proper quoting:
- mkdir "My Documents" (correct)
- mkdir My Documents (incorrect - path is split)
- call "path with spaces\\script.bat" (correct)
- path with spaces\\script.bat (incorrect - path is split and not invoked correctly)
- After ensuring proper quoting, execute the command.
- Capture the output of the command.
Usage notes:
- The command argument is required.
- You can specify an optional timeout in milliseconds. If not specified, commands will time out after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- If the output exceeds ${limits.maxLines} lines or ${limits.maxBytes} bytes, it will be truncated and the full output will be written to a file. You can use Read with offset/limit to read specific sections or Grep to search the full content. Do NOT use \`more\` or other pagination commands to limit output; the full output will already be captured to a file for more precise searching.
- Avoid using Shell with cmd.exe file/content commands unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use Glob (NOT dir /s)
- Content search: Use Grep (NOT findstr)
- Read files: Use Read (NOT type)
- Edit files: Use Edit (NOT copy)
- Write files: Use Write (NOT echo > file)
- Communication: Output text directly (NOT echo)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple Shell tool calls in a single message. For example, if you need to run "dir" and "where cmd", send a single message with two Shell tool calls in parallel.
- ${chain}
- Use \`&\` only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- AVOID changing directories inside the command. Use the \`workdir\` parameter to change directories instead.
<good-example>
Use workdir="project\\subdir" with command: dir
</good-example>
<bad-example>
cd /d "project\\subdir" && dir
</bad-example>`
}
// Selects the prompt profile for the given shell: intro text, workdir advice,
// the per-shell command section, PR-creation guidance, and the parameter
// description. All three branches return the same shape.
function profile(name: string, platform: NodeJS.Platform, limits: Limits) {
const isPowerShell = PS.has(name)
const chain = chainGuidance(name)
// cmd.exe profile.
if (CMD.has(name)) {
return {
intro: `Executes a given ${shellDisplayName(name)} command with optional timeout, ensuring proper handling and security measures.`,
workdirSection:
"All commands run in the current working directory by default. Use the `workdir` parameter if you need to run a command in a different directory. AVOID changing directories inside the command - use `workdir` instead.",
commandSection: cmdCommandSection(chain, limits),
gitCommands: "git commands",
toolName: "Shell",
gitCommandRestriction: "git commands",
createPrInstruction: "Create PR using a temporary body file so cmd.exe quoting stays simple.",
createPrExample: `(\n echo ## Summary\n echo - ^<1-3 bullet points^>\n) > pr-body.txt\ngh pr create --title "the pr title" --body-file pr-body.txt`,
parameterDescription: descriptions.cmd,
}
}
// PowerShell profile (both pwsh and powershell 5.1).
if (isPowerShell) {
return {
intro: `Executes a given ${shellDisplayName(name)} command with optional timeout, ensuring proper handling and security measures.`,
workdirSection:
"All commands run in the current working directory by default. Use the `workdir` parameter if you need to run a command in a different directory. AVOID changing directories inside the command - use `workdir` instead.",
// pathSep follows the host platform since pwsh also runs on POSIX systems.
commandSection: powershellCommandSection(name, chain, platform === "win32" ? "\\" : "/", limits),
gitCommands: "git commands",
toolName: "Shell",
gitCommandRestriction: "git commands",
createPrInstruction: "Create PR using gh pr create with a PowerShell here-string to pass the body correctly.",
createPrExample: `gh pr create --title "the pr title" --body @'
## Summary
- <1-3 bullet points>
'@`,
parameterDescription: descriptions.powershell,
}
}
// Default: bash profile.
return {
intro:
"Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.",
workdirSection:
"All commands run in the current working directory by default. Use the `workdir` parameter if you need to run a command in a different directory. AVOID using `cd <directory> && <command>` patterns - use `workdir` instead.",
commandSection: bashCommandSection(chain, limits),
gitCommands: "bash commands",
toolName: "Shell",
gitCommandRestriction: "git bash commands",
createPrInstruction:
"Create PR using gh pr create with the format below. Use a HEREDOC to pass the body to ensure correct formatting.",
// NOTE(review): this heredoc example appears unterminated (no closing EOF
// line or `)"`), which would show the model a broken example — confirm
// against the real file; lines may have been lost in rendering.
createPrExample: `gh pr create --title "the pr title" --body "$(cat <<'EOF'
## Summary
<1-3 bullet points>`,
parameterDescription: descriptions.bash,
}
}
export function render(name: string, platform: NodeJS.Platform, limits: Limits) {
const selected = profile(name, platform, limits)
return {
description: renderPrompt(DESCRIPTION, {
intro: selected.intro,
os: platform,
shell: name,
workdirSection: selected.workdirSection,
commandSection: selected.commandSection,
gitCommands: selected.gitCommands,
toolName: selected.toolName,
gitCommandRestriction: selected.gitCommandRestriction,
createPrInstruction: selected.createPrInstruction,
createPrExample: selected.createPrExample,
}),
parameters: parameterSchema(selected.parameterDescription),
}
}
export * as ShellPrompt from "./prompt"

View File

@@ -1,54 +1,14 @@
Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.
${intro}
Be aware: OS: ${os}, Shell: ${shell}
All commands run in the current working directory by default. Use the `workdir` parameter if you need to run a command in a different directory. AVOID using `cd <directory> && <command>` patterns - use `workdir` instead.
${workdirSection}
Use `${tmp}` for temporary work outside the workspace. This directory has already been created, already exists, and is pre-approved for external directory access.
IMPORTANT: This tool is for terminal operations like git, npm, docker, etc. DO NOT use it for file operations (reading, writing, editing, searching, finding files) - use the specialized tools for this instead.
Before executing the command, please follow these steps:
1. Directory Verification:
- If the command will create new directories or files, first use `ls` to verify the parent directory exists and is the correct location
- For example, before running "mkdir foo/bar", first use `ls foo` to check that "foo" exists and is the intended parent directory
2. Command Execution:
- Always quote file paths that contain spaces with double quotes (e.g., rm "path with spaces/file.txt")
- Examples of proper quoting:
- mkdir "/Users/name/My Documents" (correct)
- mkdir /Users/name/My Documents (incorrect - will fail)
- python "/path/with spaces/script.py" (correct)
- python /path/with spaces/script.py (incorrect - will fail)
- After ensuring proper quoting, execute the command.
- Capture the output of the command.
Usage notes:
- The command argument is required.
- You can specify an optional timeout in milliseconds. If not specified, commands will time out after 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words.
- If the output exceeds ${maxLines} lines or ${maxBytes} bytes, it will be truncated and the full output will be written to a file. You can use Read with offset/limit to read specific sections or Grep to search the full content. Do NOT use `head`, `tail`, or other truncation commands to limit output; the full output will already be captured to a file for more precise searching.
- Avoid using Bash with the `find`, `grep`, `cat`, `head`, `tail`, `sed`, `awk`, or `echo` commands, unless explicitly instructed or when these commands are truly necessary for the task. Instead, always prefer using the dedicated tools for these commands:
- File search: Use Glob (NOT find or ls)
- Content search: Use Grep (NOT grep or rg)
- Read files: Use Read (NOT cat/head/tail)
- Edit files: Use Edit (NOT sed/awk)
- Write files: Use Write (NOT echo >/cat <<EOF)
- Communication: Output text directly (NOT echo/printf)
- When issuing multiple commands:
- If the commands are independent and can run in parallel, make multiple Bash tool calls in a single message. For example, if you need to run "git status" and "git diff", send a single message with two Bash tool calls in parallel.
- ${chaining}
- Use ';' only when you need to run commands sequentially but don't care if earlier commands fail
- DO NOT use newlines to separate commands (newlines are ok in quoted strings)
- AVOID using `cd <directory> && <command>`. Use the `workdir` parameter to change directories instead.
<good-example>
Use workdir="/foo/bar" with command: pytest tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>
${commandSection}
# Committing changes with git
@@ -67,7 +27,7 @@ Git Safety Protocol:
- CRITICAL: If you already pushed to remote, NEVER amend unless user explicitly requests it (requires force push)
- NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.
1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following bash commands in parallel, each using the Bash tool:
1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. Run the following ${gitCommands} in parallel, each using the ${toolName} tool:
- Run a git status command to see all untracked files.
- Run a git diff command to see both staged and unstaged changes that will be committed.
- Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.
@@ -84,18 +44,18 @@ Git Safety Protocol:
4. If the commit fails due to pre-commit hook, fix the issue and create a NEW commit (see amend rules above)
Important notes:
- NEVER run additional commands to read or explore code, besides git bash commands
- NEVER run additional commands to read or explore code, besides ${gitCommandRestriction}
- NEVER use the TodoWrite or Task tools
- DO NOT push to the remote repository unless the user explicitly asks you to do so
- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.
- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit
# Creating pull requests
Use the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a GitHub URL use the gh command to get the information needed.
Use the gh command via the ${toolName} tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a GitHub URL use the gh command to get the information needed.
IMPORTANT: When the user asks you to create a pull request, follow these steps carefully:
1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. run the following bash commands in parallel using the Bash tool, in order to understand the current state of the branch since it diverged from the main branch:
1. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. Run the following ${gitCommands} in parallel using the ${toolName} tool, in order to understand the current state of the branch since it diverged from the main branch:
- Run a git status command to see all untracked files
- Run a git diff command to see both staged and unstaged changes that will be committed
- Check if the current branch tracks a remote branch and is up to date with the remote, so you know if you need to push to the remote
@@ -104,11 +64,9 @@ IMPORTANT: When the user asks you to create a pull request, follow these steps c
3. You can call multiple tools in a single response. When multiple independent pieces of information are requested and all commands are likely to succeed, run multiple tool calls in parallel for optimal performance. Run the following commands in parallel:
- Create new branch if needed
- Push to remote with -u flag if needed
- Create PR using gh pr create with the format below. Use a HEREDOC to pass the body to ensure correct formatting.
- ${createPrInstruction}
<example>
gh pr create --title "the pr title" --body "$(cat <<'EOF'
## Summary
<1-3 bullet points>
${createPrExample}
</example>
Important:

View File

@@ -1,17 +1,22 @@
import * as Tool from "./tool"
import DESCRIPTION from "./task.txt"
import { Bus } from "@/bus"
import { Session } from "@/session/session"
import { SessionID, MessageID } from "../session/schema"
import { MessageV2 } from "../session/message-v2"
import { Agent } from "../agent/agent"
import type { SessionPrompt } from "../session/prompt"
import { SessionStatus } from "@/session/status"
import { TuiEvent } from "@/cli/cmd/tui/event"
import { Cause, Effect, Option, Schema } from "effect"
import { Config } from "@/config/config"
import { Effect, Schema } from "effect"
import { BackgroundJob } from "@/background/job"
export interface TaskPromptOps {
cancel(sessionID: SessionID): void
resolvePromptParts(template: string): Effect.Effect<SessionPrompt.PromptInput["parts"]>
prompt(input: SessionPrompt.PromptInput): Effect.Effect<MessageV2.WithParts>
loop(input: SessionPrompt.LoopInput): Effect.Effect<MessageV2.WithParts>
}
const id = "task"
@@ -20,24 +25,66 @@ export const Parameters = Schema.Struct({
description: Schema.String.annotate({ description: "A short (3-5 words) description of the task" }),
prompt: Schema.String.annotate({ description: "The task for the agent to perform" }),
subagent_type: Schema.String.annotate({ description: "The type of specialized agent to use for this task" }),
task_id: Schema.optional(Schema.String).annotate({
task_id: Schema.optional(SessionID).annotate({
description:
"This should only be set if you mean to resume a previous task (you can pass a prior task_id and the task will continue the same subagent session as before instead of creating a fresh one)",
}),
command: Schema.optional(Schema.String).annotate({ description: "The command that triggered this task" }),
background: Schema.optional(Schema.Boolean).annotate({
description: "When true, launch the subagent in the background and return immediately",
}),
})
function output(sessionID: SessionID, text: string) {
return [
`task_id: ${sessionID} (for resuming to continue this task if needed)`,
"",
"<task_result>",
text,
"</task_result>",
].join("\n")
}
function backgroundOutput(sessionID: SessionID) {
return [
`task_id: ${sessionID} (for polling this task with task_status)`,
"state: running",
"",
"<task_result>",
"Background task started. Continue your current work and call task_status when you need the result.",
"</task_result>",
].join("\n")
}
function backgroundMessage(input: { sessionID: SessionID; description: string; state: "completed" | "error"; text: string }) {
const tag = input.state === "completed" ? "task_result" : "task_error"
const title =
input.state === "completed"
? `Background task completed: ${input.description}`
: `Background task failed: ${input.description}`
return [title, `task_id: ${input.sessionID}`, `state: ${input.state}`, `<${tag}>`, input.text, `</${tag}>`].join(
"\n",
)
}
function errorText(error: unknown) {
if (error instanceof Error) return error.message
return String(error)
}
export const TaskTool = Tool.define(
id,
Effect.gen(function* () {
const agent = yield* Agent.Service
const bus = yield* Bus.Service
const config = yield* Config.Service
const sessions = yield* Session.Service
const status = yield* SessionStatus.Service
const jobs = yield* BackgroundJob.Service
const run = Effect.fn("TaskTool.execute")(function* (
params: Schema.Schema.Type<typeof Parameters>,
ctx: Tool.Context,
) {
const run = Effect.fn(
"TaskTool.execute",
)(function* (params: Schema.Schema.Type<typeof Parameters>, ctx: Tool.Context) {
const cfg = yield* config.get()
if (!ctx.extra?.bypassAgentCheck) {
@@ -62,7 +109,7 @@ export const TaskTool = Tool.define(
const taskID = params.task_id
const session = taskID
? yield* sessions.get(SessionID.make(taskID)).pipe(Effect.catchCause(() => Effect.succeed(undefined)))
? yield* sessions.get(taskID).pipe(Effect.catchCause(() => Effect.succeed(undefined)))
: undefined
const parent = yield* sessions.get(ctx.sessionID)
const nextSession =
@@ -107,19 +154,118 @@ export const TaskTool = Tool.define(
modelID: msg.info.modelID,
providerID: msg.info.providerID,
}
const parentModel = {
modelID: msg.info.modelID,
providerID: msg.info.providerID,
}
const background = params.background === true
const metadata = {
sessionId: nextSession.id,
model,
...(background ? { background: true } : {}),
}
yield* ctx.metadata({
title: params.description,
metadata: {
sessionId: nextSession.id,
model,
},
metadata,
})
const ops = ctx.extra?.promptOps as TaskPromptOps
if (!ops) return yield* Effect.fail(new Error("TaskTool requires promptOps in ctx.extra"))
const messageID = MessageID.ascending()
const runTask = Effect.fn("TaskTool.runTask")(function* () {
const parts = yield* ops.resolvePromptParts(params.prompt)
const result = yield* ops.prompt({
messageID: MessageID.ascending(),
sessionID: nextSession.id,
model: {
modelID: model.modelID,
providerID: model.providerID,
},
agent: next.name,
tools: {
...(canTodo ? {} : { todowrite: false }),
...(canTask ? {} : { task: false }),
...Object.fromEntries((cfg.experimental?.primary_tools ?? []).map((item) => [item, false])),
},
parts,
})
return result.parts.findLast((item) => item.type === "text")?.text ?? ""
})
const continueIfIdle = Effect.fn("TaskTool.continueIfIdle")(function* (input: {
userID: MessageID
state: "completed" | "error"
}) {
if ((yield* status.get(ctx.sessionID)).type !== "idle") return
const latest = yield* sessions.findMessage(ctx.sessionID, (item) => item.info.role === "user")
if (Option.isNone(latest)) return
if (latest.value.info.id !== input.userID) return
yield* bus.publish(TuiEvent.ToastShow, {
title: input.state === "completed" ? "Background task complete" : "Background task failed",
message:
input.state === "completed"
? `Background task \"${params.description}\" finished. Resuming the main thread.`
: `Background task \"${params.description}\" failed. Resuming the main thread.`,
variant: input.state === "completed" ? "success" : "error",
duration: 5000,
})
yield* ops.loop({ sessionID: ctx.sessionID }).pipe(Effect.ignore)
})
if (background) {
const inject = Effect.fn("TaskTool.injectBackgroundResult")(function* (state: "completed" | "error", text: string) {
const message = yield* ops.prompt({
sessionID: ctx.sessionID,
noReply: true,
model: parentModel,
agent: ctx.agent,
parts: [
{
type: "text",
synthetic: true,
text: backgroundMessage({
sessionID: nextSession.id,
description: params.description,
state,
text,
}),
},
],
})
yield* continueIfIdle({ userID: message.info.id, state })
})
yield* jobs.start({
id: nextSession.id,
type: id,
title: params.description,
metadata: {
parentSessionID: ctx.sessionID,
sessionID: nextSession.id,
subagent: next.name,
},
run: runTask().pipe(
Effect.matchCauseEffect({
onSuccess: (text) => inject("completed", text).pipe(Effect.as(text)),
onFailure: (cause) => {
const text = errorText(Cause.squash(cause))
return inject("error", text).pipe(
Effect.catchCause(() => Effect.void),
Effect.andThen(Effect.failCause(cause)),
)
},
}),
),
})
return {
title: params.description,
metadata,
output: backgroundOutput(nextSession.id),
}
}
function cancel() {
ops.cancel(nextSession.id)
@@ -131,36 +277,11 @@ export const TaskTool = Tool.define(
}),
() =>
Effect.gen(function* () {
const parts = yield* ops.resolvePromptParts(params.prompt)
const result = yield* ops.prompt({
messageID,
sessionID: nextSession.id,
model: {
modelID: model.modelID,
providerID: model.providerID,
},
agent: next.name,
tools: {
...(canTodo ? {} : { todowrite: false }),
...(canTask ? {} : { task: false }),
...Object.fromEntries((cfg.experimental?.primary_tools ?? []).map((item) => [item, false])),
},
parts,
})
const text = yield* runTask()
return {
title: params.description,
metadata: {
sessionId: nextSession.id,
model,
},
output: [
`task_id: ${nextSession.id} (for resuming to continue this task if needed)`,
"",
"<task_result>",
result.parts.findLast((item) => item.type === "text")?.text ?? "",
"</task_result>",
].join("\n"),
metadata,
output: output(nextSession.id, text),
}
}),
() =>
@@ -168,13 +289,12 @@ export const TaskTool = Tool.define(
ctx.abort.removeEventListener("abort", cancel)
}),
)
})
}, Effect.orDie)
return {
description: DESCRIPTION,
parameters: Parameters,
execute: (params: Schema.Schema.Type<typeof Parameters>, ctx: Tool.Context) =>
run(params, ctx).pipe(Effect.orDie),
execute: run,
}
}),
)

View File

@@ -14,11 +14,13 @@ When NOT to use the Task tool:
Usage notes:
1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses
2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result. The output includes a task_id you can reuse later to continue the same subagent session.
3. Each agent invocation starts with a fresh context unless you provide task_id to resume the same subagent session (which continues with its previous messages and tool outputs). When starting fresh, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.
4. The agent's outputs should generally be trusted
5. Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent. Tell it how to verify its work if possible (e.g., relevant test commands).
6. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.
2. By default, task waits for completion and returns the result immediately, along with a task_id you can reuse later to continue the same subagent session.
3. Set background=true to launch asynchronously. In background mode, continue your current work without waiting.
4. For background runs, use task_status(task_id=..., wait=false) to poll, or wait=true to block until done (optionally with timeout_ms).
5. Each agent invocation starts with a fresh context unless you provide task_id to resume the same subagent session (which continues with its previous messages and tool outputs). When starting fresh, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.
6. The agent's outputs should generally be trusted
7. Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent. Tell it how to verify its work if possible (e.g., relevant test commands).
8. If the agent description mentions that it should be used proactively, then you should try your best to use it without the user having to ask for it first. Use your judgement.
Example usage (NOTE: The agents below are fictional examples for illustration only - use the actual agents listed above):

View File

@@ -0,0 +1,197 @@
import * as Tool from "./tool"
import DESCRIPTION from "./task_status.txt"
import { Session } from "@/session/session"
import { SessionID } from "@/session/schema"
import { MessageV2 } from "@/session/message-v2"
import { SessionStatus } from "@/session/status"
import { PositiveInt } from "@/util/schema"
import { Effect, Option, Schema } from "effect"
import { BackgroundJob } from "@/background/job"
// Default blocking-wait budget for wait=true (milliseconds).
const DEFAULT_TIMEOUT = 60_000
// Interval between status checks while waiting for a task to finish.
const POLL_MS = 300
// Input schema for the task_status tool.
const Parameters = Schema.Struct({
  task_id: SessionID.annotate({ description: "The task_id returned by the task tool" }),
  wait: Schema.optional(Schema.Boolean).annotate({ description: "When true, wait until the task reaches a terminal state or timeout" }),
  timeout_ms: Schema.optional(PositiveInt).annotate({
    description: "Maximum milliseconds to wait when wait=true (default: 60000)",
  }),
})
// Coarse task states surfaced to the caller; "completed"/"error" are terminal.
type State = "running" | "completed" | "error"
// Snapshot of a task: its state plus human-readable progress/result text.
type InspectResult = { state: State; text: string }
// Render the tool's compact, parseable status output block.
function format(input: { taskID: SessionID; state: State; text: string }) {
  const lines = [
    `task_id: ${input.taskID}`,
    `state: ${input.state}`,
    "",
    "<task_result>",
    input.text,
    "</task_result>",
  ]
  return lines.join("\n")
}
// Best-effort human-readable message for an assistant error: prefer
// error.data.message when it is a non-empty string, else fall back to the name.
function errorText(error: NonNullable<MessageV2.Assistant["error"]>) {
  const data = Reflect.get(error, "data")
  if (data && typeof data === "object") {
    const message = Reflect.get(data, "message")
    if (typeof message === "string" && message) return message
  }
  return error.name
}
// Map a background-job record onto the tool's task state/text shape.
function jobResult(job: BackgroundJob.Info): InspectResult {
  switch (job.status) {
    case "running":
      return { state: "running", text: "Task is still running." }
    case "completed":
      return { state: "completed", text: job.output ?? "" }
    default:
      // Any other terminal status (failed, cancelled, ...) surfaces as an error.
      return { state: "error", text: job.error ?? `Task ${job.status}.` }
  }
}
// Tool that reports the status of a subagent task (session) launched by the
// task tool, optionally blocking until the task reaches a terminal state.
export const TaskStatusTool = Tool.define(
  "task_status",
  Effect.gen(function* () {
    const sessions = yield* Session.Service
    const status = yield* SessionStatus.Service
    const jobs = yield* BackgroundJob.Service
    // Derive a task's state from its session history; used when no
    // background-job record exists for the task id.
    const inspect: (taskID: SessionID) => Effect.Effect<InspectResult> = Effect.fn("TaskStatusTool.inspect")(function* (
      taskID: SessionID,
    ) {
      const current = yield* status.get(taskID)
      // Session is actively working or retrying -> still running.
      if (current.type === "busy" || current.type === "retry") {
        return {
          state: "running" as const,
          text: current.type === "retry" ? `Task is retrying: ${current.message}` : "Task is still running.",
        }
      }
      const latestAssistant = yield* sessions.findMessage(taskID, (item) => item.info.role === "assistant")
      // No assistant message yet: the task has not produced anything.
      if (Option.isNone(latestAssistant)) {
        return {
          state: "running" as const,
          text: "Task has started but has not produced output yet.",
        }
      }
      // Narrow the role for the type checker; the findMessage predicate above
      // already filtered on role === "assistant".
      if (latestAssistant.value.info.role !== "assistant") {
        return {
          state: "running" as const,
          text: "Task has started but has not produced output yet.",
        }
      }
      const latestUser = yield* sessions.findMessage(taskID, (item) => item.info.role === "user")
      // A user message newer than the last assistant reply means a fresh turn
      // was queued and not answered yet (message ids are ordered).
      if (
        Option.isSome(latestUser) &&
        latestUser.value.info.role === "user" &&
        latestUser.value.info.id > latestAssistant.value.info.id
      ) {
        return {
          state: "running" as const,
          text: "Task is starting.",
        }
      }
      const text = latestAssistant.value.parts.findLast((part) => part.type === "text")?.text ?? ""
      // Errors take precedence; prefer the assistant's text, else the error summary.
      if (latestAssistant.value.info.error) {
        return {
          state: "error" as const,
          text: text || errorText(latestAssistant.value.info.error),
        }
      }
      // "tool-calls"/"unknown" finish reasons mean the turn is mid-flight, not done.
      const done =
        !!latestAssistant.value.info.finish && !["tool-calls", "unknown"].includes(latestAssistant.value.info.finish)
      if (done) {
        return {
          state: "completed" as const,
          text,
        }
      }
      return {
        state: "running" as const,
        text: text || "Task is still running.",
      }
    })
    // Poll inspect() every POLL_MS until terminal or the timeout budget is spent.
    const waitForTerminal: (
      taskID: SessionID,
      timeout: number,
    ) => Effect.Effect<{ result: InspectResult; timedOut: boolean }> = Effect.fn(
      "TaskStatusTool.waitForTerminal",
    )(function* (taskID: SessionID, timeout: number) {
      const result = yield* inspect(taskID)
      if (result.state !== "running") return { result, timedOut: false }
      if (timeout <= 0) return { result, timedOut: true }
      const sleep = Math.min(POLL_MS, timeout)
      yield* Effect.sleep(sleep)
      return yield* waitForTerminal(taskID, timeout - sleep)
    })
    const run = Effect.fn(
      "TaskStatusTool.execute",
    )(function* (params: Schema.Schema.Type<typeof Parameters>, _ctx: Tool.Context) {
      // Fails if the referenced task session does not exist.
      yield* sessions.get(params.task_id)
      const job = yield* jobs.get(params.task_id)
      // Prefer the background-job record when one exists: it tracks completion
      // and captured output directly, and supports blocking waits natively.
      const waitedJob =
        job && params.wait === true
          ? yield* jobs.wait({ id: params.task_id, timeout: params.timeout_ms ?? DEFAULT_TIMEOUT })
          : { info: job, timedOut: false }
      if (waitedJob.info) {
        const result = jobResult(waitedJob.info)
        return {
          title: "Task status",
          metadata: {
            task_id: params.task_id,
            state: result.state,
            timed_out: waitedJob.timedOut,
          },
          output: format({
            taskID: params.task_id,
            state: result.state,
            text: waitedJob.timedOut
              ? `Timed out after ${params.timeout_ms ?? DEFAULT_TIMEOUT}ms while waiting for task completion.`
              : result.text,
          }),
        }
      }
      // No job record (foreground task): infer state from session messages.
      const waited =
        params.wait === true
          ? yield* waitForTerminal(params.task_id, params.timeout_ms ?? DEFAULT_TIMEOUT)
          : { result: yield* inspect(params.task_id), timedOut: false }
      const outputText = waited.timedOut
        ? `Timed out after ${params.timeout_ms ?? DEFAULT_TIMEOUT}ms while waiting for task completion.`
        : waited.result.text
      return {
        title: "Task status",
        metadata: {
          task_id: params.task_id,
          state: waited.result.state,
          timed_out: waited.timedOut,
        },
        output: format({
          taskID: params.task_id,
          state: waited.result.state,
          text: outputText,
        }),
      }
    }, Effect.orDie)
    return {
      description: DESCRIPTION,
      parameters: Parameters,
      execute: run,
    }
  }),
)

View File

@@ -0,0 +1,13 @@
Poll the status of a subagent task launched with the task tool.
Use this to check background tasks started with `task(background=true)`.
Parameters:
- `task_id` (required): the task session id returned by the task tool
- `wait` (optional): when true, wait for completion
- `timeout_ms` (optional): max wait duration in milliseconds when `wait=true`
Returns compact, parseable output:
- `task_id`
- `state` (`running`, `completed`, or `error`)
- `<task_result>...</task_result>` containing final output, error summary, or current progress text

View File

@@ -90,7 +90,7 @@ function bodyWithChecks(ast: SchemaAST.AST): z.ZodTypeAny {
// Schema.withDecodingDefault also attaches encoding, but we want `.default(v)`
// on the inner Zod rather than a transform wrapper — so optional ASTs whose
// encoding resolves a default from Option.none() route through body()/opt().
const hasEncoding = ast.encoding?.length && ast._tag !== "Declaration"
const hasEncoding = ast.encoding?.length && (ast._tag !== "Declaration" || ast.typeParameters.length === 0)
const hasTransform = hasEncoding && !(SchemaAST.isOptional(ast) && extractDefault(ast) !== undefined)
const base = hasTransform ? encoded(ast) : body(ast)
return ast.checks?.length ? applyChecks(base, ast.checks, ast) : base
@@ -256,6 +256,8 @@ function body(ast: SchemaAST.AST): z.ZodTypeAny {
return array(ast)
case "Declaration":
return decl(ast)
case "Suspend":
return z.lazy(() => walk(ast.thunk()))
default:
return fail(ast)
}

View File

@@ -0,0 +1,106 @@
import path from "path"
import { Global } from "@opencode-ai/core/global"
export type Reference = {
host: string
path: string
segments: string[]
owner?: string
repo: string
remote: string
label: string
}
// Canonicalize a raw repository string: trim whitespace, drop a leading
// "git+" scheme prefix, strip any #fragment, then remove trailing slashes.
function normalize(input: string) {
  let out = input.trim()
  out = out.replace(/^git\+/, "")
  out = out.replace(/#.*$/, "")
  return out.replace(/\/+$/, "")
}
// Remove a single trailing ".git" from a segment, if present.
function trimGitSuffix(input: string) {
  return input.endsWith(".git") ? input.slice(0, -4) : input
}
// Split a path into cleaned segments: trim each piece, drop ".git" suffixes,
// and discard empties produced by duplicate or trailing slashes.
function parts(input: string) {
  const segments: string[] = []
  for (const raw of input.split("/")) {
    const segment = trimGitSuffix(raw.trim())
    if (segment) segments.push(segment)
  }
  return segments
}
// Heuristic: does this segment look like a hostname rather than an owner name?
function hostLike(input: string) {
  if (input === "localhost") return true
  return input.includes(".") || input.includes(":")
}
// Ensure a trailing slash so the value can serve as a URL base for resolution.
function withSlash(input: string) {
  if (input.endsWith("/")) return input
  return `${input}/`
}
// Build the clone URL for a GitHub path, honoring an optional mirror base URL
// from OPENCODE_REPO_CLONE_GITHUB_BASE_URL.
function githubRemote(pathname: string) {
  const base = process.env.OPENCODE_REPO_CLONE_GITHUB_BASE_URL
  if (base) return new URL(`${pathname}.git`, withSlash(base)).href
  return `https://github.com/${pathname}.git`
}
// Assemble a normalized Reference from a host plus raw path segments.
// Returns null when no usable segments remain after cleaning.
function build(input: { host: string; segments: string[]; remote?: string }) {
  const segments = input.segments.map(trimGitSuffix).filter(Boolean)
  if (segments.length === 0) return null
  const pathname = segments.join("/")
  const host = input.host.toLowerCase()
  // Keep the caller-supplied remote when given; otherwise derive one.
  const remote =
    input.remote ?? (host === "github.com" ? githubRemote(pathname) : `https://${host}/${pathname}.git`)
  return {
    host,
    path: pathname,
    segments,
    // Only classic two-segment paths have a meaningful "owner".
    owner: segments.length === 2 ? segments[0] : undefined,
    repo: segments[segments.length - 1],
    remote,
    // GitHub owner/repo pairs get the short label; everything else is host-qualified.
    label: host === "github.com" && segments.length === 2 ? pathname : `${host}/${pathname}`,
  } satisfies Reference
}
// Parse a user-supplied repository reference — full URL, scp-style remote,
// "github:owner/repo" shorthand, bare "owner/repo", or "host/path" — into a
// Reference. Returns null when nothing parseable remains.
export function parseRepositoryReference(input: string) {
  const cleaned = normalize(input)
  if (!cleaned) return null
  // Explicit "github:owner/repo" shorthand.
  const githubPrefixed = cleaned.match(/^github:([^/\s]+)\/([^/\s]+)$/)
  if (githubPrefixed) return build({ host: "github.com", segments: [githubPrefixed[1], githubPrefixed[2]] })
  if (!cleaned.includes("://")) {
    // scp-style remote (e.g. git@host:owner/repo.git) — keep the raw string as remote.
    const scp = cleaned.match(/^(?:[^@/\s]+@)?([^:/\s]+):(.+)$/)
    if (scp) return build({ host: scp[1], segments: parts(scp[2]), remote: cleaned })
    const direct = parts(cleaned)
    // "host.tld/group/project" — first segment looks like a hostname.
    if (direct.length >= 2 && hostLike(direct[0])) return build({ host: direct[0], segments: direct.slice(1) })
    // Bare "owner/repo" defaults to GitHub.
    if (direct.length === 2) return build({ host: "github.com", segments: direct })
  }
  // Full URL (https://, ssh://, file:, ...); anything unparseable yields null.
  try {
    const url = new URL(cleaned)
    const segments = parts(url.pathname)
    const host = url.protocol === "file:" ? "file" : url.host
    const remote = host === "github.com" ? githubRemote(segments.join("/")) : cleaned
    return build({ host, segments, remote })
  } catch {
    return null
  }
}
// Extract { owner, repo } from a GitHub remote URL or scp-style GitHub remote.
// Returns null for anything that is not a two-segment github.com reference.
export function parseGitHubRemote(input: string) {
  const cleaned = normalize(input)
  const scpGitHub = /^(?:[^@/\s]+@)?github\.com:/.test(cleaned)
  if (!cleaned.includes("://") && !scpGitHub) return null
  const parsed = parseRepositoryReference(cleaned)
  if (!parsed) return null
  if (parsed.host !== "github.com" || !parsed.owner || parsed.segments.length !== 2) return null
  return { owner: parsed.owner, repo: parsed.repo }
}
// Local cache directory for a repository, nested under Global.Path.repos by
// host then path segments. The host is split on ":" — presumably so host:port
// remotes stay filesystem-safe; verify against callers.
export function repositoryCachePath(input: Reference) {
  const hostParts = input.host.split(":")
  return path.join(Global.Path.repos, ...hostParts, ...input.segments)
}
// Two references point at the same repository when both host and full path match.
export function sameRepositoryReference(left: Reference, right: Reference) {
  if (left.host !== right.host) return false
  return left.path === right.path
}

View File

@@ -0,0 +1,43 @@
import { Identifier } from "@/id/id"
import { SyncEvent } from "@/sync"
import { withStatics } from "@/util/schema"
import * as Schema from "effect/Schema"
// Branded identifier type for bus events. IDs use an "evt" prefix with an
// ascending suffix (see Identifier.create) so later events sort after earlier ones.
export const ID = Schema.String.pipe(
  Schema.brand("Event.ID"),
  withStatics((s) => ({
    // Mint a fresh event ID.
    create: () => s.make(Identifier.create("evt", "ascending")),
  })),
)
export type ID = Schema.Schema.Type<typeof ID>
// Define an event: builds the bus payload schema (id + optional metadata +
// type tag + data) and a matching sync projection, returning the payload
// schema augmented with Sync/version/aggregate statics.
export function define<const Type extends string, Fields extends Schema.Struct.Fields>(input: {
  type: Type
  schema: Fields
  aggregate: string
  version?: number
}) {
  // Envelope carried on the bus.
  const payload = Schema.Struct({
    id: ID,
    metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
    type: Schema.Literal(input.type),
    data: Schema.Struct(input.schema),
  }).annotate({ identifier: input.type })
  // Sync projection reuses the payload's data schema so persisted events match
  // what is published on the bus.
  const sync = SyncEvent.define({
    type: input.type,
    version: input.version ?? 1,
    aggregate: input.aggregate,
    schema: payload.fields.data,
  })
  return Object.assign(payload, {
    Sync: sync,
    version: input.version,
    aggregate: input.aggregate,
  })
}
export * as Event from "./event"

View File

@@ -1,261 +0,0 @@
import { produce, type WritableDraft } from "immer"
import { SessionEvent } from "./session-event"
import { SessionEntry } from "./session-entry"
// In-memory projection of a session: finalized entries plus prompts queued
// while an assistant turn is still in progress.
export type MemoryState = {
  entries: SessionEntry.Entry[]
  pending: SessionEntry.Entry[]
}
// Storage abstraction consumed by the event-application step. `Result` is
// whatever finish() yields once events have been applied.
export interface Adapter<Result> {
  // The most recent assistant entry that has not completed, if any.
  readonly getCurrentAssistant: () => SessionEntry.Assistant | undefined
  // Replace the current (active) assistant entry with an updated copy.
  readonly updateAssistant: (assistant: SessionEntry.Assistant) => void
  readonly appendEntry: (entry: SessionEntry.Entry) => void
  // Queue an entry to be applied later (e.g. a prompt arriving mid-turn).
  readonly appendPending: (entry: SessionEntry.Entry) => void
  readonly finish: () => Result
}
// Build an Adapter that applies events directly to an in-memory MemoryState.
// finish() hands back the same (mutated) state object.
export function memory(state: MemoryState): Adapter<MemoryState> {
  // Locate the newest assistant entry whose turn has not completed yet.
  const findActive = () => {
    for (let index = state.entries.length - 1; index >= 0; index--) {
      const candidate = state.entries[index]
      if (candidate?.type === "assistant" && !candidate.time.completed) return index
    }
    return -1
  }
  return {
    getCurrentAssistant() {
      const index = findActive()
      if (index < 0) return undefined
      const entry = state.entries[index]
      if (entry?.type !== "assistant") return undefined
      return entry
    },
    updateAssistant(assistant) {
      const index = findActive()
      if (index < 0) return
      const existing = state.entries[index]
      if (existing?.type !== "assistant") return
      state.entries[index] = assistant
    },
    appendEntry(entry) {
      state.entries.push(entry)
    },
    appendPending(entry) {
      state.pending.push(entry)
    },
    finish() {
      return state
    },
  }
}
/**
 * Apply one session event to the projected entry list behind `adapter`.
 *
 * Streaming events (step lifecycle, text/reasoning/tool deltas, retries) are
 * applied to the in-flight assistant entry as immutable immer updates via
 * `Adapter.updateAssistant`; prompt/synthetic/compaction events append new
 * entries. Returns whatever `adapter.finish()` yields.
 */
export function stepWith<Result>(adapter: Adapter<Result>, event: SessionEvent.Event): Result {
  const currentAssistant = adapter.getCurrentAssistant()
  type DraftAssistant = WritableDraft<SessionEntry.Assistant>
  type DraftTool = WritableDraft<SessionEntry.AssistantTool>
  type DraftText = WritableDraft<SessionEntry.AssistantText>
  type DraftReasoning = WritableDraft<SessionEntry.AssistantReasoning>
  // Run an immer recipe against the in-flight assistant; no-op when none open.
  const mutateAssistant = (recipe: (draft: DraftAssistant) => void) => {
    if (!currentAssistant) return
    adapter.updateAssistant(produce(currentAssistant, recipe))
  }
  // Newest tool part, optionally restricted to a specific call id.
  const latestTool = (draft: DraftAssistant, callID?: string) =>
    draft.content.findLast(
      (item): item is DraftTool => item.type === "tool" && (callID === undefined || item.callID === callID),
    )
  // Newest text part of the draft.
  const latestText = (draft: DraftAssistant) =>
    draft.content.findLast((item): item is DraftText => item.type === "text")
  // Newest reasoning part of the draft.
  const latestReasoning = (draft: DraftAssistant) =>
    draft.content.findLast((item): item is DraftReasoning => item.type === "reasoning")
  SessionEvent.Event.match(event, {
    prompt: (event) => {
      const entry = SessionEntry.User.fromEvent(event)
      // Prompts sent while an assistant turn is still streaming are queued.
      if (currentAssistant) {
        adapter.appendPending(entry)
        return
      }
      adapter.appendEntry(entry)
    },
    synthetic: (event) => {
      adapter.appendEntry(SessionEntry.Synthetic.fromEvent(event))
    },
    "step.started": (event) => {
      // A new step implicitly completes whatever assistant entry was open.
      mutateAssistant((draft) => {
        draft.time.completed = event.timestamp
      })
      adapter.appendEntry(SessionEntry.Assistant.fromEvent(event))
    },
    "step.ended": (event) => {
      mutateAssistant((draft) => {
        draft.time.completed = event.timestamp
        draft.cost = event.cost
        draft.tokens = event.tokens
      })
    },
    "text.started": () => {
      mutateAssistant((draft) => {
        draft.content.push({
          type: "text",
          text: "",
        })
      })
    },
    "text.delta": (event) => {
      mutateAssistant((draft) => {
        const match = latestText(draft)
        if (match) match.text += event.delta
      })
    },
    "text.ended": (event) => {
      // Fix: adopt the event's authoritative final text instead of dropping
      // it, mirroring `reasoning.ended` below (guards against missed deltas).
      mutateAssistant((draft) => {
        const match = latestText(draft)
        if (match) match.text = event.text
      })
    },
    "tool.input.started": (event) => {
      mutateAssistant((draft) => {
        draft.content.push({
          type: "tool",
          callID: event.callID,
          name: event.name,
          time: {
            created: event.timestamp,
          },
          state: {
            status: "pending",
            input: "",
          },
        })
      })
    },
    "tool.input.delta": (event) => {
      mutateAssistant((draft) => {
        const match = latestTool(draft, event.callID)
        // oxlint-disable-next-line no-base-to-string -- event.delta is a Schema.String (runtime string)
        if (match && match.state.status === "pending") match.state.input += event.delta
      })
    },
    "tool.input.ended": () => {},
    "tool.called": (event) => {
      mutateAssistant((draft) => {
        const match = latestTool(draft, event.callID)
        if (!match) return
        match.time.ran = event.timestamp
        // Input streaming is done: promote to the parsed-input running state.
        match.state = {
          status: "running",
          input: event.input,
        }
      })
    },
    "tool.success": (event) => {
      mutateAssistant((draft) => {
        const match = latestTool(draft, event.callID)
        if (match && match.state.status === "running") {
          match.state = {
            status: "completed",
            input: match.state.input,
            output: event.output ?? "",
            title: event.title,
            metadata: event.metadata ?? {},
            attachments: [...(event.attachments ?? [])],
          }
        }
      })
    },
    "tool.error": (event) => {
      mutateAssistant((draft) => {
        const match = latestTool(draft, event.callID)
        if (match && match.state.status === "running") {
          match.state = {
            status: "error",
            error: event.error,
            input: match.state.input,
            metadata: event.metadata ?? {},
          }
        }
      })
    },
    "reasoning.started": () => {
      mutateAssistant((draft) => {
        draft.content.push({
          type: "reasoning",
          text: "",
        })
      })
    },
    "reasoning.delta": (event) => {
      mutateAssistant((draft) => {
        const match = latestReasoning(draft)
        if (match) match.text += event.delta
      })
    },
    "reasoning.ended": (event) => {
      // Final text from the event replaces the accumulated deltas.
      mutateAssistant((draft) => {
        const match = latestReasoning(draft)
        if (match) match.text = event.text
      })
    },
    retried: (event) => {
      mutateAssistant((draft) => {
        draft.retries = [...(draft.retries ?? []), SessionEntry.AssistantRetry.fromEvent(event)]
      })
    },
    compacted: (event) => {
      adapter.appendEntry(SessionEntry.Compaction.fromEvent(event))
    },
  })
  return adapter.finish()
}
/**
 * Pure transition function: returns the next `MemoryState` produced by
 * applying `event`, leaving `old` untouched (immer structural sharing).
 */
export function step(old: MemoryState, event: SessionEvent.Event): MemoryState {
  return produce(old, (draft) => void stepWith(memory(draft as MemoryState), event))
}
export * as SessionEntryStepper from "./session-entry-stepper"

View File

@@ -1,220 +0,0 @@
import { Schema } from "effect"
import { NonNegativeInt } from "@/util/schema"
import { SessionEvent } from "./session-event"
// Entry IDs reuse the session-event ID brand so an entry can carry the id of
// the event that produced it.
export const ID = SessionEvent.ID
export type ID = Schema.Schema.Type<typeof ID>
// Fields shared by every entry variant.
const Base = {
  id: SessionEvent.ID,
  metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
  time: Schema.Struct({
    created: Schema.DateTimeUtc,
  }),
}
/** A user prompt materialized as a session entry. */
export class User extends Schema.Class<User>("Session.Entry.User")({
  ...Base,
  // Text/files/agents reuse the prompt event's field schemas verbatim.
  text: SessionEvent.Prompt.fields.text,
  files: SessionEvent.Prompt.fields.files,
  agents: SessionEvent.Prompt.fields.agents,
  type: Schema.Literal("user"),
  // Redeclares `time` with the same shape as Base (created only).
  time: Schema.Struct({
    created: Schema.DateTimeUtc,
  }),
}) {
  /** Build the entry from a `prompt` event, keeping the event's id. */
  static fromEvent(event: SessionEvent.Prompt) {
    return new User({
      id: event.id,
      type: "user",
      metadata: event.metadata,
      text: event.text,
      files: event.files,
      agents: event.agents,
      time: { created: event.timestamp },
    })
  }
}
/** Entry materialized from a `synthetic` event; reuses the event's fields. */
export class Synthetic extends Schema.Class<Synthetic>("Session.Entry.Synthetic")({
  // Spread order matters: event fields first, then Base (id/metadata/time),
  // then the entry's own discriminant overrides the event's `type` literal.
  ...SessionEvent.Synthetic.fields,
  ...Base,
  type: Schema.Literal("synthetic"),
}) {
  /** Carry the event over wholesale, stamping `time.created` from it. */
  static fromEvent(event: SessionEvent.Synthetic) {
    return new Synthetic({
      ...event,
      time: { created: event.timestamp },
    })
  }
}
/** Tool call registered; its input is still streaming in as raw text. */
export class ToolStatePending extends Schema.Class<ToolStatePending>("Session.Entry.ToolState.Pending")({
  status: Schema.Literal("pending"),
  // Raw input accumulated from `tool.input.delta` events (unparsed text).
  input: Schema.String,
}) {}
/** Tool call dispatched with parsed input; awaiting a result. */
export class ToolStateRunning extends Schema.Class<ToolStateRunning>("Session.Entry.ToolState.Running")({
  status: Schema.Literal("running"),
  input: Schema.Record(Schema.String, Schema.Unknown),
  title: Schema.String.pipe(Schema.optional),
  metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
}) {}
/** Tool call finished successfully; `output` holds the rendered result. */
export class ToolStateCompleted extends Schema.Class<ToolStateCompleted>("Session.Entry.ToolState.Completed")({
  status: Schema.Literal("completed"),
  input: Schema.Record(Schema.String, Schema.Unknown),
  output: Schema.String,
  title: Schema.String,
  metadata: Schema.Record(Schema.String, Schema.Unknown),
  attachments: SessionEvent.FileAttachment.pipe(Schema.Array, Schema.optional),
}) {}
/** Tool call failed; `error` carries the message. */
export class ToolStateError extends Schema.Class<ToolStateError>("Session.Entry.ToolState.Error")({
  status: Schema.Literal("error"),
  input: Schema.Record(Schema.String, Schema.Unknown),
  error: Schema.String,
  metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
}) {}
// Discriminated union over `status` covering the tool-call lifecycle.
export const ToolState = Schema.Union([ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError]).pipe(
  Schema.toTaggedUnion("status"),
)
export type ToolState = Schema.Schema.Type<typeof ToolState>
/** A tool invocation rendered inside an assistant entry's content. */
export class AssistantTool extends Schema.Class<AssistantTool>("Session.Entry.Assistant.Tool")({
  type: Schema.Literal("tool"),
  callID: Schema.String,
  name: Schema.String,
  state: ToolState,
  time: Schema.Struct({
    created: Schema.DateTimeUtc,
    // Set when the tool actually executes (`tool.called`).
    ran: Schema.DateTimeUtc.pipe(Schema.optional),
    completed: Schema.DateTimeUtc.pipe(Schema.optional),
    // NOTE(review): not written anywhere in this file — presumably stamped
    // when the call is pruned from context; confirm against callers.
    pruned: Schema.DateTimeUtc.pipe(Schema.optional),
  }),
}) {}
/** Plain streamed assistant text. */
export class AssistantText extends Schema.Class<AssistantText>("Session.Entry.Assistant.Text")({
  type: Schema.Literal("text"),
  text: Schema.String,
}) {}
/** Streamed reasoning ("thinking") text. */
export class AssistantReasoning extends Schema.Class<AssistantReasoning>("Session.Entry.Assistant.Reasoning")({
  type: Schema.Literal("reasoning"),
  text: Schema.String,
}) {}
/** Record of a retried attempt attached to an assistant entry. */
export class AssistantRetry extends Schema.Class<AssistantRetry>("Session.Entry.Assistant.Retry")({
  attempt: NonNegativeInt,
  error: SessionEvent.RetryError,
  time: Schema.Struct({
    created: Schema.DateTimeUtc,
  }),
}) {
  /** Build from a `retried` event, stamping the event's timestamp. */
  static fromEvent(event: SessionEvent.Retried) {
    return new AssistantRetry({
      attempt: event.attempt,
      error: event.error,
      time: {
        created: event.timestamp,
      },
    })
  }
}
// Discriminated union over `type` of everything assistant content can hold.
export const AssistantContent = Schema.Union([AssistantText, AssistantReasoning, AssistantTool]).pipe(
  Schema.toTaggedUnion("type"),
)
export type AssistantContent = Schema.Schema.Type<typeof AssistantContent>
/** One assistant turn: streamed content plus cost/usage accounting. */
export class Assistant extends Schema.Class<Assistant>("Session.Entry.Assistant")({
  ...Base,
  type: Schema.Literal("assistant"),
  content: AssistantContent.pipe(Schema.Array),
  retries: AssistantRetry.pipe(Schema.Array, Schema.optional),
  cost: Schema.Finite.pipe(Schema.optional),
  // Token usage, filled in by the `step.ended` event.
  tokens: Schema.Struct({
    input: NonNegativeInt,
    output: NonNegativeInt,
    reasoning: NonNegativeInt,
    cache: Schema.Struct({
      read: NonNegativeInt,
      write: NonNegativeInt,
    }),
  }).pipe(Schema.optional),
  error: Schema.String.pipe(Schema.optional),
  time: Schema.Struct({
    created: Schema.DateTimeUtc,
    // Absent while the turn is still streaming.
    completed: Schema.DateTimeUtc.pipe(Schema.optional),
  }),
}) {
  /** Seed an empty assistant entry from a `step.started` event. */
  static fromEvent(event: SessionEvent.Step.Started) {
    return new Assistant({
      id: event.id,
      type: "assistant",
      time: {
        created: event.timestamp,
      },
      content: [],
      retries: [],
    })
  }
}
/** Marker entry recording that the conversation history was compacted. */
export class Compaction extends Schema.Class<Compaction>("Session.Entry.Compaction")({
  // Spread order matters: Base comes last so its id/metadata/time shapes win
  // over the event's, while `type` below overrides the event's literal.
  ...SessionEvent.Compacted.fields,
  type: Schema.Literal("compaction"),
  ...Base,
}) {
  /** Carry the event's fields over, restamping `type` and `time.created`. */
  static fromEvent(event: SessionEvent.Compacted) {
    return new Compaction({
      ...event,
      type: "compaction",
      time: { created: event.timestamp },
    })
  }
}
// Discriminated union over `type` of every entry variant.
export const Entry = Schema.Union([User, Synthetic, Assistant, Compaction]).pipe(Schema.toTaggedUnion("type"))
export type Entry = Schema.Schema.Type<typeof Entry>
export type Type = Entry["type"]
/*
export interface Interface {
readonly decode: (row: typeof SessionEntryTable.$inferSelect) => Entry
readonly fromSession: (sessionID: SessionID) => Effect.Effect<Entry[], never>
}
export class Service extends Context.Service<Service, Interface>()("@opencode/SessionEntry") {}
export const layer: Layer.Layer<Service, never, never> = Layer.effect(
Service,
Effect.gen(function* () {
const decodeEntry = Schema.decodeUnknownSync(Entry)
const decode: (typeof Service.Service)["decode"] = (row) => decodeEntry({ ...row, id: row.id, type: row.type })
const fromSession = Effect.fn("SessionEntry.fromSession")(function* (sessionID: SessionID) {
return Database.use((db) =>
db
.select()
.from(SessionEntryTable)
.where(eq(SessionEntryTable.session_id, sessionID))
.orderBy(SessionEntryTable.id)
.all()
.map((row) => decode(row)),
)
})
return Service.of({
decode,
fromSession,
})
}),
)
*/
export * as SessionEntry from "./session-entry"

View File

@@ -1,128 +1,94 @@
import { Identifier } from "@/id/id"
import { NonNegativeInt, withStatics } from "@/util/schema"
import * as DateTime from "effect/DateTime"
import { SessionID } from "@/session/schema"
import { NonNegativeInt } from "@/util/schema"
import { Event } from "./event"
import { FileAttachment, Prompt } from "./session-prompt"
import { Schema } from "effect"
export { FileAttachment }
import { ToolOutput } from "./tool-output"
import { ModelID, ProviderID } from "@/provider/schema"
export namespace SessionEvent {
export const ID = Schema.String.pipe(
Schema.brand("Session.Event.ID"),
withStatics((s) => ({
create: () => s.make(Identifier.create("evt", "ascending")),
})),
)
export type ID = Schema.Schema.Type<typeof ID>
type Stamp = Schema.Schema.Type<typeof Schema.DateTimeUtc>
type BaseInput = {
id?: ID
metadata?: Record<string, unknown>
timestamp?: Stamp
}
export const Source = Schema.Struct({
start: NonNegativeInt,
end: NonNegativeInt,
text: Schema.String,
}).annotate({
identifier: "session.next.event.source",
})
export type Source = Schema.Schema.Type<typeof Source>
const Base = {
id: ID,
metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
timestamp: Schema.DateTimeUtc,
}
const Base = {
timestamp: Schema.DateTimeUtcFromMillis,
sessionID: SessionID,
}
export class Source extends Schema.Class<Source>("Session.Event.Source")({
start: NonNegativeInt,
end: NonNegativeInt,
text: Schema.String,
}) {}
export class FileAttachment extends Schema.Class<FileAttachment>("Session.Event.FileAttachment")({
uri: Schema.String,
mime: Schema.String,
name: Schema.String.pipe(Schema.optional),
description: Schema.String.pipe(Schema.optional),
source: Source.pipe(Schema.optional),
}) {
static create(input: FileAttachment) {
return new FileAttachment({
uri: input.uri,
mime: input.mime,
name: input.name,
description: input.description,
source: input.source,
})
}
}
export class AgentAttachment extends Schema.Class<AgentAttachment>("Session.Event.AgentAttachment")({
name: Schema.String,
source: Source.pipe(Schema.optional),
}) {}
export class RetryError extends Schema.Class<RetryError>("Session.Event.Retry.Error")({
message: Schema.String,
statusCode: NonNegativeInt.pipe(Schema.optional),
isRetryable: Schema.Boolean,
responseHeaders: Schema.Record(Schema.String, Schema.String).pipe(Schema.optional),
responseBody: Schema.String.pipe(Schema.optional),
metadata: Schema.Record(Schema.String, Schema.String).pipe(Schema.optional),
}) {}
export class Prompt extends Schema.Class<Prompt>("Session.Event.Prompt")({
export const AgentSwitched = Event.define({
type: "session.next.agent.switched",
aggregate: "sessionID",
version: 1,
schema: {
...Base,
type: Schema.Literal("prompt"),
text: Schema.String,
files: Schema.Array(FileAttachment).pipe(Schema.optional),
agents: Schema.Array(AgentAttachment).pipe(Schema.optional),
}) {
static create(input: BaseInput & { text: string; files?: FileAttachment[]; agents?: AgentAttachment[] }) {
return new Prompt({
id: input.id ?? ID.create(),
type: "prompt",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
text: input.text,
files: input.files,
agents: input.agents,
})
}
}
agent: Schema.String,
},
})
export type AgentSwitched = Schema.Schema.Type<typeof AgentSwitched>
export class Synthetic extends Schema.Class<Synthetic>("Session.Event.Synthetic")({
export const ModelSwitched = Event.define({
type: "session.next.model.switched",
aggregate: "sessionID",
version: 1,
schema: {
...Base,
type: Schema.Literal("synthetic"),
text: Schema.String,
}) {
static create(input: BaseInput & { text: string }) {
return new Synthetic({
id: input.id ?? ID.create(),
type: "synthetic",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
text: input.text,
})
}
}
id: ModelID,
providerID: ProviderID,
variant: Schema.String.pipe(Schema.optional),
},
})
export type ModelSwitched = Schema.Schema.Type<typeof ModelSwitched>
export namespace Step {
export class Started extends Schema.Class<Started>("Session.Event.Step.Started")({
export const Prompted = Event.define({
type: "session.next.prompted",
aggregate: "sessionID",
version: 1,
schema: {
...Base,
prompt: Prompt,
},
})
export type Prompted = Schema.Schema.Type<typeof Prompted>
export const Synthetic = Event.define({
type: "session.next.synthetic",
aggregate: "sessionID",
schema: {
...Base,
text: Schema.String,
},
})
export type Synthetic = Schema.Schema.Type<typeof Synthetic>
export namespace Step {
export const Started = Event.define({
type: "session.next.step.started",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("step.started"),
agent: Schema.String,
model: Schema.Struct({
id: Schema.String,
providerID: Schema.String,
variant: Schema.String.pipe(Schema.optional),
}),
}) {
static create(input: BaseInput & { model: { id: string; providerID: string; variant?: string } }) {
return new Started({
id: input.id ?? ID.create(),
type: "step.started",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
model: input.model,
})
}
}
snapshot: Schema.String.pipe(Schema.optional),
},
})
export type Started = Schema.Schema.Type<typeof Started>
export class Ended extends Schema.Class<Ended>("Session.Event.Step.Ended")({
export const Ended = Event.define({
type: "session.next.step.ended",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("step.ended"),
reason: Schema.String,
finish: Schema.String,
cost: Schema.Finite,
tokens: Schema.Struct({
input: NonNegativeInt,
@@ -133,177 +99,118 @@ export namespace SessionEvent {
write: NonNegativeInt,
}),
}),
}) {
static create(input: BaseInput & { reason: string; cost: number; tokens: Ended["tokens"] }) {
return new Ended({
id: input.id ?? ID.create(),
type: "step.ended",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
reason: input.reason,
cost: input.cost,
tokens: input.tokens,
})
}
}
}
snapshot: Schema.String.pipe(Schema.optional),
},
})
export type Ended = Schema.Schema.Type<typeof Ended>
}
export namespace Text {
export class Started extends Schema.Class<Started>("Session.Event.Text.Started")({
export namespace Text {
export const Started = Event.define({
type: "session.next.text.started",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("text.started"),
}) {
static create(input: BaseInput = {}) {
return new Started({
id: input.id ?? ID.create(),
type: "text.started",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
})
}
}
},
})
export type Started = Schema.Schema.Type<typeof Started>
export class Delta extends Schema.Class<Delta>("Session.Event.Text.Delta")({
export const Delta = Event.define({
type: "session.next.text.delta",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("text.delta"),
delta: Schema.String,
}) {
static create(input: BaseInput & { delta: string }) {
return new Delta({
id: input.id ?? ID.create(),
type: "text.delta",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
delta: input.delta,
})
}
}
},
})
export type Delta = Schema.Schema.Type<typeof Delta>
export class Ended extends Schema.Class<Ended>("Session.Event.Text.Ended")({
export const Ended = Event.define({
type: "session.next.text.ended",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("text.ended"),
text: Schema.String,
}) {
static create(input: BaseInput & { text: string }) {
return new Ended({
id: input.id ?? ID.create(),
type: "text.ended",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
text: input.text,
})
}
}
}
},
})
export type Ended = Schema.Schema.Type<typeof Ended>
}
export namespace Reasoning {
export class Started extends Schema.Class<Started>("Session.Event.Reasoning.Started")({
export namespace Reasoning {
export const Started = Event.define({
type: "session.next.reasoning.started",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("reasoning.started"),
}) {
static create(input: BaseInput = {}) {
return new Started({
id: input.id ?? ID.create(),
type: "reasoning.started",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
})
}
}
reasoningID: Schema.String,
},
})
export type Started = Schema.Schema.Type<typeof Started>
export class Delta extends Schema.Class<Delta>("Session.Event.Reasoning.Delta")({
export const Delta = Event.define({
type: "session.next.reasoning.delta",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("reasoning.delta"),
reasoningID: Schema.String,
delta: Schema.String,
}) {
static create(input: BaseInput & { delta: string }) {
return new Delta({
id: input.id ?? ID.create(),
type: "reasoning.delta",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
delta: input.delta,
})
}
}
},
})
export type Delta = Schema.Schema.Type<typeof Delta>
export class Ended extends Schema.Class<Ended>("Session.Event.Reasoning.Ended")({
export const Ended = Event.define({
type: "session.next.reasoning.ended",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("reasoning.ended"),
reasoningID: Schema.String,
text: Schema.String,
}) {
static create(input: BaseInput & { text: string }) {
return new Ended({
id: input.id ?? ID.create(),
type: "reasoning.ended",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
text: input.text,
})
}
}
}
},
})
export type Ended = Schema.Schema.Type<typeof Ended>
}
export namespace Tool {
export namespace Input {
export class Started extends Schema.Class<Started>("Session.Event.Tool.Input.Started")({
export namespace Tool {
export namespace Input {
export const Started = Event.define({
type: "session.next.tool.input.started",
aggregate: "sessionID",
schema: {
...Base,
callID: Schema.String,
name: Schema.String,
type: Schema.Literal("tool.input.started"),
}) {
static create(input: BaseInput & { callID: string; name: string }) {
return new Started({
id: input.id ?? ID.create(),
type: "tool.input.started",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
callID: input.callID,
name: input.name,
})
}
}
},
})
export type Started = Schema.Schema.Type<typeof Started>
export class Delta extends Schema.Class<Delta>("Session.Event.Tool.Input.Delta")({
export const Delta = Event.define({
type: "session.next.tool.input.delta",
aggregate: "sessionID",
schema: {
...Base,
callID: Schema.String,
type: Schema.Literal("tool.input.delta"),
delta: Schema.String,
}) {
static create(input: BaseInput & { callID: string; delta: string }) {
return new Delta({
id: input.id ?? ID.create(),
type: "tool.input.delta",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
callID: input.callID,
delta: input.delta,
})
}
}
},
})
export type Delta = Schema.Schema.Type<typeof Delta>
export class Ended extends Schema.Class<Ended>("Session.Event.Tool.Input.Ended")({
export const Ended = Event.define({
type: "session.next.tool.input.ended",
aggregate: "sessionID",
schema: {
...Base,
callID: Schema.String,
type: Schema.Literal("tool.input.ended"),
text: Schema.String,
}) {
static create(input: BaseInput & { callID: string; text: string }) {
return new Ended({
id: input.id ?? ID.create(),
type: "tool.input.ended",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
callID: input.callID,
text: input.text,
})
}
}
}
},
})
export type Ended = Schema.Schema.Type<typeof Ended>
}
export class Called extends Schema.Class<Called>("Session.Event.Tool.Called")({
export const Called = Event.define({
type: "session.next.tool.called",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("tool.called"),
callID: Schema.String,
tool: Schema.String,
input: Schema.Record(Schema.String, Schema.Unknown),
@@ -311,148 +218,153 @@ export namespace SessionEvent {
executed: Schema.Boolean,
metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
}),
}) {
static create(
input: BaseInput & {
callID: string
tool: string
input: Record<string, unknown>
provider: Called["provider"]
},
) {
return new Called({
id: input.id ?? ID.create(),
type: "tool.called",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
callID: input.callID,
tool: input.tool,
input: input.input,
provider: input.provider,
})
}
}
},
})
export type Called = Schema.Schema.Type<typeof Called>
export class Success extends Schema.Class<Success>("Session.Event.Tool.Success")({
export const Progress = Event.define({
type: "session.next.tool.progress",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("tool.success"),
callID: Schema.String,
title: Schema.String,
output: Schema.String.pipe(Schema.optional),
attachments: Schema.Array(FileAttachment).pipe(Schema.optional),
structured: ToolOutput.Structured,
content: Schema.Array(ToolOutput.Content),
},
})
export type Progress = Schema.Schema.Type<typeof Progress>
export const Success = Event.define({
type: "session.next.tool.success",
aggregate: "sessionID",
schema: {
...Base,
callID: Schema.String,
structured: ToolOutput.Structured,
content: Schema.Array(ToolOutput.Content),
provider: Schema.Struct({
executed: Schema.Boolean,
metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
}),
}) {
static create(
input: BaseInput & {
callID: string
title: string
output?: string
attachments?: FileAttachment[]
provider: Success["provider"]
},
) {
return new Success({
id: input.id ?? ID.create(),
type: "tool.success",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
callID: input.callID,
title: input.title,
output: input.output,
attachments: input.attachments,
provider: input.provider,
})
}
}
},
})
export type Success = Schema.Schema.Type<typeof Success>
export class Error extends Schema.Class<Error>("Session.Event.Tool.Error")({
export const Error = Event.define({
type: "session.next.tool.error",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("tool.error"),
callID: Schema.String,
error: Schema.String,
error: Schema.Struct({
type: Schema.String,
message: Schema.String,
}),
provider: Schema.Struct({
executed: Schema.Boolean,
metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
}),
}) {
static create(input: BaseInput & { callID: string; error: string; provider: Error["provider"] }) {
return new Error({
id: input.id ?? ID.create(),
type: "tool.error",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
callID: input.callID,
error: input.error,
provider: input.provider,
})
}
}
}
},
})
export type Error = Schema.Schema.Type<typeof Error>
}
export class Retried extends Schema.Class<Retried>("Session.Event.Retried")({
export const RetryError = Schema.Struct({
message: Schema.String,
statusCode: NonNegativeInt.pipe(Schema.optional),
isRetryable: Schema.Boolean,
responseHeaders: Schema.Record(Schema.String, Schema.String).pipe(Schema.optional),
responseBody: Schema.String.pipe(Schema.optional),
metadata: Schema.Record(Schema.String, Schema.String).pipe(Schema.optional),
}).annotate({
identifier: "session.next.retry_error",
})
export type RetryError = Schema.Schema.Type<typeof RetryError>
export const Retried = Event.define({
type: "session.next.retried",
aggregate: "sessionID",
schema: {
...Base,
type: Schema.Literal("retried"),
attempt: NonNegativeInt,
error: RetryError,
}) {
static create(input: BaseInput & { attempt: number; error: RetryError }) {
return new Retried({
id: input.id ?? ID.create(),
type: "retried",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
attempt: input.attempt,
error: input.error,
})
}
}
},
})
export type Retried = Schema.Schema.Type<typeof Retried>
export class Compacted extends Schema.Class<Compacted>("Session.Event.Compated")({
...Base,
type: Schema.Literal("compacted"),
auto: Schema.Boolean,
overflow: Schema.Boolean.pipe(Schema.optional),
}) {
static create(input: BaseInput & { auto: boolean; overflow?: boolean }) {
return new Compacted({
id: input.id ?? ID.create(),
type: "compacted",
timestamp: input.timestamp ?? DateTime.makeUnsafe(Date.now()),
metadata: input.metadata,
auto: input.auto,
overflow: input.overflow,
})
}
}
export const Event = Schema.Union(
[
Prompt,
Synthetic,
Step.Started,
Step.Ended,
Text.Started,
Text.Delta,
Text.Ended,
Tool.Input.Started,
Tool.Input.Delta,
Tool.Input.Ended,
Tool.Called,
Tool.Success,
Tool.Error,
Reasoning.Started,
Reasoning.Delta,
Reasoning.Ended,
Retried,
Compacted,
],
{
mode: "oneOf",
export namespace Compaction {
export const Started = Event.define({
type: "session.next.compaction.started",
aggregate: "sessionID",
schema: {
...Base,
reason: Schema.Union([Schema.Literal("auto"), Schema.Literal("manual")]),
},
).pipe(Schema.toTaggedUnion("type"))
export type Event = Schema.Schema.Type<typeof Event>
export type Type = Event["type"]
})
export type Started = Schema.Schema.Type<typeof Started>
export const Delta = Event.define({
type: "session.next.compaction.delta",
aggregate: "sessionID",
schema: {
...Base,
text: Schema.String,
},
})
export const Ended = Event.define({
type: "session.next.compaction.ended",
aggregate: "sessionID",
schema: {
...Base,
text: Schema.String,
include: Schema.String.pipe(Schema.optional),
},
})
export type Ended = Schema.Schema.Type<typeof Ended>
}
export const All = Schema.Union(
[
AgentSwitched,
ModelSwitched,
Prompted,
Synthetic,
Step.Started,
Step.Ended,
Text.Started,
Text.Delta,
Text.Ended,
Tool.Input.Started,
Tool.Input.Delta,
Tool.Input.Ended,
Tool.Called,
Tool.Progress,
Tool.Success,
Tool.Error,
Reasoning.Started,
Reasoning.Delta,
Reasoning.Ended,
Retried,
Compaction.Started,
Compaction.Delta,
Compaction.Ended,
],
{
mode: "oneOf",
},
).pipe(Schema.toTaggedUnion("type"))
// user
// assistant
// assistant
// assistant
// user
// compaction marker
// -> text
// assistant
export type Event = Schema.Schema.Type<typeof All>
export type Type = Event["type"]
export * as SessionEvent from "./session-event"

View File

@@ -0,0 +1,313 @@
import { produce, type WritableDraft } from "immer"
import { SessionEvent } from "./session-event"
import { SessionMessage } from "./session-message"
// In-memory projection of a session as a flat message list.
export type MemoryState = {
  messages: SessionMessage.Message[]
}
/**
 * Sink used by `update` to apply a session event to some backing store.
 * `Result` is whatever `finish()` yields after the event is applied.
 */
export interface Adapter<Result> {
  // Most recent assistant message without a completion timestamp, if any.
  readonly getCurrentAssistant: () => SessionMessage.Assistant | undefined
  // Most recent compaction message, if any.
  readonly getCurrentCompaction: () => SessionMessage.Compaction | undefined
  // Replace the current (incomplete) assistant message with an updated copy.
  readonly updateAssistant: (assistant: SessionMessage.Assistant) => void
  // Replace the current compaction message with an updated copy.
  readonly updateCompaction: (compaction: SessionMessage.Compaction) => void
  // Append a new message to the list.
  readonly appendMessage: (message: SessionMessage.Message) => void
  // Finalize and return the adapter's result.
  readonly finish: () => Result
}
/**
 * Adapter over a plain `MemoryState`: mutates `state` in place and returns
 * it from `finish()`.
 */
export function memory(state: MemoryState): Adapter<MemoryState> {
  // Index of the newest assistant message still streaming (no completion
  // timestamp), or -1 when none.
  const openAssistantAt = () =>
    state.messages.findLastIndex((candidate) => candidate.type === "assistant" && !candidate.time.completed)
  // Index of the newest compaction marker, or -1 when none.
  const lastCompactionAt = () => state.messages.findLastIndex((candidate) => candidate.type === "compaction")
  // messages[index], or undefined when the index is a miss.
  const at = (index: number) => (index < 0 ? undefined : state.messages[index])
  return {
    getCurrentAssistant() {
      const found = at(openAssistantAt())
      return found?.type === "assistant" ? found : undefined
    },
    getCurrentCompaction() {
      const found = at(lastCompactionAt())
      return found?.type === "compaction" ? found : undefined
    },
    updateAssistant(next) {
      // Re-resolve the slot so a stale caller cannot clobber another message.
      const index = openAssistantAt()
      if (at(index)?.type !== "assistant") return
      state.messages[index] = next
    },
    updateCompaction(next) {
      const index = lastCompactionAt()
      if (at(index)?.type !== "compaction") return
      state.messages[index] = next
    },
    appendMessage(message) {
      state.messages.push(message)
    },
    finish: () => state,
  }
}
/**
 * Reduce a single session event into the message stream behind `adapter`,
 * then return whatever `adapter.finish()` yields.
 *
 * Streaming events (text/tool/reasoning deltas) mutate the in-flight
 * assistant message via immer `produce`; lifecycle events append new
 * messages. Events that arrive with no open assistant are dropped silently.
 */
export function update<Result>(adapter: Adapter<Result>, event: SessionEvent.Event): Result {
  // Snapshot the in-flight assistant once; every handler below derives its
  // draft from this same value.
  const currentAssistant = adapter.getCurrentAssistant()
  type DraftAssistant = WritableDraft<SessionMessage.Assistant>
  type DraftTool = WritableDraft<SessionMessage.AssistantTool>
  type DraftText = WritableDraft<SessionMessage.AssistantText>
  type DraftReasoning = WritableDraft<SessionMessage.AssistantReasoning>
  // Newest tool part; when callID is given, only a part with that call ID matches.
  const latestTool = (assistant: DraftAssistant | undefined, callID?: string) =>
    assistant?.content.findLast(
      (item): item is DraftTool => item.type === "tool" && (callID === undefined || item.id === callID),
    )
  // Newest text part of the assistant's content.
  const latestText = (assistant: DraftAssistant | undefined) =>
    assistant?.content.findLast((item): item is DraftText => item.type === "text")
  // Newest reasoning part carrying the given reasoning ID.
  const latestReasoning = (assistant: DraftAssistant | undefined, reasoningID: string) =>
    assistant?.content.findLast(
      (item): item is DraftReasoning => item.type === "reasoning" && item.id === reasoningID,
    )
  SessionEvent.All.match(event, {
    // --- lifecycle events: each appends a new message ---
    "session.next.agent.switched": (event) => {
      adapter.appendMessage(SessionMessage.AgentSwitched.fromEvent(event))
    },
    "session.next.model.switched": (event) => {
      adapter.appendMessage(SessionMessage.ModelSwitched.fromEvent(event))
    },
    "session.next.prompted": (event) => {
      adapter.appendMessage(SessionMessage.User.fromEvent(event))
    },
    "session.next.synthetic": (event) => {
      adapter.appendMessage(SessionMessage.Synthetic.fromEvent(event))
    },
    "session.next.step.started": (event) => {
      // Close out any assistant message still open before starting a new one.
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            draft.time.completed = event.data.timestamp
          }),
        )
      }
      adapter.appendMessage(SessionMessage.Assistant.fromEvent(event))
    },
    "session.next.step.ended": (event) => {
      // Finalize the open assistant with cost/token accounting and the end snapshot.
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            draft.time.completed = event.data.timestamp
            draft.finish = event.data.finish
            draft.cost = event.data.cost
            draft.tokens = event.data.tokens
            if (event.data.snapshot) draft.snapshot = { ...draft.snapshot, end: event.data.snapshot }
          }),
        )
      }
    },
    // --- text streaming: push an empty part, accumulate deltas, then overwrite with the final text ---
    "session.next.text.started": () => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            draft.content.push({
              type: "text",
              text: "",
            })
          }),
        )
      }
    },
    "session.next.text.delta": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestText(draft)
            if (match) match.text += event.data.delta
          }),
        )
      }
    },
    "session.next.text.ended": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestText(draft)
            // The ended event carries the authoritative full text.
            if (match) match.text = event.data.text
          }),
        )
      }
    },
    // --- tool streaming: pending (raw input string) -> running (parsed input) -> completed/error ---
    "session.next.tool.input.started": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            draft.content.push({
              type: "tool",
              id: event.data.callID,
              name: event.data.name,
              time: {
                created: event.data.timestamp,
              },
              state: {
                status: "pending",
                input: "",
              },
            })
          }),
        )
      }
    },
    "session.next.tool.input.delta": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestTool(draft, event.data.callID)
            // oxlint-disable-next-line no-base-to-string -- event.delta is a Schema.String (runtime string)
            if (match && match.state.status === "pending") match.state.input += event.data.delta
          }),
        )
      }
    },
    "session.next.tool.input.ended": () => {},
    "session.next.tool.called": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestTool(draft, event.data.callID)
            if (match) {
              match.provider = event.data.provider
              match.time.ran = event.data.timestamp
              // Transition pending -> running; input is now the parsed object.
              match.state = {
                status: "running",
                input: event.data.input,
                structured: {},
                content: [],
              }
            }
          }),
        )
      }
    },
    "session.next.tool.progress": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestTool(draft, event.data.callID)
            if (match && match.state.status === "running") {
              match.state.structured = event.data.structured
              match.state.content = [...event.data.content]
            }
          }),
        )
      }
    },
    "session.next.tool.success": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestTool(draft, event.data.callID)
            if (match && match.state.status === "running") {
              match.provider = event.data.provider
              match.time.completed = event.data.timestamp
              match.state = {
                status: "completed",
                input: match.state.input,
                structured: event.data.structured,
                content: [...event.data.content],
              }
            }
          }),
        )
      }
    },
    "session.next.tool.error": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestTool(draft, event.data.callID)
            if (match && match.state.status === "running") {
              match.provider = event.data.provider
              match.time.completed = event.data.timestamp
              // Preserve whatever partial output the tool produced before failing.
              match.state = {
                status: "error",
                error: event.data.error,
                input: match.state.input,
                structured: match.state.structured,
                content: match.state.content,
              }
            }
          }),
        )
      }
    },
    // --- reasoning streaming: mirrors text streaming, keyed by reasoningID ---
    "session.next.reasoning.started": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            draft.content.push({
              type: "reasoning",
              id: event.data.reasoningID,
              text: "",
            })
          }),
        )
      }
    },
    "session.next.reasoning.delta": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestReasoning(draft, event.data.reasoningID)
            if (match) match.text += event.data.delta
          }),
        )
      }
    },
    "session.next.reasoning.ended": (event) => {
      if (currentAssistant) {
        adapter.updateAssistant(
          produce(currentAssistant, (draft) => {
            const match = latestReasoning(draft, event.data.reasoningID)
            if (match) match.text = event.data.text
          }),
        )
      }
    },
    "session.next.retried": () => {},
    // --- compaction: started appends; delta/ended mutate the newest compaction message ---
    "session.next.compaction.started": (event) => {
      adapter.appendMessage(SessionMessage.Compaction.fromEvent(event))
    },
    "session.next.compaction.delta": (event) => {
      const currentCompaction = adapter.getCurrentCompaction()
      if (currentCompaction) {
        adapter.updateCompaction(
          produce(currentCompaction, (draft) => {
            draft.summary += event.data.text
          }),
        )
      }
    },
    "session.next.compaction.ended": (event) => {
      const currentCompaction = adapter.getCurrentCompaction()
      if (currentCompaction) {
        adapter.updateCompaction(
          produce(currentCompaction, (draft) => {
            draft.summary = event.data.text
            draft.include = event.data.include
          }),
        )
      }
    },
  })
  return adapter.finish()
}
export * as SessionMessageUpdater from "./session-message-updater"

View File

@@ -0,0 +1,236 @@
import { Schema } from "effect"
import { Prompt } from "./session-prompt"
import { SessionEvent } from "./session-event"
import { Event } from "./event"
import { ToolOutput } from "./tool-output"
// Message IDs reuse the event ID space so a message can always be traced
// back to the event that created it.
export const ID = Event.ID
export type ID = Schema.Schema.Type<typeof ID>
// Fields shared by every session message variant (spread into each class).
const Base = {
  id: ID,
  // Optional free-form metadata propagated from the originating event.
  metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
  time: Schema.Struct({
    created: Schema.DateTimeUtcFromMillis,
  }),
}
/** Marker message recording that the session switched to a different agent. */
export class AgentSwitched extends Schema.Class<AgentSwitched>("Session.Message.AgentSwitched")({
  ...Base,
  type: Schema.Literal("agent-switched"),
  // Reuse the event's field schema so message and event cannot drift apart.
  agent: SessionEvent.AgentSwitched.fields.data.fields.agent,
}) {
  /** Project an agent-switch event into its message form. */
  static fromEvent(event: SessionEvent.AgentSwitched) {
    return new AgentSwitched({
      id: event.id,
      type: "agent-switched",
      metadata: event.metadata,
      agent: event.data.agent,
      time: { created: event.data.timestamp },
    })
  }
}
/** Marker message recording that the session switched to a different model. */
export class ModelSwitched extends Schema.Class<ModelSwitched>("Session.Message.ModelSwitched")({
  ...Base,
  type: Schema.Literal("model-switched"),
  model: Schema.Struct({
    id: SessionEvent.ModelSwitched.fields.data.fields.id,
    providerID: SessionEvent.ModelSwitched.fields.data.fields.providerID,
    variant: SessionEvent.ModelSwitched.fields.data.fields.variant,
  }),
}) {
  /** Project a model-switch event into its message form. */
  static fromEvent(event: SessionEvent.ModelSwitched) {
    const { id, providerID, variant, timestamp } = event.data
    return new ModelSwitched({
      id: event.id,
      type: "model-switched",
      metadata: event.metadata,
      model: { id, providerID, variant },
      time: { created: timestamp },
    })
  }
}
/** A user prompt rendered as a session message. */
export class User extends Schema.Class<User>("Session.Message.User")({
  ...Base,
  text: Prompt.fields.text,
  files: Prompt.fields.files,
  agents: Prompt.fields.agents,
  type: Schema.Literal("user"),
  // fix: dropped a `time` field that byte-for-byte duplicated the one already
  // contributed by the ...Base spread; the resulting schema is unchanged.
}) {
  /** Project a prompt event into the user message it created. */
  static fromEvent(event: SessionEvent.Prompted) {
    return new User({
      id: event.id,
      type: "user",
      metadata: event.metadata,
      text: event.data.prompt.text,
      files: event.data.prompt.files,
      agents: event.data.prompt.agents,
      time: { created: event.data.timestamp },
    })
  }
}
/** A synthetic (system-injected) message attached to a session. */
export class Synthetic extends Schema.Class<Synthetic>("Session.Message.Synthetic")({
  ...Base,
  sessionID: SessionEvent.Synthetic.fields.data.fields.sessionID,
  text: SessionEvent.Synthetic.fields.data.fields.text,
  type: Schema.Literal("synthetic"),
}) {
  /** Project a synthetic event into its message form. */
  static fromEvent(event: SessionEvent.Synthetic) {
    return new Synthetic({
      sessionID: event.data.sessionID,
      text: event.data.text,
      id: event.id,
      type: "synthetic",
      // fix: metadata was dropped here, unlike every sibling fromEvent
      // projection (AgentSwitched, ModelSwitched, User, Compaction).
      metadata: event.metadata,
      time: { created: event.data.timestamp },
    })
  }
}
// Tool call still streaming its arguments; `input` is the raw JSON text so far.
export class ToolStatePending extends Schema.Class<ToolStatePending>("Session.Message.ToolState.Pending")({
  status: Schema.Literal("pending"),
  input: Schema.String,
}) {}
// Tool call executing; `input` is now the parsed argument object.
export class ToolStateRunning extends Schema.Class<ToolStateRunning>("Session.Message.ToolState.Running")({
  status: Schema.Literal("running"),
  input: Schema.Record(Schema.String, Schema.Unknown),
  structured: ToolOutput.Structured,
  content: ToolOutput.Content.pipe(Schema.Array),
}) {}
// Tool call finished successfully with its final structured/content output.
export class ToolStateCompleted extends Schema.Class<ToolStateCompleted>("Session.Message.ToolState.Completed")({
  status: Schema.Literal("completed"),
  input: Schema.Record(Schema.String, Schema.Unknown),
  attachments: SessionEvent.FileAttachment.pipe(Schema.Array, Schema.optional),
  content: ToolOutput.Content.pipe(Schema.Array),
  structured: ToolOutput.Structured,
}) {}
// Tool call failed; keeps any partial output alongside the error details.
export class ToolStateError extends Schema.Class<ToolStateError>("Session.Message.ToolState.Error")({
  status: Schema.Literal("error"),
  input: Schema.Record(Schema.String, Schema.Unknown),
  content: ToolOutput.Content.pipe(Schema.Array),
  structured: ToolOutput.Structured,
  error: Schema.Struct({
    type: Schema.String,
    message: Schema.String,
  }),
}) {}
// Lifecycle union, discriminated on `status`: pending -> running -> completed | error.
export const ToolState = Schema.Union([ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError]).pipe(
  Schema.toTaggedUnion("status"),
)
export type ToolState = Schema.Schema.Type<typeof ToolState>
// One tool invocation inside an assistant message, including its lifecycle state.
export class AssistantTool extends Schema.Class<AssistantTool>("Session.Message.Assistant.Tool")({
  type: Schema.Literal("tool"),
  // Provider-side call ID; used to match streaming tool events to this part.
  id: Schema.String,
  name: Schema.String,
  // Present when the model provider executed the tool itself.
  provider: Schema.Struct({
    executed: Schema.Boolean,
    metadata: Schema.Record(Schema.String, Schema.Unknown).pipe(Schema.optional),
  }).pipe(Schema.optional),
  state: ToolState,
  time: Schema.Struct({
    created: Schema.DateTimeUtcFromMillis,
    ran: Schema.DateTimeUtcFromMillis.pipe(Schema.optional),
    completed: Schema.DateTimeUtcFromMillis.pipe(Schema.optional),
    pruned: Schema.DateTimeUtcFromMillis.pipe(Schema.optional),
  }),
}) {}
// Plain text chunk of assistant output.
export class AssistantText extends Schema.Class<AssistantText>("Session.Message.Assistant.Text")({
  type: Schema.Literal("text"),
  text: Schema.String,
}) {}
// Reasoning chunk; `id` matches the reasoningID on the streaming events.
export class AssistantReasoning extends Schema.Class<AssistantReasoning>("Session.Message.Assistant.Reasoning")({
  type: Schema.Literal("reasoning"),
  id: Schema.String,
  text: Schema.String,
}) {}
// Any part an assistant message's content array may hold, discriminated on `type`.
export const AssistantContent = Schema.Union([AssistantText, AssistantReasoning, AssistantTool]).pipe(
  Schema.toTaggedUnion("type"),
)
export type AssistantContent = Schema.Schema.Type<typeof AssistantContent>
/**
 * One assistant turn: agent/model attribution, streamed content parts,
 * optional usage accounting, and start/end snapshots.
 */
export class Assistant extends Schema.Class<Assistant>("Session.Message.Assistant")({
  ...Base,
  type: Schema.Literal("assistant"),
  agent: Schema.String,
  model: SessionEvent.Step.Started.fields.data.fields.model,
  content: AssistantContent.pipe(Schema.Array),
  snapshot: Schema.Struct({
    start: Schema.String.pipe(Schema.optional),
    end: Schema.String.pipe(Schema.optional),
  }).pipe(Schema.optional),
  finish: Schema.String.pipe(Schema.optional),
  cost: Schema.Number.pipe(Schema.optional),
  tokens: Schema.Struct({
    input: Schema.Number,
    output: Schema.Number,
    reasoning: Schema.Number,
    cache: Schema.Struct({
      read: Schema.Number,
      write: Schema.Number,
    }),
  }).pipe(Schema.optional),
  error: Schema.String.pipe(Schema.optional),
  // Deliberately overrides Base's `time` to add the optional completion stamp.
  time: Schema.Struct({
    created: Schema.DateTimeUtcFromMillis,
    completed: Schema.DateTimeUtcFromMillis.pipe(Schema.optional),
  }),
}) {
  /**
   * Seed an assistant message from a step-started event. Content starts
   * empty and is filled in by subsequent text/tool/reasoning events.
   */
  static fromEvent(event: SessionEvent.Step.Started) {
    return new Assistant({
      id: event.id,
      type: "assistant",
      // fix: carry event metadata through, matching the other fromEvent projections
      metadata: event.metadata,
      agent: event.data.agent,
      model: event.data.model,
      time: {
        created: event.data.timestamp,
      },
      content: [],
      snapshot: event.data.snapshot ? { start: event.data.snapshot } : undefined,
    })
  }
}
/** A compaction message: the streamed summary that replaces earlier history. */
export class Compaction extends Schema.Class<Compaction>("Session.Message.Compaction")({
  type: Schema.Literal("compaction"),
  reason: SessionEvent.Compaction.Started.fields.data.fields.reason,
  // Accumulates from "" via compaction delta events; finalized on ended.
  summary: Schema.String,
  include: Schema.String.pipe(Schema.optional),
  ...Base,
}) {
  /** Start an empty compaction message from a compaction-started event. */
  static fromEvent(event: SessionEvent.Compaction.Started) {
    return new Compaction({
      id: event.id,
      type: "compaction",
      metadata: event.metadata,
      reason: event.data.reason,
      summary: "",
      time: { created: event.data.timestamp },
    })
  }
}
// Every message variant a session can contain, discriminated on `type`.
export const Message = Schema.Union([AgentSwitched, ModelSwitched, User, Synthetic, Assistant, Compaction])
  .pipe(Schema.toTaggedUnion("type"))
  .annotate({ identifier: "Session.Message" })
export type Message = Schema.Schema.Type<typeof Message>
// The discriminant values, e.g. "user" | "assistant" | ...
export type Type = Message["type"]
export * as SessionMessage from "./session-message"

View File

@@ -0,0 +1,36 @@
import * as Schema from "effect/Schema"
// Character range within the prompt text that an attachment was mentioned at.
export class Source extends Schema.Class<Source>("Prompt.Source")({
  start: Schema.Number,
  end: Schema.Number,
  text: Schema.String,
}) {}
// A file referenced by the prompt (URI + mime, optional name/description).
export class FileAttachment extends Schema.Class<FileAttachment>("Prompt.FileAttachment")({
  uri: Schema.String,
  mime: Schema.String,
  name: Schema.String.pipe(Schema.optional),
  description: Schema.String.pipe(Schema.optional),
  source: Source.pipe(Schema.optional),
}) {
  /** Copy-construct a FileAttachment from a structurally compatible value. */
  static create(input: FileAttachment) {
    return new FileAttachment({
      uri: input.uri,
      mime: input.mime,
      name: input.name,
      description: input.description,
      source: input.source,
    })
  }
}
// An agent @-mentioned in the prompt.
export class AgentAttachment extends Schema.Class<AgentAttachment>("Prompt.AgentAttachment")({
  name: Schema.String,
  source: Source.pipe(Schema.optional),
}) {}
// The full user prompt: text plus any file and agent attachments.
export class Prompt extends Schema.Class<Prompt>("Prompt")({
  text: Schema.String,
  files: Schema.Array(FileAttachment).pipe(Schema.optional),
  agents: Schema.Array(AgentAttachment).pipe(Schema.optional),
}) {}

View File

@@ -1,69 +1,242 @@
import { Context, Layer, Schema, Effect } from "effect"
import { SessionEntry } from "./session-entry"
import { Struct } from "effect"
import { Session } from "@/session/session"
import { SessionMessageTable, SessionTable } from "@/session/session.sql"
import { SessionID } from "@/session/schema"
import { WorkspaceID } from "@/control-plane/schema"
import { and, asc, desc, eq, gt, gte, isNull, like, lt, or, type SQL } from "@/storage/db"
import * as Database from "@/storage/db"
import { Context, DateTime, Effect, Layer, Schema } from "effect"
import { SessionMessage } from "./session-message"
import type { Prompt } from "./session-prompt"
import type { Event } from "./event"
import { ProjectID } from "@/project/schema"
import { ModelID, ProviderID } from "@/provider/schema"
import { SessionEvent } from "./session-event"
import { SyncEvent } from "@/sync"
export const ID = SessionID
export const Delivery = Schema.Union([Schema.Literal("immediate"), Schema.Literal("deferred")]).annotate({
identifier: "Session.Delivery",
})
export type Delivery = Schema.Schema.Type<typeof Delivery>
export type ID = Schema.Schema.Type<typeof ID>
export class PromptInput extends Schema.Class<PromptInput>("Session.PromptInput")({
...Struct.omit(SessionEntry.User.fields, ["time", "type"]),
id: Schema.optionalKey(SessionEntry.ID),
sessionID: ID,
}) {}
export class CreateInput extends Schema.Class<CreateInput>("Session.CreateInput")({
id: Schema.optionalKey(ID),
}) {}
export const DefaultDelivery = "immediate" satisfies Delivery
export class Info extends Schema.Class<Info>("Session.Info")({
id: ID,
id: SessionID,
parentID: SessionID.pipe(Schema.optional),
projectID: ProjectID,
workspaceID: WorkspaceID.pipe(Schema.optional),
path: Schema.String.pipe(Schema.optional),
agent: Schema.String.pipe(Schema.optional),
model: Schema.Struct({
id: Schema.String,
providerID: Schema.String,
modelID: Schema.String,
id: ModelID,
providerID: ProviderID,
variant: Schema.String.pipe(Schema.optional),
}).pipe(Schema.optional),
time: Schema.Struct({
created: Schema.DateTimeUtcFromMillis,
updated: Schema.DateTimeUtcFromMillis,
archived: Schema.DateTimeUtcFromMillis.pipe(Schema.optional),
}),
title: Schema.String,
/*
slug: Schema.String,
directory: Schema.String,
path: optionalOmitUndefined(Schema.String),
parentID: optionalOmitUndefined(SessionID),
summary: optionalOmitUndefined(Summary),
share: optionalOmitUndefined(Share),
title: Schema.String,
version: Schema.String,
time: Time,
permission: optionalOmitUndefined(Permission.Ruleset),
revert: optionalOmitUndefined(Revert),
*/
}) {}
export interface Interface {
fromID: (id: ID) => Effect.Effect<Info>
create: (input: CreateInput) => Effect.Effect<Info>
prompt: (input: PromptInput) => Effect.Effect<SessionEntry.User>
readonly list: (input: {
limit?: number
order?: "asc" | "desc"
directory?: string
path?: string
workspaceID?: WorkspaceID
roots?: boolean
start?: number
search?: string
cursor?: {
id: SessionID
time: number
direction: "previous" | "next"
}
}) => Effect.Effect<Info[], never>
readonly messages: (input: {
sessionID: SessionID
limit?: number
order?: "asc" | "desc"
cursor?: {
id: SessionMessage.ID
time: number
direction: "previous" | "next"
}
}) => Effect.Effect<SessionMessage.Message[], never>
readonly prompt: (input: {
id?: Event.ID
sessionID: SessionID
prompt: Prompt
delivery?: Delivery
}) => Effect.Effect<SessionMessage.User, never>
readonly switchAgent: (input: { sessionID: SessionID; agent: string }) => Effect.Effect<void, never>
readonly switchModel: (input: {
sessionID: SessionID
id: ModelID
providerID: ProviderID
variant?: string
}) => Effect.Effect<void, never>
readonly compact: (sessionID: SessionID) => Effect.Effect<void, never>
readonly wait: (sessionID: SessionID) => Effect.Effect<void, never>
}
export class Service extends Context.Service<Service, Interface>()("Session.Service") {}
export class Service extends Context.Service<Service, Interface>()("@opencode/v2/Session") {}
export const layer = Layer.effect(Service)(
export const layer = Layer.effect(
Service,
Effect.gen(function* () {
const session = yield* Session.Service
const decodeMessage = Schema.decodeUnknownSync(SessionMessage.Message)
const create: Interface["create"] = Effect.fn("Session.create")(function* (_input) {
throw new Error("Not implemented")
})
const decode = (row: typeof SessionMessageTable.$inferSelect) =>
decodeMessage({ ...row.data, id: row.id, type: row.type })
const prompt: Interface["prompt"] = Effect.fn("Session.prompt")(function* (_input) {
throw new Error("Not implemented")
})
function fromRow(row: typeof SessionTable.$inferSelect): Info {
return {
id: SessionID.make(row.id),
projectID: ProjectID.make(row.project_id),
workspaceID: row.workspace_id ? WorkspaceID.make(row.workspace_id) : undefined,
title: row.title,
parentID: row.parent_id ? SessionID.make(row.parent_id) : undefined,
path: row.path ?? "",
agent: row.agent ?? undefined,
model: row.model
? {
id: ModelID.make(row.model.id),
providerID: ProviderID.make(row.model.providerID),
variant: row.model.variant,
}
: undefined,
time: {
created: DateTime.makeUnsafe(row.time_created),
updated: DateTime.makeUnsafe(row.time_updated),
archived: row.time_archived ? DateTime.makeUnsafe(row.time_archived) : undefined,
},
}
}
const fromID: Interface["fromID"] = Effect.fn("Session.fromID")(function* (id) {
const match = yield* session.get(id)
return fromV1(match)
})
const result: Interface = {
list: Effect.fn("V2Session.list")(function* (input) {
const direction = input.cursor?.direction ?? "next"
let order = input.order ?? "desc"
// Query the adjacent rows in reverse, then flip them back into the requested order below.
if (direction === "previous" && order === "asc") order = "desc"
if (direction === "previous" && order === "desc") order = "asc"
const conditions: SQL[] = []
if (input.directory) conditions.push(eq(SessionTable.directory, input.directory))
if (input.path)
conditions.push(or(eq(SessionTable.path, input.path), like(SessionTable.path, `${input.path}/%`))!)
if (input.workspaceID) conditions.push(eq(SessionTable.workspace_id, input.workspaceID))
if (input.roots) conditions.push(isNull(SessionTable.parent_id))
if (input.start) conditions.push(gte(SessionTable.time_created, input.start))
if (input.search) conditions.push(like(SessionTable.title, `%${input.search}%`))
if (input.cursor) {
conditions.push(
order === "asc"
? or(
gt(SessionTable.time_created, input.cursor.time),
and(eq(SessionTable.time_created, input.cursor.time), gt(SessionTable.id, input.cursor.id)),
)!
: or(
lt(SessionTable.time_created, input.cursor.time),
and(eq(SessionTable.time_created, input.cursor.time), lt(SessionTable.id, input.cursor.id)),
)!,
)
}
const query = Database.Client()
.select()
.from(SessionTable)
.where(conditions.length > 0 ? and(...conditions) : undefined)
.orderBy(
order === "asc" ? asc(SessionTable.time_created) : desc(SessionTable.time_created),
order === "asc" ? asc(SessionTable.id) : desc(SessionTable.id),
)
return Service.of({
create,
prompt,
fromID,
})
const rows = input.limit === undefined ? query.all() : query.limit(input.limit).all()
return (direction === "previous" ? rows.toReversed() : rows).map((row) => fromRow(row))
}),
messages: Effect.fn("V2Session.messages")(function* (input) {
const direction = input.cursor?.direction ?? "next"
let order = input.order ?? "desc"
// Query the adjacent rows in reverse, then flip them back into the requested order below.
if (direction === "previous" && order === "asc") order = "desc"
if (direction === "previous" && order === "desc") order = "asc"
const boundary = input.cursor
? order === "asc"
? or(
gt(SessionMessageTable.time_created, input.cursor.time),
and(
eq(SessionMessageTable.time_created, input.cursor.time),
gt(SessionMessageTable.id, input.cursor.id),
),
)
: or(
lt(SessionMessageTable.time_created, input.cursor.time),
and(
eq(SessionMessageTable.time_created, input.cursor.time),
lt(SessionMessageTable.id, input.cursor.id),
),
)
: undefined
const where = boundary
? and(eq(SessionMessageTable.session_id, input.sessionID), boundary)
: eq(SessionMessageTable.session_id, input.sessionID)
const rows = Database.use((db) => {
const query = db
.select()
.from(SessionMessageTable)
.where(where)
.orderBy(
order === "asc" ? asc(SessionMessageTable.time_created) : desc(SessionMessageTable.time_created),
order === "asc" ? asc(SessionMessageTable.id) : desc(SessionMessageTable.id),
)
const rows = input.limit === undefined ? query.all() : query.limit(input.limit).all()
return direction === "previous" ? rows.toReversed() : rows
})
return rows.map((row) => decode(row))
}),
prompt: Effect.fn("V2Session.prompt")(function* (_input) {
return {} as any
}),
switchAgent: Effect.fn("V2Session.switchAgent")(function* (input) {
SyncEvent.run(SessionEvent.AgentSwitched.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(Date.now()),
agent: input.agent,
})
}),
switchModel: Effect.fn("V2Session.switchModel")(function* (input) {
SyncEvent.run(SessionEvent.ModelSwitched.Sync, {
sessionID: input.sessionID,
timestamp: DateTime.makeUnsafe(Date.now()),
id: input.id,
providerID: input.providerID,
variant: input.variant,
})
}),
compact: Effect.fn("V2Session.compact")(function* (_sessionID) {}),
wait: Effect.fn("V2Session.wait")(function* (_sessionID) {}),
}
return Service.of(result)
}),
)
function fromV1(input: Session.Info): Info {
return new Info({
id: ID.make(input.id),
})
}
export const defaultLayer = layer
export * as SessionV2 from "./session"

View File

@@ -0,0 +1,18 @@
export * as ToolOutput from "./tool-output"
import { Schema } from "effect"
// Plain-text payload emitted by a tool.
export class TextContent extends Schema.Class<TextContent>("Tool.TextContent")({
  type: Schema.Literal("text"),
  text: Schema.String,
}) {}
// File reference emitted by a tool (URI + mime type, optional display name).
export class FileContent extends Schema.Class<FileContent>("Tool.FileContent")({
  type: Schema.Literal("file"),
  uri: Schema.String,
  mime: Schema.String,
  name: Schema.String.pipe(Schema.optional),
}) {}
// All tool content variants, discriminated on `type`.
export const Content = Schema.Union([TextContent, FileContent]).pipe(Schema.toTaggedUnion("type"))
// Free-form structured output; keys and value shapes are tool-specific.
export const Structured = Schema.Record(Schema.String, Schema.Any)

View File

@@ -58,6 +58,7 @@ function toolEvent(
raw: opts.raw,
}
const payload: EventMessagePartUpdated = {
id: `evt_${opts.callID}`,
type: "message.part.updated",
properties: {
sessionID: sessionId,

View File

@@ -4,8 +4,8 @@ import path from "path"
import { provideInstance, tmpdir } from "../fixture/fixture"
import { Instance } from "../../src/project/instance"
import { Agent } from "../../src/agent/agent"
import { Permission } from "../../src/permission"
import { Global } from "@opencode-ai/core/global"
import { Permission } from "../../src/permission"
// Helper to evaluate permission for a tool with wildcard pattern
function evalPerm(agent: Agent.Info | undefined, permission: string): Permission.Action | undefined {
@@ -32,6 +32,7 @@ test("returns default native agents when no config", async () => {
expect(names).toContain("plan")
expect(names).toContain("general")
expect(names).toContain("explore")
expect(names).toContain("scout")
expect(names).toContain("compaction")
expect(names).toContain("title")
expect(names).toContain("summary")
@@ -48,8 +49,10 @@ test("build agent has correct default properties", async () => {
expect(build).toBeDefined()
expect(build?.mode).toBe("primary")
expect(build?.native).toBe(true)
expect(evalPerm(build, "edit")).toBe("allow")
expect(evalPerm(build, "edit")).toBe("ask")
expect(evalPerm(build, "bash")).toBe("allow")
expect(evalPerm(build, "repo_clone")).toBe("deny")
expect(evalPerm(build, "repo_overview")).toBe("deny")
},
})
})
@@ -101,6 +104,81 @@ test("explore agent asks for external directories and allows whitelisted externa
})
})
test("scout agent allows repo cloning and repo cache reads", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
const scout = await load(tmp.path, (svc) => svc.get("scout"))
expect(scout).toBeDefined()
expect(scout?.mode).toBe("subagent")
expect(evalPerm(scout, "repo_clone")).toBe("allow")
expect(evalPerm(scout, "repo_overview")).toBe("allow")
expect(evalPerm(scout, "edit")).toBe("deny")
expect(
Permission.evaluate(
"external_directory",
path.join(Global.Path.repos, "github.com", "owner", "repo", "README.md"),
scout!.permission,
).action,
).toBe("allow")
},
})
})
test("reference config creates scout-backed subagents", async () => {
await using tmp = await tmpdir({
config: {
reference: {
effect: "github.com/effect/effect-smol",
effectFull: {
repository: "Effect-TS/effect",
branch: "main",
},
localdocs: "../docs",
localdocsFull: {
path: "../local-docs",
},
},
},
})
await Instance.provide({
directory: tmp.path,
fn: async () => {
const effect = await load(tmp.path, (svc) => svc.get("effect"))
const effectFull = await load(tmp.path, (svc) => svc.get("effectFull"))
const local = await load(tmp.path, (svc) => svc.get("localdocs"))
const localFull = await load(tmp.path, (svc) => svc.get("localdocsFull"))
expect(effect).toBeDefined()
expect(effect?.mode).toBe("subagent")
expect(effect?.prompt).toContain("Repository: github.com/effect/effect-smol")
expect(evalPerm(effect, "repo_clone")).toBe("allow")
expect(effectFull).toBeDefined()
expect(effectFull?.mode).toBe("subagent")
expect(effectFull?.prompt).toContain("Repository: Effect-TS/effect")
expect(effectFull?.prompt).toContain("Branch/ref: main")
expect(evalPerm(effectFull, "repo_clone")).toBe("allow")
expect(local).toBeDefined()
expect(local?.mode).toBe("subagent")
expect(local?.prompt).toContain(`Local directory: ${path.resolve(tmp.path, "../docs")}`)
expect(
Permission.evaluate(
"external_directory",
path.join(path.resolve(tmp.path, "../docs"), "README.md"),
local!.permission,
).action,
).toBe("allow")
expect(localFull).toBeDefined()
expect(localFull?.mode).toBe("subagent")
expect(localFull?.prompt).toContain(`Local directory: ${path.resolve(tmp.path, "../local-docs")}`)
},
})
})
test("general agent denies todo tools", async () => {
await using tmp = await tmpdir()
await Instance.provide({

View File

@@ -0,0 +1,49 @@
import { describe, expect } from "bun:test"
import { Deferred, Effect, Layer } from "effect"
import { BackgroundJob } from "@/background/job"
import { CrossSpawnSpawner } from "@opencode-ai/core/cross-spawn-spawner"
import { provideTmpdirInstance } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
// Effect-aware test harness with the background-job service and process
// spawner layers provided to every test.
const it = testEffect(Layer.mergeAll(BackgroundJob.defaultLayer, CrossSpawnSpawner.defaultLayer))
describe("background.job", () => {
  it.live("tracks started jobs through completion", () =>
    provideTmpdirInstance(() =>
      Effect.gen(function* () {
        const jobs = yield* BackgroundJob.Service
        // Latch keeps the job's effect suspended until we let it finish.
        const latch = yield* Deferred.make<void>()
        const job = yield* jobs.start({
          type: "test",
          title: "test job",
          run: Deferred.await(latch).pipe(Effect.as("done")),
        })
        // Still blocked on the latch, so the job must report running.
        expect(job.status).toBe("running")
        yield* Deferred.succeed(latch, undefined)
        const done = yield* jobs.wait({ id: job.id })
        expect(done.info?.status).toBe("completed")
        expect(done.info?.output).toBe("done")
        // Completed jobs remain listed.
        expect((yield* jobs.list()).map((item) => item.id)).toEqual([job.id])
      }),
    ),
  )
  it.live("can cancel running jobs", () =>
    provideTmpdirInstance(() =>
      Effect.gen(function* () {
        const jobs = yield* BackgroundJob.Service
        // Latch is never opened: the job stays running until cancelled.
        const latch = yield* Deferred.make<void>()
        const job = yield* jobs.start({
          type: "test",
          run: Deferred.await(latch).pipe(Effect.as("done")),
        })
        const cancelled = yield* jobs.cancel(job.id)
        expect(cancelled?.status).toBe("cancelled")
      }),
    ),
  )
})

View File

@@ -25,6 +25,16 @@ test("parses ssh:// URL without .git suffix", () => {
expect(parseGitHubRemote("ssh://git@github.com/sst/opencode")).toEqual({ owner: "sst", repo: "opencode" })
})
test("parses git protocol URLs from package metadata", () => {
expect(parseGitHubRemote("git://github.com/facebook/react.git")).toEqual({ owner: "facebook", repo: "react" })
expect(parseGitHubRemote("git+https://github.com/facebook/react.git")).toEqual({ owner: "facebook", repo: "react" })
expect(parseGitHubRemote("git+ssh://git@github.com/facebook/react.git")).toEqual({ owner: "facebook", repo: "react" })
})
// fix: the title claimed the shorthand is parsed, but the assertion pins the
// opposite contract — `github:owner/repo` is intentionally NOT supported.
test("does not parse npm-style github shorthand", () => {
  expect(parseGitHubRemote("github:facebook/react")).toBeNull()
})
test("parses http URL", () => {
expect(parseGitHubRemote("http://github.com/owner/repo")).toEqual({ owner: "owner", repo: "repo" })
})

View File

@@ -25,6 +25,7 @@ function event(payload: Event, input: { directory: string; workspace?: string })
function vcs(branch: string): Event {
return {
id: `evt_vcs_${branch}`,
type: "vcs.branch.updated",
properties: {
branch,
@@ -34,6 +35,7 @@ function vcs(branch: string): Event {
function update(version: string): Event {
return {
id: `evt_update_${version}`,
type: "installation.update-available",
properties: {
version,

View File

@@ -79,7 +79,7 @@ delete process.env["OPENCODE_SERVER_USERNAME"]
process.env["OPENCODE_DB"] = ":memory:"
// Now safe to import from src/
const Log = await import("@opencode-ai/core/util/log")
const { Log } = await import("@opencode-ai/core/util/log")
const { initProjectors } = await import("../src/server/projectors")
void Log.init({

View File

@@ -45,10 +45,10 @@ test("Bedrock: config region takes precedence over AWS_REGION env var", async ()
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_REGION", "us-east-1")
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -70,10 +70,10 @@ test("Bedrock: falls back to AWS_REGION env var when no config region", async ()
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_REGION", "eu-west-1")
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -125,11 +125,11 @@ test("Bedrock: loads when bearer token from auth.json is present", async () => {
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "")
set("AWS_ACCESS_KEY_ID", "")
set("AWS_BEARER_TOKEN_BEDROCK", "")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -171,10 +171,10 @@ test("Bedrock: config profile takes precedence over AWS_PROFILE env var", async
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "default")
set("AWS_ACCESS_KEY_ID", "test-key-id")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -203,9 +203,9 @@ test("Bedrock: includes custom endpoint in options when specified", async () =>
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -236,12 +236,12 @@ test("Bedrock: autoloads when AWS_WEB_IDENTITY_TOKEN_FILE is present", async ()
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks.amazonaws.com/serviceaccount/token")
set("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/my-eks-role")
set("AWS_PROFILE", "")
set("AWS_ACCESS_KEY_ID", "")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -279,9 +279,9 @@ test("Bedrock: model with us. prefix should not be double-prefixed", async () =>
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -316,9 +316,9 @@ test("Bedrock: model with global. prefix should not be prefixed", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -352,9 +352,9 @@ test("Bedrock: model with eu. prefix should not be double-prefixed", async () =>
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()
@@ -388,9 +388,9 @@ test("Bedrock: model without prefix in US region should get us. prefix added", a
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("AWS_PROFILE", "default")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.amazonBedrock]).toBeDefined()

View File

@@ -82,9 +82,9 @@ test("provider loaded from env variable", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -137,9 +137,9 @@ test("disabled_providers excludes provider", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeUndefined()
@@ -161,10 +161,10 @@ test("enabled_providers restricts to only listed providers", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
set("OPENAI_API_KEY", "test-openai-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -191,9 +191,9 @@ test("model whitelist filters models for provider", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -222,9 +222,9 @@ test("model blacklist excludes specific models", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -257,9 +257,9 @@ test("custom model alias via config", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -394,9 +394,9 @@ test("env variable takes precedence, config merges options", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "env-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -420,9 +420,9 @@ test("getModel returns model for valid provider/model", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const model = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514"))
expect(model).toBeDefined()
@@ -447,9 +447,9 @@ test("getModel throws ModelNotFoundError for invalid model", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
expect(getModel(ProviderID.anthropic, ModelID.make("nonexistent-model"))).rejects.toThrow()
},
@@ -500,9 +500,9 @@ test("defaultModel returns first available model when no config set", async () =
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const model = await defaultModel()
expect(model.providerID).toBeDefined()
@@ -525,9 +525,9 @@ test("defaultModel respects config model setting", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const model = await defaultModel()
expect(String(model.providerID)).toBe("anthropic")
@@ -640,9 +640,9 @@ test("model options are merged from existing model", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -669,9 +669,9 @@ test("provider removed when all models filtered out", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeUndefined()
@@ -692,9 +692,9 @@ test("closest finds model by partial match", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const result = await closest(ProviderID.anthropic, ["sonnet-4"])
expect(result).toBeDefined()
@@ -747,9 +747,9 @@ test("getModel uses realIdByKey for aliased models", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic].models["my-sonnet"]).toBeDefined()
@@ -862,9 +862,9 @@ test("model inherits properties from existing database model", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -890,9 +890,9 @@ test("disabled_providers prevents loading even with env var", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("OPENAI_API_KEY", "test-openai-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.openai]).toBeUndefined()
@@ -914,10 +914,10 @@ test("enabled_providers with empty array allows no providers", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
set("OPENAI_API_KEY", "test-openai-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(Object.keys(providers).length).toBe(0)
@@ -944,9 +944,9 @@ test("whitelist and blacklist can be combined", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -1053,9 +1053,9 @@ test("getSmallModel returns appropriate small model", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const model = await getSmallModel(ProviderID.anthropic)
expect(model).toBeDefined()
@@ -1078,9 +1078,9 @@ test("getSmallModel respects config small_model override", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const model = await getSmallModel(ProviderID.anthropic)
expect(model).toBeDefined()
@@ -1126,10 +1126,10 @@ test("multiple providers can be configured simultaneously", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-anthropic-key")
set("OPENAI_API_KEY", "test-openai-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()
@@ -1205,9 +1205,9 @@ test("model alias name defaults to alias key when id differs", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic].models["sonnet"].name).toBe("sonnet")
@@ -1245,9 +1245,9 @@ test("provider with multiple env var options only includes apiKey when single en
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("MULTI_ENV_KEY_1", "test-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.make("multi-env")]).toBeDefined()
@@ -1287,9 +1287,9 @@ test("provider with single env var includes apiKey automatically", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("SINGLE_ENV_KEY", "my-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.make("single-env")]).toBeDefined()
@@ -1324,9 +1324,9 @@ test("model cost overrides existing cost values", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -1403,11 +1403,11 @@ test("disabled_providers and enabled_providers interaction", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-anthropic")
set("OPENAI_API_KEY", "test-openai")
set("GOOGLE_GENERATIVE_AI_API_KEY", "test-google")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
// anthropic: in enabled, not in disabled = allowed
@@ -1561,10 +1561,10 @@ test("provider env fallback - second env var used if first missing", async () =>
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
// Only set fallback, not primary
set("FALLBACK_KEY", "fallback-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
// Provider should load because fallback env var is set
@@ -1586,9 +1586,9 @@ test("getModel returns consistent results", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const model1 = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514"))
const model2 = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514"))
@@ -1647,9 +1647,9 @@ test("ModelNotFoundError includes suggestions for typos", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
try {
await getModel(ProviderID.anthropic, ModelID.make("claude-sonet-4")) // typo: sonet instead of sonnet
@@ -1675,9 +1675,9 @@ test("ModelNotFoundError for provider includes suggestions", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
try {
await getModel(ProviderID.make("antropic"), ModelID.make("claude-sonnet-4")) // typo: antropic
@@ -1723,9 +1723,9 @@ test("getProvider returns provider info", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const provider = await getProvider(ProviderID.anthropic)
expect(provider).toBeDefined()
@@ -1747,9 +1747,9 @@ test("closest returns undefined when no partial match found", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const result = await closest(ProviderID.anthropic, ["nonexistent-xyz-model"])
expect(result).toBeUndefined()
@@ -1770,9 +1770,9 @@ test("closest checks multiple query terms in order", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
// First term won't match, second will
const result = await closest(ProviderID.anthropic, ["nonexistent", "haiku"])
@@ -1842,9 +1842,9 @@ test("provider options are deeply merged", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
// Custom options should be merged
@@ -1880,9 +1880,9 @@ test("custom model inherits npm package from models.dev provider config", async
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("OPENAI_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.openai].models["my-custom-model"]
@@ -1915,9 +1915,9 @@ test("custom model inherits api.url from models.dev provider", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("OPENROUTER_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.openrouter]).toBeDefined()
@@ -2048,9 +2048,9 @@ test("model variants are generated for reasoning models", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
// Claude sonnet 4 has reasoning capability
@@ -2086,9 +2086,9 @@ test("model variants can be disabled via config", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -2129,9 +2129,9 @@ test("model variants can be customized via config", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -2168,9 +2168,9 @@ test("disabled key is stripped from variant config", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -2206,9 +2206,9 @@ test("all variants can be disabled via config", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -2244,9 +2244,9 @@ test("variant config merges with generated variants", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
@@ -2282,9 +2282,9 @@ test("variants filtered in second pass for database models", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("OPENAI_API_KEY", "test-api-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.openai].models["gpt-5"]
@@ -2386,9 +2386,9 @@ test("Google Vertex: retains baseURL for custom proxy", async () => {
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("GOOGLE_APPLICATION_CREDENTIALS", "test-creds")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.make("vertex-proxy")]).toBeDefined()
@@ -2431,9 +2431,9 @@ test("Google Vertex: supports OpenAI compatible models", async () => {
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("GOOGLE_APPLICATION_CREDENTIALS", "test-creds")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
const model = providers[ProviderID.make("vertex-openai")].models["gpt-4"]
@@ -2457,11 +2457,11 @@ test("cloudflare-ai-gateway loads with env variables", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("CLOUDFLARE_ACCOUNT_ID", "test-account")
set("CLOUDFLARE_GATEWAY_ID", "test-gateway")
set("CLOUDFLARE_API_TOKEN", "test-token")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.make("cloudflare-ai-gateway")]).toBeDefined()
@@ -2489,11 +2489,11 @@ test("cloudflare-ai-gateway forwards config metadata options", async () => {
})
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("CLOUDFLARE_ACCOUNT_ID", "test-account")
set("CLOUDFLARE_GATEWAY_ID", "test-gateway")
set("CLOUDFLARE_API_TOKEN", "test-token")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.make("cloudflare-ai-gateway")]).toBeDefined()
@@ -2592,10 +2592,10 @@ test("plugin config enabled and disabled providers are honored", async () => {
await Instance.provide({
directory: tmp.path,
init: Effect.promise(async () => {
init: async () => {
set("ANTHROPIC_API_KEY", "test-anthropic-key")
set("OPENAI_API_KEY", "test-openai-key")
}).pipe(Effect.asVoid),
},
fn: async () => {
const providers = await list()
expect(providers[ProviderID.anthropic]).toBeDefined()

View File

@@ -16,7 +16,9 @@ import { Session } from "@/session/session"
import { MessageID, PartID, type SessionID } from "../../src/session/schema"
import { MessageV2 } from "../../src/session/message-v2"
import { Database } from "@/storage/db"
import { SessionTable } from "@/session/session.sql"
import { SessionMessageTable, SessionTable } from "@/session/session.sql"
import { SessionMessage } from "../../src/v2/session-message"
import * as DateTime from "effect/DateTime"
import * as Log from "@opencode-ai/core/util/log"
import { eq } from "drizzle-orm"
import { resetDatabase } from "../fixture/db"
@@ -202,6 +204,45 @@ describe("session HttpApi", () => {
{ headers },
),
).toMatchObject({ info: { id: message.info.id } })
yield* Effect.promise(() =>
Instance.provide({
directory: tmp.path,
fn: async () => {
const message = new SessionMessage.Assistant({
id: SessionMessage.ID.create(),
type: "assistant",
agent: "build",
model: { id: "model", providerID: "provider" },
time: { created: DateTime.makeUnsafe(1) },
content: [],
})
Database.use((db) =>
db
.insert(SessionMessageTable)
.values([
{
id: message.id,
session_id: parent.id,
type: message.type,
time_created: 1,
data: {
time: { created: 1 },
agent: message.agent,
model: message.model,
content: message.content,
} as NonNullable<typeof SessionMessageTable.$inferInsert["data"]>,
},
])
.run(),
)
},
}),
)
expect(yield* requestJson<SessionMessage.Message[]>(`/api/session/${parent.id}/message`, { headers })).toMatchObject([
{ type: "assistant" },
])
}),
),
)

View File

@@ -19,6 +19,7 @@ import { MessageV2 } from "../../src/session/message-v2"
import { MessageID, PartID, SessionID } from "../../src/session/schema"
import { SessionStatus } from "../../src/session/status"
import { SessionSummary } from "../../src/session/summary"
import { SessionV2 } from "../../src/v2/session"
import { ModelID, ProviderID } from "../../src/provider/schema"
import type { Provider } from "@/provider/provider"
import * as SessionProcessorModule from "../../src/session/processor"
@@ -595,6 +596,15 @@ describe("session.compaction.create", () => {
auto: true,
overflow: true,
})
const v2 = yield* SessionV2.Service.use((svc) => svc.messages({ sessionID: info.id })).pipe(
Effect.provide(SessionV2.defaultLayer),
)
expect(v2.at(-1)).toMatchObject({
type: "compaction",
reason: "auto",
summary: "",
})
}),
),
)

Some files were not shown because too many files have changed in this diff Show More