From 6d1ae1c86305e49fbcc1108e508e74afebc1351d Mon Sep 17 00:00:00 2001 From: cnfjlhj <166828808+cnfjlhj@users.noreply.github.com> Date: Wed, 18 Mar 2026 12:18:31 +0800 Subject: [PATCH 1/4] feat(mubu): add mubu agent harness --- .gitignore | 5 +- README.md | 27 +- mubu/agent-harness/MUBU.md | 89 + mubu/agent-harness/README.md | 50 + .../agent-harness/cli_anything/mubu/README.md | 26 + .../cli_anything/mubu/__init__.py | 3 + .../cli_anything/mubu/__main__.py | 5 + .../cli_anything/mubu/mubu_cli.py | 716 ++++++ .../cli_anything/mubu/skills/SKILL.md | 200 ++ .../cli_anything/mubu/tests/TEST.md | 481 ++++ .../cli_anything/mubu/tests/__init__.py | 1 + .../mubu/tests/test_agent_harness.py | 134 + .../mubu/tests/test_cli_entrypoint.py | 221 ++ .../cli_anything/mubu/tests/test_core.py | 301 +++ .../cli_anything/mubu/tests/test_full_e2e.py | 226 ++ .../mubu/tests/test_mubu_probe.py | 533 ++++ .../cli_anything/mubu/utils/__init__.py | 3 + .../cli_anything/mubu/utils/repl_skin.py | 521 ++++ mubu/agent-harness/mubu_probe.py | 2145 +++++++++++++++++ mubu/agent-harness/pyproject.toml | 3 + mubu/agent-harness/setup.py | 49 + mubu/agent-harness/skill_generator.py | 414 ++++ .../agent-harness/templates/SKILL.md.template | 104 + registry.json | 14 +- 24 files changed, 6260 insertions(+), 11 deletions(-) create mode 100644 mubu/agent-harness/MUBU.md create mode 100644 mubu/agent-harness/README.md create mode 100644 mubu/agent-harness/cli_anything/mubu/README.md create mode 100644 mubu/agent-harness/cli_anything/mubu/__init__.py create mode 100644 mubu/agent-harness/cli_anything/mubu/__main__.py create mode 100644 mubu/agent-harness/cli_anything/mubu/mubu_cli.py create mode 100644 mubu/agent-harness/cli_anything/mubu/skills/SKILL.md create mode 100644 mubu/agent-harness/cli_anything/mubu/tests/TEST.md create mode 100644 mubu/agent-harness/cli_anything/mubu/tests/__init__.py create mode 100644 mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py create mode 100644 
mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py create mode 100644 mubu/agent-harness/cli_anything/mubu/tests/test_core.py create mode 100644 mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py create mode 100644 mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py create mode 100644 mubu/agent-harness/cli_anything/mubu/utils/__init__.py create mode 100644 mubu/agent-harness/cli_anything/mubu/utils/repl_skin.py create mode 100644 mubu/agent-harness/mubu_probe.py create mode 100644 mubu/agent-harness/pyproject.toml create mode 100644 mubu/agent-harness/setup.py create mode 100644 mubu/agent-harness/skill_generator.py create mode 100644 mubu/agent-harness/templates/SKILL.md.template diff --git a/.gitignore b/.gitignore index cfc7c36d6..74c4a1882 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ !/inkscape/ !/audacity/ !/libreoffice/ +!/mubu/ !/obs-studio/ !/kdenlive/ !/shotcut/ @@ -49,6 +50,8 @@ /audacity/.* /libreoffice/* /libreoffice/.* +/mubu/* +/mubu/.* /obs-studio/* /obs-studio/.* /kdenlive/* @@ -70,6 +73,7 @@ !/inkscape/agent-harness/ !/audacity/agent-harness/ !/libreoffice/agent-harness/ +!/mubu/agent-harness/ !/obs-studio/agent-harness/ !/kdenlive/agent-harness/ !/shotcut/agent-harness/ @@ -107,4 +111,3 @@ assets/gen_typing_gif.py !/docs/ /docs/* !/docs/hub/ - diff --git a/README.md b/README.md index 70872de20..65256ae60 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,8 @@ CLI-Anything: Bridging the Gap Between AI Agents and the World's Software Quick Start CLI Hub - Demos - Tests + Demos + Tests License

@@ -400,7 +400,7 @@ AI agents are great at reasoning but terrible at using real professional softwar | ๐Ÿ’ธ "UI automation breaks constantly" | No screenshots, no clicking, no RPA fragility. Pure command-line reliability with structured interfaces | | ๐Ÿ“Š "Agents need structured data" | Built-in JSON output for seamless agent consumption + human-readable formats for debugging | | ๐Ÿ”ง "Custom integrations are expensive" | One Claude plugin auto-generates CLIs for ANY codebase through proven 7-phase pipeline | -| โšก "Prototype vs Production gap" | 1,588+ tests with real software validation. Battle-tested across 13 major applications | +| โšก "Prototype vs Production gap" | 1,684 tests with real software validation. Battle-tested across 14 major applications | --- @@ -501,7 +501,7 @@ SKILL.md files are auto-generated during Phase 6.5 of the pipeline using `skill_ CLI-Anything works on any software with a codebase โ€” no domain restrictions or architectural limitations. ### ๐Ÿญ Professional-Grade Testing -Tested across 13 diverse, complex applications spanning creative, productivity, communication, diagramming, AI image generation, and AI content generation domains previously inaccessible to AI agents. +Tested across 14 diverse, complex applications spanning creative, productivity, communication, diagramming, AI image generation, and AI content generation domains previously inaccessible to AI agents. ### ๐ŸŽจ Diverse Domain Coverage From creative workflows (image editing, 3D modeling, vector graphics) to production tools (audio, office, live streaming, video editing). 
@@ -553,6 +553,13 @@ Each application received complete, production-ready CLI interfaces โ€” not demo โœ… 158 +๐Ÿ“ Mubu +Knowledge Management & Outlining +cli-anything-mubu +Local Mubu data + sync logs +โœ… 96 + + ๐Ÿ“น OBS Studio Live Streaming & Recording cli-anything-obs-studio @@ -610,11 +617,11 @@ Each application received complete, production-ready CLI interfaces โ€” not demo Total -โœ… 1,588 +โœ… 1,684 -> **100% pass rate** across all 1,588 tests โ€” 1,138 unit tests + 450 end-to-end tests. +> **100% pass rate** across all 1,684 tests โ€” 1,223 unit tests + 461 end-to-end tests. --- @@ -636,6 +643,7 @@ blender 208 passed โœ… (150 unit + 58 e2e) inkscape 202 passed โœ… (148 unit + 54 e2e) audacity 161 passed โœ… (107 unit + 54 e2e) libreoffice 158 passed โœ… (89 unit + 69 e2e) +mubu 96 passed โœ… (85 unit + 11 e2e) obs-studio 153 passed โœ… (116 unit + 37 e2e) kdenlive 155 passed โœ… (111 unit + 44 e2e) shotcut 154 passed โœ… (110 unit + 44 e2e) @@ -645,7 +653,7 @@ mermaid 10 passed โœ… (5 unit + 5 e2e) anygen 50 passed โœ… (40 unit + 10 e2e) comfyui 70 passed โœ… (60 unit + 10 e2e) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ -TOTAL 1,588 passed โœ… 100% pass rate +TOTAL 1,684 passed โœ… 100% pass rate ``` --- @@ -699,6 +707,7 @@ cli-anything/ โ”œโ”€โ”€ โœ๏ธ inkscape/agent-harness/ # Inkscape CLI (202 tests) โ”œโ”€โ”€ ๐ŸŽต audacity/agent-harness/ # Audacity CLI (161 tests) โ”œโ”€โ”€ ๐Ÿ“„ libreoffice/agent-harness/ # LibreOffice CLI (158 tests) +โ”œโ”€โ”€ ๐Ÿ“ mubu/agent-harness/ # Mubu CLI (96 tests) โ”œโ”€โ”€ ๐Ÿ“น obs-studio/agent-harness/ # OBS Studio CLI (153 tests) โ”œโ”€โ”€ ๐ŸŽž๏ธ kdenlive/agent-harness/ # Kdenlive CLI (155 tests) โ”œโ”€โ”€ ๐ŸŽฌ shotcut/agent-harness/ # Shotcut CLI (154 tests) @@ -807,7 +816,7 @@ HARNESS.md is our definitive SOP for making any 
software agent-accessible via au It encodes proven patterns and methodologies refined through automated generation processes. -The playbook distills key insights from successfully building all 13 diverse, production-ready harnesses. +The playbook distills key insights from successfully building all 14 diverse, production-ready harnesses. ### Critical Lessons @@ -932,7 +941,7 @@ MIT License โ€” free to use, modify, and distribute. **CLI-Anything** โ€” *Make any software with a codebase Agent-native.* -A methodology for the age of AI agents | 13 professional software demos | 1,588 passing tests +A methodology for the age of AI agents | 14 professional software demos | 1,684 passing tests
diff --git a/mubu/agent-harness/MUBU.md b/mubu/agent-harness/MUBU.md new file mode 100644 index 000000000..b5302dbdc --- /dev/null +++ b/mubu/agent-harness/MUBU.md @@ -0,0 +1,89 @@ +# MUBU Harness Notes + +## Target + +- Software: Mubu desktop app +- User goal: let Codex inspect, search, navigate, and perform careful atomic edits on the same local Mubu workspace the user is actively using + +## Backend Surfaces + +Read surfaces: + +- local backup snapshots +- local RxDB `.storage` +- client-sync logs + +Live surfaces: + +- `/v3/api/document/get` +- `/v3/api/colla/events` + +Auth and context sources: + +- local users store for `token` and `userId` +- sync logs for `memberId` +- live `/document/get` for current `baseVersion` + +## Current Command Groups + +Grouped Click domains: + +- `discover` +- `inspect` +- `mutate` +- `session` + +Discover / inspect examples: + +- `recent` +- `folders` +- `path-docs` +- `daily-current` +- `daily-nodes` +- `open-path` +- `doc-nodes` + +Mutate: + +- `update-text` +- `create-child` +- `delete-node` + +Packaging: + +- `cli-anything-mubu` +- `python -m cli_anything.mubu` +- editable install root: `agent-harness/` +- canonical source root: `agent-harness/cli_anything/mubu/...` +- compatibility wrappers remain at the project root +- packaged skill regeneration: `python3 agent-harness/skill_generator.py agent-harness` + +## Current State Model + +Subcommand mode: + +- stateless per invocation + +REPL mode: + +- persisted `current_doc` +- persisted `current_node` +- persisted local command history +- session JSON stored at `~/.config/cli-anything-mubu/session.json` +- REPL history stored at `~/.config/cli-anything-mubu/history.txt` +- startup banner exposes the packaged `SKILL.md` absolute path +- override via `CLI_ANYTHING_MUBU_STATE_DIR` + +## Safety Model + +- inspect before mutate +- dry-run first for live mutations +- `update-text` is live-verified +- `create-child` is live-verified +- `delete-node` is live-verified + +## Current 
Gaps + +- no undo/redo +- no move primitive +- no broader live multi-command E2E suite beyond the reversible scratch verification diff --git a/mubu/agent-harness/README.md b/mubu/agent-harness/README.md new file mode 100644 index 000000000..e5fb93af2 --- /dev/null +++ b/mubu/agent-harness/README.md @@ -0,0 +1,50 @@ +# Agent Harness + +This directory is now the stricter CLI-Anything-style harness root for Mubu. + +Recommended install flow: + +```bash +cd +python3 -m venv .venv +.venv/bin/python -m pip install -e ./agent-harness +``` + +Root install now also targets the same canonical source tree: + +```bash +cd +.venv/bin/python -m pip install -e . +``` + +What this gives you: + +- `agent-harness/` works as the editable install root +- the canonical implementation now lives inside this directory +- the same `cli-anything-mubu` console script is exposed +- the main CLI is Click-based with grouped command domains +- `skill_generator.py` can regenerate the packaged `skills/SKILL.md` + +Canonical implementation now lives under: + +- `agent-harness/mubu_probe.py` +- `agent-harness/cli_anything/mubu` + +Compatibility shims remain at the project root for local `python -m ...` and `python3 mubu_probe.py` workflows: + +- `mubu_probe.py` +- `cli_anything/mubu` + +Current supporting references: + +- `agent-harness/MUBU.md` +- `README.md` +- `tests/TEST.md` + +Current state: + +- packaged and installable from the harness root +- canonical package source is now under `agent-harness/cli_anything/mubu/...` +- root-level wrappers preserve backward compatibility during development +- grouped `discover` / `inspect` / `mutate` / `session` commands now exist +- the packaged `SKILL.md` is now generated from the canonical harness diff --git a/mubu/agent-harness/cli_anything/mubu/README.md b/mubu/agent-harness/cli_anything/mubu/README.md new file mode 100644 index 000000000..9357c2de8 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/README.md @@ -0,0 +1,26 @@ +# cli-anything-mubu 
+ +Canonical packaged entrypoint for the Mubu live bridge. + +This package lives in the CLI-Anything-aligned harness tree and exposes: + +- `cli-anything-mubu` console script +- `python -m cli_anything.mubu` +- default REPL when no subcommand is supplied +- REPL banner with app version, packaged skill path, and history path +- persisted `current-doc` and `current-node` REPL context + +Canonical source paths: + +- `agent-harness/mubu_probe.py` +- `agent-harness/cli_anything/mubu/...` + +Compatibility wrappers remain at: + +- `mubu_probe.py` +- `cli_anything/mubu/...` + +Primary operator documentation remains at the project root: + +- `README.md` +- `SKILL.md` diff --git a/mubu/agent-harness/cli_anything/mubu/__init__.py b/mubu/agent-harness/cli_anything/mubu/__init__.py new file mode 100644 index 000000000..a05eb9abb --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/__init__.py @@ -0,0 +1,3 @@ +__all__ = ["__version__"] + +__version__ = "0.1.0" diff --git a/mubu/agent-harness/cli_anything/mubu/__main__.py b/mubu/agent-harness/cli_anything/mubu/__main__.py new file mode 100644 index 000000000..7d8a07cd9 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/__main__.py @@ -0,0 +1,5 @@ +from cli_anything.mubu.mubu_cli import entrypoint + + +if __name__ == "__main__": + raise SystemExit(entrypoint()) diff --git a/mubu/agent-harness/cli_anything/mubu/mubu_cli.py b/mubu/agent-harness/cli_anything/mubu/mubu_cli.py new file mode 100644 index 000000000..63a4700a7 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/mubu_cli.py @@ -0,0 +1,716 @@ +from __future__ import annotations + +import json +import os +import shlex +import sys +from pathlib import Path +from typing import Iterable, Sequence + +import click + +import mubu_probe +from cli_anything.mubu import __version__ +from cli_anything.mubu.utils import ReplSkin + + +CONTEXT_SETTINGS = {"ignore_unknown_options": True, "allow_extra_args": True} +COMMAND_HISTORY_LIMIT = 50 +DISCOVER_COMMANDS = { + "docs": 
"List latest known document snapshots from local backups.", + "folders": "List folder metadata from local RxDB storage.", + "folder-docs": "List document metadata for one folder.", + "path-docs": "List documents for one folder path or folder id.", + "recent": "List recently active documents using backups, metadata, and sync logs.", + "daily": "Find Daily-style folders and list the documents inside them.", + "daily-current": "Resolve the current daily document from one Daily-style folder.", +} +INSPECT_COMMANDS = { + "show": "Show the latest backup tree for one document.", + "search": "Search latest backups for matching node text or note content.", + "changes": "Parse recent client-sync change events from local logs.", + "links": "Extract outbound Mubu document links from one document backup.", + "open-path": "Open one document by full path, suffix path, title, or doc id.", + "doc-nodes": "List live document nodes with node ids and update-target paths.", + "daily-nodes": "List live nodes from the current daily document in one step.", +} +MUTATE_COMMANDS = { + "create-child": "Build or execute one child-node creation against the live Mubu API.", + "delete-node": "Build or execute one node deletion against the live Mubu API.", + "update-text": "Build or execute one text update against the live Mubu API.", +} +LEGACY_COMMANDS = {} +LEGACY_COMMANDS.update(DISCOVER_COMMANDS) +LEGACY_COMMANDS.update(INSPECT_COMMANDS) +LEGACY_COMMANDS.update(MUTATE_COMMANDS) + +REPL_HELP = """Interactive REPL for cli-anything-mubu + +Builtins: + help Show this REPL help + exit, quit Leave the REPL + use-doc Set the current document reference for this REPL session + use-node Set the current node reference for this REPL session + use-daily Resolve and set the current daily document + current-doc Show the current document reference + current-node Show the current node reference + clear-doc Clear the current document reference + clear-node Clear the current node reference + status Show the 
current session status + history [limit] Show recent command history from session state + state-path Show the session state file path + +Examples: + recent --limit 5 --json + discover daily-current + discover daily-current --json + inspect daily-nodes --query ๆ—ฅๅฟ—ๆต --json + session use-doc 'Workspace/Daily tasks/26.03.16' + mutate create-child @doc --parent-node-id node-demo1 --text 'scratch child' --json + mutate delete-node @doc --node-id @node --json + update-text 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'new text' --json +""" + + +def session_state_dir() -> Path: + override = os.environ.get("CLI_ANYTHING_MUBU_STATE_DIR", "").strip() + if override: + return Path(override).expanduser() + return Path.home() / ".config" / "cli-anything-mubu" + + +def session_state_path() -> Path: + return session_state_dir() / "session.json" + + +def default_session_state() -> dict[str, object]: + return { + "current_doc": None, + "current_node": None, + "command_history": [], + } + + +def load_session_state() -> dict[str, object]: + path = session_state_path() + try: + data = json.loads(path.read_text(errors="replace")) + except FileNotFoundError: + return default_session_state() + except json.JSONDecodeError: + return default_session_state() + + history = data.get("command_history") + normalized_history = [item for item in history if isinstance(item, str)] if isinstance(history, list) else [] + return { + "current_doc": data.get("current_doc") if isinstance(data.get("current_doc"), str) else None, + "current_node": data.get("current_node") if isinstance(data.get("current_node"), str) else None, + "command_history": normalized_history[-COMMAND_HISTORY_LIMIT:], + } + + +def locked_save_json(path: Path, data: dict[str, object]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + try: + handle = open(path, "r+") + except FileNotFoundError: + handle = open(path, "w") + with handle: + locked = False + try: + import fcntl + + fcntl.flock(handle.fileno(), 
fcntl.LOCK_EX) + locked = True + except (ImportError, OSError): + pass + try: + handle.seek(0) + handle.truncate() + json.dump(data, handle, ensure_ascii=False, indent=2) + handle.flush() + finally: + if locked: + fcntl.flock(handle.fileno(), fcntl.LOCK_UN) + + +def save_session_state(session: dict[str, object]) -> None: + locked_save_json( + session_state_path(), + { + "current_doc": session.get("current_doc"), + "current_node": session.get("current_node"), + "command_history": list(session.get("command_history", [])), + }, + ) + + +def append_command_history(command_line: str) -> None: + command_line = command_line.strip() + if not command_line: + return + session = load_session_state() + history = list(session.get("command_history", [])) + history.append(command_line) + session["command_history"] = history[-COMMAND_HISTORY_LIMIT:] + save_session_state(session) + + +def resolve_current_daily_doc_ref(folder_ref: str = "Daily tasks") -> str: + metas = mubu_probe.load_document_metas(mubu_probe.DEFAULT_STORAGE_ROOT) + folders = mubu_probe.load_folders(mubu_probe.DEFAULT_STORAGE_ROOT) + docs, folder, ambiguous = mubu_probe.folder_documents(metas, folders, folder_ref) + if folder is None: + if ambiguous: + raise RuntimeError(mubu_probe.ambiguous_error_message("folder", folder_ref, ambiguous, "path")) + raise RuntimeError(f"folder not found: {folder_ref}") + selected, _ = mubu_probe.choose_current_daily_document(docs) + if selected is None or not selected.get("doc_path"): + raise RuntimeError(f"no current daily document found in {folder['path']}") + return str(selected["doc_path"]) + + +def expand_repl_aliases(argv: list[str], current_doc: str | None) -> list[str]: + return expand_repl_aliases_with_state(argv, {"current_doc": current_doc, "current_node": None}) + + +def expand_repl_aliases_with_state(argv: list[str], session: dict[str, object]) -> list[str]: + current_doc = session.get("current_doc") + current_node = session.get("current_node") + expanded: list[str] = 
[] + for token in argv: + if token in {"@doc", "@current"} and isinstance(current_doc, str): + expanded.append(current_doc) + elif token == "@node" and isinstance(current_node, str): + expanded.append(current_node) + else: + expanded.append(token) + return expanded + + +def build_session_payload(session: dict[str, object]) -> dict[str, object]: + history = list(session.get("command_history", [])) + return { + "current_doc": session.get("current_doc"), + "current_node": session.get("current_node"), + "state_path": str(session_state_path()), + "history_count": len(history), + } + + +def root_json_output(ctx: click.Context | None) -> bool: + if ctx is None: + return False + root = ctx.find_root() + if root is None or root.obj is None: + return False + return bool(root.obj.get("json_output")) + + +def emit_json(payload: object) -> None: + click.echo(json.dumps(payload, ensure_ascii=False, indent=2)) + + +def emit_session_status(session: dict[str, object], json_output: bool) -> None: + payload = build_session_payload(session) + if json_output: + emit_json(payload) + return + current_doc = payload["current_doc"] or "" + current_node = payload["current_node"] or "" + click.echo(f"Current doc: {current_doc}") + click.echo(f"Current node: {current_node}") + click.echo(f"State path: {payload['state_path']}") + click.echo(f"History count: {payload['history_count']}") + + +def emit_session_history(session: dict[str, object], limit: int, json_output: bool) -> None: + history = list(session.get("command_history", []))[-limit:] + if json_output: + emit_json({"history": history}) + return + if not history: + click.echo("History: ") + return + click.echo("History:") + for index, entry in enumerate(history, start=max(1, len(history) - limit + 1)): + click.echo(f" {index}. 
{entry}") + + +def invoke_probe_command(ctx: click.Context | None, command_name: str, probe_args: Sequence[str]) -> int: + argv = [command_name, *list(probe_args)] + if root_json_output(ctx) and "--json" not in argv: + argv.append("--json") + try: + result = mubu_probe.main(argv) + except SystemExit as exc: + result = exc.code if isinstance(exc.code, int) else 1 + if result in (0, None) and "--help" not in argv and "-h" not in argv: + append_command_history(" ".join(argv)) + return int(result or 0) + + +def print_repl_banner(skin: ReplSkin) -> None: + click.echo("Mubu REPL") + skin.print_banner() + click.echo(f"History: {skin.history_file}") + + +def print_repl_help() -> None: + click.echo(REPL_HELP.rstrip()) + + +def parse_history_limit(argv: Sequence[str]) -> int: + if len(argv) < 2: + return 10 + try: + return max(1, int(argv[1])) + except ValueError as exc: + raise RuntimeError(f"history limit must be an integer: {argv[1]}") from exc + + +def handle_repl_builtin(argv: list[str], session: dict[str, object]) -> tuple[bool, int]: + if not argv: + return True, 0 + + command = argv[0] + if command in {"exit", "quit"}: + return True, 1 + if command == "help": + print_repl_help() + return True, 0 + if command == "current-doc": + current_doc = session.get("current_doc") + click.echo(f"Current doc: {current_doc}" if current_doc else "Current doc: ") + return True, 0 + if command == "current-node": + current_node = session.get("current_node") + click.echo(f"Current node: {current_node}" if current_node else "Current node: ") + return True, 0 + if command == "status": + emit_session_status(session, json_output=False) + return True, 0 + if command == "history": + try: + limit = parse_history_limit(argv) + except RuntimeError as exc: + click.echo(str(exc), err=True) + return True, 0 + emit_session_history(session, limit, json_output=False) + return True, 0 + if command == "state-path": + click.echo(f"State path: {session_state_path()}") + return True, 0 + if command == 
"clear-doc": + session["current_doc"] = None + save_session_state(session) + append_command_history("clear-doc") + click.echo("Current doc cleared.") + return True, 0 + if command == "clear-node": + session["current_node"] = None + save_session_state(session) + append_command_history("clear-node") + click.echo("Current node cleared.") + return True, 0 + if command == "use-doc": + if len(argv) < 2: + click.echo("use-doc requires a document reference.", err=True) + return True, 0 + doc_ref = " ".join(argv[1:]) + session["current_doc"] = doc_ref + save_session_state(session) + append_command_history(f"use-doc {doc_ref}") + click.echo(f"Current doc: {doc_ref}") + return True, 0 + if command == "use-node": + if len(argv) < 2: + click.echo("use-node requires a node reference.", err=True) + return True, 0 + node_ref = " ".join(argv[1:]) + session["current_node"] = node_ref + save_session_state(session) + append_command_history(f"use-node {node_ref}") + click.echo(f"Current node: {node_ref}") + return True, 0 + if command == "use-daily": + folder_ref = " ".join(argv[1:]) if len(argv) > 1 else "Daily tasks" + try: + doc_ref = resolve_current_daily_doc_ref(folder_ref) + except RuntimeError as exc: + click.echo(str(exc), err=True) + return True, 0 + session["current_doc"] = doc_ref + save_session_state(session) + append_command_history(f"use-daily {folder_ref}".strip()) + click.echo(f"Current doc: {doc_ref}") + return True, 0 + + return False, 0 + + +def run_repl() -> int: + session = load_session_state() + skin = ReplSkin("mubu", version=__version__, history_file=str(session_state_dir() / "history.txt")) + prompt_session = skin.create_prompt_session() + print_repl_banner(skin) + if session.get("current_doc"): + click.echo(f"Current doc: {session['current_doc']}") + if session.get("current_node"): + click.echo(f"Current node: {session['current_node']}") + while True: + try: + line = skin.get_input(prompt_session) + except EOFError: + click.echo() + skin.print_goodbye() + 
return 0 + except KeyboardInterrupt: + click.echo() + continue + + line = line.strip() + if not line: + continue + + try: + argv = shlex.split(line) + except ValueError as exc: + click.echo(f"parse error: {exc}", err=True) + continue + + handled, control = handle_repl_builtin(argv, session) + if handled: + if control == 1: + skin.print_goodbye() + return 0 + session = load_session_state() + continue + + argv = expand_repl_aliases_with_state(argv, session) + result = dispatch(argv) + if result not in (0, None): + click.echo(f"command exited with status {result}", err=True) + session = load_session_state() + + +@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True) +@click.option("--json", "json_output", is_flag=True, help="Emit JSON output for wrapped probe commands when supported.") +@click.pass_context +def cli(ctx: click.Context, json_output: bool) -> int: + """Agent-native CLI for the Mubu desktop app with REPL and grouped command domains.""" + ctx.ensure_object(dict) + ctx.obj["json_output"] = json_output + if ctx.invoked_subcommand is None: + return run_repl() + return 0 + + +@cli.group(context_settings=CONTEXT_SETTINGS) +def discover() -> None: + """Discovery commands for folders, documents, recency, and Daily tasks resolution.""" + + +@discover.command("docs", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def discover_docs(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List latest known document snapshots from local backups.""" + return invoke_probe_command(ctx, "docs", probe_args) + + +@discover.command("folders", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def folders(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List folder metadata from local RxDB storage.""" + return invoke_probe_command(ctx, "folders", 
probe_args) + + +@discover.command("folder-docs", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def folder_docs(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List document metadata for one folder.""" + return invoke_probe_command(ctx, "folder-docs", probe_args) + + +@discover.command("path-docs", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def path_docs(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List documents for one folder path or folder id.""" + return invoke_probe_command(ctx, "path-docs", probe_args) + + +@discover.command("recent", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def recent(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List recently active documents using backups, metadata, and sync logs.""" + return invoke_probe_command(ctx, "recent", probe_args) + + +@discover.command("daily", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def daily(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Find Daily-style folders and list the documents inside them.""" + return invoke_probe_command(ctx, "daily", probe_args) + + +@discover.command("daily-current", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def daily_current(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Resolve the current daily document from one Daily-style folder.""" + return invoke_probe_command(ctx, "daily-current", probe_args) + + +@cli.group(context_settings=CONTEXT_SETTINGS) +def inspect() -> None: + """Inspection commands for 
tree views, search, links, sync events, and live node targeting.""" + + +@inspect.command("show", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def show(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Show the latest backup tree for one document.""" + return invoke_probe_command(ctx, "show", probe_args) + + +@inspect.command("search", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def search(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Search latest backups for matching node text or note content.""" + return invoke_probe_command(ctx, "search", probe_args) + + +@inspect.command("changes", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def changes(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Parse recent client-sync change events from local logs.""" + return invoke_probe_command(ctx, "changes", probe_args) + + +@inspect.command("links", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def links(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Extract outbound Mubu document links from one document backup.""" + return invoke_probe_command(ctx, "links", probe_args) + + +@inspect.command("open-path", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def open_path(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Open one document by full path, suffix path, title, or doc id.""" + return invoke_probe_command(ctx, "open-path", probe_args) + + +@inspect.command("doc-nodes", context_settings=CONTEXT_SETTINGS, 
add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def doc_nodes(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List live document nodes with node ids and update-target paths.""" + return invoke_probe_command(ctx, "doc-nodes", probe_args) + + +@inspect.command("daily-nodes", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def daily_nodes(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """List live nodes from the current daily document in one step.""" + return invoke_probe_command(ctx, "daily-nodes", probe_args) + + +@cli.group(context_settings=CONTEXT_SETTINGS) +def mutate() -> None: + """Mutation commands for dry-run-first atomic live edits against the Mubu API.""" + + +@mutate.command("create-child", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def create_child(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Build or execute one child-node creation against the live Mubu API.""" + return invoke_probe_command(ctx, "create-child", probe_args) + + +@mutate.command("delete-node", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def delete_node(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Build or execute one node deletion against the live Mubu API.""" + return invoke_probe_command(ctx, "delete-node", probe_args) + + +@mutate.command("update-text", context_settings=CONTEXT_SETTINGS, add_help_option=False) +@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) +@click.pass_context +def update_text(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + """Build or execute one text update against the live Mubu API.""" + return invoke_probe_command(ctx, 
"update-text", probe_args) + + +@cli.group() +def session() -> None: + """Session and state commands for current document/node context and local command history.""" + + +@session.command("status") +@click.option("--json", "json_output", is_flag=True, help="Emit session state as JSON.") +@click.pass_context +def session_status(ctx: click.Context, json_output: bool) -> int: + """Show the current session state.""" + emit_session_status(load_session_state(), json_output=json_output or root_json_output(ctx)) + return 0 + + +@session.command("state-path") +@click.option("--json", "json_output", is_flag=True, help="Emit the session state path as JSON.") +@click.pass_context +def state_path_command(ctx: click.Context, json_output: bool) -> int: + """Show the session state file path.""" + payload = {"state_path": str(session_state_path())} + if json_output or root_json_output(ctx): + emit_json(payload) + else: + click.echo(payload["state_path"]) + return 0 + + +@session.command("use-doc") +@click.argument("doc_ref", nargs=-1) +def use_doc(doc_ref: tuple[str, ...]) -> int: + """Persist the current document reference.""" + if not doc_ref: + raise click.UsageError("use-doc requires a document reference.") + value = " ".join(doc_ref) + session_state = load_session_state() + session_state["current_doc"] = value + save_session_state(session_state) + append_command_history(f"session use-doc {value}") + click.echo(f"Current doc: {value}") + return 0 + + +@session.command("use-node") +@click.argument("node_ref", nargs=-1) +def use_node(node_ref: tuple[str, ...]) -> int: + """Persist the current node reference.""" + if not node_ref: + raise click.UsageError("use-node requires a node reference.") + value = " ".join(node_ref) + session_state = load_session_state() + session_state["current_node"] = value + save_session_state(session_state) + append_command_history(f"session use-node {value}") + click.echo(f"Current node: {value}") + return 0 + + +@session.command("use-daily") 
+@click.argument("folder_ref", nargs=-1) +def use_daily(folder_ref: tuple[str, ...]) -> int: + """Resolve and persist the current daily document reference.""" + value = " ".join(folder_ref) if folder_ref else "Daily tasks" + doc_ref = resolve_current_daily_doc_ref(value) + session_state = load_session_state() + session_state["current_doc"] = doc_ref + save_session_state(session_state) + append_command_history(f"session use-daily {value}".strip()) + click.echo(f"Current doc: {doc_ref}") + return 0 + + +@session.command("clear-doc") +def clear_doc() -> int: + """Clear the current document reference.""" + session_state = load_session_state() + session_state["current_doc"] = None + save_session_state(session_state) + append_command_history("session clear-doc") + click.echo("Current doc cleared.") + return 0 + + +@session.command("clear-node") +def clear_node() -> int: + """Clear the current node reference.""" + session_state = load_session_state() + session_state["current_node"] = None + save_session_state(session_state) + append_command_history("session clear-node") + click.echo("Current node cleared.") + return 0 + + +@session.command("history") +@click.option("--limit", default=10, show_default=True, type=int, help="How many recent entries to show.") +@click.option("--json", "json_output", is_flag=True, help="Emit command history as JSON.") +@click.pass_context +def history_command(ctx: click.Context, limit: int, json_output: bool) -> int: + """Show recent command history stored in session state.""" + emit_session_history(load_session_state(), max(1, limit), json_output=json_output or root_json_output(ctx)) + return 0 + + +@cli.command("repl", help=REPL_HELP) +def repl_command() -> int: + """Interactive REPL for cli-anything-mubu.""" + return run_repl() + + +def create_legacy_command(command_name: str, help_text: str) -> click.Command: + @click.command(name=command_name, help=help_text, context_settings=CONTEXT_SETTINGS, add_help_option=False) + 
@click.argument("probe_args", nargs=-1, type=click.UNPROCESSED) + @click.pass_context + def legacy(ctx: click.Context, probe_args: tuple[str, ...]) -> int: + return invoke_probe_command(ctx, command_name, probe_args) + + return legacy + + +for _command_name, _help_text in LEGACY_COMMANDS.items(): + cli.add_command(create_legacy_command(_command_name, _help_text)) + + +def dispatch(argv: list[str] | None = None) -> int: + args = list(sys.argv[1:] if argv is None else argv) + try: + result = cli.main(args=args, prog_name="cli-anything-mubu", standalone_mode=False) + except click.exceptions.Exit as exc: + return int(exc.exit_code) + except click.ClickException as exc: + exc.show() + return int(exc.exit_code) + return int(result or 0) + + +def entrypoint(argv: list[str] | None = None) -> int: + return dispatch(argv) + + +__all__ = [ + "REPL_HELP", + "append_command_history", + "build_session_payload", + "cli", + "default_session_state", + "dispatch", + "entrypoint", + "expand_repl_aliases", + "expand_repl_aliases_with_state", + "handle_repl_builtin", + "load_session_state", + "resolve_current_daily_doc_ref", + "run_repl", + "save_session_state", + "session_state_dir", + "session_state_path", +] diff --git a/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md b/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md new file mode 100644 index 000000000..eb33a143c --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md @@ -0,0 +1,200 @@ +--- +name: >- + cli-anything-mubu +description: >- + Command-line interface for Mubu - Canonical packaged entrypoint for the Mubu live bridge.... +--- + +# cli-anything-mubu + +Canonical packaged entrypoint for the Mubu live bridge. + +## Installation + +This CLI is packaged from the canonical `agent-harness` source tree: + +```bash +pip install -e . 
+``` + +**Prerequisites:** +- Python 3.10+ +- An active Mubu desktop session on this machine +- Local Mubu profile data available to the CLI + +## Entry Points + +```bash +cli-anything-mubu +python -m cli_anything.mubu +``` + +When invoked without a subcommand, the CLI enters an interactive REPL session. + +## Command Groups + + +### Discover + +Discovery commands for folders, documents, recency, and Daily tasks resolution. + +| Command | Description | +|---------|-------------| + +| `docs` | List latest known document snapshots from local backups. | + +| `folders` | List folder metadata from local RxDB storage. | + +| `folder-docs` | List document metadata for one folder. | + +| `path-docs` | List documents for one folder path or folder id. | + +| `recent` | List recently active documents using backups, metadata, and sync logs. | + +| `daily` | Find Daily-style folders and list the documents inside them. | + +| `daily-current` | Resolve the current daily document from one Daily-style folder. | + + + +### Inspect + +Inspection commands for tree views, search, links, sync events, and live node targeting. + +| Command | Description | +|---------|-------------| + +| `show` | Show the latest backup tree for one document. | + +| `search` | Search latest backups for matching node text or note content. | + +| `changes` | Parse recent client-sync change events from local logs. | + +| `links` | Extract outbound Mubu document links from one document backup. | + +| `open-path` | Open one document by full path, suffix path, title, or doc id. | + +| `doc-nodes` | List live document nodes with node ids and update-target paths. | + +| `daily-nodes` | List live nodes from the current daily document in one step. | + + + +### Mutate + +Mutation commands for dry-run-first atomic live edits against the Mubu API. + +| Command | Description | +|---------|-------------| + +| `create-child` | Build or execute one child-node creation against the live Mubu API. 
| + +| `delete-node` | Build or execute one node deletion against the live Mubu API. | + +| `update-text` | Build or execute one text update against the live Mubu API. | + + + +### Session + +Session and state commands for current document/node context and local command history. + +| Command | Description | +|---------|-------------| + +| `status` | Show the current session state. | + +| `state-path` | Show the session state file path. | + +| `use-doc` | Persist the current document reference. | + +| `use-node` | Persist the current node reference. | + +| `use-daily` | Resolve and persist the current daily document reference. | + +| `clear-doc` | Clear the current document reference. | + +| `clear-node` | Clear the current node reference. | + +| `history` | Show recent command history stored in session state. | + + + +## Recommended Agent Workflow + +```text +discover daily-current --json + -> +inspect daily-nodes --query '' --json + -> +session use-doc '' + -> +mutate update-text / create-child / delete-node --json + -> +--execute only after payload inspection +``` + +## Safety Rules + +1. Prefer grouped commands for agent use; flat legacy commands remain for compatibility. +2. Use `--json` whenever an agent will parse the output. +3. Prefer `discover` or `inspect` commands before any `mutate` command. +4. Live mutations are dry-run by default and only execute with `--execute`. +5. Prefer `--node-id` and `--parent-node-id` over text matching. +6. `delete-node` removes the full targeted subtree. +7. Even same-text updates can still advance document version history. + +## Examples + + +### Interactive REPL Session + +Start an interactive session with persistent document and node context. + +```bash +cli-anything-mubu +# Enter commands interactively +# Use 'help' to see builtins +# Use session commands to persist current-doc/current-node +``` + + +### Discover Current Daily Note + +Resolve the current daily note and emit JSON output for an agent. 
+ +```bash +cli-anything-mubu --json discover daily-current +``` + + +### Dry-Run Atomic Update + +Inspect the exact outgoing payload before a live mutation. + +```bash +cli-anything-mubu mutate update-text 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'new text' --json +``` + + +## Session State + +The CLI maintains lightweight session state in JSON: + +- `current_doc` +- `current_node` +- local command history + +Use the `session` command group to inspect or update this state. + +## For AI Agents + +1. Start with `discover` or `inspect`, not `mutate`. +2. Use `session status --json` to recover persisted context. +3. Use grouped commands in generated prompts and automation. +4. Verify postconditions after any live mutation. +5. Read the package `TEST.md` and `README.md` when stricter operational detail is needed. + +## Version + +0.1.0 \ No newline at end of file diff --git a/mubu/agent-harness/cli_anything/mubu/tests/TEST.md b/mubu/agent-harness/cli_anything/mubu/tests/TEST.md new file mode 100644 index 000000000..6242b7014 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/TEST.md @@ -0,0 +1,481 @@ +# Mubu Live Bridge Test Plan And Results + +This file follows the CLI-Anything habit of keeping the test plan and the executed results in one place. 
+ +## Test Inventory Plan + +- `test_mubu_probe.py`: 26 unit / light integration tests planned +- `test_cli_entrypoint.py`: 13 subprocess / entrypoint tests planned +- `test_agent_harness.py`: 9 packaging / harness-layout tests planned +- `test_live_api.py`: 6 opt-in live-session tests planned for a later phase + +Current status: + +- `test_mubu_probe.py` exists and passes +- `test_cli_entrypoint.py` exists and passes +- `test_agent_harness.py` exists and passes +- canonical harness test modules now also exist under `agent-harness/cli_anything/mubu/tests/` +- `test_live_api.py` is not implemented yet because live mutation tests need explicit opt-in controls + +## Unit Test Plan + +### Module: `mubu_probe.py` + +Functions and behaviors covered now: + +- `extract_plain_text` + - HTML stripping + - segment-list flattening +- `load_latest_backups` + - newest snapshot selection per document +- `search_documents` + - text and note hit detection +- `parse_client_sync_line` + - `CHANGE` request parsing from sync logs +- `normalize_folder_record` + - parent/child refs and timestamps +- `normalize_document_meta_record` + - title/folder/timestamp normalization +- `extract_doc_links` + - Mubu mention link extraction +- `folder_documents` + - full folder path resolution + - ambiguous folder-name detection +- `resolve_document_reference` + - full document path resolution + - ambiguous title detection +- `show_document_by_reference` + - path-aware document open +- `looks_like_daily_title` + - daily-title detection and template exclusion +- `choose_current_daily_document` + - current daily selection logic +- `list_document_nodes` + - live-node flattening for agent targeting + - depth and query filtering +- `normalize_user_record` + - token/user normalization +- `latest_doc_member_context` + - newest member context selection +- `build_api_headers` + - desktop header shape +- `build_text_update_request` + - `/v3/api/colla/events` payload construction +- `node_path_to_api_path` + - 
conversion from simplified node paths to canonical API paths +- `build_create_child_request` + - create-event payload construction + +Edge cases covered now: + +- ambiguous folder names +- ambiguous document titles +- nested node paths +- query filtering on flattened nodes +- header normalization and request shape correctness +- insert-path expansion for child creation +- daily-title filtering and template exclusion + +Expected unit count: + +- 26 tests + +### Module: `test_cli_entrypoint.py` + +Behaviors covered now: + +- installed-or-module entrypoint resolution +- root help rendering +- REPL help rendering +- default no-arg REPL startup and clean exit +- default REPL banner includes the packaged canonical `SKILL.md` path +- REPL in-memory current-document state +- REPL persisted current-document state across processes +- REPL in-memory current-node state +- REPL persisted current-node state across processes +- REPL alias expansion for both `@doc` and `@node` +- persisted clear-doc behavior across processes +- persisted clear-node behavior across processes +- grouped `discover daily-current` respects the root `--json` flag +- `session status --json` exposes persisted state for agent recovery + +Expected subprocess count: + +- 13 tests + +### Module: `test_agent_harness.py` + +Behaviors covered now: + +- harness packaging files exist +- canonical source tree exists under `agent-harness/cli_anything/mubu/...` +- canonical test modules exist under `agent-harness/cli_anything/mubu/tests/...` +- harness `setup.py --name` reports the expected package name +- harness `setup.py --version` reports the expected version +- root `setup.py` targets the canonical `agent-harness` source tree +- both setup files declare the `click>=8.0` runtime dependency +- harness skill-generator assets exist +- harness skill generator can regenerate the packaged `SKILL.md` + +Expected packaging count: + +- 9 tests + +## E2E Test Plan + +These workflows are currently verified manually against 
the real local Mubu session instead of an automated live test file. The reason is safety: this bridge can mutate a real personal workspace, so execute-path automation should stay opt-in. + +Planned live scenarios: + +1. read recent documents from the local desktop profile +2. resolve `Workspace/Daily tasks` and identify the current daily note +3. enumerate live nodes inside the current daily note +4. dry-run a text update and inspect the exact outgoing payload +5. execute one same-text live update to validate auth/member/version wiring +6. re-fetch and verify `baseVersion` plus node text after mutation +7. dry-run one child creation to validate canonical create payload generation +8. resolve the current daily note in one step with a date-title-aware selector +9. enumerate live nodes from the current daily note in one step +10. dry-run one node deletion to validate canonical delete payload generation +11. execute a reversible scratch create-then-delete cycle to verify live cleanup + +What should be verified in later automated live tests: + +- active local auth can be loaded from the Mubu desktop profile +- `document/get` returns a live definition for the resolved document +- `daily-current` resolves the right daily note instead of templates or helper docs +- `daily-nodes` resolves the current daily note and returns live nodes in one pass +- `doc-nodes` returns stable node ids and paths +- `update-text --json` builds a correct dry-run payload +- `update-text --execute --json` returns success and verification data +- document version changes are observed after execution +- `create-child --json` builds a correct canonical `create` event payload +- `delete-node --json` builds a correct canonical `delete` event payload +- reversible scratch create/delete execution works end-to-end + +## Realistic Workflow Scenarios + +### Workflow 1: Daily Note Discovery + +- Simulates: Codex entering the same daily workspace the user is using +- Operations chained: + - `recent` + - 
`path-docs 'Workspace/Daily tasks'` +- Verified: + - folder path resolution + - correct daily-note document ids + - usable timestamps and recency data + +### Workflow 2: Inspect Before Mutate + +- Simulates: Codex locating the exact node to edit before sending any write +- Operations chained: + - `open-path 'Workspace/Daily tasks/26.03.16'` + - `doc-nodes 'Workspace/Daily tasks/26.03.16' --query 'ๆ—ฅๅฟ—ๆต'` +- Verified: + - live document lookup + - correct node id + - correct update-target path + +### Workflow 2.5: Current Daily Resolution + +- Simulates: Codex jumping directly to the user's current daily note +- Operations chained: + - `daily-current --json` +- Verified: + - date-like title filtering + - template exclusion + - latest-updated selection among daily-note candidates + +### Workflow 2.6: Current Daily Live Node Inspection + +- Simulates: Codex looking for an anchor inside today's daily note without manually resolving the path first +- Operations chained: + - `daily-nodes --query '...'` +- Verified: + - current daily-note resolution + - live document fetch + - node listing and query filtering in one step + +### Workflow 3: Atomic Text Update + +- Simulates: one safe, minimal live edit against the user's real workspace +- Operations chained: + - `update-text ... --json` + - `update-text ... --execute --json` + - live re-fetch verification +- Verified: + - auth loading + - member-context selection + - current `baseVersion` usage + - accepted `/v3/api/colla/events` payload + - visible post-write verification data + +### Workflow 4: Atomic Child Creation + +- Simulates: Codex adding one new child item under an existing outline node +- Operations chained: + - `doc-nodes ...` + - `create-child ... 
--json` +- Verified: + - parent node targeting + - child insertion index calculation + - canonical `children` path generation + - create-event payload shape + +### Workflow 5: Atomic Delete And Cleanup + +- Simulates: Codex removing one exact node after inspecting it or after a scratch verification create +- Operations chained: + - `delete-node ... --json` + - `create-child ... --execute --json` + - `delete-node ... --execute --json` +- Verified: + - parent id and delete index calculation + - canonical delete-event payload shape + - live create verification + - live delete verification + - post-delete absence of the scratch node + +## Test Results + +### Automated Unit Results + +Command: + +```bash +python3 -m unittest tests/test_mubu_probe.py tests/test_cli_entrypoint.py tests/test_agent_harness.py +``` + +Latest result: + +```text +................................................ +---------------------------------------------------------------------- +Ran 48 tests in 16.880s + +OK +``` + +### Syntax Verification + +Command: + +```bash +python3 -m py_compile mubu_probe.py cli_anything/mubu/mubu_cli.py cli_anything/mubu/__main__.py +python3 -m py_compile agent-harness/mubu_probe.py agent-harness/cli_anything/mubu/mubu_cli.py +python3 -m py_compile agent-harness/cli_anything/mubu/__main__.py agent-harness/setup.py +python3 -m py_compile tests/_canonical_loader.py tests/test_mubu_probe.py tests/test_cli_entrypoint.py tests/test_agent_harness.py +python3 -m py_compile agent-harness/cli_anything/mubu/tests/__init__.py +python3 -m py_compile agent-harness/cli_anything/mubu/tests/test_mubu_probe.py +python3 -m py_compile agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py +python3 -m py_compile agent-harness/cli_anything/mubu/tests/test_agent_harness.py +``` + +Latest result: + +- exit code `0` + +### Installed Entrypoint Verification + +Commands: + +```bash +.venv/bin/python -m pip install -e ./agent-harness +.venv/bin/python -m pip install -e . 
+.venv/bin/cli-anything-mubu --help +.venv/bin/cli-anything-mubu --json discover daily-current +.venv/bin/cli-anything-mubu session status --json +tmpdir=$(mktemp -d) +printf 'exit\n' | env CLI_ANYTHING_MUBU_STATE_DIR="$tmpdir" .venv/bin/cli-anything-mubu +``` + +Latest result: + +- both editable-install paths succeeded when run sequentially +- installed `--help` exposes grouped `discover` / `inspect` / `mutate` / `session` domains +- installed `discover daily-current` resolved the real daily note `Workspace/Daily tasks/26.03.16` +- installed `session status --json` returned persisted state successfully +- installed no-arg REPL started cleanly, displayed the packaged canonical skill path, and exited cleanly + +### Wheel Verification + +Commands: + +```bash +tmpdir=$(mktemp -d) +.venv/bin/python -m pip wheel --no-deps --wheel-dir "$tmpdir" ./agent-harness +unzip -l "$tmpdir"/cli_anything_mubu-0.1.0-py3-none-any.whl +``` + +Latest result: + +- wheel build succeeded +- wheel contains the packaged README, generated `skills/SKILL.md`, `tests/TEST.md`, canonical test modules, and `utils/repl_skin.py` + +Latest result: + +- pass + +### Install Verification + +Commands: + +```bash +.venv/bin/python -m pip install -e agent-harness +.venv/bin/python -m pip install -e . +``` + +Latest result: + +- both editable installs passed + +### Installed Entrypoint Verification + +Commands: + +```bash +.venv/bin/cli-anything-mubu daily-current --json +printf 'exit\n' | env CLI_ANYTHING_MUBU_STATE_DIR="$(mktemp -d)" .venv/bin/cli-anything-mubu +``` + +Latest result: + +- installed `daily-current --json` passed against the real local Mubu session +- installed REPL banner pointed to `agent-harness/cli_anything/mubu/skills/SKILL.md` + +### Wheel Packaging Verification + +Command: + +```bash +.venv/bin/python -m pip wheel --no-deps --wheel-dir "$(mktemp -d)" ./agent-harness +``` + +Latest result: + +- built successfully +- wheel contents include `mubu_probe.py`, `cli_anything/mubu/README.md`, 
`cli_anything/mubu/skills/SKILL.md`, `cli_anything/mubu/tests/TEST.md`, and `cli_anything/mubu/utils/repl_skin.py` + +### CLI Surface Verification + +Commands: + +```bash +python3 mubu_probe.py --help +python3 mubu_probe.py daily-current --help +python3 mubu_probe.py daily-nodes --help +python3 mubu_probe.py doc-nodes --help +python3 mubu_probe.py create-child --help +python3 mubu_probe.py delete-node --help +python3 mubu_probe.py update-text --help +``` + +Latest result: + +- pass +- command list now includes `daily-current`, `daily-nodes`, `doc-nodes`, `create-child`, and `delete-node` +- help for `daily-current`, `daily-nodes`, `update-text`, `doc-nodes`, `create-child`, and `delete-node` renders correctly + +### Installed Entrypoint Verification + +Commands: + +```bash +python3 -m venv .venv +.venv/bin/python -m pip install -e . +.venv/bin/cli-anything-mubu --help +.venv/bin/cli-anything-mubu repl --help +tmpdir=$(mktemp -d) && env CLI_ANYTHING_MUBU_STATE_DIR="$tmpdir" /usr/bin/zsh -lc "printf 'exit\n' | .venv/bin/cli-anything-mubu" +.venv/bin/cli-anything-mubu daily-current --json +.venv/bin/python -m pip install -e ./agent-harness +python3 agent-harness/setup.py --name +python3 agent-harness/setup.py --version +``` + +Latest result: + +- editable install succeeded in project-local `.venv` +- `cli-anything-mubu --help` renders wrapper + subcommand help +- `cli-anything-mubu repl --help` renders REPL help +- no-arg `cli-anything-mubu` enters the REPL, exposes app/skill/history banner context, and exits cleanly on `exit` +- REPL can store and report the current document reference during a session +- REPL can persist `current-doc` across independent processes when given the same state directory +- REPL can store and report the current node reference during a session +- REPL can persist `current-node` across independent processes when given the same state directory +- REPL can expand both `@doc` and `@node` into a real dry-run command +- installed console script 
can resolve the current daily note +- `agent-harness/` now works as a real editable-install root +- harness setup metadata reports the correct package identity + +### Real Local Session Checks + +Commands executed on the real machine: + +```bash +python3 mubu_probe.py path-docs 'Workspace/Daily tasks' --limit 5 --json +python3 mubu_probe.py daily-current --json +python3 mubu_probe.py daily-nodes --query 'ๆ—ฅๅฟ—ๆต' --json +python3 mubu_probe.py doc-nodes 'Workspace/Daily tasks/26.03.16' --query 'ๆ—ฅๅฟ—ๆต' --json +python3 mubu_probe.py create-child 'Workspace/Daily tasks/26.03.16' --parent-node-id node-demo1 --text 'CLI bridge dry run child' --note 'not executed' --json +python3 mubu_probe.py delete-node 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --json +python3 mubu_probe.py update-text 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'ๆ—ฅๅฟ—ๆต' --json +python3 mubu_probe.py update-text 'Workspace/Daily tasks/26.03.16' --match-text 'ๆ—ฅๅฟ—ๆต' --text 'ๆ—ฅๅฟ—ๆต' --execute --json +python3 - <<'PY' +# create-child --execute scratch node, then delete-node --execute that exact node id +PY +``` + +Observed results: + +- `path-docs` resolved folder id `folder-daily-01` +- current daily doc resolved to `doc-demo-01` +- `daily-current` resolved the same current daily path `Workspace/Daily tasks/26.03.16` in one step +- `daily-nodes` resolved the same current daily note and returned live node `node-demo1` +- `doc-nodes` resolved node id `node-demo1`, path `["nodes", 3, 0]`, and api path `["nodes", 3, "children", 0]` +- `create-child` dry-run resolved parent `node-demo1`, child insert index `4`, and child path `["nodes", 3, "children", 0, "children", 4]` +- `delete-node` dry-run resolved parent `qv9klzkq2L`, delete index `0`, and api path `["nodes", 3, "children", 0]` +- dry-run update produced the expected `CHANGE` payload +- real execute returned success +- live document version advanced from `256` to `257` +- post-fetch verification confirmed the 
node text still read `ๆ—ฅๅฟ—ๆต` +- reversible scratch create/delete advanced live version from `261` to `262` to `263` +- scratch node `hUVCZEUf3R` was present after create and absent after delete + +## Summary Statistics + +- automated tests: 48 / 48 pass +- syntax check: pass +- help/CLI surface checks: pass +- isolated install / entrypoint checks: pass +- targeted real-session checks: pass + +## Coverage Notes + +Strong coverage: + +- local parsing and normalization logic +- path resolution +- live request header construction +- live text-update payload construction +- inspect-before-mutate node targeting +- canonical create-child payload construction +- canonical delete-node payload construction +- current-daily selection logic +- packaged entrypoint and default REPL behavior +- REPL persisted current-document context +- REPL persisted current-node context +- REPL skill-path/history banner context +- harness install-root metadata and install path + +Current gaps: + +- no automated live execute suite yet +- no rollback/undo tests yet +- no move primitive yet +- no direct `daily-open` shortcut yet + +Conclusion: + +- the current bridge is verified enough for careful interactive use by Codex +- it is not yet at full CLI-Anything packaged-harness maturity diff --git a/mubu/agent-harness/cli_anything/mubu/tests/__init__.py b/mubu/agent-harness/cli_anything/mubu/tests/__init__.py new file mode 100644 index 000000000..5493f2a2b --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/__init__.py @@ -0,0 +1 @@ +"""Canonical test package for cli-anything-mubu.""" diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py b/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py new file mode 100644 index 000000000..9e1e9945c --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py @@ -0,0 +1,134 @@ +import subprocess +import sys +import unittest +from pathlib import Path + + +SOFTWARE_ROOT = 
Path(__file__).resolve().parents[4] +HARNESS_ROOT = SOFTWARE_ROOT / "agent-harness" +STANDALONE_ROOT = SOFTWARE_ROOT if (SOFTWARE_ROOT / "setup.py").is_file() else None + + +def _find_contribution_root() -> Path: + candidates = [SOFTWARE_ROOT, *SOFTWARE_ROOT.parents] + for candidate in candidates: + if (candidate / "CONTRIBUTING.md").is_file() and (candidate / "registry.json").is_file(): + return candidate + raise AssertionError("unable to locate contribution root containing CONTRIBUTING.md and registry.json") + + +CONTRIBUTION_ROOT = _find_contribution_root() + + +class AgentHarnessPackagingTests(unittest.TestCase): + def test_agent_harness_packaging_files_exist(self): + self.assertTrue((HARNESS_ROOT / "setup.py").is_file()) + self.assertTrue((HARNESS_ROOT / "pyproject.toml").is_file()) + + def test_agent_harness_contains_canonical_package_tree(self): + expected_paths = [ + HARNESS_ROOT / "cli_anything" / "mubu" / "__init__.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "__main__.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "mubu_cli.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "utils" / "__init__.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "utils" / "repl_skin.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "skills" / "SKILL.md", + HARNESS_ROOT / "cli_anything" / "mubu" / "tests" / "TEST.md", + ] + for path in expected_paths: + self.assertTrue(path.is_file(), msg=f"missing canonical harness file: {path}") + + def test_agent_harness_contains_canonical_test_modules(self): + expected_paths = [ + HARNESS_ROOT / "cli_anything" / "mubu" / "tests" / "test_mubu_probe.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "tests" / "test_cli_entrypoint.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "tests" / "test_agent_harness.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "tests" / "test_core.py", + HARNESS_ROOT / "cli_anything" / "mubu" / "tests" / "test_full_e2e.py", + ] + for path in expected_paths: + self.assertTrue(path.is_file(), msg=f"missing canonical 
harness test: {path}") + + def test_contribution_files_exist(self): + self.assertTrue((CONTRIBUTION_ROOT / "CONTRIBUTING.md").is_file()) + self.assertTrue((CONTRIBUTION_ROOT / "registry.json").is_file()) + + def test_agent_harness_setup_reports_expected_name(self): + result = subprocess.run( + [sys.executable, str(HARNESS_ROOT / "setup.py"), "--name"], + cwd=HARNESS_ROOT, + capture_output=True, + text=True, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertEqual(result.stdout.strip(), "cli-anything-mubu") + + def test_agent_harness_setup_reports_expected_version(self): + result = subprocess.run( + [sys.executable, str(HARNESS_ROOT / "setup.py"), "--version"], + cwd=HARNESS_ROOT, + capture_output=True, + text=True, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertEqual(result.stdout.strip(), "0.1.0") + + def test_root_setup_targets_canonical_harness_source(self): + if STANDALONE_ROOT is None: + self.assertFalse((SOFTWARE_ROOT / "setup.py").exists()) + self.assertTrue((SOFTWARE_ROOT / "agent-harness" / "setup.py").is_file()) + return + setup_text = (STANDALONE_ROOT / "setup.py").read_text() + self.assertIn('find_namespace_packages(where="agent-harness"', setup_text) + self.assertIn('package_dir={"": "agent-harness"}', setup_text) + + def test_setup_files_declare_click_runtime_dependency(self): + harness_setup = (HARNESS_ROOT / "setup.py").read_text() + if STANDALONE_ROOT is not None: + root_setup = (STANDALONE_ROOT / "setup.py").read_text() + self.assertIn('"click>=8.0"', root_setup) + self.assertIn('"click>=8.0"', harness_setup) + + def test_skill_generator_assets_exist(self): + self.assertTrue((HARNESS_ROOT / "skill_generator.py").is_file()) + self.assertTrue((HARNESS_ROOT / "templates" / "SKILL.md.template").is_file()) + + def test_repl_skin_matches_cli_anything_copy_shape(self): + repl_skin = (HARNESS_ROOT / "cli_anything" / "mubu" / "utils" / "repl_skin.py").read_text() + self.assertIn('"""cli-anything REPL 
Skin โ€” Unified terminal interface for all CLI harnesses.', repl_skin) + self.assertIn("Copy this file into your CLI package at:", repl_skin) + self.assertIn("skin.print_goodbye()", repl_skin) + + def test_skill_generator_can_regenerate_skill_from_canonical_harness(self): + output_path = HARNESS_ROOT / "tmp-generated-SKILL.md" + try: + result = subprocess.run( + [ + sys.executable, + str(HARNESS_ROOT / "skill_generator.py"), + str(HARNESS_ROOT), + "--output", + str(output_path), + ], + cwd=HARNESS_ROOT, + capture_output=True, + text=True, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + content = output_path.read_text() + self.assertIn('name: >-\n cli-anything-mubu', content) + self.assertIn("## Command Groups", content) + self.assertIn("### Discover", content) + self.assertNotIn("### Cli", content) + self.assertIn("| `docs` |", content) + self.assertIn("`daily-current`", content) + self.assertIn("`update-text`", content) + self.assertIn("### Session", content) + self.assertIn("| `status` |", content) + self.assertIn("| `state-path` |", content) + finally: + output_path.unlink(missing_ok=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py b/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py new file mode 100644 index 000000000..9bf3a7ea4 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py @@ -0,0 +1,221 @@ +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from pathlib import Path + +from cli_anything.mubu.mubu_cli import expand_repl_aliases_with_state +from mubu_probe import DEFAULT_BACKUP_ROOT, DEFAULT_STORAGE_ROOT + + +REPO_ROOT = Path(__file__).resolve().parents[4] +SAMPLE_DOC_REF = "workspace/Daily tasks/2026.03.18" +SAMPLE_NODE_ID = "node-demo1" +HAS_LOCAL_DATA = DEFAULT_BACKUP_ROOT.is_dir() and DEFAULT_STORAGE_ROOT.is_dir() + + +def resolve_cli() -> list[str]: + 
installed = shutil.which("cli-anything-mubu") + if installed: + return [installed] + return [sys.executable, "-m", "cli_anything.mubu"] + + +class CliEntrypointTests(unittest.TestCase): + CLI_BASE = resolve_cli() + + def run_cli(self, args, input_text=None, extra_env=None): + env = os.environ.copy() + env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + if extra_env: + env.update(extra_env) + return subprocess.run( + self.CLI_BASE + args, + input=input_text, + capture_output=True, + text=True, + env=env, + ) + + def test_help_renders_root_commands(self): + result = self.run_cli(["--help"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn("discover", result.stdout) + self.assertIn("inspect", result.stdout) + self.assertIn("mutate", result.stdout) + self.assertIn("session", result.stdout) + self.assertIn("daily-current", result.stdout) + self.assertIn("create-child", result.stdout) + self.assertIn("delete-node", result.stdout) + + def test_repl_help_renders(self): + result = self.run_cli(["repl", "--help"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn("Interactive REPL", result.stdout) + self.assertIn("use-node", result.stdout) + + def test_default_entrypoint_starts_repl_and_can_exit(self): + result = self.run_cli([], input_text="exit\n") + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn("Mubu REPL", result.stdout) + + def test_default_entrypoint_banner_includes_skill_path(self): + with tempfile.TemporaryDirectory() as tmpdir: + result = self.run_cli( + [], + input_text="exit\n", + extra_env={"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir}, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn("Skill:", result.stdout) + self.assertIn( + str(REPO_ROOT / "agent-harness" / "cli_anything" / "mubu" / "skills" / "SKILL.md"), + result.stdout, + ) + + def test_repl_can_store_current_doc_reference(self): + result = self.run_cli( + [], + 
input_text=f"use-doc '{SAMPLE_DOC_REF}'\ncurrent-doc\nexit\n", + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn(f"Current doc: {SAMPLE_DOC_REF}", result.stdout) + + def test_repl_can_store_current_node_reference(self): + with tempfile.TemporaryDirectory() as tmpdir: + result = self.run_cli( + [], + input_text=f"use-node {SAMPLE_NODE_ID}\ncurrent-node\nexit\n", + extra_env={"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir}, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn(f"Current node: {SAMPLE_NODE_ID}", result.stdout) + + def test_repl_persists_current_doc_between_processes(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + + first = self.run_cli( + [], + input_text=f"use-doc '{SAMPLE_DOC_REF}'\nexit\n", + extra_env=env, + ) + self.assertEqual(first.returncode, 0, msg=first.stderr) + + second = self.run_cli( + [], + input_text="current-doc\nexit\n", + extra_env=env, + ) + self.assertEqual(second.returncode, 0, msg=second.stderr) + self.assertIn(f"Current doc: {SAMPLE_DOC_REF}", second.stdout) + + def test_repl_persists_current_node_between_processes(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + + first = self.run_cli( + [], + input_text=f"use-node {SAMPLE_NODE_ID}\nexit\n", + extra_env=env, + ) + self.assertEqual(first.returncode, 0, msg=first.stderr) + + second = self.run_cli( + [], + input_text="current-node\nexit\n", + extra_env=env, + ) + self.assertEqual(second.returncode, 0, msg=second.stderr) + self.assertIn(f"Current node: {SAMPLE_NODE_ID}", second.stdout) + + def test_repl_aliases_expand_current_doc_and_node(self): + expanded = expand_repl_aliases_with_state( + ["delete-node", "@doc", "--node-id", "@node", "--from", "@current"], + {"current_doc": SAMPLE_DOC_REF, "current_node": SAMPLE_NODE_ID}, + ) + self.assertEqual( + expanded, + ["delete-node", SAMPLE_DOC_REF, "--node-id", SAMPLE_NODE_ID, 
"--from", SAMPLE_DOC_REF], + ) + + def test_repl_clear_doc_persists_between_processes(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + + self.run_cli( + [], + input_text=f"use-doc '{SAMPLE_DOC_REF}'\nexit\n", + extra_env=env, + ) + + cleared = self.run_cli( + [], + input_text="clear-doc\nexit\n", + extra_env=env, + ) + self.assertEqual(cleared.returncode, 0, msg=cleared.stderr) + + final = self.run_cli( + [], + input_text="current-doc\nexit\n", + extra_env=env, + ) + self.assertEqual(final.returncode, 0, msg=final.stderr) + self.assertIn("Current doc: ", final.stdout) + + def test_repl_clear_node_persists_between_processes(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + + self.run_cli( + [], + input_text=f"use-node {SAMPLE_NODE_ID}\nexit\n", + extra_env=env, + ) + + cleared = self.run_cli( + [], + input_text="clear-node\nexit\n", + extra_env=env, + ) + self.assertEqual(cleared.returncode, 0, msg=cleared.stderr) + + final = self.run_cli( + [], + input_text="current-node\nexit\n", + extra_env=env, + ) + self.assertEqual(final.returncode, 0, msg=final.stderr) + self.assertIn("Current node: ", final.stdout) + + @unittest.skipUnless(HAS_LOCAL_DATA, "Mubu local data directories not found") + def test_grouped_discover_daily_current_supports_global_json_flag(self): + result = self.run_cli(["--json", "discover", "daily-current"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn('"doc_path"', result.stdout) + + def test_session_status_reports_json_state(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + self.run_cli( + ["session", "use-doc", SAMPLE_DOC_REF], + extra_env=env, + ) + self.run_cli( + ["session", "use-node", SAMPLE_NODE_ID], + extra_env=env, + ) + result = self.run_cli( + ["session", "status", "--json"], + extra_env=env, + ) + self.assertEqual(result.returncode, 0, 
msg=result.stderr) + self.assertIn(f'"current_doc": "{SAMPLE_DOC_REF}"', result.stdout) + self.assertIn(f'"current_node": "{SAMPLE_NODE_ID}"', result.stdout) + + +if __name__ == "__main__": + unittest.main() diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_core.py b/mubu/agent-harness/cli_anything/mubu/tests/test_core.py new file mode 100644 index 000000000..17443e9e9 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_core.py @@ -0,0 +1,301 @@ +"""Core function contract tests for mubu_probe. + +Pure logic tests โ€” no I/O, no network, no live Mubu API. +Covers utility and transformation functions not already exercised by test_mubu_probe.py. +""" + +import json +import tempfile +import unittest +from pathlib import Path + +from mubu_probe import ( + ambiguous_error_message, + build_folder_indexes, + candidate_appdata_roots, + dedupe_latest_records, + default_mubu_data_root, + enrich_document_meta, + extract_plain_text, + generate_node_id, + infer_title, + iter_nodes, + looks_like_daily_title, + maybe_plain_text_to_html, + node_path_to_api_path, + normalize_document_meta_record, + normalize_folder_record, + normalized_lookup_key, + numeric_values, + parse_child_refs, + parse_event_timestamp_ms, + parse_revision_generation, + plain_text_to_html, + resolve_node_at_path, + rich_text_to_html, + serialize_node, + timestamp_ms_to_iso, +) + + +class PlainTextExtractionTests(unittest.TestCase): + def test_none_returns_empty(self): + self.assertEqual(extract_plain_text(None), "") + + def test_dict_with_text_key(self): + self.assertEqual(extract_plain_text({"text": "hello"}), "hello") + + def test_dict_without_text_key(self): + self.assertEqual(extract_plain_text({"foo": "bar"}), "") + + def test_nested_segment_list(self): + segments = [{"type": 1, "text": "A"}, {"type": 1, "text": "B"}] + self.assertEqual(extract_plain_text(segments), "AB") + + def test_html_entity_unescaping(self): + self.assertEqual(extract_plain_text("a&b"), "a&b") + + def 
test_zero_width_chars_removed(self): + self.assertEqual(extract_plain_text("\u200bhello\u200b"), "hello") + + +class HtmlConversionTests(unittest.TestCase): + def test_plain_text_to_html_wraps_in_span(self): + result = plain_text_to_html("hello world") + self.assertIn("hello world", result) + + def test_maybe_plain_text_to_html_always_wraps(self): + # maybe_plain_text_to_html wraps any input (including existing html) in a span + result = maybe_plain_text_to_html("plain text") + self.assertIn("", result) + self.assertIn("plain text", result) + + def test_rich_text_to_html_handles_segment_list(self): + segments = [{"type": 1, "text": "hello"}, {"type": 1, "text": " world"}] + result = rich_text_to_html(segments) + self.assertIn("hello", result) + self.assertIn("world", result) + + +class NodeIdGenerationTests(unittest.TestCase): + def test_generates_string_of_expected_length(self): + node_id = generate_node_id() + self.assertIsInstance(node_id, str) + self.assertEqual(len(node_id), 10) + + def test_generates_unique_ids(self): + ids = {generate_node_id() for _ in range(100)} + self.assertEqual(len(ids), 100) + + +class NodePathConversionTests(unittest.TestCase): + def test_single_level_path(self): + self.assertEqual(node_path_to_api_path(("nodes", 0)), ["nodes", 0]) + + def test_multi_level_path_inserts_children(self): + self.assertEqual( + node_path_to_api_path(("nodes", 1, 2, 3)), + ["nodes", 1, "children", 2, "children", 3], + ) + + +class NodeIterationTests(unittest.TestCase): + def test_iter_nodes_yields_all_nodes_depth_first(self): + data = { + "nodes": [ + { + "id": "a", + "text": "A", + "children": [ + {"id": "b", "text": "B", "children": []}, + ], + }, + {"id": "c", "text": "C", "children": []}, + ] + } + ids = [node["id"] for _, node in iter_nodes(data["nodes"])] + self.assertEqual(ids, ["a", "b", "c"]) + + def test_iter_nodes_provides_correct_paths(self): + data = { + "nodes": [ + { + "id": "a", + "children": [ + {"id": "b", "children": []}, + ], + }, + ] + 
} + paths = [("nodes", *path) for path, _ in iter_nodes(data["nodes"])] + self.assertEqual(paths, [("nodes", 0), ("nodes", 0, 0)]) + + +class ResolveNodeAtPathTests(unittest.TestCase): + def test_resolves_root_node(self): + data = {"nodes": [{"id": "root", "children": []}]} + node = resolve_node_at_path(data, ("nodes", 0)) + self.assertEqual(node["id"], "root") + + def test_resolves_nested_child(self): + data = { + "nodes": [ + { + "id": "root", + "children": [ + {"id": "child", "children": []}, + ], + } + ] + } + node = resolve_node_at_path(data, ("nodes", 0, 0)) + self.assertEqual(node["id"], "child") + + +class SerializeNodeTests(unittest.TestCase): + def test_serialize_node_flattens_text(self): + node = { + "id": "n1", + "text": "hello", + "note": "note", + "modified": 100, + "children": [], + } + result = serialize_node(node, depth=0) + self.assertEqual(result["id"], "n1") + self.assertEqual(result["text"], "hello") + self.assertEqual(result["note"], "note") + self.assertEqual(result["modified"], 100) + self.assertEqual(result["children"], []) + + +class FolderIndexTests(unittest.TestCase): + def test_build_folder_indexes_creates_by_id_and_folder_paths(self): + folders = [ + {"folder_id": "root", "name": "Root", "parent_id": "0"}, + {"folder_id": "child", "name": "Child", "parent_id": "root"}, + ] + by_id, folder_paths = build_folder_indexes(folders) + self.assertIn("root", by_id) + self.assertIn("child", by_id) + self.assertEqual(folder_paths.get("root"), "Root") + self.assertEqual(folder_paths.get("child"), "Root/Child") + + +class DailyTitleTests(unittest.TestCase): + def test_date_range_titles(self): + self.assertTrue(looks_like_daily_title("26.03.16")) + self.assertTrue(looks_like_daily_title("26.3.8-3.9")) + + def test_rejects_non_date_titles(self): + self.assertFalse(looks_like_daily_title("DDL่กจ")) + self.assertFalse(looks_like_daily_title("ๆจกๆฟๆ›ดๆ–ฐ")) + + def test_rejects_template_suffix(self): + 
self.assertFalse(looks_like_daily_title("26.2.22ๆจกๆฟๆ›ดๆ–ฐ")) + + +class NormalizationHelperTests(unittest.TestCase): + def test_parse_child_refs_handles_json_string(self): + raw = '[{"id":"a","type":"doc"},{"id":"b","type":"folder"}]' + refs = parse_child_refs(raw) + self.assertEqual(len(refs), 2) + self.assertEqual(refs[0]["id"], "a") + + def test_parse_child_refs_handles_list(self): + refs = parse_child_refs([{"id": "x"}]) + self.assertEqual(refs[0]["id"], "x") + + def test_parse_child_refs_handles_empty(self): + self.assertEqual(parse_child_refs(None), []) + self.assertEqual(parse_child_refs(""), []) + + def test_normalized_lookup_key(self): + self.assertEqual(normalized_lookup_key("Hello World"), "hello world") + + def test_numeric_values_extracts_ints(self): + raw = {"|e": 100, "|z": "200", "|m": None, "other": "abc"} + result = numeric_values(raw["|e"], raw["|z"], raw["|m"], raw["other"]) + self.assertEqual(result, [100]) + + def test_parse_revision_generation(self): + self.assertEqual(parse_revision_generation("2792-d896b5c6"), 2792) + self.assertEqual(parse_revision_generation("invalid"), 0) + self.assertEqual(parse_revision_generation(None), 0) + + +class TimestampConversionTests(unittest.TestCase): + def test_timestamp_ms_to_iso(self): + result = timestamp_ms_to_iso(1710000000000) + self.assertIsInstance(result, str) + # Timezone dependent; just check date is in March 2024 + self.assertIn("2024-03-", result) + + def test_parse_event_timestamp_ms(self): + result = parse_event_timestamp_ms("2026-03-17T17:18:40.006") + self.assertIsInstance(result, (int, float)) + self.assertGreater(result, 0) + + +class DefaultPathDiscoveryTests(unittest.TestCase): + def test_candidate_appdata_roots_prefers_explicit_environment(self): + env = { + "APPDATA": "/tmp/appdata", + "USERPROFILE": "/tmp/profile", + "USER": "alice", + } + candidates = candidate_appdata_roots(env=env, home=Path("/home/alice"), mount_root=Path("/tmp/users")) + self.assertEqual(candidates[0], 
Path("/tmp/appdata")) + self.assertIn(Path("/tmp/profile/AppData/Roaming"), candidates) + self.assertIn(Path("/tmp/users/alice/AppData/Roaming"), candidates) + + def test_default_mubu_data_root_uses_first_existing_candidate(self): + with tempfile.TemporaryDirectory() as tmpdir: + mount_root = Path(tmpdir) / "Users" + roaming = mount_root / "alice" / "AppData" / "Roaming" + roaming.mkdir(parents=True) + root = default_mubu_data_root(env={}, home=Path("/home/alice"), mount_root=mount_root) + self.assertEqual(root, roaming / "Mubu" / "mubu_app_data" / "mubu_data") + + +class DedupeLatestRecordsTests(unittest.TestCase): + def test_keeps_highest_revision(self): + records = [ + {"id": "a", "_rev": "1-abc"}, + {"id": "a", "_rev": "3-def"}, + {"id": "a", "_rev": "2-ghi"}, + {"id": "b", "_rev": "1-xyz"}, + ] + result = dedupe_latest_records(records) + by_id = {r["id"]: r for r in result} + self.assertEqual(len(result), 2) + self.assertEqual(by_id["a"]["_rev"], "3-def") + + +class AmbiguousErrorMessageTests(unittest.TestCase): + def test_formats_readable_message(self): + candidates = [ + {"path": "Workspace/Daily tasks"}, + {"path": "Archive/Daily tasks"}, + ] + msg = ambiguous_error_message("folder", "Daily tasks", candidates, "path") + self.assertIn("Daily tasks", msg) + self.assertIn("Workspace", msg) + self.assertIn("Archive", msg) + + +class EnrichDocumentMetaTests(unittest.TestCase): + def test_adds_folder_path(self): + meta = {"doc_id": "d1", "folder_id": "f1", "title": "Doc"} + folders = [ + {"folder_id": "root", "name": "Root", "parent_id": "0"}, + {"folder_id": "f1", "name": "Sub", "parent_id": "root"}, + ] + _, folder_paths = build_folder_indexes(folders) + enriched = enrich_document_meta(meta, folder_paths) + self.assertIn("Sub", enriched.get("folder_path", "")) + self.assertIn("Doc", enriched.get("doc_path", "")) + + +if __name__ == "__main__": + unittest.main() diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py 
b/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py new file mode 100644 index 000000000..fd842dc68 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py @@ -0,0 +1,226 @@ +"""Full end-to-end tests for cli-anything-mubu. + +These tests invoke the CLI against real local Mubu desktop data. +They require the Mubu desktop app to have been used on this machine +so that backup, storage, and log directories exist. + +Tests are skipped automatically when local data directories are missing. +""" + +import json +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from pathlib import Path + + +REPO_ROOT = Path(__file__).resolve().parents[4] + +# Import mubu_probe defaults for path detection +sys.path.insert(0, str(REPO_ROOT / "agent-harness")) +try: + from mubu_probe import DEFAULT_BACKUP_ROOT, DEFAULT_LOG_ROOT, DEFAULT_STORAGE_ROOT +finally: + sys.path.pop(0) + +HAS_LOCAL_DATA = ( + DEFAULT_BACKUP_ROOT.is_dir() + and DEFAULT_STORAGE_ROOT.is_dir() +) + +SKIP_REASON = "Mubu local data directories not found" + + +def resolve_cli() -> list[str]: + installed = shutil.which("cli-anything-mubu") + if installed: + return [installed] + return [sys.executable, "-m", "cli_anything.mubu"] + + +@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +class DiscoverE2ETests(unittest.TestCase): + CLI_BASE = resolve_cli() + + def run_cli(self, args: list[str]) -> subprocess.CompletedProcess: + env = os.environ.copy() + env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + return subprocess.run( + self.CLI_BASE + args, + capture_output=True, + text=True, + env=env, + timeout=30, + ) + + def test_docs_returns_json_list(self): + result = self.run_cli(["docs", "--limit", "3", "--json"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIsInstance(data, list) + self.assertGreater(len(data), 0) + self.assertIn("doc_id", data[0]) + + def 
test_folders_returns_json_list(self): + result = self.run_cli(["folders", "--json"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIsInstance(data, list) + self.assertGreater(len(data), 0) + self.assertIn("folder_id", data[0]) + + def test_recent_returns_json_list(self): + result = self.run_cli(["recent", "--limit", "3", "--json"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIsInstance(data, list) + self.assertGreater(len(data), 0) + + def test_daily_current_returns_doc_path(self): + result = self.run_cli(["daily-current", "--json"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + # Response wraps document info in a nested structure + doc = data.get("document", data) + self.assertIn("doc_path", doc) + self.assertIn("Daily tasks", doc["doc_path"]) + + +@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +class InspectE2ETests(unittest.TestCase): + CLI_BASE = resolve_cli() + + def run_cli(self, args: list[str]) -> subprocess.CompletedProcess: + env = os.environ.copy() + env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + return subprocess.run( + self.CLI_BASE + args, + capture_output=True, + text=True, + env=env, + timeout=30, + ) + + def test_search_finds_results(self): + result = self.run_cli(["search", "ๆ—ฅ", "--limit", "3", "--json"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIsInstance(data, list) + + def test_daily_nodes_returns_node_list(self): + result = self.run_cli(["daily-nodes", "--json"]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIn("nodes", data) + self.assertIsInstance(data["nodes"], list) + + +@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +class SessionE2ETests(unittest.TestCase): + CLI_BASE = resolve_cli() + + def 
run_cli(self, args: list[str], input_text: str | None = None, extra_env: dict | None = None) -> subprocess.CompletedProcess: + env = os.environ.copy() + env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + if extra_env: + env.update(extra_env) + return subprocess.run( + self.CLI_BASE + args, + input=input_text, + capture_output=True, + text=True, + env=env, + timeout=30, + ) + + def test_session_use_daily_sets_current_doc(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + self.run_cli(["session", "use-daily"], extra_env=env) + result = self.run_cli(["session", "status", "--json"], extra_env=env) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIsNotNone(data.get("current_doc")) + self.assertIn("Daily tasks", data["current_doc"]) + + def test_repl_use_daily_then_daily_nodes(self): + with tempfile.TemporaryDirectory() as tmpdir: + env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + result = self.run_cli( + [], + input_text="use-daily\ndaily-nodes --json\nexit\n", + extra_env=env, + ) + self.assertEqual(result.returncode, 0, msg=result.stderr) + self.assertIn('"nodes"', result.stdout) + + +@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +class MutateDryRunE2ETests(unittest.TestCase): + """Test mutation commands in dry-run mode (no --execute).""" + + CLI_BASE = resolve_cli() + + def run_cli(self, args: list[str]) -> subprocess.CompletedProcess: + env = os.environ.copy() + env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + return subprocess.run( + self.CLI_BASE + args, + capture_output=True, + text=True, + env=env, + timeout=30, + ) + + def _resolve_daily_node(self) -> tuple[str, str]: + """Helper: get a stable daily document reference and first node id.""" + result = self.run_cli(["daily-nodes", "--json"]) + data = json.loads(result.stdout) + doc = data.get("document", data) + doc_ref = doc.get("doc_id") or 
doc["doc_path"] + node_id = data["nodes"][0]["node_id"] + return doc_ref, node_id + + def test_update_text_dry_run(self): + doc_ref, node_id = self._resolve_daily_node() + result = self.run_cli([ + "update-text", doc_ref, + "--node-id", node_id, + "--text", "dry run test", + "--json", + ]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIn("request", data) + self.assertFalse(data.get("executed", False)) + + def test_create_child_dry_run(self): + doc_ref, node_id = self._resolve_daily_node() + result = self.run_cli([ + "create-child", doc_ref, + "--parent-node-id", node_id, + "--text", "dry run child", + "--json", + ]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertIn("request", data) + self.assertFalse(data.get("executed", False)) + + def test_delete_node_dry_run(self): + doc_ref, node_id = self._resolve_daily_node() + result = self.run_cli([ + "delete-node", doc_ref, + "--node-id", node_id, + "--json", + ]) + self.assertEqual(result.returncode, 0, msg=result.stderr) + data = json.loads(result.stdout) + self.assertFalse(data.get("executed", False)) + + +if __name__ == "__main__": + unittest.main() diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py b/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py new file mode 100644 index 000000000..097024a23 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py @@ -0,0 +1,533 @@ +import json +import tempfile +import unittest +from pathlib import Path + +from mubu_probe import ( + build_api_headers, + build_create_child_request, + build_delete_node_request, + build_text_update_request, + choose_current_daily_document, + extract_doc_links, + extract_plain_text, + folder_documents, + latest_doc_member_context, + list_document_nodes, + load_latest_backups, + looks_like_daily_title, + node_path_to_api_path, + normalize_document_meta_record, + 
normalize_folder_record, + normalize_user_record, + parent_context_for_path, + parse_client_sync_line, + resolve_document_reference, + search_documents, + show_document_by_reference, +) + + +class ExtractPlainTextTests(unittest.TestCase): + def test_extract_plain_text_handles_html_and_segment_lists(self): + self.assertEqual(extract_plain_text("็ฎ€ๅކๅšไธ€ไธ‹"), "็ฎ€ๅކๅšไธ€ไธ‹") + self.assertEqual( + extract_plain_text( + [ + {"type": 1, "text": "็ฎ€ๅކ"}, + {"type": 1, "text": "ๆ›ดๆ–ฐ"}, + ] + ), + "็ฎ€ๅކๆ›ดๆ–ฐ", + ) + + +class BackupLoadingTests(unittest.TestCase): + def test_load_latest_backups_picks_newest_file_per_document(self): + with tempfile.TemporaryDirectory() as tmpdir: + root = Path(tmpdir) + doc_dir = root / "docA" + doc_dir.mkdir() + + older = doc_dir / "2026-03-01 10'00.json" + newer = doc_dir / "2026-03-01 11'00.json" + older.write_text(json.dumps({"nodes": [{"text": "ๆ—ง", "children": []}]})) + newer.write_text(json.dumps({"nodes": [{"text": "ๆ–ฐ", "children": []}]})) + + older.touch() + newer.touch() + + docs = load_latest_backups(root) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["doc_id"], "docA") + self.assertTrue(docs[0]["backup_file"].endswith("11'00.json")) + self.assertEqual(docs[0]["title"], "ๆ–ฐ") + + +class SearchTests(unittest.TestCase): + def test_search_documents_finds_text_and_note(self): + docs = [ + { + "doc_id": "docA", + "backup_file": "/tmp/docA.json", + "title": "้กน็›ฎ่ฎกๅˆ’", + "data": { + "nodes": [ + { + "id": "n1", + "text": "็ฎ€ๅކๅšไธ€ไธ‹ๆ›ดๆ–ฐ", + "note": "ไปŠๅคฉๅค„็†", + "children": [], + } + ] + }, + } + ] + + hits = search_documents(docs, "็ฎ€ๅކ") + self.assertEqual(len(hits), 1) + self.assertEqual(hits[0]["doc_id"], "docA") + self.assertEqual(hits[0]["node_id"], "n1") + self.assertEqual(hits[0]["text"], "็ฎ€ๅކๅšไธ€ไธ‹ๆ›ดๆ–ฐ") + + +class ClientSyncParsingTests(unittest.TestCase): + def test_parse_client_sync_line_extracts_change_request(self): + line = ( + '[2026-03-17T17:18:40.006] [INFO] clientSync 
- Info: Net request 45715 ' + '{"pathname":"/v3/api/colla/events","data":{"memberId":"7992964417993318",' + '"type":"CHANGE","version":209,"documentId":"doc-demo-01","events":[{"name":"create"}]},' + '"method":"POST"}' + ) + + parsed = parse_client_sync_line(line) + self.assertIsNotNone(parsed) + self.assertEqual(parsed["timestamp"], "2026-03-17T17:18:40.006") + self.assertEqual(parsed["kind"], "change_request") + self.assertEqual(parsed["document_id"], "doc-demo-01") + self.assertEqual(parsed["event_type"], "CHANGE") + self.assertEqual(parsed["version"], 209) + + +class FolderNormalizationTests(unittest.TestCase): + def test_normalize_folder_record_extracts_parent_children_and_timestamps(self): + raw = { + "id": "folder-root-01", + "|o": "Workspace", + "|h": "0", + "|p": '[{"id":"doc-link-001","type":"doc"},{"id":"folder-daily-01","type":"folder"}]', + "|d": 1753841934779, + "|n": 1773313495971, + "|t": 1773313495971, + "|v": 1773313495971, + "_rev": "2792-d896b5c6a897c7c7b5e61487029f29ad", + } + + normalized = normalize_folder_record(raw) + self.assertEqual(normalized["folder_id"], "folder-root-01") + self.assertEqual(normalized["name"], "Workspace") + self.assertEqual(normalized["parent_id"], "0") + self.assertEqual(normalized["created_at"], 1753841934779) + self.assertEqual(normalized["updated_at"], 1773313495971) + self.assertEqual(normalized["children"][0]["id"], "doc-link-001") + self.assertEqual(normalized["children"][1]["type"], "folder") + + +class DocumentMetaNormalizationTests(unittest.TestCase): + def test_normalize_document_meta_record_extracts_folder_title_and_times(self): + raw = { + "id": "1kapleatfQ0", + "|h": "folder-daily-01", + "|n": "11.24", + "|e": 1763865805160, + "|z": 1764003928841, + "|B": 1764003934105, + "|m": 1764003934105, + "|j": 48, + "|d": "NewSyncApp", + "_rev": "915-ca5340b309a22ea63f8990f806765fbc", + } + + normalized = normalize_document_meta_record(raw) + self.assertEqual(normalized["doc_id"], "1kapleatfQ0") + 
self.assertEqual(normalized["folder_id"], "folder-daily-01") + self.assertEqual(normalized["title"], "11.24") + self.assertEqual(normalized["created_at"], 1763865805160) + self.assertEqual(normalized["updated_at"], 1764003934105) + self.assertEqual(normalized["word_count"], 48) + self.assertEqual(normalized["source"], "NewSyncApp") + + +class LinkExtractionTests(unittest.TestCase): + def test_extract_doc_links_finds_mubu_doc_mentions(self): + markup = ( + 'ๅ‚่€ƒ' + 'DDL่กจ(To Do List)' + ) + + links = extract_doc_links(markup) + self.assertEqual(len(links), 1) + self.assertEqual(links[0]["target_doc_id"], "doc-link-001") + self.assertEqual(links[0]["label"], "DDL่กจ(To Do List)") + + +class PathResolutionTests(unittest.TestCase): + def setUp(self): + self.folders = [ + {"folder_id": "rootA", "name": "Workspace", "parent_id": "0"}, + {"folder_id": "dailyA", "name": "Daily tasks", "parent_id": "rootA"}, + {"folder_id": "rootB", "name": "Archive", "parent_id": "0"}, + {"folder_id": "dailyB", "name": "Daily tasks", "parent_id": "rootB"}, + ] + self.document_metas = [ + {"doc_id": "docA", "folder_id": "dailyA", "title": "26.03.16", "updated_at": 20}, + {"doc_id": "docB", "folder_id": "dailyA", "title": "26.3.15", "updated_at": 10}, + {"doc_id": "docC", "folder_id": "dailyB", "title": "26.03.16", "updated_at": 30}, + ] + self.backups = [ + { + "doc_id": "docA", + "title": "26.03.16", + "backup_file": "/tmp/docA.json", + "modified_at": 123.0, + "data": {"viewType": "OUTLINE", "nodes": [{"id": "n1", "text": "today", "children": []}]}, + } + ] + + def test_folder_documents_supports_full_folder_path(self): + docs, folder, ambiguous = folder_documents(self.document_metas, self.folders, "Workspace/Daily tasks") + self.assertEqual(ambiguous, []) + self.assertEqual(folder["folder_id"], "dailyA") + self.assertEqual([doc["doc_id"] for doc in docs], ["docA", "docB"]) + self.assertEqual(docs[0]["doc_path"], "Workspace/Daily tasks/26.03.16") + + def 
test_folder_documents_detects_ambiguous_folder_name(self): + docs, folder, ambiguous = folder_documents(self.document_metas, self.folders, "Daily tasks") + self.assertEqual(docs, []) + self.assertIsNone(folder) + self.assertEqual(len(ambiguous), 2) + + def test_resolve_document_reference_supports_full_doc_path(self): + doc, ambiguous = resolve_document_reference(self.document_metas, self.folders, "Workspace/Daily tasks/26.03.16") + self.assertEqual(ambiguous, []) + self.assertEqual(doc["doc_id"], "docA") + self.assertEqual(doc["doc_path"], "Workspace/Daily tasks/26.03.16") + + def test_resolve_document_reference_detects_ambiguous_title(self): + doc, ambiguous = resolve_document_reference(self.document_metas, self.folders, "26.03.16") + self.assertIsNone(doc) + self.assertEqual(len(ambiguous), 2) + + def test_show_document_by_reference_uses_resolved_path(self): + payload, ambiguous = show_document_by_reference( + self.backups, + self.document_metas, + self.folders, + "Workspace/Daily tasks/26.03.16", + ) + self.assertEqual(ambiguous, []) + self.assertEqual(payload["doc_id"], "docA") + self.assertEqual(payload["title"], "26.03.16") + self.assertEqual(payload["folder_path"], "Workspace/Daily tasks") + self.assertEqual(payload["doc_path"], "Workspace/Daily tasks/26.03.16") + self.assertEqual(payload["nodes"][0]["text"], "today") + + +class DocumentNodeListingTests(unittest.TestCase): + def test_list_document_nodes_flattens_tree_for_agent_targeting(self): + data = { + "nodes": [ + { + "id": "root-1", + "text": "ๆ—ฅๅฟ—ๆต", + "note": "้กถๅฑ‚", + "modified": 10, + "children": [ + { + "id": "child-1", + "text": "็ฎ€ๅކๅšไธ€ไธ‹", + "note": "", + "modified": 20, + "children": [], + } + ], + } + ] + } + + nodes = list_document_nodes(data) + self.assertEqual(len(nodes), 2) + self.assertEqual(nodes[0]["node_id"], "root-1") + self.assertEqual(nodes[0]["path"], ["nodes", 0]) + self.assertEqual(nodes[0]["depth"], 0) + self.assertEqual(nodes[0]["text"], "ๆ—ฅๅฟ—ๆต") + 
self.assertEqual(nodes[1]["node_id"], "child-1") + self.assertEqual(nodes[1]["path"], ["nodes", 0, 0]) + self.assertEqual(nodes[1]["depth"], 1) + self.assertEqual(nodes[1]["text"], "็ฎ€ๅކๅšไธ€ไธ‹") + + def test_list_document_nodes_supports_query_and_max_depth(self): + data = { + "nodes": [ + { + "id": "root-1", + "text": "ๆ—ฅๅฟ—ๆต", + "note": "", + "modified": 10, + "children": [ + { + "id": "child-1", + "text": "็ฎ€ๅކๅšไธ€ไธ‹", + "note": "", + "modified": 20, + "children": [], + } + ], + } + ] + } + + only_root = list_document_nodes(data, max_depth=0) + self.assertEqual([item["node_id"] for item in only_root], ["root-1"]) + + queried = list_document_nodes(data, query="็ฎ€ๅކ") + self.assertEqual([item["node_id"] for item in queried], ["child-1"]) + + +class DailySelectionTests(unittest.TestCase): + def test_looks_like_daily_title_accepts_date_titles_and_rejects_templates(self): + self.assertTrue(looks_like_daily_title("26.03.16")) + self.assertTrue(looks_like_daily_title("26.3.8-3.9")) + self.assertFalse(looks_like_daily_title("DDL่กจ")) + self.assertFalse(looks_like_daily_title("26.2.22ๆจกๆฟๆ›ดๆ–ฐ")) + + def test_choose_current_daily_document_prefers_latest_date_titled_doc(self): + docs = [ + {"doc_id": "template", "title": "26.2.22ๆจกๆฟๆ›ดๆ–ฐ", "updated_at": 90}, + {"doc_id": "ddl", "title": "DDL่กจ", "updated_at": 100}, + {"doc_id": "today", "title": "26.03.16", "updated_at": 120}, + {"doc_id": "yesterday", "title": "26.3.15", "updated_at": 110}, + ] + + selected, candidates = choose_current_daily_document(docs) + self.assertEqual(selected["doc_id"], "today") + self.assertEqual([item["doc_id"] for item in candidates], ["today", "yesterday"]) + + def test_choose_current_daily_document_can_fallback_to_any_title(self): + docs = [ + {"doc_id": "ddl", "title": "DDL่กจ", "updated_at": 100}, + {"doc_id": "template", "title": "ๆจกๆฟๆ›ดๆ–ฐ", "updated_at": 90}, + ] + + selected, candidates = choose_current_daily_document(docs, allow_non_daily_titles=True) + 
self.assertEqual(selected["doc_id"], "ddl") + self.assertEqual([item["doc_id"] for item in candidates], ["ddl", "template"]) + + +class WritePathTests(unittest.TestCase): + def test_node_path_to_api_path_expands_child_hops(self): + self.assertEqual(node_path_to_api_path(("nodes", 3)), ["nodes", 3]) + self.assertEqual( + node_path_to_api_path(("nodes", 3, 0, 2)), + ["nodes", 3, "children", 0, "children", 2], + ) + + def test_normalize_user_record_extracts_auth_and_profile_fields(self): + raw = { + "id": 16166162, + "|u": "jwt-token-value", + "|i": "Example User", + "|n": "15500000000", + "|o": "https://document-image.mubu.com/photo/example.jpg", + "|w": "20270221", + "|h": 1773649029957, + "_rev": "1-abc", + } + + normalized = normalize_user_record(raw) + self.assertEqual(normalized["user_id"], "16166162") + self.assertEqual(normalized["token"], "jwt-token-value") + self.assertEqual(normalized["display_name"], "Example User") + self.assertEqual(normalized["phone"], "15500000000") + self.assertEqual(normalized["updated_at"], 1773649029957) + + def test_latest_doc_member_context_picks_most_recent_member_id(self): + events = [ + {"timestamp": "2026-03-17T17:18:40.006", "document_id": "doc-demo-01", "member_id": "old-member"}, + {"timestamp": "2026-03-17T18:32:48.609", "document_id": "other-doc", "member_id": "ignore-me"}, + {"timestamp": "2026-03-17T18:40:01.000", "document_id": "doc-demo-01", "member_id": "new-member"}, + ] + + context = latest_doc_member_context(events, "doc-demo-01") + self.assertEqual(context["member_id"], "new-member") + self.assertEqual(context["last_seen_at"], "2026-03-17T18:40:01.000") + + def test_build_api_headers_matches_desktop_shape(self): + user = {"user_id": "16166162", "token": "jwt-token-value"} + + headers = build_api_headers(user, platform_version="10.0.26100") + self.assertEqual(headers["mubu-desktop"], "true") + self.assertEqual(headers["platform"], "windows") + self.assertEqual(headers["platform-version"], "10.0.26100") + 
self.assertEqual(headers["User-Agent"], "windows Mubu Electron") + self.assertEqual(headers["userId"], "16166162") + self.assertEqual(headers["token"], "jwt-token-value") + self.assertEqual(headers["Content-Type"], "application/json;") + + def test_build_text_update_request_builds_server_side_change_payload(self): + node = { + "id": "node-1", + "text": [{"type": 1, "text": "็ฎ€ๅކๅšไธ€ไธ‹"}], + "modified": 1773739119771, + } + + request = build_text_update_request( + doc_id="doc-demo-01", + member_id="7992964417993318", + version=256, + node=node, + path=("nodes", 3, "children", 0), + new_text="็ฎ€ๅކๅšไธ€ไธ‹ๆ›ดๆ–ฐ", + modified_ms=1773744000000, + ) + + self.assertEqual(request["pathname"], "/v3/api/colla/events") + self.assertEqual(request["method"], "POST") + self.assertEqual(request["data"]["documentId"], "doc-demo-01") + self.assertEqual(request["data"]["memberId"], "7992964417993318") + self.assertEqual(request["data"]["version"], 256) + event = request["data"]["events"][0] + self.assertEqual(event["name"], "update") + updated = event["updated"][0] + self.assertEqual(updated["updated"]["id"], "node-1") + self.assertEqual(updated["updated"]["text"], "็ฎ€ๅކๅšไธ€ไธ‹ๆ›ดๆ–ฐ") + self.assertEqual(updated["updated"]["modified"], 1773744000000) + self.assertEqual(updated["original"]["text"], "็ฎ€ๅކๅšไธ€ไธ‹") + self.assertEqual(updated["path"], ["nodes", 3, "children", 0]) + + def test_build_create_child_request_builds_create_payload(self): + parent_node = { + "id": "node-demo1", + "children": [ + {"id": "child-0"}, + {"id": "child-1"}, + ], + } + + request = build_create_child_request( + doc_id="doc-demo-01", + member_id="7992964417993318", + version=257, + parent_node=parent_node, + parent_path=("nodes", 3, 0), + text="็ปง็ปญๆŽจ่ฟ› create-child", + note="ๅ…ˆ dry-run", + child_id="new-child-1", + modified_ms=1773748000000, + ) + + self.assertEqual(request["pathname"], "/v3/api/colla/events") + self.assertEqual(request["method"], "POST") + 
self.assertEqual(request["data"]["documentId"], "doc-demo-01") + self.assertEqual(request["data"]["memberId"], "7992964417993318") + self.assertEqual(request["data"]["version"], 257) + event = request["data"]["events"][0] + self.assertEqual(event["name"], "create") + created = event["created"][0] + self.assertEqual(created["index"], 2) + self.assertEqual(created["parentId"], "node-demo1") + self.assertEqual( + created["path"], + ["nodes", 3, "children", 0, "children", 2], + ) + self.assertEqual(created["node"]["id"], "new-child-1") + self.assertEqual(created["node"]["taskStatus"], 0) + self.assertEqual(created["node"]["text"], "็ปง็ปญๆŽจ่ฟ› create-child") + self.assertEqual(created["node"]["note"], "ๅ…ˆ dry-run") + self.assertEqual(created["node"]["modified"], 1773748000000) + self.assertEqual(created["node"]["children"], []) + self.assertTrue(created["node"]["forceUpdate"]) + + def test_parent_context_for_nested_node_path_returns_parent_and_index(self): + data = { + "nodes": [ + { + "id": "root-1", + "children": [ + { + "id": "child-1", + "children": [ + { + "id": "leaf-1", + "children": [], + } + ], + } + ], + } + ] + } + + parent_node, parent_path, index = parent_context_for_path(data, ("nodes", 0, 0, 0)) + self.assertEqual(parent_node["id"], "child-1") + self.assertEqual(parent_path, ("nodes", 0, 0)) + self.assertEqual(index, 0) + + def test_parent_context_for_root_node_path_returns_none_parent(self): + data = { + "nodes": [ + { + "id": "root-1", + "children": [], + } + ] + } + + parent_node, parent_path, index = parent_context_for_path(data, ("nodes", 0)) + self.assertIsNone(parent_node) + self.assertIsNone(parent_path) + self.assertEqual(index, 0) + + def test_build_delete_node_request_builds_delete_payload(self): + node = { + "id": "child-2", + "modified": 1773757000000, + "text": "ไธดๆ—ถๅˆ ้™ค่Š‚็‚น", + "note": "delete dry-run", + "children": [], + } + parent_node = { + "id": "node-demo1", + } + + request = build_delete_node_request( + doc_id="doc-demo-01", 
+ member_id="7992964417993318", + version=258, + node=node, + path=("nodes", 3, 0, 2), + parent_node=parent_node, + ) + + self.assertEqual(request["pathname"], "/v3/api/colla/events") + self.assertEqual(request["method"], "POST") + self.assertEqual(request["data"]["documentId"], "doc-demo-01") + self.assertEqual(request["data"]["memberId"], "7992964417993318") + self.assertEqual(request["data"]["version"], 258) + event = request["data"]["events"][0] + self.assertEqual(event["name"], "delete") + deleted = event["deleted"][0] + self.assertEqual(deleted["parentId"], "node-demo1") + self.assertEqual(deleted["index"], 2) + self.assertEqual( + deleted["path"], + ["nodes", 3, "children", 0, "children", 2], + ) + self.assertEqual(deleted["node"]["id"], "child-2") + self.assertEqual(deleted["node"]["text"], "ไธดๆ—ถๅˆ ้™ค่Š‚็‚น") + self.assertEqual(deleted["node"]["note"], "delete dry-run") + + +if __name__ == "__main__": + unittest.main() diff --git a/mubu/agent-harness/cli_anything/mubu/utils/__init__.py b/mubu/agent-harness/cli_anything/mubu/utils/__init__.py new file mode 100644 index 000000000..bba9e2252 --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/utils/__init__.py @@ -0,0 +1,3 @@ +from cli_anything.mubu.utils.repl_skin import ReplSkin + +__all__ = ["ReplSkin"] diff --git a/mubu/agent-harness/cli_anything/mubu/utils/repl_skin.py b/mubu/agent-harness/cli_anything/mubu/utils/repl_skin.py new file mode 100644 index 000000000..c7312348a --- /dev/null +++ b/mubu/agent-harness/cli_anything/mubu/utils/repl_skin.py @@ -0,0 +1,521 @@ +"""cli-anything REPL Skin โ€” Unified terminal interface for all CLI harnesses. 
+ +Copy this file into your CLI package at: + cli_anything//utils/repl_skin.py + +Usage: + from cli_anything..utils.repl_skin import ReplSkin + + skin = ReplSkin("shotcut", version="1.0.0") + skin.print_banner() # auto-detects skills/SKILL.md inside the package + prompt_text = skin.prompt(project_name="my_video.mlt", modified=True) + skin.success("Project saved") + skin.error("File not found") + skin.warning("Unsaved changes") + skin.info("Processing 24 clips...") + skin.status("Track 1", "3 clips, 00:02:30") + skin.table(headers, rows) + skin.print_goodbye() +""" + +import os +import sys + +# โ”€โ”€ ANSI color codes (no external deps for core styling) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +_RESET = "\033[0m" +_BOLD = "\033[1m" +_DIM = "\033[2m" +_ITALIC = "\033[3m" +_UNDERLINE = "\033[4m" + +# Brand colors +_CYAN = "\033[38;5;80m" # cli-anything brand cyan +_CYAN_BG = "\033[48;5;80m" +_WHITE = "\033[97m" +_GRAY = "\033[38;5;245m" +_DARK_GRAY = "\033[38;5;240m" +_LIGHT_GRAY = "\033[38;5;250m" + +# Software accent colors โ€” each software gets a unique accent +_ACCENT_COLORS = { + "gimp": "\033[38;5;214m", # warm orange + "blender": "\033[38;5;208m", # deep orange + "inkscape": "\033[38;5;39m", # bright blue + "audacity": "\033[38;5;33m", # navy blue + "libreoffice": "\033[38;5;40m", # green + "obs_studio": "\033[38;5;55m", # purple + "kdenlive": "\033[38;5;69m", # slate blue + "shotcut": "\033[38;5;35m", # teal green +} +_DEFAULT_ACCENT = "\033[38;5;75m" # default sky blue + +# Status colors +_GREEN = "\033[38;5;78m" +_YELLOW = "\033[38;5;220m" +_RED = "\033[38;5;196m" +_BLUE = "\033[38;5;75m" +_MAGENTA = "\033[38;5;176m" + +# โ”€โ”€ Brand icon โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +# The cli-anything icon: a small colored diamond/chevron mark +_ICON = f"{_CYAN}{_BOLD}โ—†{_RESET}" +_ICON_SMALL = f"{_CYAN}โ–ธ{_RESET}" + +# 
โ”€โ”€ Box drawing characters โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +_H_LINE = "โ”€" +_V_LINE = "โ”‚" +_TL = "โ•ญ" +_TR = "โ•ฎ" +_BL = "โ•ฐ" +_BR = "โ•ฏ" +_T_DOWN = "โ”ฌ" +_T_UP = "โ”ด" +_T_RIGHT = "โ”œ" +_T_LEFT = "โ”ค" +_CROSS = "โ”ผ" + + +def _strip_ansi(text: str) -> str: + """Remove ANSI escape codes for length calculation.""" + import re + return re.sub(r"\033\[[^m]*m", "", text) + + +def _visible_len(text: str) -> int: + """Get visible length of text (excluding ANSI codes).""" + return len(_strip_ansi(text)) + + +class ReplSkin: + """Unified REPL skin for cli-anything CLIs. + + Provides consistent branding, prompts, and message formatting + across all CLI harnesses built with the cli-anything methodology. + """ + + def __init__(self, software: str, version: str = "1.0.0", + history_file: str | None = None, skill_path: str | None = None): + """Initialize the REPL skin. + + Args: + software: Software name (e.g., "gimp", "shotcut", "blender"). + version: CLI version string. + history_file: Path for persistent command history. + Defaults to ~/.cli-anything-/history + skill_path: Path to the SKILL.md file for agent discovery. + Auto-detected from the package's skills/ directory if not provided. + Displayed in banner for AI agents to know where to read skill info. 
+ """ + self.software = software.lower().replace("-", "_") + self.display_name = software.replace("_", " ").title() + self.version = version + + # Auto-detect skill path from package layout: + # cli_anything//utils/repl_skin.py (this file) + # cli_anything//skills/SKILL.md (target) + if skill_path is None: + from pathlib import Path + _auto = Path(__file__).resolve().parent.parent / "skills" / "SKILL.md" + if _auto.is_file(): + skill_path = str(_auto) + self.skill_path = skill_path + self.accent = _ACCENT_COLORS.get(self.software, _DEFAULT_ACCENT) + + # History file + if history_file is None: + from pathlib import Path + hist_dir = Path.home() / f".cli-anything-{self.software}" + hist_dir.mkdir(parents=True, exist_ok=True) + self.history_file = str(hist_dir / "history") + else: + self.history_file = history_file + + # Detect terminal capabilities + self._color = self._detect_color_support() + + def _detect_color_support(self) -> bool: + """Check if terminal supports color.""" + if os.environ.get("NO_COLOR"): + return False + if os.environ.get("CLI_ANYTHING_NO_COLOR"): + return False + if not hasattr(sys.stdout, "isatty"): + return False + return sys.stdout.isatty() + + def _c(self, code: str, text: str) -> str: + """Apply color code if colors are supported.""" + if not self._color: + return text + return f"{code}{text}{_RESET}" + + # โ”€โ”€ Banner โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def print_banner(self): + """Print the startup banner with branding.""" + inner = 54 + + def _box_line(content: str) -> str: + """Wrap content in box drawing, padding to inner width.""" + pad = inner - _visible_len(content) + vl = self._c(_DARK_GRAY, _V_LINE) + return f"{vl}{content}{' ' * max(0, pad)}{vl}" + + top = self._c(_DARK_GRAY, f"{_TL}{_H_LINE * inner}{_TR}") + bot = self._c(_DARK_GRAY, f"{_BL}{_H_LINE * inner}{_BR}") + + # Title: โ—† 
cli-anything ยท Shotcut + icon = self._c(_CYAN + _BOLD, "โ—†") + brand = self._c(_CYAN + _BOLD, "cli-anything") + dot = self._c(_DARK_GRAY, "ยท") + name = self._c(self.accent + _BOLD, self.display_name) + title = f" {icon} {brand} {dot} {name}" + + ver = f" {self._c(_DARK_GRAY, f' v{self.version}')}" + tip = f" {self._c(_DARK_GRAY, ' Type help for commands, quit to exit')}" + empty = "" + + # Skill path for agent discovery + skill_line = None + if self.skill_path: + skill_icon = self._c(_MAGENTA, "โ—‡") + skill_label = self._c(_DARK_GRAY, " Skill:") + skill_path_display = self._c(_LIGHT_GRAY, self.skill_path) + skill_line = f" {skill_icon} {skill_label} {skill_path_display}" + + print(top) + print(_box_line(title)) + print(_box_line(ver)) + if skill_line: + print(_box_line(skill_line)) + print(_box_line(empty)) + print(_box_line(tip)) + print(bot) + print() + + # โ”€โ”€ Prompt โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def prompt(self, project_name: str = "", modified: bool = False, + context: str = "") -> str: + """Build a styled prompt string for prompt_toolkit or input(). + + Args: + project_name: Current project name (empty if none open). + modified: Whether the project has unsaved changes. + context: Optional extra context to show in prompt. + + Returns: + Formatted prompt string. 
+ """ + parts = [] + + # Icon + if self._color: + parts.append(f"{_CYAN}โ—†{_RESET} ") + else: + parts.append("> ") + + # Software name + parts.append(self._c(self.accent + _BOLD, self.software)) + + # Project context + if project_name or context: + ctx = context or project_name + mod = "*" if modified else "" + parts.append(f" {self._c(_DARK_GRAY, '[')}") + parts.append(self._c(_LIGHT_GRAY, f"{ctx}{mod}")) + parts.append(self._c(_DARK_GRAY, ']')) + + parts.append(self._c(_GRAY, " โฏ ")) + + return "".join(parts) + + def prompt_tokens(self, project_name: str = "", modified: bool = False, + context: str = ""): + """Build prompt_toolkit formatted text tokens for the prompt. + + Use with prompt_toolkit's FormattedText for proper ANSI handling. + + Returns: + list of (style, text) tuples for prompt_toolkit. + """ + accent_hex = _ANSI_256_TO_HEX.get(self.accent, "#5fafff") + tokens = [] + + tokens.append(("class:icon", "โ—† ")) + tokens.append(("class:software", self.software)) + + if project_name or context: + ctx = context or project_name + mod = "*" if modified else "" + tokens.append(("class:bracket", " [")) + tokens.append(("class:context", f"{ctx}{mod}")) + tokens.append(("class:bracket", "]")) + + tokens.append(("class:arrow", " โฏ ")) + + return tokens + + def get_prompt_style(self): + """Get a prompt_toolkit Style object matching the skin. 
+ + Returns: + prompt_toolkit.styles.Style + """ + try: + from prompt_toolkit.styles import Style + except ImportError: + return None + + accent_hex = _ANSI_256_TO_HEX.get(self.accent, "#5fafff") + + return Style.from_dict({ + "icon": "#5fdfdf bold", # cyan brand color + "software": f"{accent_hex} bold", + "bracket": "#585858", + "context": "#bcbcbc", + "arrow": "#808080", + # Completion menu + "completion-menu.completion": "bg:#303030 #bcbcbc", + "completion-menu.completion.current": f"bg:{accent_hex} #000000", + "completion-menu.meta.completion": "bg:#303030 #808080", + "completion-menu.meta.completion.current": f"bg:{accent_hex} #000000", + # Auto-suggest + "auto-suggest": "#585858", + # Bottom toolbar + "bottom-toolbar": "bg:#1c1c1c #808080", + "bottom-toolbar.text": "#808080", + }) + + # โ”€โ”€ Messages โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def success(self, message: str): + """Print a success message with green checkmark.""" + icon = self._c(_GREEN + _BOLD, "โœ“") + print(f" {icon} {self._c(_GREEN, message)}") + + def error(self, message: str): + """Print an error message with red cross.""" + icon = self._c(_RED + _BOLD, "โœ—") + print(f" {icon} {self._c(_RED, message)}", file=sys.stderr) + + def warning(self, message: str): + """Print a warning message with yellow triangle.""" + icon = self._c(_YELLOW + _BOLD, "โš ") + print(f" {icon} {self._c(_YELLOW, message)}") + + def info(self, message: str): + """Print an info message with blue dot.""" + icon = self._c(_BLUE, "โ—") + print(f" {icon} {self._c(_LIGHT_GRAY, message)}") + + def hint(self, message: str): + """Print a subtle hint message.""" + print(f" {self._c(_DARK_GRAY, message)}") + + def section(self, title: str): + """Print a section header.""" + print() + print(f" {self._c(self.accent + _BOLD, title)}") + print(f" {self._c(_DARK_GRAY, _H_LINE * len(title))}") + + # โ”€โ”€ 
Status display โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def status(self, label: str, value: str): + """Print a key-value status line.""" + lbl = self._c(_GRAY, f" {label}:") + val = self._c(_WHITE, f" {value}") + print(f"{lbl}{val}") + + def status_block(self, items: dict[str, str], title: str = ""): + """Print a block of status key-value pairs. + + Args: + items: Dict of label -> value pairs. + title: Optional title for the block. + """ + if title: + self.section(title) + + max_key = max(len(k) for k in items) if items else 0 + for label, value in items.items(): + lbl = self._c(_GRAY, f" {label:<{max_key}}") + val = self._c(_WHITE, f" {value}") + print(f"{lbl}{val}") + + def progress(self, current: int, total: int, label: str = ""): + """Print a simple progress indicator. + + Args: + current: Current step number. + total: Total number of steps. + label: Optional label for the progress. + """ + pct = int(current / total * 100) if total > 0 else 0 + bar_width = 20 + filled = int(bar_width * current / total) if total > 0 else 0 + bar = "โ–ˆ" * filled + "โ–‘" * (bar_width - filled) + text = f" {self._c(_CYAN, bar)} {self._c(_GRAY, f'{pct:3d}%')}" + if label: + text += f" {self._c(_LIGHT_GRAY, label)}" + print(text) + + # โ”€โ”€ Table display โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def table(self, headers: list[str], rows: list[list[str]], + max_col_width: int = 40): + """Print a formatted table with box-drawing characters. + + Args: + headers: Column header strings. + rows: List of rows, each a list of cell strings. + max_col_width: Maximum column width before truncation. 
+ """ + if not headers: + return + + # Calculate column widths + col_widths = [min(len(h), max_col_width) for h in headers] + for row in rows: + for i, cell in enumerate(row): + if i < len(col_widths): + col_widths[i] = min( + max(col_widths[i], len(str(cell))), max_col_width + ) + + def pad(text: str, width: int) -> str: + t = str(text)[:width] + return t + " " * (width - len(t)) + + # Header + header_cells = [ + self._c(_CYAN + _BOLD, pad(h, col_widths[i])) + for i, h in enumerate(headers) + ] + sep = self._c(_DARK_GRAY, f" {_V_LINE} ") + header_line = f" {sep.join(header_cells)}" + print(header_line) + + # Separator + sep_parts = [self._c(_DARK_GRAY, _H_LINE * w) for w in col_widths] + sep_line = self._c(_DARK_GRAY, f" {'โ”€โ”€โ”€'.join([_H_LINE * w for w in col_widths])}") + print(sep_line) + + # Rows + for row in rows: + cells = [] + for i, cell in enumerate(row): + if i < len(col_widths): + cells.append(self._c(_LIGHT_GRAY, pad(str(cell), col_widths[i]))) + row_sep = self._c(_DARK_GRAY, f" {_V_LINE} ") + print(f" {row_sep.join(cells)}") + + # โ”€โ”€ Help display โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def help(self, commands: dict[str, str]): + """Print a formatted help listing. + + Args: + commands: Dict of command -> description pairs. 
+ """ + self.section("Commands") + max_cmd = max(len(c) for c in commands) if commands else 0 + for cmd, desc in commands.items(): + cmd_styled = self._c(self.accent, f" {cmd:<{max_cmd}}") + desc_styled = self._c(_GRAY, f" {desc}") + print(f"{cmd_styled}{desc_styled}") + print() + + # โ”€โ”€ Goodbye โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def print_goodbye(self): + """Print a styled goodbye message.""" + print(f"\n {_ICON_SMALL} {self._c(_GRAY, 'Goodbye!')}\n") + + # โ”€โ”€ Prompt toolkit session factory โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def create_prompt_session(self): + """Create a prompt_toolkit PromptSession with skin styling. + + Returns: + A configured PromptSession, or None if prompt_toolkit unavailable. + """ + try: + from prompt_toolkit import PromptSession + from prompt_toolkit.history import FileHistory + from prompt_toolkit.auto_suggest import AutoSuggestFromHistory + from prompt_toolkit.formatted_text import FormattedText + + style = self.get_prompt_style() + + session = PromptSession( + history=FileHistory(self.history_file), + auto_suggest=AutoSuggestFromHistory(), + style=style, + enable_history_search=True, + ) + return session + except ImportError: + return None + + def get_input(self, pt_session, project_name: str = "", + modified: bool = False, context: str = "") -> str: + """Get input from user using prompt_toolkit or fallback. + + Args: + pt_session: A prompt_toolkit PromptSession (or None). + project_name: Current project name. + modified: Whether project has unsaved changes. + context: Optional context string. + + Returns: + User input string (stripped). 
+ """ + if pt_session is not None: + from prompt_toolkit.formatted_text import FormattedText + tokens = self.prompt_tokens(project_name, modified, context) + return pt_session.prompt(FormattedText(tokens)).strip() + else: + raw_prompt = self.prompt(project_name, modified, context) + return input(raw_prompt).strip() + + # โ”€โ”€ Toolbar builder โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + + def bottom_toolbar(self, items: dict[str, str]): + """Create a bottom toolbar callback for prompt_toolkit. + + Args: + items: Dict of label -> value pairs to show in toolbar. + + Returns: + A callable that returns FormattedText for the toolbar. + """ + def toolbar(): + from prompt_toolkit.formatted_text import FormattedText + parts = [] + for i, (k, v) in enumerate(items.items()): + if i > 0: + parts.append(("class:bottom-toolbar.text", " โ”‚ ")) + parts.append(("class:bottom-toolbar.text", f" {k}: ")) + parts.append(("class:bottom-toolbar", v)) + return FormattedText(parts) + return toolbar + + +# โ”€โ”€ ANSI 256-color to hex mapping (for prompt_toolkit styles) โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +_ANSI_256_TO_HEX = { + "\033[38;5;33m": "#0087ff", # audacity navy blue + "\033[38;5;35m": "#00af5f", # shotcut teal + "\033[38;5;39m": "#00afff", # inkscape bright blue + "\033[38;5;40m": "#00d700", # libreoffice green + "\033[38;5;55m": "#5f00af", # obs purple + "\033[38;5;69m": "#5f87ff", # kdenlive slate blue + "\033[38;5;75m": "#5fafff", # default sky blue + "\033[38;5;80m": "#5fd7d7", # brand cyan + "\033[38;5;208m": "#ff8700", # blender deep orange + "\033[38;5;214m": "#ffaf00", # gimp warm orange +} diff --git a/mubu/agent-harness/mubu_probe.py b/mubu/agent-harness/mubu_probe.py new file mode 100644 index 000000000..c5bb84ee2 --- /dev/null +++ b/mubu/agent-harness/mubu_probe.py @@ -0,0 +1,2145 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import argparse +import 
copy +import gzip +import html +import json +import os +import re +import secrets +import string +import sys +from datetime import datetime, timezone +from json import JSONDecoder +from pathlib import Path +from typing import Any, Iterable, Mapping +from urllib.error import HTTPError, URLError +from urllib.request import Request, urlopen + + +def candidate_appdata_roots( + env: Mapping[str, str] | None = None, + home: Path | None = None, + mount_root: Path = Path("/mnt/c/Users"), +) -> list[Path]: + env = env or os.environ + home = home or Path.home() + candidates: list[Path] = [] + + def add(path: str | Path | None) -> None: + if not path: + return + candidate = Path(path).expanduser() + if candidate not in candidates: + candidates.append(candidate) + + add(env.get("APPDATA")) + userprofile = env.get("USERPROFILE") + if userprofile: + add(Path(userprofile) / "AppData" / "Roaming") + + for username in (home.name, env.get("USER")): + if username: + add(mount_root / username / "AppData" / "Roaming") + + if mount_root.exists(): + for child in sorted(mount_root.iterdir()): + if child.is_dir(): + add(child / "AppData" / "Roaming") + + return candidates + + +def default_mubu_data_root( + env: Mapping[str, str] | None = None, + home: Path | None = None, + mount_root: Path = Path("/mnt/c/Users"), +) -> Path: + env = env or os.environ + home = home or Path.home() + for candidate in candidate_appdata_roots(env=env, home=home, mount_root=mount_root): + if candidate.exists(): + return candidate / "Mubu" / "mubu_app_data" / "mubu_data" + return home / ".config" / "mubu" / "mubu_data" + + +DEFAULT_MUBU_DATA_ROOT = Path(os.environ.get("MUBU_DATA_ROOT", str(default_mubu_data_root()))) +DEFAULT_BACKUP_ROOT = Path(os.environ.get("MUBU_BACKUP_ROOT", str(DEFAULT_MUBU_DATA_ROOT / "backup"))) +DEFAULT_LOG_ROOT = Path(os.environ.get("MUBU_LOG_ROOT", str(DEFAULT_MUBU_DATA_ROOT / "log"))) +DEFAULT_STORAGE_ROOT = Path(os.environ.get("MUBU_STORAGE_ROOT", str(DEFAULT_MUBU_DATA_ROOT / 
".storage"))) +DEFAULT_API_HOST = os.environ.get("MUBU_API_HOST", "https://api2.mubu.com") +DEFAULT_PLATFORM = os.environ.get("MUBU_PLATFORM", "windows") +DEFAULT_PLATFORM_VERSION = os.environ.get("MUBU_PLATFORM_VERSION", "10.0.26100") + +TAG_RE = re.compile(r"<[^>]+>") +ZERO_WIDTH_RE = re.compile(r"[\u200b\u200c\u200d\ufeff]") +TIMESTAMP_RE = re.compile(r"^\[(?P[^\]]+)\]") +NET_REQUEST_RE = re.compile(r"Net request \d+ (?P\{.*\})$") +STORE_SET_RE = re.compile(r"Store set start (?P\S+) (?P\{.*\})$") +ANCHOR_RE = re.compile(r"[^>]*)>(?P") + return "".join(chunks) + + +def serialize_node(node: dict[str, Any], max_depth: int | None = None, depth: int = 0) -> dict[str, Any]: + result = { + "id": node.get("id"), + "text": extract_plain_text(node.get("text")), + "note": extract_plain_text(node.get("note")), + "modified": node.get("modified"), + } + if max_depth is None or depth < max_depth: + result["children"] = [ + serialize_node(child, max_depth=max_depth, depth=depth + 1) + for child in (node.get("children") or []) + ] + return result + + +def list_document_nodes( + data: dict[str, Any], + query: str | None = None, + max_depth: int | None = None, +) -> list[dict[str, Any]]: + normalized_query = normalized_lookup_key(query) if query else None + payload: list[dict[str, Any]] = [] + + for path, node in iter_nodes(data.get("nodes", [])): + depth = len(path) - 1 + if max_depth is not None and depth > max_depth: + continue + + text = extract_plain_text(node.get("text")) + note = extract_plain_text(node.get("note")) + if normalized_query: + haystack = "\n".join([text, note]).casefold() + if normalized_query not in haystack: + continue + + modified = node.get("modified") if isinstance(node.get("modified"), int) else None + children = node.get("children") or [] + child_count = len(children) if isinstance(children, list) else 0 + payload.append( + { + "node_id": node.get("id"), + "path": ["nodes", *path], + "api_path": node_path_to_api_path(("nodes", *path)), + "depth": depth, 
+ "text": text, + "note": note, + "child_count": child_count, + "modified": modified, + "modified_at_iso": timestamp_ms_to_iso(modified), + } + ) + + return payload + + +def show_document( + documents: Iterable[dict[str, Any]], + doc_id: str, + max_depth: int | None = None, + title_override: str | None = None, + folder_path: str | None = None, + doc_path: str | None = None, +) -> dict[str, Any] | None: + for document in documents: + if document["doc_id"] != doc_id: + continue + return { + "doc_id": document["doc_id"], + "title": title_override or document["title"], + "backup_file": document["backup_file"], + "modified_at": document["modified_at"], + "folder_path": folder_path, + "doc_path": doc_path, + "view_type": document["data"].get("viewType"), + "nodes": [ + serialize_node(node, max_depth=max_depth) + for node in document["data"].get("nodes", []) + ], + } + return None + + +def resolve_document_reference( + document_metas: Iterable[dict[str, Any]], + folders: Iterable[dict[str, Any]], + doc_ref: str, +) -> tuple[dict[str, Any] | None, list[dict[str, Any]]]: + folder_by_id, folder_paths = build_folder_indexes(folders) + metas = [enrich_document_meta(meta, folder_paths) for meta in document_metas] + + by_id = [meta for meta in metas if meta.get("doc_id") == doc_ref] + if len(by_id) == 1: + return by_id[0], [] + + normalized_ref = normalized_lookup_key(doc_ref) + + exact_path = [meta for meta in metas if normalized_lookup_key(meta.get("doc_path")) == normalized_ref] + if len(exact_path) == 1: + return exact_path[0], [] + if len(exact_path) > 1: + return None, exact_path + + suffix_path = [ + meta + for meta in metas + if normalized_lookup_key(meta.get("doc_path")).endswith(normalized_ref) + ] + if len(suffix_path) == 1: + return suffix_path[0], [] + if len(suffix_path) > 1: + return None, suffix_path + + title_matches = [meta for meta in metas if normalized_lookup_key(meta.get("title")) == normalized_ref] + if len(title_matches) == 1: + return title_matches[0], 
[] + if len(title_matches) > 1: + return None, title_matches + + return None, [] + + +def show_document_by_reference( + documents: Iterable[dict[str, Any]], + document_metas: Iterable[dict[str, Any]], + folders: Iterable[dict[str, Any]], + doc_ref: str, + max_depth: int | None = None, +) -> tuple[dict[str, Any] | None, list[dict[str, Any]]]: + meta, ambiguous = resolve_document_reference(document_metas, folders, doc_ref) + if meta is None: + return None, ambiguous + return ( + show_document( + documents, + meta["doc_id"], + max_depth=max_depth, + title_override=meta.get("title"), + folder_path=meta.get("folder_path"), + doc_path=meta.get("doc_path"), + ), + [], + ) + + +def document_links( + documents: Iterable[dict[str, Any]], + doc_id: str, + title_lookup: dict[str, str] | None = None, +) -> list[dict[str, Any]]: + title_lookup = title_lookup or {} + for document in documents: + if document["doc_id"] != doc_id: + continue + links: list[dict[str, Any]] = [] + for path, node in iter_nodes(document["data"].get("nodes", [])): + for field in ("text", "note"): + for link in extract_doc_links(node.get(field)): + links.append( + { + "source_doc_id": doc_id, + "source_doc_title": document.get("title"), + "source_node_id": node.get("id"), + "source_path": list(path), + "source_field": field, + "source_text": extract_plain_text(node.get("text")), + "target_doc_id": link["target_doc_id"], + "target_title": title_lookup.get(link["target_doc_id"]), + "label": link["label"], + } + ) + return links + return [] + + +def resolve_node_reference_in_data( + data: dict[str, Any], + node_id: str | None = None, + match_text: str | None = None, + field: str = "text", +) -> tuple[dict[str, Any] | None, tuple[Any, ...] 
| None, list[dict[str, Any]]]: + matches: list[dict[str, Any]] = [] + for path, node in iter_nodes(data.get("nodes", [])): + if node_id and node.get("id") == node_id: + return node, ("nodes", *path), [] + if match_text and extract_plain_text(node.get(field)) == match_text: + matches.append({"node": node, "path": ("nodes", *path)}) + + if node_id: + return None, None, [] + if len(matches) == 1: + return matches[0]["node"], matches[0]["path"], [] + if len(matches) > 1: + return None, None, matches + return None, None, [] + + +def resolve_node_at_path( + data: dict[str, Any], + path: Iterable[Any], +) -> dict[str, Any] | None: + parts = list(path) + if not parts or parts[0] != "nodes": + raise ValueError(f"unsupported node path root: {parts}") + if len(parts) < 2: + raise ValueError(f"node path missing index: {parts}") + + siblings = data.get("nodes") + if not isinstance(siblings, list): + return None + + current: dict[str, Any] | None = None + for part in parts[1:]: + if not isinstance(part, int): + raise ValueError(f"unsupported node path segment: {parts}") + if part < 0 or part >= len(siblings): + return None + current = siblings[part] + children = current.get("children") or [] + siblings = children if isinstance(children, list) else [] + return current + + +def parent_context_for_path( + data: dict[str, Any], + path: Iterable[Any], +) -> tuple[dict[str, Any] | None, tuple[Any, ...] 
| None, int]: + parts = tuple(path) + if not parts or parts[0] != "nodes": + raise ValueError(f"unsupported node path root: {parts}") + if len(parts) < 2: + raise ValueError(f"node path missing index: {parts}") + + index = parts[-1] + if not isinstance(index, int): + raise ValueError(f"unsupported node path index: {parts}") + if len(parts) == 2: + return None, None, index + + parent_path = parts[:-1] + parent_node = resolve_node_at_path(data, parent_path) + if parent_node is None: + raise ValueError(f"parent node not found for path: {parts}") + return parent_node, parent_path, index + + +def node_path_to_api_path(path: Iterable[Any]) -> list[Any]: + parts = list(path) + if not parts or parts[0] != "nodes": + raise ValueError(f"unsupported node path root: {parts}") + if "children" in parts: + return parts + + api_path: list[Any] = ["nodes"] + for index, part in enumerate(parts[1:]): + if index == 0: + api_path.append(part) + else: + api_path.extend(["children", part]) + return api_path + + +def generate_node_id(length: int = 10) -> str: + return "".join(secrets.choice(NODE_ID_ALPHABET) for _ in range(length)) + + +def build_text_update_request( + doc_id: str, + member_id: str | None, + version: int, + node: dict[str, Any], + path: Iterable[Any], + new_text: str, + field: str = "text", + modified_ms: int | None = None, +) -> dict[str, Any]: + modified_ms = modified_ms or int(datetime.now(tz=timezone.utc).timestamp() * 1000) + if field not in {"text", "note"}: + raise ValueError(f"unsupported field for text update: {field}") + + current_value = rich_text_to_html(node.get(field)) + updated_node = { + "id": node.get("id"), + field: plain_text_to_html(new_text), + "modified": modified_ms, + "forceUpdate": True, + } + original_node = { + "id": node.get("id"), + field: current_value, + "modified": node.get("modified"), + } + return { + "pathname": "/v3/api/colla/events", + "method": "POST", + "data": { + "memberId": member_id, + "type": "CHANGE", + "version": version, + 
"documentId": doc_id, + "events": [ + { + "name": "update", + "updated": [ + { + "updated": updated_node, + "original": original_node, + "path": list(path), + } + ], + } + ], + }, + } + + +def build_create_child_request( + doc_id: str, + member_id: str | None, + version: int, + parent_node: dict[str, Any], + parent_path: Iterable[Any], + text: str, + note: str | None = None, + child_id: str | None = None, + index: int | None = None, + modified_ms: int | None = None, +) -> dict[str, Any]: + modified_ms = modified_ms or int(datetime.now(tz=timezone.utc).timestamp() * 1000) + child_id = child_id or generate_node_id() + + children = parent_node.get("children") or [] + if not isinstance(children, list): + children = [] + + if index is None: + index = len(children) + if index < 0 or index > len(children): + raise ValueError(f"child index out of range: {index}") + + node_payload = { + "id": child_id, + "taskStatus": 0, + "text": maybe_plain_text_to_html(text) or "", + "modified": modified_ms, + "children": [], + } + note_html = maybe_plain_text_to_html(note) + if note_html is not None: + node_payload["note"] = note_html + if text or (note is not None and note != ""): + node_payload["forceUpdate"] = True + + create_path = node_path_to_api_path(parent_path) + ["children", index] + return { + "pathname": "/v3/api/colla/events", + "method": "POST", + "data": { + "memberId": member_id, + "type": "CHANGE", + "version": version, + "documentId": doc_id, + "events": [ + { + "name": "create", + "created": [ + { + "index": index, + "parentId": parent_node.get("id"), + "node": node_payload, + "path": create_path, + } + ], + } + ], + }, + } + + +def build_delete_node_request( + doc_id: str, + member_id: str | None, + version: int, + node: dict[str, Any], + path: Iterable[Any], + parent_node: dict[str, Any] | None = None, +) -> dict[str, Any]: + deleted_node = copy.deepcopy(node) + children = deleted_node.get("children") + if not isinstance(children, list): + deleted_node["children"] = 
[] + + raw_path = tuple(path) + if len(raw_path) < 2: + raise ValueError(f"node path missing index: {raw_path}") + index = raw_path[-1] + if not isinstance(index, int): + raise ValueError(f"unsupported node path index: {raw_path}") + + return { + "pathname": "/v3/api/colla/events", + "method": "POST", + "data": { + "memberId": member_id, + "type": "CHANGE", + "version": version, + "documentId": doc_id, + "events": [ + { + "name": "delete", + "deleted": [ + { + "parentId": parent_node.get("id") if parent_node else None, + "index": index, + "node": deleted_node, + "path": node_path_to_api_path(raw_path), + } + ], + } + ], + }, + } + + +def perform_text_update( + user: dict[str, Any], + doc_id: str, + member_id: str | None, + version: int, + node: dict[str, Any], + path: Iterable[Any], + new_text: str, + field: str = "text", + execute: bool = False, + api_host: str = DEFAULT_API_HOST, +) -> dict[str, Any]: + request_payload = build_text_update_request( + doc_id=doc_id, + member_id=member_id, + version=version, + node=node, + path=path, + new_text=new_text, + field=field, + ) + if not execute: + return { + "execute": False, + "request": request_payload, + } + + response = post_json( + f"{api_host}{request_payload['pathname']}", + request_payload["data"], + build_api_headers(user), + ) + return { + "execute": True, + "request": request_payload, + "response": response, + } + + +def perform_create_child( + user: dict[str, Any], + doc_id: str, + member_id: str | None, + version: int, + parent_node: dict[str, Any], + parent_path: Iterable[Any], + text: str, + note: str | None = None, + index: int | None = None, + execute: bool = False, + api_host: str = DEFAULT_API_HOST, +) -> dict[str, Any]: + request_payload = build_create_child_request( + doc_id=doc_id, + member_id=member_id, + version=version, + parent_node=parent_node, + parent_path=parent_path, + text=text, + note=note, + index=index, + ) + if not execute: + return { + "execute": False, + "request": request_payload, + 
} + + response = post_json( + f"{api_host}{request_payload['pathname']}", + request_payload["data"], + build_api_headers(user), + ) + return { + "execute": True, + "request": request_payload, + "response": response, + } + + +def perform_delete_node( + user: dict[str, Any], + doc_id: str, + member_id: str | None, + version: int, + node: dict[str, Any], + path: Iterable[Any], + parent_node: dict[str, Any] | None = None, + execute: bool = False, + api_host: str = DEFAULT_API_HOST, +) -> dict[str, Any]: + request_payload = build_delete_node_request( + doc_id=doc_id, + member_id=member_id, + version=version, + node=node, + path=path, + parent_node=parent_node, + ) + if not execute: + return { + "execute": False, + "request": request_payload, + } + + response = post_json( + f"{api_host}{request_payload['pathname']}", + request_payload["data"], + build_api_headers(user), + ) + return { + "execute": True, + "request": request_payload, + "response": response, + } + + +def dump_output(data: Any, as_json: bool) -> None: + if as_json: + json.dump(data, sys.stdout, ensure_ascii=False, indent=2) + sys.stdout.write("\n") + return + + if isinstance(data, list): + for item in data: + print(json.dumps(item, ensure_ascii=False)) + return + + print(json.dumps(data, ensure_ascii=False, indent=2)) + + +def ambiguous_error_message(kind: str, ref: str, matches: Iterable[dict[str, Any]], path_key: str) -> str: + options = [] + for item in matches: + label = item.get(path_key) or item.get("name") or item.get("title") or item.get("doc_id") or item.get("folder_id") + options.append(str(label)) + if len(options) >= 5: + break + suffix = f" matches: {', '.join(options)}" if options else "" + return f"ambiguous {kind} reference: {ref}.{suffix}" + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(description="Probe local Mubu desktop backups and sync logs.") + subparsers = parser.add_subparsers(dest="command", required=True) + + docs_parser = 
subparsers.add_parser("docs", help="List latest known document snapshots from local backups.") + docs_parser.add_argument("--root", type=Path, default=DEFAULT_BACKUP_ROOT) + docs_parser.add_argument("--limit", type=int, default=20) + docs_parser.add_argument("--json", action="store_true") + + show_parser = subparsers.add_parser("show", help="Show the latest backup tree for one document.") + show_parser.add_argument("doc_id") + show_parser.add_argument("--root", type=Path, default=DEFAULT_BACKUP_ROOT) + show_parser.add_argument("--max-depth", type=int, default=None) + show_parser.add_argument("--json", action="store_true") + + search_parser = subparsers.add_parser("search", help="Search latest backups for matching node text or note content.") + search_parser.add_argument("query") + search_parser.add_argument("--root", type=Path, default=DEFAULT_BACKUP_ROOT) + search_parser.add_argument("--limit", type=int, default=20) + search_parser.add_argument("--json", action="store_true") + + changes_parser = subparsers.add_parser("changes", help="Parse recent client-sync change events from local logs.") + changes_parser.add_argument("--log-root", type=Path, default=DEFAULT_LOG_ROOT) + changes_parser.add_argument("--doc-id", default=None) + changes_parser.add_argument("--limit", type=int, default=20) + changes_parser.add_argument("--json", action="store_true") + + folders_parser = subparsers.add_parser("folders", help="List folder metadata from local RxDB storage.") + folders_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + folders_parser.add_argument("--query", default=None) + folders_parser.add_argument("--limit", type=int, default=50) + folders_parser.add_argument("--json", action="store_true") + + folder_docs_parser = subparsers.add_parser("folder-docs", help="List document metadata for one folder.") + folder_docs_parser.add_argument("folder_id") + folder_docs_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + 
folder_docs_parser.add_argument("--limit", type=int, default=50) + folder_docs_parser.add_argument("--json", action="store_true") + + path_docs_parser = subparsers.add_parser("path-docs", help="List documents for one folder path or folder id.") + path_docs_parser.add_argument("folder_ref") + path_docs_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + path_docs_parser.add_argument("--limit", type=int, default=50) + path_docs_parser.add_argument("--json", action="store_true") + + recent_parser = subparsers.add_parser("recent", help="List recently active documents using backups, metadata, and sync logs.") + recent_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + recent_parser.add_argument("--root", type=Path, default=DEFAULT_BACKUP_ROOT) + recent_parser.add_argument("--log-root", type=Path, default=DEFAULT_LOG_ROOT) + recent_parser.add_argument("--limit", type=int, default=20) + recent_parser.add_argument("--json", action="store_true") + + links_parser = subparsers.add_parser("links", help="Extract outbound Mubu document links from one document backup.") + links_parser.add_argument("doc_id") + links_parser.add_argument("--root", type=Path, default=DEFAULT_BACKUP_ROOT) + links_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + links_parser.add_argument("--json", action="store_true") + + daily_parser = subparsers.add_parser("daily", help="Find Daily-style folders and list the documents inside them.") + daily_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + daily_parser.add_argument("--query", default="daily") + daily_parser.add_argument("--limit", type=int, default=50) + daily_parser.add_argument("--json", action="store_true") + + daily_current_parser = subparsers.add_parser( + "daily-current", + help="Resolve the current daily document from one Daily-style folder.", + ) + daily_current_parser.add_argument("folder_ref", nargs="?", default="Daily 
tasks") + daily_current_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + daily_current_parser.add_argument("--limit", type=int, default=5) + daily_current_parser.add_argument( + "--allow-non-daily-titles", + action="store_true", + help="Fallback to the latest document even if no date-like title is found.", + ) + daily_current_parser.add_argument("--json", action="store_true") + + daily_nodes_parser = subparsers.add_parser( + "daily-nodes", + help="List live nodes from the current daily document in one step.", + ) + daily_nodes_parser.add_argument("folder_ref", nargs="?", default="Daily tasks") + daily_nodes_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + daily_nodes_parser.add_argument("--api-host", default=DEFAULT_API_HOST) + daily_nodes_parser.add_argument("--query", default=None, help="Filter nodes by plain-text substring.") + daily_nodes_parser.add_argument("--max-depth", type=int, default=None) + daily_nodes_parser.add_argument("--limit", type=int, default=200) + daily_nodes_parser.add_argument( + "--allow-non-daily-titles", + action="store_true", + help="Fallback to the latest document even if no date-like title is found.", + ) + daily_nodes_parser.add_argument("--json", action="store_true") + + open_path_parser = subparsers.add_parser("open-path", help="Open one document by full path, suffix path, title, or doc id.") + open_path_parser.add_argument("doc_ref") + open_path_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + open_path_parser.add_argument("--root", type=Path, default=DEFAULT_BACKUP_ROOT) + open_path_parser.add_argument("--max-depth", type=int, default=None) + open_path_parser.add_argument("--json", action="store_true") + + doc_nodes_parser = subparsers.add_parser( + "doc-nodes", + help="List live document nodes with node ids and update-target paths.", + ) + doc_nodes_parser.add_argument("doc_ref") + doc_nodes_parser.add_argument("--storage-root", 
type=Path, default=DEFAULT_STORAGE_ROOT) + doc_nodes_parser.add_argument("--api-host", default=DEFAULT_API_HOST) + doc_nodes_parser.add_argument("--query", default=None, help="Filter nodes by plain-text substring.") + doc_nodes_parser.add_argument("--max-depth", type=int, default=None) + doc_nodes_parser.add_argument("--limit", type=int, default=200) + doc_nodes_parser.add_argument("--json", action="store_true") + + create_child_parser = subparsers.add_parser( + "create-child", + help="Build or execute one child-node creation against the live Mubu API.", + ) + create_child_parser.add_argument("doc_ref") + create_child_parser.add_argument("--text", required=True, help="New child plain text.") + create_child_parser.add_argument("--note", default=None, help="Optional plain-text note for the new child.") + create_child_parser.add_argument("--parent-node-id", default=None, help="Target parent node by id.") + create_child_parser.add_argument("--parent-match-text", default=None, help="Target parent node by exact current plain text.") + create_child_parser.add_argument("--parent-field", choices=["text", "note"], default="text") + create_child_parser.add_argument("--index", type=int, default=None, help="Insert position within the parent children list.") + create_child_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + create_child_parser.add_argument("--log-root", type=Path, default=DEFAULT_LOG_ROOT) + create_child_parser.add_argument("--api-host", default=DEFAULT_API_HOST) + create_child_parser.add_argument("--execute", action="store_true", help="Actually POST the CHANGE event.") + create_child_parser.add_argument("--json", action="store_true") + + delete_node_parser = subparsers.add_parser( + "delete-node", + help="Build or execute one node deletion against the live Mubu API.", + ) + delete_node_parser.add_argument("doc_ref") + delete_node_parser.add_argument("--node-id", default=None, help="Target one node by id.") + 
delete_node_parser.add_argument("--match-text", default=None, help="Target one node by exact current plain text.") + delete_node_parser.add_argument("--field", choices=["text", "note"], default="text") + delete_node_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + delete_node_parser.add_argument("--log-root", type=Path, default=DEFAULT_LOG_ROOT) + delete_node_parser.add_argument("--api-host", default=DEFAULT_API_HOST) + delete_node_parser.add_argument("--execute", action="store_true", help="Actually POST the CHANGE event.") + delete_node_parser.add_argument("--json", action="store_true") + + update_text_parser = subparsers.add_parser("update-text", help="Build or execute one text update against the live Mubu API.") + update_text_parser.add_argument("doc_ref") + update_text_parser.add_argument("--text", required=True, help="Replacement plain text.") + update_text_parser.add_argument("--node-id", default=None, help="Target one node by id.") + update_text_parser.add_argument("--match-text", default=None, help="Target one node by exact current plain text.") + update_text_parser.add_argument("--field", choices=["text", "note"], default="text") + update_text_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) + update_text_parser.add_argument("--log-root", type=Path, default=DEFAULT_LOG_ROOT) + update_text_parser.add_argument("--api-host", default=DEFAULT_API_HOST) + update_text_parser.add_argument("--execute", action="store_true", help="Actually POST the CHANGE event.") + update_text_parser.add_argument("--json", action="store_true") + + return parser + + +def main(argv: list[str] | None = None) -> int: + parser = build_parser() + args = parser.parse_args(argv) + + if args.command == "docs": + documents = load_latest_backups(args.root) + payload = [ + { + "doc_id": item["doc_id"], + "title": item["title"], + "backup_file": item["backup_file"], + "modified_at": item["modified_at"], + } + for item in documents[: 
args.limit] + ] + dump_output(payload, args.json) + return 0 + + if args.command == "show": + documents = load_latest_backups(args.root) + payload = show_document(documents, args.doc_id, max_depth=args.max_depth) + if payload is None: + parser.error(f"document not found: {args.doc_id}") + dump_output(payload, args.json) + return 0 + + if args.command == "search": + documents = load_latest_backups(args.root) + payload = search_documents(documents, args.query, limit=args.limit) + dump_output(payload, args.json) + return 0 + + if args.command == "changes": + payload = load_change_events(args.log_root, doc_id=args.doc_id, limit=args.limit) + dump_output(payload, args.json) + return 0 + + if args.command == "folders": + folders = load_folders(args.storage_root) + _, folder_paths = build_folder_indexes(folders) + payload = [] + for folder in folders: + if args.query and args.query.lower() not in (folder.get("name") or "").lower(): + continue + payload.append({**folder, "path": folder_paths.get(folder["folder_id"], "")}) + payload.sort(key=lambda item: item.get("updated_at") or 0, reverse=True) + dump_output(payload[: args.limit], args.json) + return 0 + + if args.command == "folder-docs": + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + _, folder_paths = build_folder_indexes(folders) + payload = [ + { + **meta, + "folder_path": folder_paths.get(meta.get("folder_id", ""), ""), + } + for meta in metas + if meta.get("folder_id") == args.folder_id + ] + payload.sort(key=lambda item: item.get("updated_at") or 0, reverse=True) + dump_output(payload[: args.limit], args.json) + return 0 + + if args.command == "path-docs": + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + payload, folder, ambiguous = folder_documents(metas, folders, args.folder_ref) + if folder is None: + if ambiguous: + parser.error(ambiguous_error_message("folder", args.folder_ref, ambiguous, "path")) + 
parser.error(f"folder not found: {args.folder_ref}") + dump_output( + { + "folder": folder, + "documents": payload[: args.limit], + }, + args.json, + ) + return 0 + + if args.command == "recent": + payload = recent_documents( + load_latest_backups(args.root), + load_document_metas(args.storage_root), + load_folders(args.storage_root), + log_root=args.log_root, + limit=args.limit, + ) + dump_output(payload, args.json) + return 0 + + if args.command == "links": + backups = load_latest_backups(args.root) + metas = load_document_metas(args.storage_root) + title_lookup = {meta["doc_id"]: meta.get("title") for meta in metas if meta.get("doc_id")} + for backup in backups: + title_lookup.setdefault(backup["doc_id"], backup.get("title")) + payload = document_links(backups, args.doc_id, title_lookup=title_lookup) + dump_output(payload, args.json) + return 0 + + if args.command == "daily": + folders = load_folders(args.storage_root) + metas = load_document_metas(args.storage_root) + _, folder_paths = build_folder_indexes(folders) + matched_folders = [ + folder + for folder in folders + if args.query.lower() in (folder.get("name") or "").lower() + ] + matched_ids = {folder["folder_id"] for folder in matched_folders} + docs = [ + { + **meta, + "folder_path": folder_paths.get(meta.get("folder_id", ""), ""), + } + for meta in metas + if meta.get("folder_id") in matched_ids + ] + docs.sort(key=lambda item: item.get("updated_at") or 0, reverse=True) + payload = { + "folders": [ + {**folder, "path": folder_paths.get(folder["folder_id"], "")} + for folder in matched_folders + ], + "documents": docs[: args.limit], + } + dump_output(payload, args.json) + return 0 + + if args.command == "daily-current": + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + docs, folder, ambiguous = folder_documents(metas, folders, args.folder_ref) + if folder is None: + if ambiguous: + parser.error(ambiguous_error_message("folder", args.folder_ref, ambiguous, 
"path")) + parser.error(f"folder not found: {args.folder_ref}") + + selected, candidates = choose_current_daily_document( + docs, + allow_non_daily_titles=args.allow_non_daily_titles, + ) + if selected is None: + parser.error( + f"no current daily document found in {folder['path']}; " + "rerun with --allow-non-daily-titles or inspect with path-docs" + ) + + payload = { + "folder": folder, + "selection": { + "strategy": "latest_updated_date_titled_document" + if not args.allow_non_daily_titles + else "latest_updated_document_with_non_daily_fallback", + "allow_non_daily_titles": args.allow_non_daily_titles, + "candidate_count": len(candidates), + }, + "document": selected, + "candidates": candidates[: args.limit], + } + dump_output(payload, args.json) + return 0 + + if args.command == "daily-nodes": + user = get_active_user(args.storage_root) + if user is None: + parser.error("no active user auth found in local storage") + + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + docs, folder, ambiguous = folder_documents(metas, folders, args.folder_ref) + if folder is None: + if ambiguous: + parser.error(ambiguous_error_message("folder", args.folder_ref, ambiguous, "path")) + parser.error(f"folder not found: {args.folder_ref}") + + selected, candidates = choose_current_daily_document( + docs, + allow_non_daily_titles=args.allow_non_daily_titles, + ) + if selected is None: + parser.error( + f"no current daily document found in {folder['path']}; " + "rerun with --allow-non-daily-titles or inspect with path-docs" + ) + + remote_doc = fetch_document_remote(selected["doc_id"], user, api_host=args.api_host) + definition_raw = remote_doc.get("definition") + if not isinstance(definition_raw, str): + parser.error(f"document definition missing for: {selected['doc_id']}") + definition = json.loads(definition_raw) + nodes = list_document_nodes( + definition, + query=args.query, + max_depth=args.max_depth, + ) + payload = { + "folder": 
folder, + "selection": { + "strategy": "latest_updated_date_titled_document" + if not args.allow_non_daily_titles + else "latest_updated_document_with_non_daily_fallback", + "allow_non_daily_titles": args.allow_non_daily_titles, + "candidate_count": len(candidates), + }, + "document": { + **selected, + "base_version": remote_doc.get("baseVersion"), + }, + "filters": { + "query": args.query, + "max_depth": args.max_depth, + "limit": args.limit, + }, + "total_matches": len(nodes), + "nodes": nodes[: args.limit], + } + dump_output(payload, args.json) + return 0 + + if args.command == "open-path": + documents = load_latest_backups(args.root) + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + payload, ambiguous = show_document_by_reference( + documents, + metas, + folders, + args.doc_ref, + max_depth=args.max_depth, + ) + if payload is None: + if ambiguous: + parser.error(ambiguous_error_message("document", args.doc_ref, ambiguous, "doc_path")) + parser.error(f"document not found: {args.doc_ref}") + dump_output(payload, args.json) + return 0 + + if args.command == "doc-nodes": + user = get_active_user(args.storage_root) + if user is None: + parser.error("no active user auth found in local storage") + + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + meta, ambiguous = resolve_document_reference(metas, folders, args.doc_ref) + if meta is None: + if ambiguous: + parser.error(ambiguous_error_message("document", args.doc_ref, ambiguous, "doc_path")) + parser.error(f"document not found: {args.doc_ref}") + + remote_doc = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + definition_raw = remote_doc.get("definition") + if not isinstance(definition_raw, str): + parser.error(f"document definition missing for: {meta['doc_id']}") + definition = json.loads(definition_raw) + + nodes = list_document_nodes( + definition, + query=args.query, + max_depth=args.max_depth, + ) + 
payload = { + "document": { + "doc_id": meta["doc_id"], + "title": meta.get("title"), + "doc_path": meta.get("doc_path"), + "base_version": remote_doc.get("baseVersion"), + }, + "filters": { + "query": args.query, + "max_depth": args.max_depth, + "limit": args.limit, + }, + "total_matches": len(nodes), + "nodes": nodes[: args.limit], + } + dump_output(payload, args.json) + return 0 + + if args.command == "create-child": + if not args.parent_node_id and not args.parent_match_text: + parser.error("create-child requires --parent-node-id or --parent-match-text") + + user = get_active_user(args.storage_root) + if user is None: + parser.error("no active user auth found in local storage") + + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + meta, ambiguous = resolve_document_reference(metas, folders, args.doc_ref) + if meta is None: + if ambiguous: + parser.error(ambiguous_error_message("document", args.doc_ref, ambiguous, "doc_path")) + parser.error(f"document not found: {args.doc_ref}") + + events = load_change_events(args.log_root, doc_id=meta["doc_id"], limit=None) + member_context = resolve_mutation_member_context(events, meta["doc_id"], execute=args.execute) + if member_context is None: + parser.error(f"no member context found in sync logs for document: {meta['doc_id']}") + + remote_doc = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + definition_raw = remote_doc.get("definition") + if not isinstance(definition_raw, str): + parser.error(f"document definition missing for: {meta['doc_id']}") + definition = json.loads(definition_raw) + + parent_node, parent_path, node_ambiguous = resolve_node_reference_in_data( + definition, + node_id=args.parent_node_id, + match_text=args.parent_match_text, + field=args.parent_field, + ) + if parent_node is None or parent_path is None: + if node_ambiguous: + labels = [extract_plain_text(item["node"].get(args.parent_field)) for item in node_ambiguous[:5]] + 
parser.error(f"ambiguous parent node reference in {meta['doc_id']}: {labels}") + parser.error(f"parent node not found in {meta['doc_id']}") + + try: + result = perform_create_child( + user=user, + doc_id=meta["doc_id"], + member_id=member_context.get("member_id"), + version=remote_doc.get("baseVersion", 0), + parent_node=parent_node, + parent_path=parent_path, + text=args.text, + note=args.note, + index=args.index, + execute=args.execute, + api_host=args.api_host, + ) + except ValueError as exc: + parser.error(str(exc)) + + created = result["request"]["data"]["events"][0]["created"][0] + created_node = created["node"] + payload = { + "execute": args.execute, + "document": { + "doc_id": meta["doc_id"], + "title": meta.get("title"), + "doc_path": meta.get("doc_path"), + "base_version": remote_doc.get("baseVersion"), + }, + "member_context": member_context, + "target_parent": { + "node_id": parent_node.get("id"), + "field": args.parent_field, + "path": list(parent_path), + "api_path": node_path_to_api_path(parent_path), + "current_text": extract_plain_text(parent_node.get(args.parent_field)), + "existing_child_count": len(parent_node.get("children") or []), + }, + "new_child": { + "node_id": created_node.get("id"), + "index": created.get("index"), + "path": created.get("path"), + "text": args.text, + "note": args.note, + }, + "request": result["request"], + } + if member_context.get("member_id") is None: + payload["warning"] = "dry-run request uses a placeholder member context because no recent sync log entry was found" + + if args.execute: + payload["response"] = result["response"] + refreshed = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + refreshed_definition = json.loads(refreshed.get("definition") or "{}") + refreshed_node, _, _ = resolve_node_reference_in_data( + refreshed_definition, + node_id=created_node.get("id"), + ) + payload["verification"] = { + "base_version_after": refreshed.get("baseVersion"), + "created_node_present": 
refreshed_node is not None, + "node_text_after": extract_plain_text((refreshed_node or {}).get("text")), + "node_note_after": extract_plain_text((refreshed_node or {}).get("note")), + } + + dump_output(payload, args.json) + return 0 + + if args.command == "delete-node": + if not args.node_id and not args.match_text: + parser.error("delete-node requires --node-id or --match-text") + + user = get_active_user(args.storage_root) + if user is None: + parser.error("no active user auth found in local storage") + + metas = load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + meta, ambiguous = resolve_document_reference(metas, folders, args.doc_ref) + if meta is None: + if ambiguous: + parser.error(ambiguous_error_message("document", args.doc_ref, ambiguous, "doc_path")) + parser.error(f"document not found: {args.doc_ref}") + + events = load_change_events(args.log_root, doc_id=meta["doc_id"], limit=None) + member_context = resolve_mutation_member_context(events, meta["doc_id"], execute=args.execute) + if member_context is None: + parser.error(f"no member context found in sync logs for document: {meta['doc_id']}") + + remote_doc = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + definition_raw = remote_doc.get("definition") + if not isinstance(definition_raw, str): + parser.error(f"document definition missing for: {meta['doc_id']}") + definition = json.loads(definition_raw) + + node, path, node_ambiguous = resolve_node_reference_in_data( + definition, + node_id=args.node_id, + match_text=args.match_text, + field=args.field, + ) + if node is None or path is None: + if node_ambiguous: + labels = [extract_plain_text(item["node"].get(args.field)) for item in node_ambiguous[:5]] + parser.error(f"ambiguous node reference in {meta['doc_id']}: {labels}") + parser.error(f"node not found in {meta['doc_id']}") + + try: + parent_node, parent_path, index = parent_context_for_path(definition, path) + result = perform_delete_node( + 
user=user, + doc_id=meta["doc_id"], + member_id=member_context.get("member_id"), + version=remote_doc.get("baseVersion", 0), + node=node, + path=path, + parent_node=parent_node, + execute=args.execute, + api_host=args.api_host, + ) + except ValueError as exc: + parser.error(str(exc)) + + deleted = result["request"]["data"]["events"][0]["deleted"][0] + payload = { + "execute": args.execute, + "document": { + "doc_id": meta["doc_id"], + "title": meta.get("title"), + "doc_path": meta.get("doc_path"), + "base_version": remote_doc.get("baseVersion"), + }, + "member_context": member_context, + "target_node": { + "node_id": node.get("id"), + "field": args.field, + "path": list(path), + "api_path": node_path_to_api_path(path), + "parent_node_id": deleted.get("parentId"), + "parent_path": list(parent_path) if parent_path else None, + "index": index, + "current_text": extract_plain_text(node.get(args.field)), + "child_count": len(node.get("children") or []), + }, + "request": result["request"], + } + if member_context.get("member_id") is None: + payload["warning"] = "dry-run request uses a placeholder member context because no recent sync log entry was found" + + if args.execute: + payload["response"] = result["response"] + refreshed = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + refreshed_definition = json.loads(refreshed.get("definition") or "{}") + refreshed_node, _, _ = resolve_node_reference_in_data( + refreshed_definition, + node_id=node.get("id"), + field=args.field, + ) + payload["verification"] = { + "base_version_after": refreshed.get("baseVersion"), + "node_deleted": refreshed_node is None, + } + + dump_output(payload, args.json) + return 0 + + if args.command == "update-text": + if not args.node_id and not args.match_text: + parser.error("update-text requires --node-id or --match-text") + + user = get_active_user(args.storage_root) + if user is None: + parser.error("no active user auth found in local storage") + + metas = 
load_document_metas(args.storage_root) + folders = load_folders(args.storage_root) + meta, ambiguous = resolve_document_reference(metas, folders, args.doc_ref) + if meta is None: + if ambiguous: + parser.error(ambiguous_error_message("document", args.doc_ref, ambiguous, "doc_path")) + parser.error(f"document not found: {args.doc_ref}") + + events = load_change_events(args.log_root, doc_id=meta["doc_id"], limit=None) + member_context = resolve_mutation_member_context(events, meta["doc_id"], execute=args.execute) + if member_context is None: + parser.error(f"no member context found in sync logs for document: {meta['doc_id']}") + + remote_doc = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + definition_raw = remote_doc.get("definition") + if not isinstance(definition_raw, str): + parser.error(f"document definition missing for: {meta['doc_id']}") + definition = json.loads(definition_raw) + + node, path, node_ambiguous = resolve_node_reference_in_data( + definition, + node_id=args.node_id, + match_text=args.match_text, + field=args.field, + ) + if node is None or path is None: + if node_ambiguous: + labels = [extract_plain_text(item["node"].get(args.field)) for item in node_ambiguous[:5]] + parser.error(f"ambiguous node reference in {meta['doc_id']}: {labels}") + parser.error(f"node not found in {meta['doc_id']}") + + result = perform_text_update( + user=user, + doc_id=meta["doc_id"], + member_id=member_context.get("member_id"), + version=remote_doc.get("baseVersion", 0), + node=node, + path=path, + new_text=args.text, + field=args.field, + execute=args.execute, + api_host=args.api_host, + ) + + payload = { + "execute": args.execute, + "document": { + "doc_id": meta["doc_id"], + "title": meta.get("title"), + "doc_path": meta.get("doc_path"), + "base_version": remote_doc.get("baseVersion"), + }, + "member_context": member_context, + "target_node": { + "node_id": node.get("id"), + "field": args.field, + "path": list(path), + "current_text": 
extract_plain_text(node.get(args.field)), + "new_text": args.text, + }, + "request": result["request"], + } + if member_context.get("member_id") is None: + payload["warning"] = "dry-run request uses a placeholder member context because no recent sync log entry was found" + + if args.execute: + payload["response"] = result["response"] + refreshed = fetch_document_remote(meta["doc_id"], user, api_host=args.api_host) + refreshed_definition = json.loads(refreshed.get("definition") or "{}") + refreshed_node, _, _ = resolve_node_reference_in_data( + refreshed_definition, + node_id=node.get("id"), + field=args.field, + ) + payload["verification"] = { + "base_version_after": refreshed.get("baseVersion"), + "node_text_after": extract_plain_text((refreshed_node or {}).get(args.field)), + "matches_requested_text": extract_plain_text((refreshed_node or {}).get(args.field)) == args.text, + } + + dump_output(payload, args.json) + return 0 + + parser.error("unknown command") + return 2 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/mubu/agent-harness/pyproject.toml b/mubu/agent-harness/pyproject.toml new file mode 100644 index 000000000..09977b5b8 --- /dev/null +++ b/mubu/agent-harness/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=61"] +build-backend = "setuptools.build_meta" diff --git a/mubu/agent-harness/setup.py b/mubu/agent-harness/setup.py new file mode 100644 index 000000000..0a3eecf1d --- /dev/null +++ b/mubu/agent-harness/setup.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +import sys + + +PACKAGE_NAME = "cli-anything-mubu" +PACKAGE_VERSION = "0.1.0" + + +def _handle_metadata_query(argv: list[str]) -> bool: + if len(argv) != 2: + return False + if argv[1] == "--name": + print(PACKAGE_NAME) + return True + if argv[1] == "--version": + print(PACKAGE_VERSION) + return True + return False + + +if __name__ == "__main__" and _handle_metadata_query(sys.argv): + raise SystemExit(0) + +try: + from setuptools import 
find_namespace_packages, setup +except ModuleNotFoundError as exc: + raise SystemExit("setuptools is required for packaging commands; use `pip install setuptools`.") from exc + + +setup( + name=PACKAGE_NAME, + version=PACKAGE_VERSION, + description="Agent-oriented CLI bridge for the Mubu desktop app", + py_modules=["mubu_probe"], + install_requires=["click>=8.0"], + packages=find_namespace_packages(include=["cli_anything.*"]), + include_package_data=True, + package_data={ + "cli_anything.mubu": ["README.md"], + "cli_anything.mubu.skills": ["SKILL.md"], + "cli_anything.mubu.tests": ["TEST.md"], + }, + entry_points={ + "console_scripts": [ + "cli-anything-mubu=cli_anything.mubu.mubu_cli:entrypoint", + ] + }, +) diff --git a/mubu/agent-harness/skill_generator.py b/mubu/agent-harness/skill_generator.py new file mode 100644 index 000000000..49b438862 --- /dev/null +++ b/mubu/agent-harness/skill_generator.py @@ -0,0 +1,414 @@ +""" +SKILL.md Generator for CLI-Anything + +This module extracts metadata from CLI-Anything harnesses and generates +SKILL.md files following the skill-creator methodology. 
+""" + +from __future__ import annotations + +import argparse +import re +from dataclasses import dataclass, field +from pathlib import Path +from typing import Optional + + +def _format_display_name(name: str) -> str: + return name.replace("_", " ").replace("-", " ").title() + + +@dataclass +class CommandInfo: + name: str + description: str + + +@dataclass +class CommandGroup: + name: str + description: str + commands: list[CommandInfo] = field(default_factory=list) + + +@dataclass +class Example: + title: str + description: str + code: str + + +@dataclass +class SkillMetadata: + skill_name: str + skill_description: str + software_name: str + skill_intro: str + version: str + system_package: Optional[str] = None + command_groups: list[CommandGroup] = field(default_factory=list) + examples: list[Example] = field(default_factory=list) + + +def extract_intro_from_readme(content: str) -> str: + lines = content.split("\n") + intro_lines: list[str] = [] + in_intro = False + + for line in lines: + line = line.strip() + if not line: + if in_intro and intro_lines: + break + continue + if line.startswith("# "): + in_intro = True + continue + if line.startswith("##"): + break + if in_intro: + intro_lines.append(line) + + return " ".join(intro_lines) or "CLI interface for the software." 
+ + +def extract_system_package(content: str) -> Optional[str]: + patterns = [ + r"`apt install ([\w\-]+)`", + r"`brew install ([\w\-]+)`", + r"apt-get install ([\w\-]+)", + ] + + for pattern in patterns: + match = re.search(pattern, content) + if match: + package = match.group(1) + if "apt" in pattern: + return f"apt install {package}" + if "brew" in pattern: + return f"brew install {package}" + return None + + +def extract_version_from_setup(setup_path: Path) -> str: + content = setup_path.read_text(encoding="utf-8") + match = re.search(r'version\s*=\s*["\']([^"\']+)["\']', content) + if match: + return match.group(1) + return "1.0.0" + + +def extract_commands_from_cli(cli_path: Path) -> list[CommandGroup]: + content = cli_path.read_text(encoding="utf-8") + groups: list[CommandGroup] = [] + + group_pattern = ( + r'@(\w+)\.group\(([^)]*)\)' + r'(?:\s*@[\w.]+(?:\([^)]*\))?)*' + r'\s*def\s+(\w+)\([^)]*\)' + r'(?:\s*->\s*[^:]+)?' + r':\s*' + r'(?:"""([\s\S]*?)"""|\'\'\'([\s\S]*?)\'\'\')?' + ) + for match in re.finditer(group_pattern, content): + decorator_owner = match.group(1) + group_func = match.group(3) + group_doc = (match.group(4) or match.group(5) or "").strip() + if decorator_owner == "click" or group_func == "cli": + continue + groups.append( + CommandGroup( + name=group_func.replace("_", " ").title() or group_func.title(), + description=group_doc or f"Commands for {group_func.replace('_', ' ')} operations.", + ) + ) + + command_pattern = ( + r'@(\w+)\.command\(([^)]*)\)' + r'(?:\s*@[\w.]+(?:\([^)]*\))?)*' + r'\s*def\s+(\w+)\([^)]*\)' + r'(?:\s*->\s*[^:]+)?' + r':\s*' + r'(?:"""([\s\S]*?)"""|\'\'\'([\s\S]*?)\'\'\')?' 
+ ) + for match in re.finditer(command_pattern, content): + group_name = match.group(1) + decorator_args = match.group(2) + cmd_name = match.group(3) + cmd_doc = (match.group(4) or match.group(5) or "").strip() + if group_name == "cli": + continue + explicit_name = re.search(r'["\']([^"\']+)["\']', decorator_args) + command_display_name = explicit_name.group(1) if explicit_name else cmd_name.replace("_", "-") + for group in groups: + if group.name.lower().replace(" ", "_") == group_name.lower(): + group.commands.append( + CommandInfo( + name=command_display_name, + description=cmd_doc or f"Execute {cmd_name.replace('_', '-')} operation.", + ) + ) + + if not groups: + default_group = CommandGroup(name="General", description="General commands for the CLI.") + for match in re.finditer(command_pattern, content): + decorator_args = match.group(2) + cmd_name = match.group(3) + cmd_doc = (match.group(4) or match.group(5) or "").strip() + explicit_name = re.search(r'["\']([^"\']+)["\']', decorator_args) + default_group.commands.append( + CommandInfo( + name=explicit_name.group(1) if explicit_name else cmd_name.replace("_", "-"), + description=cmd_doc or f"Execute {cmd_name.replace('_', '-')} operation.", + ) + ) + if default_group.commands: + groups.append(default_group) + + return groups + + +def generate_examples(software_name: str, command_groups: list[CommandGroup]) -> list[Example]: + examples = [ + Example( + title="Interactive REPL Session", + description="Start an interactive session with persistent document and node context.", + code=f"""cli-anything-{software_name} +# Enter commands interactively +# Use 'help' to see builtins +# Use session commands to persist current-doc/current-node""", + ) + ] + + group_names = {group.name.lower() for group in command_groups} + if "discover" in group_names: + examples.append( + Example( + title="Discover Current Daily Note", + description="Resolve the current daily note and emit JSON output for an agent.", + 
code=f"""cli-anything-{software_name} --json discover daily-current""", + ) + ) + if "mutate" in group_names: + examples.append( + Example( + title="Dry-Run Atomic Update", + description="Inspect the exact outgoing payload before a live mutation.", + code=( + f"cli-anything-{software_name} mutate update-text " + "'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'new text' --json" + ), + ) + ) + return examples + + +def extract_cli_metadata(harness_path: str) -> SkillMetadata: + harness_root = Path(harness_path) + cli_anything_dir = harness_root / "cli_anything" + if not cli_anything_dir.exists(): + raise ValueError(f"cli_anything directory not found in {harness_root}") + + software_dirs = [path for path in cli_anything_dir.iterdir() if path.is_dir() and (path / "__init__.py").exists()] + if not software_dirs: + raise ValueError(f"No CLI package found in {harness_root}") + + software_dir = software_dirs[0] + software_name = software_dir.name + readme_path = software_dir / "README.md" + skill_intro = "" + system_package = None + if readme_path.exists(): + readme_content = readme_path.read_text(encoding="utf-8") + skill_intro = extract_intro_from_readme(readme_content) + system_package = extract_system_package(readme_content) + + setup_path = harness_root / "setup.py" + version = extract_version_from_setup(setup_path) if setup_path.exists() else "1.0.0" + + cli_file = software_dir / f"{software_name}_cli.py" + command_groups = extract_commands_from_cli(cli_file) if cli_file.exists() else [] + examples = generate_examples(software_name, command_groups) + skill_name = f"cli-anything-{software_name}" + skill_description = f"Command-line interface for {_format_display_name(software_name)} - {skill_intro[:100]}..." 
+ + return SkillMetadata( + skill_name=skill_name, + skill_description=skill_description, + software_name=software_name, + skill_intro=skill_intro, + version=version, + system_package=system_package, + command_groups=command_groups, + examples=examples, + ) + + +def generate_skill_md_simple(metadata: SkillMetadata) -> str: + lines = [ + "---", + "name: >-", + f" {metadata.skill_name}", + "description: >-", + f" {metadata.skill_description}", + "---", + "", + f"# {metadata.skill_name}", + "", + metadata.skill_intro, + "", + "## Installation", + "", + "This CLI is packaged from the canonical `agent-harness` source tree:", + "", + "```bash", + "pip install -e .", + "```", + "", + "**Prerequisites:**", + "- Python 3.10+", + "- An active Mubu desktop session on this machine", + "- Local Mubu profile data available to the CLI", + "", + "## Entry Points", + "", + "```bash", + f"cli-anything-{metadata.software_name}", + f"python -m cli_anything.{metadata.software_name}", + "```", + "", + "When invoked without a subcommand, the CLI enters an interactive REPL session.", + "", + "## Command Groups", + "", + ] + + for group in metadata.command_groups: + lines.extend([f"### {group.name}", "", group.description, ""]) + if group.commands: + lines.extend(["| Command | Description |", "|---------|-------------|"]) + for command in group.commands: + lines.append(f"| `{command.name}` | {command.description} |") + lines.append("") + + lines.extend( + [ + "## Recommended Agent Workflow", + "", + "```text", + "discover daily-current --json", + " ->", + "inspect daily-nodes --query '' --json", + " ->", + "session use-doc ''", + " ->", + "mutate update-text / create-child / delete-node --json", + " ->", + "--execute only after payload inspection", + "```", + "", + "## Safety Rules", + "", + "1. Prefer grouped commands for agent use; flat legacy commands remain for compatibility.", + "2. Use `--json` whenever an agent will parse the output.", + "3. 
Prefer `discover` or `inspect` commands before any `mutate` command.", + "4. Live mutations are dry-run by default and only execute with `--execute`.", + "5. Prefer `--node-id` and `--parent-node-id` over text matching.", + "6. `delete-node` removes the full targeted subtree.", + "7. Even same-text updates can still advance document version history.", + "", + "## Examples", + "", + ] + ) + for example in metadata.examples: + lines.extend([f"### {example.title}", "", example.description, "", "```bash", example.code, "```", ""]) + lines.extend( + [ + "## Session State", + "", + "The CLI maintains lightweight session state in JSON:", + "", + "- `current_doc`", + "- `current_node`", + "- local command history", + "", + "Use the `session` command group to inspect or update this state.", + "", + "## For AI Agents", + "", + "1. Start with `discover` or `inspect`, not `mutate`.", + "2. Use `session status --json` to recover persisted context.", + "3. Use grouped commands in generated prompts and automation.", + "4. Verify postconditions after any live mutation.", + "5. 
Read the package `TEST.md` and `README.md` when stricter operational detail is needed.", + "", + "## Version", + "", + metadata.version, + "", + ] + ) + return "\n".join(lines) + + +def generate_skill_md(metadata: SkillMetadata, template_path: Optional[str] = None) -> str: + try: + from jinja2 import Environment, FileSystemLoader + except ImportError: + return generate_skill_md_simple(metadata) + + if template_path is None: + template_path = Path(__file__).parent / "templates" / "SKILL.md.template" + else: + template_path = Path(template_path) + + if not template_path.exists(): + return generate_skill_md_simple(metadata) + + env = Environment(loader=FileSystemLoader(template_path.parent)) + template = env.get_template(template_path.name) + return template.render( + skill_name=metadata.skill_name, + skill_description=metadata.skill_description, + software_name=metadata.software_name, + skill_intro=metadata.skill_intro, + version=metadata.version, + system_package=metadata.system_package, + command_groups=[ + { + "name": group.name, + "description": group.description, + "commands": [{"name": command.name, "description": command.description} for command in group.commands], + } + for group in metadata.command_groups + ], + examples=[{"title": example.title, "description": example.description, "code": example.code} for example in metadata.examples], + ) + + +def generate_skill_file(harness_path: str, output_path: Optional[str] = None, template_path: Optional[str] = None) -> str: + metadata = extract_cli_metadata(harness_path) + content = generate_skill_md(metadata, template_path) + if output_path is None: + output = Path(harness_path) / "cli_anything" / metadata.software_name / "skills" / "SKILL.md" + else: + output = Path(output_path) + output.parent.mkdir(parents=True, exist_ok=True) + output.write_text(content, encoding="utf-8") + return str(output) + + +def main(argv: Optional[list[str]] = None) -> int: + parser = argparse.ArgumentParser(description="Generate 
SKILL.md for CLI-Anything harnesses") + parser.add_argument("harness_path", help="Path to the agent-harness directory") + parser.add_argument("-o", "--output", help="Output path for SKILL.md", default=None) + parser.add_argument("-t", "--template", help="Path to a custom Jinja2 template", default=None) + args = parser.parse_args(argv) + output_path = generate_skill_file(args.harness_path, output_path=args.output, template_path=args.template) + print(output_path) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/mubu/agent-harness/templates/SKILL.md.template b/mubu/agent-harness/templates/SKILL.md.template new file mode 100644 index 000000000..12e7766ac --- /dev/null +++ b/mubu/agent-harness/templates/SKILL.md.template @@ -0,0 +1,104 @@ +--- +name: >- + {{ skill_name }} +description: >- + {{ skill_description }} +--- + +# {{ skill_name }} + +{{ skill_intro }} + +## Installation + +This CLI is packaged from the canonical `agent-harness` source tree: + +```bash +pip install -e . +``` + +**Prerequisites:** +- Python 3.10+ +- An active Mubu desktop session on this machine +- Local Mubu profile data available to the CLI + +## Entry Points + +```bash +cli-anything-{{ software_name }} +python -m cli_anything.{{ software_name }} +``` + +When invoked without a subcommand, the CLI enters an interactive REPL session. + +## Command Groups + +{% for group in command_groups %} +### {{ group.name }} + +{{ group.description }} + +| Command | Description | +|---------|-------------| +{% for cmd in group.commands %} +| `{{ cmd.name }}` | {{ cmd.description }} | +{% endfor %} + +{% endfor %} +## Recommended Agent Workflow + +```text +discover daily-current --json + -> +inspect daily-nodes --query '' --json + -> +session use-doc '' + -> +mutate update-text / create-child / delete-node --json + -> +--execute only after payload inspection +``` + +## Safety Rules + +1. Prefer grouped commands for agent use; flat legacy commands remain for compatibility. 
+2. Use `--json` whenever an agent will parse the output. +3. Prefer `discover` or `inspect` commands before any `mutate` command. +4. Live mutations are dry-run by default and only execute with `--execute`. +5. Prefer `--node-id` and `--parent-node-id` over text matching. +6. `delete-node` removes the full targeted subtree. +7. Even same-text updates can still advance document version history. + +## Examples + +{% for example in examples %} +### {{ example.title }} + +{{ example.description }} + +```bash +{{ example.code }} +``` + +{% endfor %} +## Session State + +The CLI maintains lightweight session state in JSON: + +- `current_doc` +- `current_node` +- local command history + +Use the `session` command group to inspect or update this state. + +## For AI Agents + +1. Start with `discover` or `inspect`, not `mutate`. +2. Use `session status --json` to recover persisted context. +3. Use grouped commands in generated prompts and automation. +4. Verify postconditions after any live mutation. +5. Read the package `TEST.md` and `README.md` when stricter operational detail is needed. 
+ +## Version + +{{ version }} diff --git a/registry.json b/registry.json index 86011a3a6..ed251c83f 100644 --- a/registry.json +++ b/registry.json @@ -2,7 +2,7 @@ "meta": { "repo": "https://github.com/HKUDS/CLI-Anything", "description": "CLI-Hub โ€” Agent-native stateful CLI interfaces for softwares, codebases, and Web Services", - "updated": "2026-03-17" + "updated": "2026-03-18" }, "clis": [ { @@ -113,6 +113,18 @@ "skill_md": "libreoffice/agent-harness/cli_anything/libreoffice/skills/SKILL.md", "category": "office" }, + { + "name": "mubu", + "display_name": "Mubu", + "version": "0.1.0", + "description": "Knowledge management and outlining via local Mubu desktop data", + "requires": "Mubu desktop app", + "homepage": "https://mubu.com", + "install_cmd": "pip install git+https://github.com/HKUDS/CLI-Anything.git#subdirectory=mubu/agent-harness", + "entry_point": "cli-anything-mubu", + "skill_md": "mubu/agent-harness/cli_anything/mubu/skills/SKILL.md", + "category": "office" + }, { "name": "mermaid", "display_name": "Mermaid", From c74bf7fdcea15965220c692d299b920a968a48e6 Mon Sep 17 00:00:00 2001 From: cnfjlhj <166828808+cnfjlhj@users.noreply.github.com> Date: Wed, 18 Mar 2026 13:28:48 +0800 Subject: [PATCH 2/4] fix(mubu): generalize daily folder resolution Require an explicit daily folder reference or MUBU_DAILY_FOLDER for daily helpers, scrub personal examples from docs and generated skill content, and harden live E2E checks for environment-specific SSL failures. 
--- mubu/agent-harness/README.md | 2 + .../agent-harness/cli_anything/mubu/README.md | 6 + .../cli_anything/mubu/mubu_cli.py | 44 +++--- .../cli_anything/mubu/skills/SKILL.md | 14 +- .../cli_anything/mubu/tests/TEST.md | 127 ++++++++++++------ .../mubu/tests/test_agent_harness.py | 4 + .../mubu/tests/test_cli_entrypoint.py | 63 ++++++++- .../cli_anything/mubu/tests/test_full_e2e.py | 121 ++++++++++++++--- mubu/agent-harness/mubu_probe.py | 49 +++++-- mubu/agent-harness/skill_generator.py | 23 ++-- .../agent-harness/templates/SKILL.md.template | 6 +- 11 files changed, 352 insertions(+), 107 deletions(-) diff --git a/mubu/agent-harness/README.md b/mubu/agent-harness/README.md index e5fb93af2..add42ae38 100644 --- a/mubu/agent-harness/README.md +++ b/mubu/agent-harness/README.md @@ -23,6 +23,7 @@ What this gives you: - the canonical implementation now lives inside this directory - the same `cli-anything-mubu` console script is exposed - the main CLI is Click-based with grouped command domains +- no-argument daily helpers only work when `MUBU_DAILY_FOLDER` is configured - `skill_generator.py` can regenerate the packaged `skills/SKILL.md` Canonical implementation now lives under: @@ -47,4 +48,5 @@ Current state: - canonical package source is now under `agent-harness/cli_anything/mubu/...` - root-level wrappers preserve backward compatibility during development - grouped `discover` / `inspect` / `mutate` / `session` commands now exist +- daily-note helpers require an explicit folder reference unless `MUBU_DAILY_FOLDER` is set - the packaged `SKILL.md` is now generated from the canonical harness diff --git a/mubu/agent-harness/cli_anything/mubu/README.md b/mubu/agent-harness/cli_anything/mubu/README.md index 9357c2de8..cda0c89e0 100644 --- a/mubu/agent-harness/cli_anything/mubu/README.md +++ b/mubu/agent-harness/cli_anything/mubu/README.md @@ -9,6 +9,12 @@ This package lives in the CLI-Anything-aligned harness tree and exposes: - default REPL when no subcommand is 
supplied - REPL banner with app version, packaged skill path, and history path - persisted `current-doc` and `current-node` REPL context +- grouped `discover` / `inspect` / `mutate` / `session` commands + +Daily helpers are now explicit by default: + +- pass a daily-folder reference to `discover daily-current`, `inspect daily-nodes`, or `session use-daily` +- or set `MUBU_DAILY_FOLDER` if you want those helpers to work without an argument Canonical source paths: diff --git a/mubu/agent-harness/cli_anything/mubu/mubu_cli.py b/mubu/agent-harness/cli_anything/mubu/mubu_cli.py index 63a4700a7..83931178f 100644 --- a/mubu/agent-harness/cli_anything/mubu/mubu_cli.py +++ b/mubu/agent-harness/cli_anything/mubu/mubu_cli.py @@ -51,7 +51,7 @@ Builtins: exit, quit Leave the REPL use-doc Set the current document reference for this REPL session use-node Set the current node reference for this REPL session - use-daily Resolve and set the current daily document + use-daily [ref] Resolve and set the current daily document current-doc Show the current document reference current-node Show the current node reference clear-doc Clear the current document reference @@ -62,13 +62,15 @@ Builtins: Examples: recent --limit 5 --json - discover daily-current - discover daily-current --json - inspect daily-nodes --query ๆ—ฅๅฟ—ๆต --json - session use-doc 'Workspace/Daily tasks/26.03.16' - mutate create-child @doc --parent-node-id node-demo1 --text 'scratch child' --json + discover daily-current '' + discover daily-current --json '' + inspect daily-nodes '' --query '' --json + session use-doc '' + mutate create-child @doc --parent-node-id --text 'scratch child' --json mutate delete-node @doc --node-id @node --json - update-text 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'new text' --json + update-text '' --node-id --text 'new text' --json + +If you prefer no-argument daily helpers, set MUBU_DAILY_FOLDER=''. 
""" @@ -156,14 +158,15 @@ def append_command_history(command_line: str) -> None: save_session_state(session) -def resolve_current_daily_doc_ref(folder_ref: str = "Daily tasks") -> str: +def resolve_current_daily_doc_ref(folder_ref: str | None = None) -> str: + resolved_folder_ref = mubu_probe.resolve_daily_folder_ref(folder_ref) metas = mubu_probe.load_document_metas(mubu_probe.DEFAULT_STORAGE_ROOT) folders = mubu_probe.load_folders(mubu_probe.DEFAULT_STORAGE_ROOT) - docs, folder, ambiguous = mubu_probe.folder_documents(metas, folders, folder_ref) + docs, folder, ambiguous = mubu_probe.folder_documents(metas, folders, resolved_folder_ref) if folder is None: if ambiguous: - raise RuntimeError(mubu_probe.ambiguous_error_message("folder", folder_ref, ambiguous, "path")) - raise RuntimeError(f"folder not found: {folder_ref}") + raise RuntimeError(mubu_probe.ambiguous_error_message("folder", resolved_folder_ref, ambiguous, "path")) + raise RuntimeError(f"folder not found: {resolved_folder_ref}") selected, _ = mubu_probe.choose_current_daily_document(docs) if selected is None or not selected.get("doc_path"): raise RuntimeError(f"no current daily document found in {folder['path']}") @@ -334,15 +337,16 @@ def handle_repl_builtin(argv: list[str], session: dict[str, object]) -> tuple[bo click.echo(f"Current node: {node_ref}") return True, 0 if command == "use-daily": - folder_ref = " ".join(argv[1:]) if len(argv) > 1 else "Daily tasks" + folder_ref = " ".join(argv[1:]).strip() if len(argv) > 1 else None try: - doc_ref = resolve_current_daily_doc_ref(folder_ref) + resolved_folder_ref = mubu_probe.resolve_daily_folder_ref(folder_ref) + doc_ref = resolve_current_daily_doc_ref(resolved_folder_ref) except RuntimeError as exc: click.echo(str(exc), err=True) return True, 0 session["current_doc"] = doc_ref save_session_state(session) - append_command_history(f"use-daily {folder_ref}".strip()) + append_command_history(f"use-daily {resolved_folder_ref}") click.echo(f"Current doc: 
{doc_ref}") return True, 0 @@ -408,7 +412,7 @@ def cli(ctx: click.Context, json_output: bool) -> int: @cli.group(context_settings=CONTEXT_SETTINGS) def discover() -> None: - """Discovery commands for folders, documents, recency, and Daily tasks resolution.""" + """Discovery commands for folders, documents, recency, and daily-document resolution.""" @discover.command("docs", context_settings=CONTEXT_SETTINGS, add_help_option=False) @@ -618,12 +622,16 @@ def use_node(node_ref: tuple[str, ...]) -> int: @click.argument("folder_ref", nargs=-1) def use_daily(folder_ref: tuple[str, ...]) -> int: """Resolve and persist the current daily document reference.""" - value = " ".join(folder_ref) if folder_ref else "Daily tasks" - doc_ref = resolve_current_daily_doc_ref(value) + raw_value = " ".join(folder_ref).strip() if folder_ref else None + try: + resolved_folder_ref = mubu_probe.resolve_daily_folder_ref(raw_value) + doc_ref = resolve_current_daily_doc_ref(resolved_folder_ref) + except RuntimeError as exc: + raise click.ClickException(str(exc)) from exc session_state = load_session_state() session_state["current_doc"] = doc_ref save_session_state(session_state) - append_command_history(f"session use-daily {value}".strip()) + append_command_history(f"session use-daily {resolved_folder_ref}") click.echo(f"Current doc: {doc_ref}") return 0 diff --git a/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md b/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md index eb33a143c..60d654cef 100644 --- a/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md +++ b/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md @@ -21,6 +21,7 @@ pip install -e . - Python 3.10+ - An active Mubu desktop session on this machine - Local Mubu profile data available to the CLI +- Set `MUBU_DAILY_FOLDER` if you want no-argument daily helpers ## Entry Points @@ -36,7 +37,7 @@ When invoked without a subcommand, the CLI enters an interactive REPL session. 
 ### Discover
 
-Discovery commands for folders, documents, recency, and Daily tasks resolution.
+Discovery commands for folders, documents, recency, and daily-document resolution.
 
 | Command | Description |
 |---------|-------------|
@@ -123,9 +124,9 @@ Session and state commands for current document/node context and local command h
 
 ## Recommended Agent Workflow
 
 ```text
-discover daily-current --json
+discover daily-current '<daily-folder-ref>' --json
  ->
-inspect daily-nodes --query '<text>' --json
+inspect daily-nodes '<daily-folder-ref>' --query '<text>' --json
  ->
 session use-doc '<doc-ref>'
  ->
@@ -143,6 +144,7 @@ mutate update-text / create-child / delete-node --json
 5. Prefer `--node-id` and `--parent-node-id` over text matching.
 6. `delete-node` removes the full targeted subtree.
 7. Even same-text updates can still advance document version history.
+8. Pass a daily-folder reference explicitly or set `MUBU_DAILY_FOLDER` before using no-arg daily helpers.
 
 ## Examples
 
@@ -161,10 +163,10 @@ cli-anything-mubu
 
 ### Discover Current Daily Note
 
-Resolve the current daily note and emit JSON output for an agent.
+Resolve the current daily note from an explicit folder reference.
 
 ```bash
-cli-anything-mubu --json discover daily-current
+cli-anything-mubu --json discover daily-current '<daily-folder-ref>'
 ```
 
 
@@ -173,7 +175,7 @@ cli-anything-mubu --json discover daily-current
 
 Inspect the exact outgoing payload before a live mutation.
 
 ```bash
-cli-anything-mubu mutate update-text 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'new text' --json
+cli-anything-mubu mutate update-text '<doc-ref>' --node-id <node-id> --text 'new text' --json
 ```
 
diff --git a/mubu/agent-harness/cli_anything/mubu/tests/TEST.md b/mubu/agent-harness/cli_anything/mubu/tests/TEST.md
index 6242b7014..e2bb0354f 100644
--- a/mubu/agent-harness/cli_anything/mubu/tests/TEST.md
+++ b/mubu/agent-harness/cli_anything/mubu/tests/TEST.md
@@ -5,17 +5,20 @@ This file follows the CLI-Anything habit of keeping the test plan and the execut
 ## Test Inventory Plan
 
 - `test_mubu_probe.py`: 26 unit / light integration tests planned
+- `test_core.py`: 35 pure-logic contract tests planned
 - `test_cli_entrypoint.py`: 13 subprocess / entrypoint tests planned
-- `test_agent_harness.py`: 9 packaging / harness-layout tests planned
-- `test_live_api.py`: 6 opt-in live-session tests planned for a later phase
+- `test_full_e2e.py`: 11 local-data end-to-end tests planned
+- `test_agent_harness.py`: 11 packaging / harness-layout tests planned
 
 Current status:
 
 - `test_mubu_probe.py` exists and passes
+- `test_core.py` exists and passes
 - `test_cli_entrypoint.py` exists and passes
+- `test_full_e2e.py` exists and passes when local Mubu data is available
 - `test_agent_harness.py` exists and passes
 - canonical harness test modules now also exist under `agent-harness/cli_anything/mubu/tests/`
-- `test_live_api.py` is not implemented yet because live mutation tests need explicit opt-in controls
+- no separate `test_live_api.py` exists yet; local-data live coverage currently lives in `test_full_e2e.py` with skip guards and dry-run-first mutation checks
 
 ## Unit Test Plan
 
@@ -103,6 +106,41 @@ Expected subprocess count:
 
 - 13 tests
 
+### Module: `test_core.py`
+
+Behaviors covered now:
+
+- pure helper and transformation contracts
+- plain-text and rich-text HTML conversion
+- node id generation
+- node iteration and path conversion
+- folder index construction
+- 
daily-title classification +- normalization helpers and revision parsing +- timestamp parsing and formatting +- default local-path discovery +- ambiguity message formatting +- document metadata enrichment and record deduplication + +Expected pure-logic count: + +- 35 tests + +### Module: `test_full_e2e.py` + +Behaviors covered now: + +- live local-data discovery commands +- current-daily resolution with `MUBU_DAILY_FOLDER` +- live node listing from the current daily note +- `session use-daily` persisted state +- REPL `use-daily` plus follow-on inspection +- dry-run `update-text`, `create-child`, and `delete-node` + +Expected local-data E2E count: + +- 11 tests + ### Module: `test_agent_harness.py` Behaviors covered now: @@ -119,7 +157,7 @@ Behaviors covered now: Expected packaging count: -- 9 tests +- 11 tests ## E2E Test Plan @@ -128,7 +166,7 @@ These workflows are currently verified manually against the real local Mubu sess Planned live scenarios: 1. read recent documents from the local desktop profile -2. resolve `Workspace/Daily tasks` and identify the current daily note +2. resolve `` and identify the current daily note 3. enumerate live nodes inside the current daily note 4. dry-run a text update and inspect the exact outgoing payload 5. 
execute one same-text live update to validate auth/member/version wiring
@@ -157,10 +195,10 @@ What should be verified in later automated live tests:
 
 ### Workflow 1: Daily Note Discovery
 
-- Simulates: Codex entering the same daily workspace the user is using
+- Simulates: an agent entering a configured daily-note workspace
 - Operations chained:
   - `recent`
-  - `path-docs 'Workspace/Daily tasks'`
+  - `path-docs '<daily-folder-ref>'`
 - Verified:
   - folder path resolution
   - correct daily-note document ids
@@ -170,8 +208,8 @@ What should be verified in later automated live tests:
 
 - Simulates: Codex locating the exact node to edit before sending any write
 - Operations chained:
-  - `open-path 'Workspace/Daily tasks/26.03.16'`
-  - `doc-nodes 'Workspace/Daily tasks/26.03.16' --query 'ๆ—ฅๅฟ—ๆต'`
+  - `open-path '<doc-ref>'`
+  - `doc-nodes '<doc-ref>' --query '<text>'`
 - Verified:
   - live document lookup
   - correct node id
@@ -181,7 +219,8 @@
 
 - Simulates: Codex jumping directly to the user's current daily note
 - Operations chained:
-  - `daily-current --json`
+  - `daily-current '<daily-folder-ref>' --json`
+  - `daily-current --json` with `MUBU_DAILY_FOLDER='<daily-folder-ref>'`
 - Verified:
   - date-like title filtering
   - template exclusion
@@ -191,7 +230,8 @@
 
 - Simulates: Codex looking for an anchor inside today's daily note without manually resolving the path first
 - Operations chained:
-  - `daily-nodes --query '...'`
+  - `daily-nodes '<daily-folder-ref>' --query '<text>'`
+  - `daily-nodes --query '<text>'` with `MUBU_DAILY_FOLDER='<daily-folder-ref>'`
 - Verified:
   - current daily-note resolution
   - live document fetch
@@ -244,17 +284,13 @@ Command:
 
 ```bash
-python3 -m unittest tests/test_mubu_probe.py tests/test_cli_entrypoint.py tests/test_agent_harness.py
+CLI_ANYTHING_FORCE_INSTALLED=1 python3 -m pytest cli_anything/mubu/tests -q
 ```
 
 Latest result:
 
 ```text
-................................................
----------------------------------------------------------------------- -Ran 48 tests in 16.880s - -OK +96 passed ``` ### Syntax Verification @@ -284,6 +320,7 @@ Commands: .venv/bin/python -m pip install -e ./agent-harness .venv/bin/python -m pip install -e . .venv/bin/cli-anything-mubu --help +.venv/bin/cli-anything-mubu --json discover daily-current '' .venv/bin/cli-anything-mubu --json discover daily-current .venv/bin/cli-anything-mubu session status --json tmpdir=$(mktemp -d) @@ -294,7 +331,8 @@ Latest result: - both editable-install paths succeeded when run sequentially - installed `--help` exposes grouped `discover` / `inspect` / `mutate` / `session` domains -- installed `discover daily-current` resolved the real daily note `Workspace/Daily tasks/26.03.16` +- installed `discover daily-current ''` resolved the current daily note +- installed `discover daily-current` also works when `MUBU_DAILY_FOLDER` is configured - installed `session status --json` returned persisted state successfully - installed no-arg REPL started cleanly, displayed the packaged canonical skill path, and exited cleanly @@ -335,13 +373,15 @@ Latest result: Commands: ```bash -.venv/bin/cli-anything-mubu daily-current --json +.venv/bin/cli-anything-mubu discover daily-current '' --json +.venv/bin/cli-anything-mubu discover daily-current --json printf 'exit\n' | env CLI_ANYTHING_MUBU_STATE_DIR="$(mktemp -d)" .venv/bin/cli-anything-mubu ``` Latest result: -- installed `daily-current --json` passed against the real local Mubu session +- installed `discover daily-current '' --json` passed against the real local Mubu session +- installed no-arg `discover daily-current --json` passed when `MUBU_DAILY_FOLDER` was configured - installed REPL banner pointed to `agent-harness/cli_anything/mubu/skills/SKILL.md` ### Wheel Packaging Verification @@ -387,7 +427,8 @@ python3 -m venv .venv .venv/bin/cli-anything-mubu --help .venv/bin/cli-anything-mubu repl --help tmpdir=$(mktemp -d) && env 
CLI_ANYTHING_MUBU_STATE_DIR="$tmpdir" /usr/bin/zsh -lc "printf 'exit\n' | .venv/bin/cli-anything-mubu" -.venv/bin/cli-anything-mubu daily-current --json +.venv/bin/cli-anything-mubu discover daily-current '' --json +.venv/bin/cli-anything-mubu discover daily-current --json .venv/bin/python -m pip install -e ./agent-harness python3 agent-harness/setup.py --name python3 agent-harness/setup.py --version @@ -404,7 +445,8 @@ Latest result: - REPL can store and report the current node reference during a session - REPL can persist `current-node` across independent processes when given the same state directory - REPL can expand both `@doc` and `@node` into a real dry-run command -- installed console script can resolve the current daily note +- installed console script can resolve the current daily note from an explicit folder reference +- installed console script also supports no-arg daily resolution when `MUBU_DAILY_FOLDER` is set - `agent-harness/` now works as a real editable-install root - harness setup metadata reports the correct package identity @@ -413,14 +455,16 @@ Latest result: Commands executed on the real machine: ```bash -python3 mubu_probe.py path-docs 'Workspace/Daily tasks' --limit 5 --json -python3 mubu_probe.py daily-current --json -python3 mubu_probe.py daily-nodes --query 'ๆ—ฅๅฟ—ๆต' --json -python3 mubu_probe.py doc-nodes 'Workspace/Daily tasks/26.03.16' --query 'ๆ—ฅๅฟ—ๆต' --json -python3 mubu_probe.py create-child 'Workspace/Daily tasks/26.03.16' --parent-node-id node-demo1 --text 'CLI bridge dry run child' --note 'not executed' --json -python3 mubu_probe.py delete-node 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --json -python3 mubu_probe.py update-text 'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'ๆ—ฅๅฟ—ๆต' --json -python3 mubu_probe.py update-text 'Workspace/Daily tasks/26.03.16' --match-text 'ๆ—ฅๅฟ—ๆต' --text 'ๆ—ฅๅฟ—ๆต' --execute --json +python3 mubu_probe.py path-docs '' --limit 5 --json +python3 mubu_probe.py 
daily-current '' --json +MUBU_DAILY_FOLDER='' python3 mubu_probe.py daily-current --json +python3 mubu_probe.py daily-nodes '' --query '' --json +MUBU_DAILY_FOLDER='' python3 mubu_probe.py daily-nodes --query '' --json +python3 mubu_probe.py doc-nodes '' --query '' --json +python3 mubu_probe.py create-child '' --parent-node-id --text 'CLI bridge dry run child' --note 'not executed' --json +python3 mubu_probe.py delete-node '' --node-id --json +python3 mubu_probe.py update-text '' --node-id --text '' --json +python3 mubu_probe.py update-text '' --match-text '' --text '' --execute --json python3 - <<'PY' # create-child --execute scratch node, then delete-node --execute that exact node id PY @@ -428,23 +472,22 @@ PY Observed results: -- `path-docs` resolved folder id `folder-daily-01` -- current daily doc resolved to `doc-demo-01` -- `daily-current` resolved the same current daily path `Workspace/Daily tasks/26.03.16` in one step -- `daily-nodes` resolved the same current daily note and returned live node `node-demo1` -- `doc-nodes` resolved node id `node-demo1`, path `["nodes", 3, 0]`, and api path `["nodes", 3, "children", 0]` -- `create-child` dry-run resolved parent `node-demo1`, child insert index `4`, and child path `["nodes", 3, "children", 0, "children", 4]` -- `delete-node` dry-run resolved parent `qv9klzkq2L`, delete index `0`, and api path `["nodes", 3, "children", 0]` +- `path-docs` resolved the configured daily folder successfully +- `daily-current` resolved the same current daily note with both the explicit folder argument and `MUBU_DAILY_FOLDER` +- `daily-nodes` resolved the same current daily note and returned the targeted live node +- `doc-nodes` returned a stable node id plus both simplified and API paths for the target node +- `create-child` dry-run resolved the parent node, child insert index, and canonical child path +- `delete-node` dry-run resolved the parent id, delete index, and canonical API path - dry-run update produced the expected 
`CHANGE` payload - real execute returned success -- live document version advanced from `256` to `257` -- post-fetch verification confirmed the node text still read `ๆ—ฅๅฟ—ๆต` -- reversible scratch create/delete advanced live version from `261` to `262` to `263` -- scratch node `hUVCZEUf3R` was present after create and absent after delete +- live document version advanced after execution +- post-fetch verification confirmed the target node text matched the requested value +- reversible scratch create/delete advanced live version on each execute call +- the scratch node was present after create and absent after delete ## Summary Statistics -- automated tests: 40 / 40 pass +- automated tests: 96 / 96 pass - syntax check: pass - help/CLI surface checks: pass - isolated install / entrypoint checks: pass diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py b/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py index 9e1e9945c..8f1cd871d 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py @@ -126,6 +126,10 @@ class AgentHarnessPackagingTests(unittest.TestCase): self.assertIn("### Session", content) self.assertIn("| `status` |", content) self.assertIn("| `state-path` |", content) + self.assertIn("MUBU_DAILY_FOLDER", content) + self.assertNotIn("Workspace/Daily tasks", content) + self.assertNotIn("Daily tasks resolution", content) + self.assertIn("## Version\n\n0.1.0", content) finally: output_path.unlink(missing_ok=True) diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py b/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py index 9bf3a7ea4..ed592356d 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py @@ -7,15 +7,61 @@ import unittest from pathlib import Path from cli_anything.mubu.mubu_cli import 
expand_repl_aliases_with_state -from mubu_probe import DEFAULT_BACKUP_ROOT, DEFAULT_STORAGE_ROOT +from mubu_probe import ( + DEFAULT_BACKUP_ROOT, + DEFAULT_STORAGE_ROOT, + build_folder_indexes, + choose_current_daily_document, + load_document_metas, + load_folders, +) REPO_ROOT = Path(__file__).resolve().parents[4] -SAMPLE_DOC_REF = "workspace/Daily tasks/2026.03.18" -SAMPLE_NODE_ID = "node-demo1" +SAMPLE_DOC_REF = "workspace/reference docs/sample-doc" +SAMPLE_NODE_ID = "node-sample-1" HAS_LOCAL_DATA = DEFAULT_BACKUP_ROOT.is_dir() and DEFAULT_STORAGE_ROOT.is_dir() +def detect_daily_folder_ref() -> str | None: + if not HAS_LOCAL_DATA: + return None + + metas = load_document_metas(DEFAULT_STORAGE_ROOT) + folders = load_folders(DEFAULT_STORAGE_ROOT) + _, folder_paths = build_folder_indexes(folders) + docs_by_folder: dict[str, list[dict[str, object]]] = {} + for meta in metas: + folder_id = meta.get("folder_id") + if isinstance(folder_id, str): + docs_by_folder.setdefault(folder_id, []).append(meta) + + best_path: str | None = None + best_score = -1 + for folder in folders: + folder_id = folder.get("folder_id") + if not isinstance(folder_id, str): + continue + _, candidates = choose_current_daily_document(docs_by_folder.get(folder_id, [])) + if not candidates: + continue + folder_path = folder_paths.get(folder_id, "") + if not folder_path: + continue + score = max( + max(item.get("updated_at") or 0, item.get("created_at") or 0) + for item in candidates + ) + if score > best_score: + best_score = score + best_path = folder_path + return best_path + + +DETECTED_DAILY_FOLDER_REF = detect_daily_folder_ref() +HAS_DAILY_FOLDER = HAS_LOCAL_DATA and DETECTED_DAILY_FOLDER_REF is not None + + def resolve_cli() -> list[str]: installed = shutil.which("cli-anything-mubu") if installed: @@ -191,9 +237,16 @@ class CliEntrypointTests(unittest.TestCase): self.assertEqual(final.returncode, 0, msg=final.stderr) self.assertIn("Current node: ", final.stdout) - 
@unittest.skipUnless(HAS_LOCAL_DATA, "Mubu local data directories not found") + @unittest.skipUnless(HAS_DAILY_FOLDER, "Mubu local data or daily folder not found") def test_grouped_discover_daily_current_supports_global_json_flag(self): - result = self.run_cli(["--json", "discover", "daily-current"]) + missing = self.run_cli(["--json", "discover", "daily-current"]) + self.assertNotEqual(missing.returncode, 0) + self.assertIn("MUBU_DAILY_FOLDER", missing.stderr) + + result = self.run_cli( + ["--json", "discover", "daily-current"], + extra_env={"MUBU_DAILY_FOLDER": DETECTED_DAILY_FOLDER_REF}, + ) self.assertEqual(result.returncode, 0, msg=result.stderr) self.assertIn('"doc_path"', result.stdout) diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py b/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py index fd842dc68..13ce83391 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_full_e2e.py @@ -22,7 +22,15 @@ REPO_ROOT = Path(__file__).resolve().parents[4] # Import mubu_probe defaults for path detection sys.path.insert(0, str(REPO_ROOT / "agent-harness")) try: - from mubu_probe import DEFAULT_BACKUP_ROOT, DEFAULT_LOG_ROOT, DEFAULT_STORAGE_ROOT + from mubu_probe import ( + DEFAULT_BACKUP_ROOT, + DEFAULT_LOG_ROOT, + DEFAULT_STORAGE_ROOT, + build_folder_indexes, + choose_current_daily_document, + load_document_metas, + load_folders, + ) finally: sys.path.pop(0) @@ -31,7 +39,62 @@ HAS_LOCAL_DATA = ( and DEFAULT_STORAGE_ROOT.is_dir() ) -SKIP_REASON = "Mubu local data directories not found" + +def detect_daily_folder_ref() -> str | None: + if not HAS_LOCAL_DATA: + return None + + metas = load_document_metas(DEFAULT_STORAGE_ROOT) + folders = load_folders(DEFAULT_STORAGE_ROOT) + _, folder_paths = build_folder_indexes(folders) + docs_by_folder: dict[str, list[dict[str, object]]] = {} + for meta in metas: + folder_id = meta.get("folder_id") + if isinstance(folder_id, str): + 
docs_by_folder.setdefault(folder_id, []).append(meta) + + best_path: str | None = None + best_score = -1 + for folder in folders: + folder_id = folder.get("folder_id") + if not isinstance(folder_id, str): + continue + _, candidates = choose_current_daily_document(docs_by_folder.get(folder_id, [])) + if not candidates: + continue + folder_path = folder_paths.get(folder_id, "") + if not folder_path: + continue + score = max( + max(item.get("updated_at") or 0, item.get("created_at") or 0) + for item in candidates + ) + if score > best_score: + best_score = score + best_path = folder_path + return best_path + + +DETECTED_DAILY_FOLDER_REF = detect_daily_folder_ref() +HAS_DAILY_FOLDER = HAS_LOCAL_DATA and DETECTED_DAILY_FOLDER_REF is not None + +SKIP_REASON = "Mubu local data or a daily-style folder was not found" +LIVE_API_SKIP_MARKERS = ( + "CERTIFICATE_VERIFY_FAILED", + "SSLCertVerificationError", + "Hostname mismatch", + "request failed for https://api2.mubu.com", + "urlopen error", +) + + +def assert_cli_success_or_skip(testcase: unittest.TestCase, result: subprocess.CompletedProcess) -> None: + if result.returncode == 0: + return + details = "\n".join(part for part in (result.stdout, result.stderr) if part).strip() + if any(marker in details for marker in LIVE_API_SKIP_MARKERS): + testcase.skipTest(f"live Mubu API unavailable in this environment: {details.splitlines()[-1]}") + testcase.fail(details or f"CLI exited with status {result.returncode}") def resolve_cli() -> list[str]: @@ -41,13 +104,15 @@ def resolve_cli() -> list[str]: return [sys.executable, "-m", "cli_anything.mubu"] -@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +@unittest.skipUnless(HAS_DAILY_FOLDER, SKIP_REASON) class DiscoverE2ETests(unittest.TestCase): CLI_BASE = resolve_cli() - def run_cli(self, args: list[str]) -> subprocess.CompletedProcess: + def run_cli(self, args: list[str], extra_env: dict | None = None) -> subprocess.CompletedProcess: env = os.environ.copy() env["PYTHONPATH"] = 
str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + if extra_env: + env.update(extra_env) return subprocess.run( self.CLI_BASE + args, capture_output=True, @@ -80,22 +145,27 @@ class DiscoverE2ETests(unittest.TestCase): self.assertGreater(len(data), 0) def test_daily_current_returns_doc_path(self): - result = self.run_cli(["daily-current", "--json"]) + result = self.run_cli( + ["daily-current", "--json"], + extra_env={"MUBU_DAILY_FOLDER": DETECTED_DAILY_FOLDER_REF}, + ) self.assertEqual(result.returncode, 0, msg=result.stderr) data = json.loads(result.stdout) # Response wraps document info in a nested structure doc = data.get("document", data) self.assertIn("doc_path", doc) - self.assertIn("Daily tasks", doc["doc_path"]) + self.assertIn(DETECTED_DAILY_FOLDER_REF, doc["doc_path"]) -@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +@unittest.skipUnless(HAS_DAILY_FOLDER, SKIP_REASON) class InspectE2ETests(unittest.TestCase): CLI_BASE = resolve_cli() - def run_cli(self, args: list[str]) -> subprocess.CompletedProcess: + def run_cli(self, args: list[str], extra_env: dict | None = None) -> subprocess.CompletedProcess: env = os.environ.copy() env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + if extra_env: + env.update(extra_env) return subprocess.run( self.CLI_BASE + args, capture_output=True, @@ -111,14 +181,17 @@ class InspectE2ETests(unittest.TestCase): self.assertIsInstance(data, list) def test_daily_nodes_returns_node_list(self): - result = self.run_cli(["daily-nodes", "--json"]) - self.assertEqual(result.returncode, 0, msg=result.stderr) + result = self.run_cli( + ["daily-nodes", "--json"], + extra_env={"MUBU_DAILY_FOLDER": DETECTED_DAILY_FOLDER_REF}, + ) + assert_cli_success_or_skip(self, result) data = json.loads(result.stdout) self.assertIn("nodes", data) self.assertIsInstance(data["nodes"], list) -@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +@unittest.skipUnless(HAS_DAILY_FOLDER, SKIP_REASON) class 
SessionE2ETests(unittest.TestCase): CLI_BASE = resolve_cli() @@ -138,35 +211,43 @@ class SessionE2ETests(unittest.TestCase): def test_session_use_daily_sets_current_doc(self): with tempfile.TemporaryDirectory() as tmpdir: - env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + env = { + "CLI_ANYTHING_MUBU_STATE_DIR": tmpdir, + "MUBU_DAILY_FOLDER": DETECTED_DAILY_FOLDER_REF, + } self.run_cli(["session", "use-daily"], extra_env=env) result = self.run_cli(["session", "status", "--json"], extra_env=env) self.assertEqual(result.returncode, 0, msg=result.stderr) data = json.loads(result.stdout) self.assertIsNotNone(data.get("current_doc")) - self.assertIn("Daily tasks", data["current_doc"]) + self.assertIn(DETECTED_DAILY_FOLDER_REF, data["current_doc"]) def test_repl_use_daily_then_daily_nodes(self): with tempfile.TemporaryDirectory() as tmpdir: - env = {"CLI_ANYTHING_MUBU_STATE_DIR": tmpdir} + env = { + "CLI_ANYTHING_MUBU_STATE_DIR": tmpdir, + "MUBU_DAILY_FOLDER": DETECTED_DAILY_FOLDER_REF, + } result = self.run_cli( [], input_text="use-daily\ndaily-nodes --json\nexit\n", extra_env=env, ) - self.assertEqual(result.returncode, 0, msg=result.stderr) + assert_cli_success_or_skip(self, result) self.assertIn('"nodes"', result.stdout) -@unittest.skipUnless(HAS_LOCAL_DATA, SKIP_REASON) +@unittest.skipUnless(HAS_DAILY_FOLDER, SKIP_REASON) class MutateDryRunE2ETests(unittest.TestCase): """Test mutation commands in dry-run mode (no --execute).""" CLI_BASE = resolve_cli() - def run_cli(self, args: list[str]) -> subprocess.CompletedProcess: + def run_cli(self, args: list[str], extra_env: dict | None = None) -> subprocess.CompletedProcess: env = os.environ.copy() env["PYTHONPATH"] = str(REPO_ROOT) + os.pathsep + env.get("PYTHONPATH", "") + if extra_env: + env.update(extra_env) return subprocess.run( self.CLI_BASE + args, capture_output=True, @@ -177,7 +258,11 @@ class MutateDryRunE2ETests(unittest.TestCase): def _resolve_daily_node(self) -> tuple[str, str]: """Helper: get a stable daily 
document reference and first node id.""" - result = self.run_cli(["daily-nodes", "--json"]) + result = self.run_cli( + ["daily-nodes", "--json"], + extra_env={"MUBU_DAILY_FOLDER": DETECTED_DAILY_FOLDER_REF}, + ) + assert_cli_success_or_skip(self, result) data = json.loads(result.stdout) doc = data.get("document", data) doc_ref = doc.get("doc_id") or doc["doc_path"] diff --git a/mubu/agent-harness/mubu_probe.py b/mubu/agent-harness/mubu_probe.py index c5bb84ee2..19b53576d 100644 --- a/mubu/agent-harness/mubu_probe.py +++ b/mubu/agent-harness/mubu_probe.py @@ -87,6 +87,31 @@ DAILY_TITLE_RE = re.compile(r"^\d{2}\.\d{1,2}\.\d{1,2}(?:-\d{1,2}(?:\.\d{1,2})?) DEFAULT_DAILY_EXCLUDE_KEYWORDS = ("ๆจกๆฟ", "template") +def configured_daily_folder_ref(env: Mapping[str, str] | None = None) -> str | None: + env = env or os.environ + value = env.get("MUBU_DAILY_FOLDER", "") + if not isinstance(value, str): + return None + resolved = value.strip() + return resolved or None + + +def resolve_daily_folder_ref( + folder_ref: str | None, + env: Mapping[str, str] | None = None, +) -> str: + value = (folder_ref or "").strip() + if value: + return value + configured = configured_daily_folder_ref(env=env) + if configured: + return configured + raise RuntimeError( + "daily folder reference required; pass explicitly " + "or set MUBU_DAILY_FOLDER" + ) + + def extract_plain_text(value: Any) -> str: if value is None: return "" @@ -1468,7 +1493,7 @@ def build_parser() -> argparse.ArgumentParser: "daily-current", help="Resolve the current daily document from one Daily-style folder.", ) - daily_current_parser.add_argument("folder_ref", nargs="?", default="Daily tasks") + daily_current_parser.add_argument("folder_ref", nargs="?") daily_current_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) daily_current_parser.add_argument("--limit", type=int, default=5) daily_current_parser.add_argument( @@ -1482,7 +1507,7 @@ def build_parser() -> argparse.ArgumentParser: 
"daily-nodes", help="List live nodes from the current daily document in one step.", ) - daily_nodes_parser.add_argument("folder_ref", nargs="?", default="Daily tasks") + daily_nodes_parser.add_argument("folder_ref", nargs="?") daily_nodes_parser.add_argument("--storage-root", type=Path, default=DEFAULT_STORAGE_ROOT) daily_nodes_parser.add_argument("--api-host", default=DEFAULT_API_HOST) daily_nodes_parser.add_argument("--query", default=None, help="Filter nodes by plain-text substring.") @@ -1695,11 +1720,15 @@ def main(argv: list[str] | None = None) -> int: if args.command == "daily-current": metas = load_document_metas(args.storage_root) folders = load_folders(args.storage_root) - docs, folder, ambiguous = folder_documents(metas, folders, args.folder_ref) + try: + folder_ref = resolve_daily_folder_ref(args.folder_ref) + except RuntimeError as exc: + parser.error(str(exc)) + docs, folder, ambiguous = folder_documents(metas, folders, folder_ref) if folder is None: if ambiguous: - parser.error(ambiguous_error_message("folder", args.folder_ref, ambiguous, "path")) - parser.error(f"folder not found: {args.folder_ref}") + parser.error(ambiguous_error_message("folder", folder_ref, ambiguous, "path")) + parser.error(f"folder not found: {folder_ref}") selected, candidates = choose_current_daily_document( docs, @@ -1733,11 +1762,15 @@ def main(argv: list[str] | None = None) -> int: metas = load_document_metas(args.storage_root) folders = load_folders(args.storage_root) - docs, folder, ambiguous = folder_documents(metas, folders, args.folder_ref) + try: + folder_ref = resolve_daily_folder_ref(args.folder_ref) + except RuntimeError as exc: + parser.error(str(exc)) + docs, folder, ambiguous = folder_documents(metas, folders, folder_ref) if folder is None: if ambiguous: - parser.error(ambiguous_error_message("folder", args.folder_ref, ambiguous, "path")) - parser.error(f"folder not found: {args.folder_ref}") + parser.error(ambiguous_error_message("folder", folder_ref, 
ambiguous, "path")) + parser.error(f"folder not found: {folder_ref}") selected, candidates = choose_current_daily_document( docs, diff --git a/mubu/agent-harness/skill_generator.py b/mubu/agent-harness/skill_generator.py index 49b438862..3e1a62749 100644 --- a/mubu/agent-harness/skill_generator.py +++ b/mubu/agent-harness/skill_generator.py @@ -92,9 +92,14 @@ def extract_system_package(content: str) -> Optional[str]: def extract_version_from_setup(setup_path: Path) -> str: content = setup_path.read_text(encoding="utf-8") - match = re.search(r'version\s*=\s*["\']([^"\']+)["\']', content) - if match: - return match.group(1) + direct_match = re.search(r'version\s*=\s*["\']([^"\']+)["\']', content) + if direct_match: + return direct_match.group(1) + + constant_match = re.search(r'PACKAGE_VERSION\s*=\s*["\']([^"\']+)["\']', content) + if constant_match: + return constant_match.group(1) + return "1.0.0" @@ -185,8 +190,8 @@ def generate_examples(software_name: str, command_groups: list[CommandGroup]) -> examples.append( Example( title="Discover Current Daily Note", - description="Resolve the current daily note and emit JSON output for an agent.", - code=f"""cli-anything-{software_name} --json discover daily-current""", + description="Resolve the current daily note from an explicit folder reference.", + code=f"""cli-anything-{software_name} --json discover daily-current ''""", ) ) if "mutate" in group_names: @@ -196,7 +201,7 @@ def generate_examples(software_name: str, command_groups: list[CommandGroup]) -> description="Inspect the exact outgoing payload before a live mutation.", code=( f"cli-anything-{software_name} mutate update-text " - "'Workspace/Daily tasks/26.03.16' --node-id node-demo1 --text 'new text' --json" + "'' --node-id --text 'new text' --json" ), ) ) @@ -269,6 +274,7 @@ def generate_skill_md_simple(metadata: SkillMetadata) -> str: "- Python 3.10+", "- An active Mubu desktop session on this machine", "- Local Mubu profile data available to the CLI", + "- 
Set `MUBU_DAILY_FOLDER` if you want no-argument daily helpers", "", "## Entry Points", "", @@ -296,9 +302,9 @@ def generate_skill_md_simple(metadata: SkillMetadata) -> str: "## Recommended Agent Workflow", "", "```text", - "discover daily-current --json", + "discover daily-current '' --json", " ->", - "inspect daily-nodes --query '' --json", + "inspect daily-nodes '' --query '' --json", " ->", "session use-doc ''", " ->", @@ -316,6 +322,7 @@ def generate_skill_md_simple(metadata: SkillMetadata) -> str: "5. Prefer `--node-id` and `--parent-node-id` over text matching.", "6. `delete-node` removes the full targeted subtree.", "7. Even same-text updates can still advance document version history.", + "8. Pass a daily-folder reference explicitly or set `MUBU_DAILY_FOLDER` before using no-arg daily helpers.", "", "## Examples", "", diff --git a/mubu/agent-harness/templates/SKILL.md.template b/mubu/agent-harness/templates/SKILL.md.template index 12e7766ac..b0dab914f 100644 --- a/mubu/agent-harness/templates/SKILL.md.template +++ b/mubu/agent-harness/templates/SKILL.md.template @@ -21,6 +21,7 @@ pip install -e . - Python 3.10+ - An active Mubu desktop session on this machine - Local Mubu profile data available to the CLI +- Set `MUBU_DAILY_FOLDER` if you want no-argument daily helpers ## Entry Points @@ -48,9 +49,9 @@ When invoked without a subcommand, the CLI enters an interactive REPL session. ## Recommended Agent Workflow ```text -discover daily-current --json +discover daily-current '' --json -> -inspect daily-nodes --query '' --json +inspect daily-nodes '' --query '' --json -> session use-doc '' -> @@ -68,6 +69,7 @@ mutate update-text / create-child / delete-node --json 5. Prefer `--node-id` and `--parent-node-id` over text matching. 6. `delete-node` removes the full targeted subtree. 7. Even same-text updates can still advance document version history. +8. Pass a daily-folder reference explicitly or set `MUBU_DAILY_FOLDER` before using no-arg daily helpers. 
## Examples From f15052487e8a45a2f6f63c1d10b194398bf79c89 Mon Sep 17 00:00:00 2001 From: cnfjlhj <166828808+cnfjlhj@users.noreply.github.com> Date: Wed, 18 Mar 2026 13:40:41 +0800 Subject: [PATCH 3/4] chore(mubu): bump harness version to 0.1.1 Update the package metadata, generated skill output, test expectations, and registry entry so the contribution PR reflects the generalized daily-folder behavior in the published harness version. --- mubu/agent-harness/cli_anything/mubu/__init__.py | 2 +- mubu/agent-harness/cli_anything/mubu/skills/SKILL.md | 2 +- mubu/agent-harness/cli_anything/mubu/tests/TEST.md | 2 +- .../cli_anything/mubu/tests/test_agent_harness.py | 4 ++-- mubu/agent-harness/setup.py | 2 +- registry.json | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mubu/agent-harness/cli_anything/mubu/__init__.py b/mubu/agent-harness/cli_anything/mubu/__init__.py index a05eb9abb..fd9a4ec04 100644 --- a/mubu/agent-harness/cli_anything/mubu/__init__.py +++ b/mubu/agent-harness/cli_anything/mubu/__init__.py @@ -1,3 +1,3 @@ __all__ = ["__version__"] -__version__ = "0.1.0" +__version__ = "0.1.1" diff --git a/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md b/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md index 60d654cef..fe9bf0891 100644 --- a/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md +++ b/mubu/agent-harness/cli_anything/mubu/skills/SKILL.md @@ -199,4 +199,4 @@ Use the `session` command group to inspect or update this state. 
## Version -0.1.0 \ No newline at end of file +0.1.1 \ No newline at end of file diff --git a/mubu/agent-harness/cli_anything/mubu/tests/TEST.md b/mubu/agent-harness/cli_anything/mubu/tests/TEST.md index e2bb0354f..bc2218ed4 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/TEST.md +++ b/mubu/agent-harness/cli_anything/mubu/tests/TEST.md @@ -343,7 +343,7 @@ Commands: ```bash tmpdir=$(mktemp -d) .venv/bin/python -m pip wheel --no-deps --wheel-dir "$tmpdir" ./agent-harness -unzip -l "$tmpdir"/cli_anything_mubu-0.1.0-py3-none-any.whl +unzip -l "$tmpdir"/cli_anything_mubu-0.1.1-py3-none-any.whl ``` Latest result: diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py b/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py index 8f1cd871d..9826d1f07 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_agent_harness.py @@ -71,7 +71,7 @@ class AgentHarnessPackagingTests(unittest.TestCase): text=True, ) self.assertEqual(result.returncode, 0, msg=result.stderr) - self.assertEqual(result.stdout.strip(), "0.1.0") + self.assertEqual(result.stdout.strip(), "0.1.1") def test_root_setup_targets_canonical_harness_source(self): if STANDALONE_ROOT is None: @@ -129,7 +129,7 @@ class AgentHarnessPackagingTests(unittest.TestCase): self.assertIn("MUBU_DAILY_FOLDER", content) self.assertNotIn("Workspace/Daily tasks", content) self.assertNotIn("Daily tasks resolution", content) - self.assertIn("## Version\n\n0.1.0", content) + self.assertIn("## Version\n\n0.1.1", content) finally: output_path.unlink(missing_ok=True) diff --git a/mubu/agent-harness/setup.py b/mubu/agent-harness/setup.py index 0a3eecf1d..217b97ffd 100644 --- a/mubu/agent-harness/setup.py +++ b/mubu/agent-harness/setup.py @@ -4,7 +4,7 @@ import sys PACKAGE_NAME = "cli-anything-mubu" -PACKAGE_VERSION = "0.1.0" +PACKAGE_VERSION = "0.1.1" def _handle_metadata_query(argv: list[str]) -> bool: diff --git 
a/registry.json b/registry.json index ed251c83f..c66625db3 100644 --- a/registry.json +++ b/registry.json @@ -116,7 +116,7 @@ { "name": "mubu", "display_name": "Mubu", - "version": "0.1.0", + "version": "0.1.1", "description": "Knowledge management and outlining via local Mubu desktop data", "requires": "Mubu desktop app", "homepage": "https://mubu.com", From 71c1ae6dcf7f096bd35796af63df7ebcde3d0ae4 Mon Sep 17 00:00:00 2001 From: cnfjlhj <166828808+cnfjlhj@users.noreply.github.com> Date: Wed, 18 Mar 2026 17:13:44 +0800 Subject: [PATCH 4/4] fix(mubu): harden document resolution and state branding Deduplicate logical document paths by freshest metadata so open-path resolves real Mubu history duplicates consistently. Overlay backup-driven show/link output with metadata titles and doc paths, and default new public state to ~/.config/mubu-cli while preserving legacy fallback. --- .../cli_anything/mubu/mubu_cli.py | 81 +++++++--- .../mubu/tests/test_cli_entrypoint.py | 47 +++++- .../mubu/tests/test_mubu_probe.py | 138 ++++++++++++++++- mubu/agent-harness/mubu_probe.py | 141 ++++++++++++++---- 4 files changed, 354 insertions(+), 53 deletions(-) diff --git a/mubu/agent-harness/cli_anything/mubu/mubu_cli.py b/mubu/agent-harness/cli_anything/mubu/mubu_cli.py index 83931178f..6bd5dc719 100644 --- a/mubu/agent-harness/cli_anything/mubu/mubu_cli.py +++ b/mubu/agent-harness/cli_anything/mubu/mubu_cli.py @@ -16,6 +16,8 @@ from cli_anything.mubu.utils import ReplSkin CONTEXT_SETTINGS = {"ignore_unknown_options": True, "allow_extra_args": True} COMMAND_HISTORY_LIMIT = 50 +PUBLIC_PROGRAM_NAME = "mubu-cli" +COMPAT_PROGRAM_NAME = "cli-anything-mubu" DISCOVER_COMMANDS = { "docs": "List latest known document snapshots from local backups.", "folders": "List folder metadata from local RxDB storage.", @@ -44,7 +46,7 @@ LEGACY_COMMANDS.update(DISCOVER_COMMANDS) LEGACY_COMMANDS.update(INSPECT_COMMANDS) LEGACY_COMMANDS.update(MUTATE_COMMANDS) -REPL_HELP = """Interactive REPL for 
cli-anything-mubu +REPL_HELP_TEMPLATE = """Interactive REPL for {program_name} Builtins: help Show this REPL help @@ -72,13 +74,32 @@ Examples: If you prefer no-argument daily helpers, set MUBU_DAILY_FOLDER=''. """ +REPL_COMMAND_HELP = REPL_HELP_TEMPLATE.format(program_name="the Mubu CLI") + + +def normalize_program_name(program_name: str | None) -> str: + candidate = Path(program_name or "").name.strip() + if candidate == PUBLIC_PROGRAM_NAME: + return PUBLIC_PROGRAM_NAME + return COMPAT_PROGRAM_NAME + + +def repl_help_text(program_name: str | None = None) -> str: + return REPL_HELP_TEMPLATE.format(program_name=normalize_program_name(program_name)) def session_state_dir() -> Path: override = os.environ.get("CLI_ANYTHING_MUBU_STATE_DIR", "").strip() if override: return Path(override).expanduser() - return Path.home() / ".config" / "cli-anything-mubu" + config_root = Path.home() / ".config" + public_dir = config_root / PUBLIC_PROGRAM_NAME + legacy_dir = config_root / COMPAT_PROGRAM_NAME + if public_dir.exists(): + return public_dir + if legacy_dir.exists(): + return legacy_dir + return public_dir def session_state_path() -> Path: @@ -253,14 +274,23 @@ def invoke_probe_command(ctx: click.Context | None, command_name: str, probe_arg return int(result or 0) -def print_repl_banner(skin: ReplSkin) -> None: +def print_repl_banner(skin: ReplSkin, program_name: str | None = None) -> None: + normalized_program_name = normalize_program_name(program_name) click.echo("Mubu REPL") - skin.print_banner() + if normalized_program_name == PUBLIC_PROGRAM_NAME: + click.echo(f"Command: {PUBLIC_PROGRAM_NAME}") + click.echo(f"Version: {__version__}") + if skin.skill_path: + click.echo(f"Skill: {skin.skill_path}") + click.echo("Type help for commands, quit to exit") + click.echo() + else: + skin.print_banner() click.echo(f"History: {skin.history_file}") -def print_repl_help() -> None: - click.echo(REPL_HELP.rstrip()) +def print_repl_help(program_name: str | None = None) -> None: + 
click.echo(repl_help_text(program_name).rstrip()) def parse_history_limit(argv: Sequence[str]) -> int: @@ -272,7 +302,11 @@ def parse_history_limit(argv: Sequence[str]) -> int: raise RuntimeError(f"history limit must be an integer: {argv[1]}") from exc -def handle_repl_builtin(argv: list[str], session: dict[str, object]) -> tuple[bool, int]: +def handle_repl_builtin( + argv: list[str], + session: dict[str, object], + program_name: str | None = None, +) -> tuple[bool, int]: if not argv: return True, 0 @@ -280,7 +314,7 @@ def handle_repl_builtin(argv: list[str], session: dict[str, object]) -> tuple[bo if command in {"exit", "quit"}: return True, 1 if command == "help": - print_repl_help() + print_repl_help(program_name) return True, 0 if command == "current-doc": current_doc = session.get("current_doc") @@ -353,11 +387,11 @@ def handle_repl_builtin(argv: list[str], session: dict[str, object]) -> tuple[bo return False, 0 -def run_repl() -> int: +def run_repl(program_name: str | None = None) -> int: session = load_session_state() skin = ReplSkin("mubu", version=__version__, history_file=str(session_state_dir() / "history.txt")) prompt_session = skin.create_prompt_session() - print_repl_banner(skin) + print_repl_banner(skin, program_name) if session.get("current_doc"): click.echo(f"Current doc: {session['current_doc']}") if session.get("current_node"): @@ -383,7 +417,7 @@ def run_repl() -> int: click.echo(f"parse error: {exc}", err=True) continue - handled, control = handle_repl_builtin(argv, session) + handled, control = handle_repl_builtin(argv, session, program_name) if handled: if control == 1: skin.print_goodbye() @@ -405,8 +439,9 @@ def cli(ctx: click.Context, json_output: bool) -> int: """Agent-native CLI for the Mubu desktop app with REPL and grouped command domains.""" ctx.ensure_object(dict) ctx.obj["json_output"] = json_output + ctx.obj["prog_name"] = normalize_program_name(ctx.info_name) if ctx.invoked_subcommand is None: - return run_repl() + return 
run_repl(ctx.obj["prog_name"]) return 0 @@ -668,10 +703,15 @@ def history_command(ctx: click.Context, limit: int, json_output: bool) -> int: return 0 -@cli.command("repl", help=REPL_HELP) -def repl_command() -> int: - """Interactive REPL for cli-anything-mubu.""" - return run_repl() +@cli.command("repl", help=REPL_COMMAND_HELP) +@click.pass_context +def repl_command(ctx: click.Context) -> int: + """Interactive REPL for the Mubu CLI.""" + root = ctx.find_root() + program_name = None + if root is not None and root.obj is not None: + program_name = root.obj.get("prog_name") + return run_repl(program_name) def create_legacy_command(command_name: str, help_text: str) -> click.Command: @@ -688,10 +728,11 @@ for _command_name, _help_text in LEGACY_COMMANDS.items(): cli.add_command(create_legacy_command(_command_name, _help_text)) -def dispatch(argv: list[str] | None = None) -> int: +def dispatch(argv: list[str] | None = None, prog_name: str | None = None) -> int: args = list(sys.argv[1:] if argv is None else argv) + normalized_prog_name = normalize_program_name(prog_name or sys.argv[0]) try: - result = cli.main(args=args, prog_name="cli-anything-mubu", standalone_mode=False) + result = cli.main(args=args, prog_name=normalized_prog_name, standalone_mode=False) except click.exceptions.Exit as exc: return int(exc.exit_code) except click.ClickException as exc: @@ -701,7 +742,7 @@ def dispatch(argv: list[str] | None = None) -> int: def entrypoint(argv: list[str] | None = None) -> int: - return dispatch(argv) + return dispatch(argv, prog_name=sys.argv[0]) __all__ = [ @@ -712,10 +753,12 @@ __all__ = [ "default_session_state", "dispatch", "entrypoint", + "normalize_program_name", "expand_repl_aliases", "expand_repl_aliases_with_state", "handle_repl_builtin", "load_session_state", + "repl_help_text", "resolve_current_daily_doc_ref", "run_repl", "save_session_state", diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py 
b/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py index ed592356d..7b161563d 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_cli_entrypoint.py @@ -1,3 +1,5 @@ +import contextlib +import io import os import shutil import subprocess @@ -5,8 +7,14 @@ import sys import tempfile import unittest from pathlib import Path +from unittest import mock -from cli_anything.mubu.mubu_cli import expand_repl_aliases_with_state +from cli_anything.mubu.mubu_cli import ( + dispatch, + expand_repl_aliases_with_state, + repl_help_text, + session_state_dir, +) from mubu_probe import ( DEFAULT_BACKUP_ROOT, DEFAULT_STORAGE_ROOT, @@ -96,12 +104,49 @@ class CliEntrypointTests(unittest.TestCase): self.assertIn("create-child", result.stdout) self.assertIn("delete-node", result.stdout) + def test_dispatch_uses_public_prog_name_when_requested(self): + stdout = io.StringIO() + with contextlib.redirect_stdout(stdout): + result = dispatch(["--help"], prog_name="mubu-cli") + self.assertEqual(result, 0) + self.assertIn("Usage: mubu-cli", stdout.getvalue()) + + def test_dispatch_uses_compat_prog_name_when_requested(self): + stdout = io.StringIO() + with contextlib.redirect_stdout(stdout): + result = dispatch(["--help"], prog_name="cli-anything-mubu") + self.assertEqual(result, 0) + self.assertIn("Usage: cli-anything-mubu", stdout.getvalue()) + def test_repl_help_renders(self): result = self.run_cli(["repl", "--help"]) self.assertEqual(result.returncode, 0, msg=result.stderr) self.assertIn("Interactive REPL", result.stdout) self.assertIn("use-node", result.stdout) + def test_repl_help_text_supports_public_brand(self): + self.assertIn("mubu-cli", repl_help_text("mubu-cli")) + + def test_session_state_dir_defaults_to_public_brand_path(self): + with tempfile.TemporaryDirectory() as tmpdir: + home = Path(tmpdir) + with ( + mock.patch.dict(os.environ, {}, clear=False), + 
mock.patch("cli_anything.mubu.mubu_cli.Path.home", return_value=home), + ): + self.assertEqual(session_state_dir(), home / ".config" / "mubu-cli") + + def test_session_state_dir_falls_back_to_legacy_path_when_only_legacy_exists(self): + with tempfile.TemporaryDirectory() as tmpdir: + home = Path(tmpdir) + legacy = home / ".config" / "cli-anything-mubu" + legacy.mkdir(parents=True) + with ( + mock.patch.dict(os.environ, {}, clear=False), + mock.patch("cli_anything.mubu.mubu_cli.Path.home", return_value=home), + ): + self.assertEqual(session_state_dir(), legacy) + def test_default_entrypoint_starts_repl_and_can_exit(self): result = self.run_cli([], input_text="exit\n") self.assertEqual(result.returncode, 0, msg=result.stderr) diff --git a/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py b/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py index 097024a23..8a52bddbe 100644 --- a/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py +++ b/mubu/agent-harness/cli_anything/mubu/tests/test_mubu_probe.py @@ -1,7 +1,10 @@ +import contextlib +import io import json import tempfile import unittest from pathlib import Path +from unittest import mock from mubu_probe import ( build_api_headers, @@ -9,6 +12,7 @@ from mubu_probe import ( build_delete_node_request, build_text_update_request, choose_current_daily_document, + document_links, extract_doc_links, extract_plain_text, folder_documents, @@ -16,6 +20,7 @@ from mubu_probe import ( list_document_nodes, load_latest_backups, looks_like_daily_title, + main, node_path_to_api_path, normalize_document_meta_record, normalize_folder_record, @@ -182,14 +187,15 @@ class PathResolutionTests(unittest.TestCase): ] self.document_metas = [ {"doc_id": "docA", "folder_id": "dailyA", "title": "26.03.16", "updated_at": 20}, + {"doc_id": "docA2", "folder_id": "dailyA", "title": "26.03.16", "updated_at": 25}, {"doc_id": "docB", "folder_id": "dailyA", "title": "26.3.15", "updated_at": 10}, {"doc_id": "docC", 
"folder_id": "dailyB", "title": "26.03.16", "updated_at": 30}, ] self.backups = [ { - "doc_id": "docA", - "title": "26.03.16", - "backup_file": "/tmp/docA.json", + "doc_id": "docA2", + "title": "today root", + "backup_file": "/tmp/docA2.json", "modified_at": 123.0, "data": {"viewType": "OUTLINE", "nodes": [{"id": "n1", "text": "today", "children": []}]}, } @@ -199,7 +205,7 @@ class PathResolutionTests(unittest.TestCase): docs, folder, ambiguous = folder_documents(self.document_metas, self.folders, "Workspace/Daily tasks") self.assertEqual(ambiguous, []) self.assertEqual(folder["folder_id"], "dailyA") - self.assertEqual([doc["doc_id"] for doc in docs], ["docA", "docB"]) + self.assertEqual([doc["doc_id"] for doc in docs], ["docA2", "docB"]) self.assertEqual(docs[0]["doc_path"], "Workspace/Daily tasks/26.03.16") def test_folder_documents_detects_ambiguous_folder_name(self): @@ -211,13 +217,56 @@ class PathResolutionTests(unittest.TestCase): def test_resolve_document_reference_supports_full_doc_path(self): doc, ambiguous = resolve_document_reference(self.document_metas, self.folders, "Workspace/Daily tasks/26.03.16") self.assertEqual(ambiguous, []) - self.assertEqual(doc["doc_id"], "docA") + self.assertEqual(doc["doc_id"], "docA2") self.assertEqual(doc["doc_path"], "Workspace/Daily tasks/26.03.16") def test_resolve_document_reference_detects_ambiguous_title(self): doc, ambiguous = resolve_document_reference(self.document_metas, self.folders, "26.03.16") self.assertIsNone(doc) self.assertEqual(len(ambiguous), 2) + self.assertEqual({item["doc_id"] for item in ambiguous}, {"docA2", "docC"}) + + def test_resolve_document_reference_collapses_same_path_duplicates_for_title(self): + folders = [ + {"folder_id": "rootA", "name": "Workspace", "parent_id": "0"}, + {"folder_id": "dailyA", "name": "Daily tasks", "parent_id": "rootA"}, + ] + metas = [ + {"doc_id": "old", "folder_id": "dailyA", "title": "26.03.18", "updated_at": 10}, + {"doc_id": "new", "folder_id": "dailyA", 
"title": "26.03.18", "updated_at": 20}, + ] + + doc, ambiguous = resolve_document_reference(metas, folders, "26.03.18") + + self.assertEqual(ambiguous, []) + self.assertEqual(doc["doc_id"], "new") + + def test_resolve_document_reference_prefers_newer_timestamp_over_higher_revision_across_doc_ids(self): + folders = [ + {"folder_id": "rootA", "name": "Workspace", "parent_id": "0"}, + {"folder_id": "dailyA", "name": "Daily tasks", "parent_id": "rootA"}, + ] + metas = [ + { + "doc_id": "old-high-rev", + "folder_id": "dailyA", + "title": "26.03.19", + "updated_at": 10, + "_rev": "999-older", + }, + { + "doc_id": "new-low-rev", + "folder_id": "dailyA", + "title": "26.03.19", + "updated_at": 20, + "_rev": "1-newer", + }, + ] + + doc, ambiguous = resolve_document_reference(metas, folders, "Workspace/Daily tasks/26.03.19") + + self.assertEqual(ambiguous, []) + self.assertEqual(doc["doc_id"], "new-low-rev") def test_show_document_by_reference_uses_resolved_path(self): payload, ambiguous = show_document_by_reference( @@ -227,13 +276,77 @@ class PathResolutionTests(unittest.TestCase): "Workspace/Daily tasks/26.03.16", ) self.assertEqual(ambiguous, []) - self.assertEqual(payload["doc_id"], "docA") + self.assertEqual(payload["doc_id"], "docA2") self.assertEqual(payload["title"], "26.03.16") self.assertEqual(payload["folder_path"], "Workspace/Daily tasks") self.assertEqual(payload["doc_path"], "Workspace/Daily tasks/26.03.16") self.assertEqual(payload["nodes"][0]["text"], "today") +class DocumentMetadataOverlayTests(unittest.TestCase): + def test_document_links_prefers_metadata_title_for_source_document(self): + links = document_links( + [ + { + "doc_id": "docA", + "title": "root node title", + "data": { + "nodes": [ + { + "id": "n1", + "text": ( + 'Target Doc' + ), + "children": [], + } + ] + }, + } + ], + "docA", + title_lookup={"docA": "26.03.18", "doc-target-1": "Target Doc"}, + ) + + self.assertEqual(len(links), 1) + self.assertEqual(links[0]["source_doc_title"], "26.03.18") 
+ + def test_show_command_prefers_metadata_title_and_path_when_available(self): + backups = [ + { + "doc_id": "docA", + "title": "root node title", + "backup_file": "/tmp/docA.json", + "modified_at": 123.0, + "data": { + "viewType": "OUTLINE", + "nodes": [{"id": "n1", "text": "today", "children": []}], + }, + } + ] + metas = [{"doc_id": "docA", "folder_id": "dailyA", "title": "26.03.18", "updated_at": 20}] + folders = [ + {"folder_id": "rootA", "name": "Workspace", "parent_id": "0"}, + {"folder_id": "dailyA", "name": "Daily tasks", "parent_id": "rootA"}, + ] + + stdout = io.StringIO() + with ( + mock.patch("mubu_probe.load_latest_backups", return_value=backups), + mock.patch("mubu_probe.load_document_metas", return_value=metas), + mock.patch("mubu_probe.load_folders", return_value=folders), + contextlib.redirect_stdout(stdout), + ): + result = main(["show", "docA", "--json"]) + + self.assertEqual(result, 0) + payload = json.loads(stdout.getvalue()) + self.assertEqual(payload["title"], "26.03.18") + self.assertEqual(payload["folder_path"], "Workspace/Daily tasks") + self.assertEqual(payload["doc_path"], "Workspace/Daily tasks/26.03.18") + + class DocumentNodeListingTests(unittest.TestCase): def test_list_document_nodes_flattens_tree_for_agent_targeting(self): data = { @@ -299,6 +412,8 @@ class DailySelectionTests(unittest.TestCase): def test_looks_like_daily_title_accepts_date_titles_and_rejects_templates(self): self.assertTrue(looks_like_daily_title("26.03.16")) self.assertTrue(looks_like_daily_title("26.3.8-3.9")) + self.assertTrue(looks_like_daily_title("2026-03-18")) + self.assertTrue(looks_like_daily_title("2026ๅนด3ๆœˆ18ๆ—ฅ")) self.assertFalse(looks_like_daily_title("DDL่กจ")) self.assertFalse(looks_like_daily_title("26.2.22ๆจกๆฟๆ›ดๆ–ฐ")) @@ -314,6 +429,17 @@ class DailySelectionTests(unittest.TestCase): self.assertEqual(selected["doc_id"], "today") self.assertEqual([item["doc_id"] for item in candidates], ["today", "yesterday"]) + def 
test_choose_current_daily_document_accepts_full_year_and_cn_date_titles(self): + docs = [ + {"doc_id": "older", "title": "2026ๅนด3ๆœˆ17ๆ—ฅ", "updated_at": 90}, + {"doc_id": "latest", "title": "2026-03-18", "updated_at": 120}, + {"doc_id": "other", "title": "้กน็›ฎ็œ‹ๆฟ", "updated_at": 130}, + ] + + selected, candidates = choose_current_daily_document(docs) + self.assertEqual(selected["doc_id"], "latest") + self.assertEqual([item["doc_id"] for item in candidates], ["latest", "older"]) + def test_choose_current_daily_document_can_fallback_to_any_title(self): docs = [ {"doc_id": "ddl", "title": "DDL่กจ", "updated_at": 100}, diff --git a/mubu/agent-harness/mubu_probe.py b/mubu/agent-harness/mubu_probe.py index 19b53576d..582e133b2 100644 --- a/mubu/agent-harness/mubu_probe.py +++ b/mubu/agent-harness/mubu_probe.py @@ -83,8 +83,15 @@ ANCHOR_RE = re.compile(r"[^>]*)>(?P