mirror of
https://github.com/awizemann/scarf.git
synced 2026-05-10 18:44:45 +00:00
Compare commits
77 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 4757b5ae49 | |||
| 3e470c7155 | |||
| 963d0e1a5c | |||
| 52c802676f | |||
| 5d8873d305 | |||
| 49bc4efe83 | |||
| adcc984091 | |||
| fd80f4f95a | |||
| 9f240ae291 | |||
| 9c149b288b | |||
| 37afbdeffc | |||
| bfd9bab9a0 | |||
| 2e0eb63ea4 | |||
| 3a3c87e033 | |||
| f9e3cd38f5 | |||
| a6a8cae8ff | |||
| 6b66b1c96f | |||
| 97ec4d2882 | |||
| cd5bb32a21 | |||
| 5e23b59697 | |||
| 09e33b2999 | |||
| 9f2e2ecfcd | |||
| 1eb5c92f6a | |||
| bccaba0742 | |||
| 4684b9deed | |||
| f6dc45b397 | |||
| f2ddcbbd60 | |||
| a193003842 | |||
| 93a64e3e82 | |||
| 00a1bbd109 | |||
| 20cc3a2985 | |||
| 432d5b0b52 | |||
| 12e152bfea | |||
| 099d73dde8 | |||
| 4efd84c119 | |||
| bd9bacb8b3 | |||
| 96af545e66 | |||
| 9df7142f49 | |||
| 9ff9a018e7 | |||
| 0a4f8de492 | |||
| 3126c34561 | |||
| 6cf59c8a44 | |||
| 272da6a915 | |||
| c7bcfd8655 | |||
| 9d945150e0 | |||
| fa15634381 | |||
| 3271391506 | |||
| 5afd391838 | |||
| 2a368a04f7 | |||
| 9aa901a286 | |||
| 111fe9bb67 | |||
| 6191c9f19f | |||
| b8b426ed75 | |||
| 593b4e62cb | |||
| de36411a8d | |||
| 6a7ac21ebe | |||
| 5be67282d8 | |||
| c661945a1f | |||
| f5f8dc30b6 | |||
| 34d315793b | |||
| acd3692faf | |||
| ab615f0c28 | |||
| 982ed7da92 | |||
| cb164f07f9 | |||
| 1dbdf9d079 | |||
| 101488cd0d | |||
| 03c996ee80 | |||
| 8428cbff10 | |||
| 381adfd925 | |||
| 254af46e93 | |||
| 596c844da5 | |||
| ec47d191a1 | |||
| 31e6c31acf | |||
| fcfe1c89d6 | |||
| df1b9caabf | |||
| a41c81c048 | |||
| 88add62997 |
@@ -0,0 +1,15 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
||||
patreon: # Replace with a single Patreon username
|
||||
open_collective: # Replace with a single Open Collective username
|
||||
ko_fi: # Replace with a single Ko-fi username
|
||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
||||
liberapay: # Replace with a single Liberapay username
|
||||
issuehunt: # Replace with a single IssueHunt username
|
||||
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
|
||||
polar: # Replace with a single Polar username
|
||||
buy_me_a_coffee: awizemann
|
||||
thanks_dev: # Replace with a single thanks.dev username
|
||||
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
||||
@@ -61,3 +61,8 @@ releases/v*/appcast-entry.xml
|
||||
|
||||
# Wiki helper: personal patterns (hostnames, IPs) blocked from the wiki push.
|
||||
scripts/wiki-blocklist.txt
|
||||
|
||||
# TestFlight feedback / crash JSONs downloaded for triage. They contain PII (emails,
|
||||
# carriers, locales) and are never meant for the public repo — kept local
|
||||
# while a fix round is in progress, deleted afterward.
|
||||
crashes/
|
||||
|
||||
+24
@@ -0,0 +1,24 @@
|
||||
# Building Scarf
|
||||
|
||||
Scarf is a native macOS app built with Xcode. For contributor builds, use the local script:
|
||||
|
||||
```bash
|
||||
./scripts/local-build.sh
|
||||
```
|
||||
|
||||
Requirements:
|
||||
|
||||
- macOS 14.6 (Sonoma) or newer at runtime — that's the app's `MACOSX_DEPLOYMENT_TARGET`. Sonoma support is intentional and load-bearing; do not raise this without an explicit decision to drop Sonoma users
|
||||
- Xcode 16.0 or newer, selected by `xcode-select` (needed for Swift 6 strict-concurrency features the project uses)
|
||||
- Metal toolchain installed
|
||||
- Hermes installed at `~/.hermes/` (see the project README for setup)
|
||||
|
||||
If the Metal toolchain is missing, the script will offer to install it in interactive shells. You can also install it manually:
|
||||
|
||||
```bash
|
||||
xcodebuild -downloadComponent MetalToolchain
|
||||
```
|
||||
|
||||
`scripts/local-build.sh` resolves Swift package dependencies, detects `arm64` vs `x86_64`, and builds the Debug app unsigned. Signing is intentionally disabled for local Debug builds so contributors do not need the maintainer's Apple Developer account.
|
||||
|
||||
Release signing is separate from contributor builds. Maintainers should continue using the existing release process for signed distributable builds.
|
||||
@@ -113,9 +113,29 @@ Public documentation lives in the GitHub wiki at https://github.com/awizemann/sc
|
||||
|
||||
## Hermes Version
|
||||
|
||||
Targets Hermes v2026.4.30 (v0.12.0). Log lines may carry an optional `[session_id]` tag between the level and logger name — `HermesLogService.parseLine` treats the session tag as an optional capture group, so older untagged lines still parse.
|
||||
Targets Hermes v2026.5.7 (v0.13.0). Log lines may carry an optional `[session_id]` tag between the level and logger name — `HermesLogService.parseLine` treats the session tag as an optional capture group, so older untagged lines still parse.
|
||||
|
||||
**Capability gating.** Scarf detects the target's Hermes version once per server connection via [HermesCapabilities](scarf/Packages/ScarfCore/Sources/ScarfCore/Services/HermesCapabilities.swift) (`hermes --version` → semver + `YYYY.M.D` parse). The resulting `HermesCapabilitiesStore` is injected on `ContextBoundRoot` (Mac) and `ScarfGoTabRoot` (iOS) via `.environment(_:)` and `.hermesCapabilities(_:)`; UI that depends on a v0.12+ surface (Curator, Kanban, ACP image input, `auxiliary.curator`, `prompt_caching.cache_ttl`, Piper TTS, Vercel terminal) reads it through the typed environment key. Pre-v0.12 hosts gracefully hide the new affordances rather than throwing on unknown CLI subcommands. Add a new flag at the top of `HermesCapabilities` whenever Scarf gains a release-gated UI surface.
|
||||
**Capability gating.** Scarf detects the target's Hermes version once per server connection via [HermesCapabilities](scarf/Packages/ScarfCore/Sources/ScarfCore/Services/HermesCapabilities.swift) (`hermes --version` → semver + `YYYY.M.D` parse). The resulting `HermesCapabilitiesStore` is injected on `ContextBoundRoot` (Mac) and `ScarfGoTabRoot` (iOS) via `.environment(_:)` and `.hermesCapabilities(_:)`; UI that depends on a release-gated surface reads it through the typed environment key. Pre-target hosts gracefully hide the new affordances rather than throwing on unknown CLI subcommands. Add a new flag at the top of `HermesCapabilities` whenever Scarf gains a release-gated UI surface — group flags by the Hermes release that introduced them (`MARK: v0.13 (v2026.5.7) flags`, etc.).
|
||||
|
||||
**v2026.5.7 (v0.13.0)** added (Scarf-relevant subset; full v2.8.0 implementation lands across WS-2 through WS-9):
|
||||
|
||||
- **Persistent Goals** — `/goal <text>` slash command locks the agent onto a target across turns. Checkpoints v2 single-store rewrite + auto-resume after gateway restart. Surfaced in Scarf chat as a non-interruptive command + a "🎯 Goal locked: <text>" pill in the chat header. Gated on `HermesCapabilities.hasGoals`.
|
||||
- **ACP `/queue` slash command** — queues a prompt to run after the current turn completes. Joins `/steer` in `RichChatViewModel.nonInterruptiveCommands` with a transient "Queued" toast. Gated on `hasACPQueue`. `/steer` now also runs as a regular prompt on idle sessions (`hasACPSteerOnIdle`).
|
||||
- **Kanban v0.13 reliability + recovery UX** — hallucination gate on worker-created cards, generic diagnostics engine (per-task distress signals), per-task `max_retries` override, multiline title/body create, `auto_blocked_reason` rendered in the inspector banner, darwin zombie detection, unify failure counter across spawn/timeout/crash. New fields decode through tolerant `HermesKanbanRun` / `HermesKanbanTaskDetail` extensions; pre-v0.13 hosts ignore unknown keys. Gated on `hasKanbanDiagnostics`.
|
||||
- **Curator archive + prune** — `hermes curator archive <skill>` + `prune` + `list-archived` subcommands. The synchronous manual `hermes curator run` blocks until done (pre-v0.13 returned immediately). Surfaced as an "Archived" tab in CuratorView with per-row Restore + Prune actions and a destructive prune-confirm sheet. Gated on `hasCuratorArchive`.
|
||||
- **Messaging Gateway expansion** — Google Chat (20th platform; `hasGoogleChatPlatform`), cross-platform allowlists (`allowed_channels` / `allowed_chats` / `allowed_rooms` per platform; `hasGatewayAllowlists`), per-platform `gateway_restart_notification` (`hasGatewayRestartNotification`), `busy_ack_enabled` toggle (`hasGatewayBusyAckToggle`), slash-command auto-delete TTL, `[[as_document]]` skill media routing directive, `hermes gateway list` cross-profile status verb (`hasGatewayList`).
|
||||
- **Provider catalog refresh** — new models on Nous Portal + OpenRouter: `deepseek/deepseek-v4-pro`, `x-ai/grok-4.3`, `openrouter/owl-alpha` (free), `tencent/hy3-preview`, `arcee/trinity-large-thinking` (with temperature + compression overrides). `x-ai/grok-4.20-beta` renamed to `x-ai/grok-4.20` — keep alias map. Vercel AI Gateway demoted to bottom of the picker. `image_gen.model` from `config.yaml` now honored by Hermes (was advertised but ignored pre-v0.13); surfaced in `Settings → Auxiliary` (`hasImageGenModel`). OpenRouter response caching toggle (`hasOpenRouterResponseCache`).
|
||||
- **MCP SSE transport** — MCP servers can be configured with SSE transport + `sse_read_timeout`. Surfaced in MCPServersView add-server flow alongside stdio/pipe. Gated on `hasMCPSSETransport`.
|
||||
- **Cron `--no-agent` mode** — script-only watchdog jobs that skip the AI call. Surfaced in CronView edit sheet. Gated on `hasCronNoAgent`.
|
||||
- **Web Tools per-capability backends** — `web_search` and `web_extract` can use distinct backends; SearXNG joined as a search-only backend. Surfaced in the Web Tools settings tab. Gated on `hasWebToolsBackendSplit`.
|
||||
- **Profiles `--no-skills`** — `hermes profile create --no-skills` for empty-profile creation. Surfaced as a toggle in the create-profile flow. Gated on `hasProfileNoSkills`.
|
||||
- **CLI / UX additions** — context compression count in the status feed (rendered next to the token count in chat status bar; `hasContextCompressionCount`), `/new <name>` slash-command argument (`hasNewWithSessionName`), `hermes update --yes` non-interactive (`hasUpdateNonInteractive`), `display.language` static-message translation (zh / ja / de / es / fr / uk / tr; `hasDisplayLanguage`), xAI Custom Voices (voice-cloning badge next to xAI TTS provider; `hasXAIVoiceCloning`).
|
||||
- **Server-side defaults flipped** — secret redaction defaults back to ON in v0.13 (was off by default in v0.12). The Settings redaction toggle remains for opt-out; the default-state hint reflects the v0.13 semantics when the host advertises v0.13+.
|
||||
- **`video_analyze` tool** — native video understanding on Gemini-class models. Hermes handles transparently inside the agent loop; Scarf has no UI surface yet but `hasVideoAnalyze` is reserved for future widget gating.
|
||||
- **`transform_llm_output` plugin hook** — plugin-author concern; surfaced indirectly through PluginsView when a plugin advertises the hook. `hasTransformLLMOutputHook` gates the metadata badge.
|
||||
- **Schema is unchanged from v0.11/v0.12** — same state.db columns. No migration needed.
|
||||
|
||||
**v2026.4.30 (v0.12.0)** added (Scarf-relevant subset):
|
||||
|
||||
**v2026.4.30 (v0.12.0)** added (Scarf-relevant subset):
|
||||
|
||||
@@ -124,7 +144,7 @@ Targets Hermes v2026.4.30 (v0.12.0). Log lines may carry an optional `[session_i
|
||||
- **`flush_memories` aux task removed (server side)** — `auxiliary.flush_memories` is gone from v0.12 Hermes config but remains alive on pre-v0.12 hosts. Scarf preserves `AuxiliarySettings.flushMemories: AuxiliaryModel`, the YAML reader still emits an `aux("flush_memories")` row, and `AuxiliaryTab` only renders the row when `HermesCapabilities.hasFlushMemoriesAux` is `true` (inverse semantics — pre-v0.12 only). v0.12 users never see the row; v0.11 users keep their edit surface.
|
||||
- **`auxiliary.curator` aux task added** — Curator's review model is configurable independently of the main model. Surfaced in `Settings → Auxiliary` next to the other aux rows.
|
||||
- **Multimodal ACP `session/prompt`** — ACP advertises and forwards image content blocks. Scarf chat composers (Mac drag/drop + paste; iOS PhotosPicker) attach images that flow through `ACPClient.sendPrompt(sessionId:text:images:)` as `[{"type":"text","text":...}, {"type":"image","data":"<base64>","mimeType":"image/jpeg"}]` — wire shape matches `acp.schema.ImageContentBlock`. `ImageEncoder` downsamples to 1568px long-edge JPEG q=0.85 detached (never blocks MainActor). Gated on `HermesCapabilities.hasACPImagePrompts`.
|
||||
- **CLI additions:** `hermes -z <prompt>` (non-interactive one-shot), `hermes update --check` (preflight), `hermes fallback` (manage fallback providers), `hermes curator` (status / run / pause / resume / pin / unpin / restore), `hermes kanban` (full task-board CLI; multi-profile collab was reverted upstream so Scarf ships a read-only Kanban view only). All capability-gated.
|
||||
- **CLI additions:** `hermes -z <prompt>` (non-interactive one-shot), `hermes update --check` (preflight), `hermes fallback` (manage fallback providers), `hermes curator` (status / run / pause / resume / pin / unpin / restore), `hermes kanban` (full 27-verb task-board CLI). All capability-gated. **v2.7.5 lifts Kanban from a read-only list to a full drag-and-drop board.** See the dedicated [Kanban v3](#kanban-v3-drag-and-drop-board--per-project-tenants-v275) section below for the complete architecture.
|
||||
- **Skills surface:** `hermes skills install <https-url>` direct-URL install (SkillsView "Install from URL…" toolbar button), reload via `hermes skills audit` (Skills "Reload" button — equivalent to the `/reload-skills` slash command for non-ACP contexts), enabled/disabled state read from `skills.disabled` in config.yaml (rendered as strikethrough + "OFF" pill), Curator pin badge from `~/.hermes/skills/.curator_state` (rendered as a pin glyph). The disable-toggle write path is deferred to v2.7 — Hermes only exposes `hermes skills config` as an interactive verb, and Scarf prefers reading accurately to risking a clobbered list.
|
||||
- **Two new gateway platforms:** Microsoft Teams (19th, plugin-shipped) + Tencent 元宝 / Yuanbao (18th, native). Surfaced in the Mac Platforms tab.
|
||||
- **Cron upgrades:** per-job `--workdir <abs-path>` (project-aware cwd that pulls AGENTS.md / CLAUDE.md / .cursorrules) is exposed in the editor sheet, gated on `HermesCapabilities.hasCronWorkdir` so pre-v0.12 hosts don't see the field (and a defensive override in `CronView` strips the value before calling `createJob`/`updateJob` even if it was hydrated from a pre-existing job). Pass an empty string on edit to clear an existing workdir, mirroring the `--script` shape. Hermes also added a `context_from` field for chaining cron outputs but only via YAML so far — Scarf reads it (HermesCronJob.contextFrom) but doesn't write it.
|
||||
@@ -153,6 +173,40 @@ v0.10.0 introduced the **Tool Gateway** — paid Nous Portal subscribers route w
|
||||
|
||||
**Keep `ModelCatalogService.overlayOnlyProviders` in sync** with `HERMES_OVERLAYS` in `~/.hermes/hermes-agent/hermes_cli/providers.py`. When Hermes adds a new overlay-only provider, mirror the entry (display name, base URL, auth type, subscription-gated flag, doc URL) or the picker won't reach it.
|
||||
|
||||
## Kanban v3: drag-and-drop board + per-project tenants (v2.7.5)
|
||||
|
||||
Scarf v2.7.5 promotes Kanban from a read-only list to a full board with drag-and-drop, every Hermes write verb wired up, and per-project boards bound to a Scarf-minted tenant slug. The list view is preserved as a `Board | List` toggle for accessibility / narrow-window fallback.
|
||||
|
||||
**Sidebar move.** `.kanban` moved from *Manage* → *Monitor* in `SidebarView` (between `.activity` and the remaining Monitor entries). Kanban is runtime work-in-progress, not configuration. Position kept inside the same enum case — only the section bucket changed.
|
||||
|
||||
**Hermes constraints that drive design.**
|
||||
|
||||
1. **No `update` verb.** `priority`, `title`, `body`, `tenant` are write-once at `kanban create`. Mutations after create are state transitions (`assign` / `claim` / `complete` / `block` / `unblock` / `archive`) or new comments. Inline-edit on a card title is impossible at the wire level.
|
||||
2. **No `project_id` column.** Hermes Kanban is one global SQLite DB at `~/.hermes/kanban.db`. Closest namespace is the optional `tenant TEXT` column. Scarf hijacks it: each project gets a `scarf:<slug>` tenant minted on first kanban interaction.
|
||||
3. **No within-column position field.** Drag-to-reorder inside a column has no Hermes persistence path and is **disabled** in v2.7.5. Sort key is `priority DESC, created_at DESC` — matches dispatcher's actual run order. Cross-column drag is the only persisted gesture.
|
||||
4. **No file-watch / webhooks.** Polling at 5s while foregrounded; live `watch` streaming deferred to a later release (a `hasKanbanWatch` flag will gate it).
|
||||
5. **Status enum has 7 values, board collapses to 5 columns:** Triage / **Up Next** (`todo` + `ready`) / Running / Blocked / Done. Triage hides when empty; Archived hides behind a toolbar toggle.
|
||||
|
||||
**Service layer.** [KanbanService](scarf/Packages/ScarfCore/Sources/ScarfCore/Services/KanbanService.swift) is a Sendable `actor` in ScarfCore — pure I/O, no UI state. Wraps every v0.12 verb (`list / show / runs / stats / assignees / create / assign / claim / comment / complete / block / unblock / archive / dispatch / link / unlink`). Every method dispatches its CLI invocation through `Task.detached(priority: .utility)`, matching the existing `KanbanViewModel.load` pattern (re: Swift 6 rules in `~/.claude/CLAUDE.md`). Errors land in [KanbanError](scarf/Packages/ScarfCore/Sources/ScarfCore/Models/KanbanError.swift) and surface as inline banners (not modal alerts) since the board is high-frequency. The "no matching tasks" stdout sentinel is normalized to `[]`.
|
||||
|
||||
**Drag-drop transition planner.** `KanbanService.plan(for: KanbanTransition)` is a pure function that maps `(from, to)` columns to the right verb sequence — `(.upNext, .running) → [.claim]`, `(.blocked, .running) → [.unblock, .claim]`, etc. Disallowed transitions throw `KanbanError.forbiddenTransition` with a user-facing reason: drop on Done from anywhere triggers "Done is terminal — create a follow-up task to continue work."; drop on Triage from outside triggers "Triage tasks are promoted by a specifier agent." The view's drop handler short-circuits forbidden transitions with red-stroke target feedback.
|
||||
|
||||
**Per-project tenant.** [KanbanTenantResolver](scarf/scarf/Core/Services/KanbanTenantResolver.swift) (Mac) mints `scarf:<slug>` on first kanban interaction inside a project, persisting to `<project>/.scarf/manifest.json`'s new optional `kanbanTenant: String?` field. Tenants are **immutable across rename** (existing tasks already carry the old slug). Bare projects (no manifest) get a sentinel manifest written with `id: scarf/<project-id>` + `version: 0.0.0` + just the `kanbanTenant` set; `ProjectAgentContextService` recognizes the sentinel and refuses to surface it as a "Template" line. The cross-platform read-only counterpart is [KanbanTenantReader](scarf/Packages/ScarfCore/Sources/ScarfCore/Services/KanbanTenantReader.swift) in ScarfCore — iOS uses it to filter the per-project board without linking the full manifest model.
|
||||
|
||||
**Agent-side tenant injection.** `ProjectAgentContextService.renderBlock` adds a "Kanban tenant" line to the AGENTS.md scarf-managed block whenever a tenant exists. Since `ChatViewModel.startACPSession` calls `refresh(for:)` before opening every project chat, the agent sees the tenant on every session start and is told to pass `--tenant scarf:<slug>` on `hermes kanban create`. Agents are imperfect at flag discipline; misuse just sends the task to the global "Untagged" group on the global board, which is acceptable v2.7.5 behavior. A dedicated retag UX is a follow-up.
|
||||
|
||||
**View model.** [KanbanBoardViewModel](scarf/scarf/Features/Kanban/ViewModels/KanbanBoardViewModel.swift) is `@MainActor + @Observable`, holds the column-grouped task array, and applies optimistic-merge logic around drag-drops: an in-flight move records `optimisticOverrides[taskId] = newStatus`, mutates the local array immediately, and clears the override only when the polled response confirms the new status. Without this, a stale poll response can clobber a card the user just dragged. On CLI failure the override is removed and an error message lands in the inline banner.
|
||||
|
||||
**Mac surface.** [KanbanBoardView](scarf/scarf/Features/Kanban/Views/KanbanBoardView.swift) is the orchestrator (header + columns + side-pane inspector + create/block/complete sheets). [KanbanColumnView](scarf/scarf/Features/Kanban/Views/KanbanColumnView.swift) owns its `dropDestination(for: KanbanTaskRef.self)`. [KanbanCardView](scarf/scarf/Features/Kanban/Views/KanbanCardView.swift) handles the `.draggable` source, status-specific chrome (running edge accent + shimmer; blocked warning glyph; done dim 0.7/0.55), and a custom drag preview. [KanbanInspectorPane](scarf/scarf/Features/Kanban/Views/KanbanInspectorPane.swift) is a 420pt side-pane (not modal) so the user can keep dragging cards after inspecting one. [KanbanCreateSheet](scarf/scarf/Features/Kanban/Views/KanbanCreateSheet.swift) maps form state to a `KanbanCreateRequest`; the Workspace picker locks to "Project Dir" on per-project boards. [KanbanBlockReasonSheet](scarf/scarf/Features/Kanban/Views/KanbanBlockReasonSheet.swift) and [KanbanCompleteResultSheet](scarf/scarf/Features/Kanban/Views/KanbanCompleteResultSheet.swift) prompt for optional `--reason` / `--result` text on those transitions.
|
||||
|
||||
**Per-project surface.** New `DashboardTab.kanban` case in `ProjectsView.swift`, dispatched to [ProjectKanbanTab](scarf/scarf/Features/Projects/Views/ProjectKanbanTab.swift) which mints the tenant on appearance and wraps `KanbanBoardView` with `tenantFilter` + `projectPath` pre-applied. Capability-gated on `HermesCapabilities.hasKanban` so pre-v0.12 hosts don't see a broken destination. Plus a new `kanban_summary` widget — top 3 tasks by priority across `running` + `blocked` + `todo` for the project's tenant, with a stats-glance footer. Mirror in `tools/widget-schema.json`, `tools/build-catalog.py`, and `site/widgets.js`. Templates can reference it as `{ kind: kanban_summary, max_rows: 3 }` in dashboard.json.
|
||||
|
||||
**iOS surface.** Read-only board on the project Kanban tab ([ScarfGoKanbanView](Scarf%20iOS/Kanban/ScarfGoKanbanView.swift) + [ScarfGoKanbanDetailSheet](Scarf%20iOS/Kanban/ScarfGoKanbanDetailSheet.swift)). Renders the 5 columns as a horizontally-paged `Picker` of single-column lists — HIG-friendly on iPhone. No mutations, no drag-drop in v2.7.5 (deferred to a later release). Card titles use semantic `.headline` (not `ScarfFont`) so Dynamic Type works; chrome (badges) keeps `ScarfBadge` for fixed visual weight. Gated on `HermesCapabilities.hasKanban`; pre-v0.12 hosts don't see the segment.
|
||||
|
||||
**Capability gating.** Kept the single `HermesCapabilities.hasKanban` flag (`>= 0.12.0`). All 27 verbs shipped together; finer-grained gating is YAGNI. A `hasKanbanWatch` flag will land in a later release if `watch` semantics drift between point releases.
|
||||
|
||||
**Don't:** introduce within-column reorder via a client-side ordering sidecar — sort order would diverge from dispatcher's actual run order, which is worse than no manual order. Use `priority` on `kanban create` to set initial order; revisit when Hermes ships an `update --priority` verb. Don't try to mutate `priority` / `title` / `body` post-create — there's no verb. Don't drop cards from `done` into anything — Done is terminal. Don't call `transport.runProcess` directly from view bodies; route through `KanbanService` (the actor) so polling and writes share the same concurrency model.
|
||||
|
||||
## Project Templates
|
||||
|
||||
Scarf ships a `.scarftemplate` format (v1 as of 2.2.0) for sharing pre-packaged projects across users and machines. A bundle is a zip containing:
|
||||
|
||||
+4
-2
@@ -5,8 +5,10 @@ Thanks for your interest in contributing to Scarf.
|
||||
## Getting Started
|
||||
|
||||
1. Fork and clone the repo
|
||||
2. Open `scarf/scarf.xcodeproj` in Xcode 26.3+
|
||||
3. Build and run (requires macOS 26.2+ and Hermes installed at `~/.hermes/`)
|
||||
2. Open `scarf/scarf.xcodeproj` in Xcode 16.0+
|
||||
3. Build and run (Scarf runs on macOS 14.6 Sonoma or newer; Hermes must be installed at `~/.hermes/`)
|
||||
|
||||
For an unsigned command-line Debug build without an Apple Developer account, run [`./scripts/local-build.sh`](scripts/local-build.sh). See [BUILDING.md](BUILDING.md) for prerequisites.
|
||||
|
||||
## Architecture
|
||||
|
||||
|
||||
@@ -19,11 +19,56 @@
|
||||
<a href="https://www.buymeacoffee.com/awizemann"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me a Coffee" height="28"></a>
|
||||
</p>
|
||||
|
||||
## What's New in 2.5
|
||||
## What's New in 2.7
|
||||
|
||||
### ScarfGo — the iPhone companion ships in public TestFlight
|
||||
The biggest release since 2.6 — six weeks of work focused on **remote-context performance**, a **new project authoring flow**, **dashboard widgets**, **OAuth resilience**, and a top-to-bottom **performance instrumentation harness** that drove the bulk of the rest. 36 commits, no schema bump, no Hermes capability bump.
|
||||
|
||||
Same Hermes server you've been running on your Mac — now reachable from your phone over SSH. Multi-server, project-scoped chat, session resume, memory editor, cron list, skills tree, settings (read), all native iOS. Pure-Swift SSH (Citadel under the hood — no `ssh` binary needed on iOS). Per-project chat writes the same Scarf-managed `AGENTS.md` block the Mac app does, so the agent boots with the same project context regardless of which client opened the session.
|
||||
### Remote chats and Activity in seconds, not 30s timeouts
|
||||
|
||||
Resuming a chat or opening Activity on a slow remote (a 420ms-RTT droplet, an underprovisioned VPS, a tunnel through 4G) used to fetch the full message column set in one shot, which routinely tripped the 30s SSH timeout on chats with multi-page tool result blobs. v2.7 introduces a **skeleton-then-hydrate pattern** that bounds the wire payload by what the user actually needs to see RIGHT NOW, then fills in the heavy stuff in the background.
|
||||
|
||||
- **Chat skeleton** — user + assistant rows only (skips `role='tool'`), `tool_calls` / `reasoning` hard-NULLed at SQL level. Wire payload bounded by conversational text. The chat appears in seconds. Background hydration pages tool calls in 5-id batches; tool-result CONTENT is opt-in (Settings → Display → "Load tool results in past chats", default off) with per-card lazy-fetch in the inspector pane.
|
||||
- **Activity skeleton** — metadata-only fetch (~3 KB for 50 rows). Placeholder rows render immediately; real per-call entries swap in as paged hydration completes.
|
||||
- **Single-id whale recovery** — when a 5-id batch trips the 30s timeout (one row carries an oversized `tool_calls` blob), an L1 single-id retry isolates the offender so the rest of the batch still hydrates.
|
||||
|
||||
### SSH cancellation that actually cancels
|
||||
|
||||
`Task.detached` doesn't inherit cancellation from the awaiting parent. Pre-fix, navigating away from a chat left the underlying ssh subprocess running for the full 30s, pinning a remote sqlite query and a ControlMaster session — the "third chat hangs" / "dashboard spins after rapid switching" symptom. v2.7 wires `withTaskCancellationHandler` through `SSHScriptRunner.run` and `RemoteSQLiteBackend.query`; cancellation now reaches the `Process` within ~100ms.
|
||||
|
||||
### New Project from Scratch wizard + Keychain-backed cron secrets
|
||||
|
||||
A third project entry point alongside Browse Catalog and Add Existing Project. Scaffolds a Scarf-standard skeleton, registers it, and hands off to a chat session that auto-activates the bundled `scarf-template-author` skill. The skill drives the rest conversationally — widgets, optional config schema, optional cron — and writes the final files itself.
|
||||
|
||||
**Cron + Keychain.** Cron prompts that referenced `secret`-typed config fields used to get the literal `keychain://...` URI back, producing 401s. v2.7 mirrors resolved Keychain values into `~/.hermes/.env` under `$SCARF_<UPPER_SLUG>_<UPPER_FIELD>` env vars. Hermes already reloads `.env` per cron tick — credential rotation is automatic.
|
||||
|
||||
### Project dashboards — file-reading widgets, sparklines, typed status
|
||||
|
||||
Five new widget types and project-wide auto-refresh. **Backwards-compatible** — every existing `dashboard.json` renders byte-identically.
|
||||
|
||||
- **`markdown_file`** / **`log_tail`** / **`cron_status`** / **`image`** / **`status_grid`** — file-reading widgets that auto-refresh when the underlying file changes. By convention, place files inside `<project>/.scarf/`.
|
||||
- **`stat` widget gains inline sparklines** via optional `sparkline: [Number]`. SVG-only render; dozens per dashboard cost nothing.
|
||||
- **Typed status badges** with lenient decode (`ok`/`up` → success, `down`/`error` → danger). Unknown strings render as plain text rather than crashing.
|
||||
- **Structured widget error card** replaces the legacy "Unknown: \<type\>" placeholder.
|
||||
|
||||
### OAuth resilience + Credential Pools
|
||||
|
||||
- **Daily OAuth keepalive cron** prevents Anthropic OAuth refresh tokens from expiring after weeks of inactivity.
|
||||
- **Remote re-auth** unblocked — OAuth flow drives a remote `hermes auth add` correctly with stdin forwarded.
|
||||
- **OAuth remove button** + auto-refresh of Credential Pools on `auth.json` change.
|
||||
- **`resolve_provider_client` errors** (auxiliary task references an unauthenticated provider) classified into a clear hint with a one-click jump to Settings → Aux Models.
|
||||
- **Model/provider mismatch banner** detects when `model.default` carries a `<provider>/...` prefix that disagrees with `model.provider`, with one-click fix in either direction.
|
||||
|
||||
### ScarfMon — performance instrumentation harness
|
||||
|
||||
The diagnostic surface that drove the bulk of the v2.7 perf work. Off by default; signpost-only mode (Instruments-friendly) is free; Full mode keeps a 4096-entry in-memory ring buffer you can copy as JSON for paste-into-issue diagnosis. Wiki: [Performance-Monitoring](https://github.com/awizemann/scarf/wiki/Performance-Monitoring).
|
||||
|
||||
See the full [v2.7.0 release notes](https://github.com/awizemann/scarf/releases/tag/v2.7.0) for the complete list (36 commits, including: in-flight coalescing for `loadRecentSessions`, snapshot pipeline rewrite from `sqlite3 .backup` to direct SSH-streamed queries [#74](https://github.com/awizemann/scarf/issues/74), per-message TTS, window-position persistence, sidebar reorder, and many other fixes).
|
||||
|
||||
**Previous releases:** see the [Release Notes Index](https://github.com/awizemann/scarf/wiki/Release-Notes-Index) on the wiki for v2.6, v2.5, v2.3, v2.2, v2.0, v1.6, and earlier.
|
||||
|
||||
## ScarfGo — the iPhone companion
|
||||
|
||||
Same Hermes server you've been running on your Mac — reachable from your phone over SSH. Multi-server, project-scoped chat, session resume, memory editor, cron list, skills tree, settings (read), all native iOS. Pure-Swift SSH (Citadel under the hood — no `ssh` binary needed on iOS). Per-project chat writes the same Scarf-managed `AGENTS.md` block the Mac app does, so the agent boots with the same project context regardless of which client opened the session.
|
||||
|
||||
**[Join the public TestFlight](https://testflight.apple.com/join/qCrRpcTz)** — the link is live now but only accepts new beta testers once Apple's Beta Review approves the first build. If you hit a "not accepting testers" splash, bookmark it and try again in 24–48h.
|
||||
|
||||
---
|
||||
|
||||
See the [ScarfGo wiki page](https://github.com/awizemann/scarf/wiki/ScarfGo) for the full feature tour, [ScarfGo Onboarding](https://github.com/awizemann/scarf/wiki/ScarfGo-Onboarding) for the SSH-key setup walkthrough, and [Platform Differences](https://github.com/awizemann/scarf/wiki/Platform-Differences) for what is and isn't shared between Mac and iOS.
|
||||
|
||||
### Everything else in 2.5
|
||||
|
||||
- **Portable project-scoped slash commands.** Author reusable prompt templates as Markdown files at `<project>/.scarf/slash-commands/<name>.md` with YAML frontmatter (name, description, argumentHint, optional model override). Invoke as `/<name> [args]` from chat — Scarf substitutes `{{argument}}` (with optional `default:` fallback) in the body and sends the expanded prompt to Hermes. Mac authoring tab + iOS read-only browser. Project templates can bundle slash commands via the new `slash-commands/` block in `.scarftemplate` bundles (schemaVersion 3). See [Slash Commands](https://github.com/awizemann/scarf/wiki/Slash-Commands) for the full schema.
|
||||
- **Hermes v2026.4.23 chat parity.** `/steer` non-interruptive guidance command, per-turn stopwatch on assistant bubbles, numbered keyboard shortcuts (1–9) on the permission sheet, git branch chip in the chat header. The new `messages.reasoning_content` and `sessions.api_call_count` columns surface as a richer reasoning disclosure + an "API" chip on session rows.
|
||||
- **Spotify + design-md skills.** Mac ships an in-app Spotify OAuth sheet (mirrors the v2.3 Nous Portal pattern); design-md gets a host-side `npx` prereq check on both platforms. SKILL.md frontmatter (`allowed_tools`, `related_skills`, `dependencies`) renders as chip rows. A "What's New" pill on the Skills tab tells you when remote skills changed since you last looked.
|
||||
- **Mac global Sessions: project filter + project badges** — parity with ScarfGo's Sessions tab. The list grows a filter Menu (All projects / Unattributed / each registered project) and each row carries a tinted folder chip with the project name when attributed.
|
||||
- **Human-readable cron schedules everywhere.** New `CronScheduleFormatter` in ScarfCore translates the common cron shapes into English phrases and falls back to the raw expression on anything custom. Mac and iOS render the same.
|
||||
- **Mac design-system overhaul.** Rust palette, typed token bundle (`ScarfColor`, `ScarfFont`, `ScarfSpace`, `ScarfRadius`), reusable components (`ScarfPageHeader`, `ScarfCard`, `ScarfBadge`, `ScarfTextField`, four button styles), redesigned 3-pane chat. iOS adopts the same tokens with a hybrid Dynamic Type policy so accessibility scaling on body text is preserved. See [Design System](https://github.com/awizemann/scarf/wiki/Design-System) for the full reference.
|
||||
- **Under the hood** — `SessionAttributionService`, `ProjectContextBlock`, `CronScheduleFormatter`, `GitBranchService`, `SkillPrereqService`, `SkillSnapshotService`, `ProjectSlashCommandService`, and the ACP error triplet (`acpError` / `acpErrorHint` / `acpErrorDetails`) consolidated into ScarfCore so Mac and iOS consume one source of truth. 179 tests across 13 suites, three consecutive green runs. Several `try?` swallows in iOS lifecycle code now surface real failures (Keychain unlock errors no longer drop people into onboarding; partial Forget operations report what failed).
|
||||
- **iOS push notifications skeleton** — `NotificationRouter` ships with foreground presentation + a lock-screen "Approve / Deny" action category gated by `apnsEnabled = false`. Lights up when Hermes ships a server-side push sender + an APNs cert.
|
||||
|
||||
See the full [v2.5.0 release notes](https://github.com/awizemann/scarf/releases/tag/v2.5.0).
|
||||
|
||||
**Previous releases:** see the [Release Notes Index](https://github.com/awizemann/scarf/wiki/Release-Notes-Index) on the wiki for v2.3, v2.2, v2.0, v1.6, and earlier.
|
||||
|
||||
## Connect ScarfGo to your Hermes server
|
||||
|
||||
ScarfGo speaks SSH directly — no companion service, no developer-controlled server in between. Onboarding takes about a minute:
|
||||
---
|
||||
- macOS 14.6+ (Sonoma) for Scarf
|
||||
- iOS 18.0+ for [ScarfGo](https://github.com/awizemann/scarf/wiki/ScarfGo) (the iPhone companion, public TestFlight from v2.5)
|
||||
- Xcode 16.0+ to build from source
|
||||
- [Hermes agent](https://github.com/hermes-ai/hermes-agent) v0.6.0+ installed at `~/.hermes/` on each target host (v0.12.0+ recommended for full v2.6 feature support — autonomous Curator, multimodal image input, 5 new providers, Microsoft Teams + Yuanbao gateways, Kanban, Skills v0.12 surface, cron `--workdir`, prompt-cache TTL, Piper TTS, Vercel terminal)
|
||||
- For remote servers: SSH access (key-based), `sqlite3` on the remote (for atomic DB snapshots), and the `hermes` CLI resolvable from the remote user's `PATH` or at a path you specify per server. ScarfGo requires the same on every Hermes host it connects to.
|
||||
|
||||
### Compatibility
|
||||
---
|
||||
| v0.8.0 (2026-04-08) | Verified |
|
||||
| v0.9.0 (2026-04-13) | Verified |
|
||||
| v0.10.0 (2026-04-16) | Verified (Tool Gateway introduced) |
|
||||
| v0.11.0 (2026-04-23) | Verified |
|
||||
| v0.12.0 (2026-04-30) | **Verified — current target (recommended for full v2.6 feature support)** |
|
||||
|
||||
Scarf 2.6 targets Hermes v0.12.0 for the autonomous Curator, multimodal ACP image content blocks, the 5 new inference providers, Microsoft Teams + Yuanbao gateways, the read-only Kanban view, the Skills v0.12 surface (URL install / reload / disable badges / curator pin), cron `--workdir`, `auxiliary.curator`, `prompt_caching.cache_ttl`, the redaction toggle, the runtime metadata footer, Piper TTS, and the Vercel terminal backend. Every v0.12 surface is **capability-gated** — Scarf detects the host's Hermes version once per server connection (`hermes --version` → semver + `YYYY.M.D` parse) and hides v0.12-only UI on older hosts. v0.11.0 hosts keep the full v2.5 surface (`/steer`, `messages.reasoning_content`, `sessions.api_call_count`, design-md/spotify skills, SKILL.md frontmatter chips, `hermes memory reset`). Earlier Hermes versions remain supported for monitoring, sessions, file-based features, and ACP chat; new behavior degrades gracefully on older agents.
|
||||
|
||||
If a Hermes update changes the database schema or CLI output format, Scarf may need to be updated. Check the [Health](#features) view for compatibility warnings.
|
||||
|
||||
---
|
||||
xcodebuild -project scarf/scarf.xcodeproj -scheme scarf -configuration Release -arch arm64 -arch x86_64 ONLY_ACTIVE_ARCH=NO build
|
||||
```
|
||||
|
||||
For an unsigned local Debug build without an Apple Developer account (handy for contributors), use [`./scripts/local-build.sh`](scripts/local-build.sh) — see [BUILDING.md](BUILDING.md) for prerequisites.
|
||||
|
||||
## Architecture
|
||||
|
||||
Scarf follows the **MVVM-Feature** pattern with zero external dependencies beyond SwiftTerm:
|
||||
|
||||
---
|
||||
|
||||
### Bug fixes
|
||||
|
||||
#### Chat composer + transcript (post-merge round)
|
||||
|
||||
- **Typing lag in the chat composer (#67)** — `RichChatInputBar.updateMenuState()` ran on every keystroke and unconditionally wrote both `showMenu` and `selectedIndex`, tripping SwiftUI's "action tried to update multiple times per frame" warning and stalling input. Composer now coalesces writes to deltas, short-circuits when not in slash mode (the common case), and watches `commands.count` instead of re-allocating `commands.map(\.id)` per keystroke.
|
||||
- **Chat font-size slider had no visible effect (#68)** — `RichChatView` only set `\.dynamicTypeSize`, but `ScarfFont` tokens are fixed-point (`Font.system(size: 14, …)`) so dynamic type didn't reach bubble text, reasoning, tool chips, code blocks, or markdown headings. New `\.chatFontScale` env value plumbed through `RichMessageBubble`, `MarkdownContentView`, and `CodeBlockView`; `ChatFontScale.{body, caption, captionStrong, caption2, mono, monoSmall, codeBlock, codeInline}(_:)` helpers mirror the ScarfFont base sizes so 100% is byte-for-byte identical to today's UI.
|
||||
- **Placeholder ghosting on first keystroke (#65)** — `TextEditor`'s NSTextView surfaces a typed glyph one frame before the SwiftUI binding propagates, so the bare `if text.isEmpty` overlay rendered the translucent placeholder text on top of the just-typed character. Pinned an opaque background behind the placeholder rect and switched the conditional to `.opacity(...)` so the view tree stays stable per keystroke.
|
||||
- **Draft text leaked between conversations (#62)** — composer `@State` survived session switches because the surrounding view tree was structurally identical. Bound `RichChatInputBar`'s identity to `richChat.sessionId` so SwiftUI rebuilds the view (and its `@State`) on session change. Stable fallback string for the "no session selected" window — `UUID()` would have minted a new id per body re-eval and trashed the composer mid-typing.
|
||||
- **Sent message rendered blank after navigating away (#63)** — when a user sent a prompt and immediately resumed a different session before Hermes flushed the row to state.db, `resumeSession`'s `reset()` cleared `messages` and `loadSessionHistory` then read an as-yet-empty DB. New per-session pending-user-messages cache survives `reset()` and re-injects still-pending entries on load; entries clear themselves as soon as a matching DB row catches up.
|
||||
- **No completion notification (#64)** — sending a long prompt and switching to other work required polling the chat to know when the response landed. New `ChatNotificationService` fires a local `UNUserNotificationCenter` banner on prompt completion when Scarf isn't the foreground app. Settings → Display → Feedback → "Notify when Hermes finishes" toggle, default on.
|
||||
- **Per-message TTS playback (#66)** — small speaker glyph in each settled assistant bubble's metadata footer; uses `AVSpeechSynthesizer` with the user's macOS Spoken Content default voice, picks up offline. Markdown control characters stripped before speech. The deeper Settings → Voice provider integration (Edge / ElevenLabs / OpenAI / NeuTTS / Piper) is queued as a v2.7 follow-up.
|
||||
- **ACP control-message timeout under gateway concurrency (#61)** — bumped 30s → 60s. State.db lock contention on a healthy host clears in seconds, but the previous 30s watchdog tripped under realistic gateway+ACP concurrency (Discord sync / skill registration / cron scheduling holding write locks during ACP `initialize` / `session/new` / `session/load`). 60s gives lock resolution headroom while still surfacing genuinely broken transports.
|
||||
|
||||
#### Pre-merge
|
||||
|
||||
- **Test target compile** — `M5FeatureVMTests.ScriptedTransport` had drifted off the `ServerTransport` protocol after `cachedSnapshotPath` landed in v2.5.2; added the missing stub. `M0dViewModelsTests` got the `ConnectionStatusViewModel.Status.degraded` argument-name update. `CredentialPoolsGatingTests` got the missing `import ScarfCore`. The full `swift test` suite now runs (and passes — 215 tests across 17 suites).
|
||||
- **iOS package compile** — `RemoteBackupService.zipDirectory` and `RemoteRestoreService.unzipArchive` used `Foundation.Process` unconditionally, breaking the iOS build entirely (Process is unavailable on the iOS SDK). Wrapped in `#if !os(iOS)` with iOS stubs that throw — backup/restore is Mac-only by design.
|
||||
|
||||
|
||||
---
|
||||
## What's in 2.6.5
|
||||
|
||||
A patch release that ships **template discoverability**, **cron observability**, and an **end-to-end UI test harness** that locks the new install path against regression. No breaking changes; every Hermes capability target is unchanged from 2.6.0.
|
||||
|
||||
### In-app Template Catalog
|
||||
|
||||
The catalog is no longer web-only. **Templates → Browse Catalog…** opens a sheet that fetches the live catalog from `awizemann.github.io/scarf/templates/`, renders one row per published template with name + version + tags, and one-click installs through the existing flow. Search filters across name / description / tags; the category picker constrains to whatever categories the loaded catalog actually carries.
|
||||
|
||||
- **Install-state badges** — each row shows "Installed v1.2.0" (green) or "Update v1.3.0" (amber) when the catalog version is newer than what's in `~/.hermes/scarf/projects.json`. Update is "uninstall + reinstall" today; in-place upgrade is on the v3 backlog.
|
||||
- **24h cache** at `~/.hermes/scarf/catalog_cache.json` so opening the sheet repeatedly doesn't re-hit the network. Refresh icon force-fetches.
|
||||
- **Bundled fallback** — fresh-install / offline users still see the official templates as a hardcoded list. Network failures serve stale cache with a "refresh failed" hint.
|
||||
- **Catalog-schema decoder fault tolerance** — one malformed entry on the live catalog can't bring down the whole list. The bad row is dropped with a logged warning; the rest survive.
|
||||
|
||||
### HackerNews Daily Digest template
|
||||
|
||||
First template added under the new dogfooding-templates loop. Configurable `min_score`, `max_items`, `topics`; one daily-at-08:00 cron job (paused on install) that pulls the HN Firebase API, filters, and prepends a markdown digest to the project's `digest.md`. No API keys required. Live at the catalog URL above.
|
||||
|
||||
### Cron observability — auth-error banner + running indicator + log tail
|
||||
|
||||
Cron rows now surface the same OAuth-refresh-revoked recovery flow as Chat instead of a generic red dot, plus three previously-missing observability cues:
|
||||
|
||||
- **OAuth re-auth.** `ACPErrorHint.classify` runs on `job.lastError`; when it returns `oauthRefreshRevoked(provider)` the detail pane shows the human-readable hint + a **Re-authenticate** button that drops the user into Credential Pools — same wiring ChatView's banner uses. Unrecognized errors fall back to the legacy red `lastError` text.
|
||||
- **Running indicator.** The row dot turns blue + pulses when `state == "running"` (precedence over disabled / error / success); the detail header gains a "running…" badge next to active/paused. No new polling — `HermesFileWatcher.lastChangeDate` already drives `CronViewModel.load()`.
|
||||
- **Last run output.** Collapsible panel replacing the inline log: a one-line summary (`<timestamp> — ok|error|running…`) always visible, full monospaced terminal-style scroll on expand, auto-scrolls to bottom when new runs land.
|
||||
|
||||
Also fixes a pre-existing bug in `HermesFileService.loadCronOutput` that returned the wrong file under Hermes's per-job-id output nesting.
|
||||
|
||||
### Layer B install-drive XCUITest harness
|
||||
|
||||
The dogfooding-templates initiative ships its first end-to-end UI test that drives the install pipeline:
|
||||
|
||||
```
|
||||
Launch with --scarf-test-mode → Sidebar → Projects → Install sheet
|
||||
(via --scarf-test-install-url launch arg) → Configure → Open Project
|
||||
→ Right-click → Uninstall Template → Confirm Remove → Done
|
||||
```
|
||||
|
||||
Runs ~30 s green on the dev Mac, validates 9 assertion points across the user journey. Covers the new accessibility identifiers wired in this release: `templateConfig.commitButton`, `projects.row.<name>`, `sidebar.section.<rawValue>`, `projects.contextMenu.uninstallTemplate`, `templateUninstall.confirmRemove`, `templateInstall.success.openProject`, `templateUninstall.success.done`. The `--scarf-test-install-url` launch arg + `TestModeFlags.isTestMode` gating lets XCUITest skip SwiftUI Menu / NSToolbarItem accessibility-bridging quirks that otherwise block toolbar-menu driving.
|
||||
|
||||
Wiki [Test-Harness](https://github.com/awizemann/scarf/wiki/Test-Harness) documents how to extend the harness for the next template.
|
||||
|
||||
### Sentinel-marker test isolation (incident-response hardening)
|
||||
|
||||
`SCARF_HERMES_HOME` override now requires the path to contain a `.scarf-test-home-marker` file to activate. Without the marker, production code falls through to the user's real `~/.hermes/`. Lands belt-and-braces protection for cases where a test crashes mid-teardown leaving the env var set, an env var inherits from a parent shell, or a misconfigured launchctl plist exports the variable. The override remains the seam every E2E test relies on; the marker file ensures it can't accidentally pivot a non-test process off the user's data.
|
||||
|
||||
### Chat fixes
|
||||
|
||||
- **OAuth refresh-revoked surface.** Chat-side error banner now classifies the message via `ACPErrorHint.classify` and offers an in-app **Re-authenticate** button that routes through Credential Pools (#65). Same primitive the new cron banner reuses.
|
||||
- **Placeholder ghosting fix.** TextEditor's placeholder now clips to the editor's bounds and clears on focus instead of bleeding past the cursor area when the user types fast (#67).
|
||||
|
||||
### Profile chip + structured logs
|
||||
|
||||
- **Active-profile chip in the sidebar header.** Click → routes to Profiles. Local contexts only (remote SSH would mislead).
|
||||
- **Switch & Relaunch** flow now writes `~/.hermes/active_profile` and relaunches Scarf in a single click instead of asking the user to quit+reopen.
|
||||
- Profile-resolver logs are now structured (key=value form) so `log show … | grep ProfileResolver` can pull "which profile did Scarf resolve to and why" out of support requests.
|
||||
|
||||
### Swift 6 cleanup
|
||||
|
||||
- `MessageSpeechService` — drop `@preconcurrency` on the AVSpeechSynthesizerDelegate conformance now that the protocol's Sendable annotations are upstreamed.
|
||||
- `ChatView` — `RichChatViewModel.PendingPermission: @retroactive Identifiable`. Quiets the Swift 6 compiler so downstream breakage would be loud if ScarfCore ever adds the conformance upstream.
|
||||
- `CredentialPoolsView` — `.help(Text(verbatim:))` so backticks render literally instead of being treated as markdown inline-code.
|
||||
|
||||
### iOS
|
||||
|
||||
- Composer redesigned with HIG touch targets + clear disabled state.
|
||||
- Portrait lock retained.
|
||||
- Chat-start preflight moved off MainActor.
|
||||
|
||||
### Known caveats
|
||||
|
||||
- **Cron-job-uninstall by name is ambiguous** when two projects share the same template id. The Layer B test surfaced this — manifests as: the test passes, but if you've manually installed the same template before running the test, your real cron job can disappear. Recovery is `hermes cron create`. Fix is queued: store cron-job IDs in `<project>/.scarf/template.lock.json` at install time and resolve by ID at uninstall time.
|
||||
- **Full-suite parallel test runs intermittently hang** — pre-existing flaky test infrastructure unrelated to this release. Individual suites all pass; the hang only manifests on `xcodebuild test` with everything concurrent. The sentinel-marker hardening prevents user-data damage from any race.
|
||||
|
||||
### Compatibility
|
||||
|
||||
- **Hermes target unchanged from 2.6.0**: v2026.4.30 (v0.12.0). Pre-v0.12 Hermes hosts continue to work — no new capability gates added in this release.
|
||||
- **Min macOS unchanged**: 14.6.
|
||||
- **No schema changes** to anything in `~/.hermes/`. The two new Scarf-owned files (`scarf/catalog_cache.json` and the template-installer's `.scarf-test-home-marker` for tests) are additive.
|
||||
---
|
||||
## What's in 2.7.0
|
||||
|
||||
The biggest release since 2.6.0 — a six-week stretch covering **remote-context performance**, a **new project authoring flow**, **dashboard widgets**, **OAuth resilience**, and a top-to-bottom **performance instrumentation harness** that drove the bulk of the rest. 36 commits, no schema bump, no Hermes capability bump.
|
||||
|
||||
The throughline: Scarf got materially faster and more honest on slow remote SSH links, where 30-second sqlite timeouts and silently-empty UI used to be common. The skeleton-then-hydrate pattern, SSH cancellation propagation, and ScarfMon-driven diagnosis are the shape of how that work gets done now.
|
||||
|
||||
---
|
||||
|
||||
### Remote-context performance — chats and Activity in seconds, not 30s timeouts
|
||||
|
||||
Resuming a chat on a slow remote (a 420ms-RTT droplet, an underprovisioned VPS, a tunnel through 4G) used to fetch the full message column set in one shot, which routinely tripped the 30s SSH timeout on chats with multi-page tool result blobs. The 160-message session was broken; the 30-message session was broken too. Activity didn't load at all.
|
||||
|
||||
v2.7 introduces a **skeleton-then-hydrate pattern** that bounds the wire payload by what the user actually needs to see RIGHT NOW, then fills in the heavy stuff in the background:
|
||||
|
||||
- **Chat skeleton.** [`fetchSkeletonMessages`](https://github.com/awizemann/scarf/blob/main/scarf/Packages/ScarfCore/Sources/ScarfCore/Services/HermesDataService.swift) selects user + assistant rows only (skips `role='tool'`) with `tool_calls` / `reasoning` / `reasoning_content` hard-NULLed at the SQL level. Wire payload bounded by conversational text alone — typically a few KB. The chat appears in seconds. Background `startToolHydration` pages through `hydrateAssistantToolCalls` in 5-id batches to splice tool calls in. Tool-result CONTENT is **opt-in** via Settings → Display → "Load tool results in past chats" (default off); the inspector pane lazy-fetches per-result content via `fetchToolResult(callId:)` when you open a card.
|
||||
- **Activity skeleton.** [`fetchRecentToolCallSkeleton`](https://github.com/awizemann/scarf/blob/main/scarf/Packages/ScarfCore/Sources/ScarfCore/Services/HermesDataService.swift) returns metadata-only rows (id + session_id + role + timestamp; everything else NULLed). Activity opens in <1s on remote with placeholder rows; real per-call entries swap in as paged hydration completes. New "Loading tool details…" pill in the page header surfaces hydration progress.
|
||||
- **Single-id whale recovery.** When a 5-id batch trips the 30s timeout (one row carries an oversized `tool_calls` blob — a long Edit's args, a big diff), an L1 single-id retry isolates the offending row so the rest of the batch still hydrates. Whale row stays bare; assistant message stays readable.
|
||||
- **Lazy tool result loading in the inspector.** Default-off avoids the bulk fetch. When you focus a tool call card, ChatInspectorPane fires `loadToolResultIfMissing(callId:)` which splices a single result into the message stream without re-fetching anything else.
|
||||
|
||||
Effect: a 160-message thinking-model session that used to time out at exactly 30s now opens in under 2 seconds with placeholder cards filling in over the next few. Activity loads in 500-800ms.
|
||||
|
||||
#### SSH cancellation that actually cancels
|
||||
|
||||
`Task.detached { … }` doesn't inherit cancellation from the awaiting parent, and `Task<…> { … }` (unstructured) also drops the signal. Without explicit bridging, cancelling a chat-load Task only unwinds Swift state — the underlying ssh subprocess kept running for the full 30s, pinning a remote sqlite query and a ControlMaster session slot. This produced the "third chat hangs" / "dashboard spins after rapid switching" symptom.
|
||||
|
||||
v2.7 wires `withTaskCancellationHandler` through [`SSHScriptRunner.run`](https://github.com/awizemann/scarf/blob/main/scarf/Packages/ScarfCore/Sources/ScarfCore/Transport/SSHScriptRunner.swift) and [`RemoteSQLiteBackend.query`](https://github.com/awizemann/scarf/blob/main/scarf/Packages/ScarfCore/Sources/ScarfCore/Services/Backends/RemoteSQLiteBackend.swift) so parent cancellation reaches the `Process` and calls `proc.terminate()` within 100ms. New `ssh.cancelled` ScarfMon event surfaces this.
|
||||
|
||||
#### In-flight coalescing for `loadRecentSessions`
|
||||
|
||||
File-watcher deltas during an active stream used to stack 2-3 parallel sessions-list reload tasks (the 500ms `scheduleSessionsRefresh` debounce only suppresses a pending tick, not one already executing). Subsequent callers now await the in-flight load instead of spawning a parallel SSH subprocess. New `mac.loadRecentSessions.coalesced` event tracks dedup hits.
|
||||
|
||||
#### Loading-state UX hardening
|
||||
|
||||
The Mac chat sidebar greys out and disables row taps the moment a session-switch is initiated (synchronously, before `client.start()` returns), with a floating ProgressView showing the current phase: **"Spawning hermes acp…"** → **"Authenticating…"** → **"Loading session…"** → **"Loading history…"** → **"Ready"**. Pre-fix the sidebar looked engageable while the 5-7 second SSH+ACP boot was still in flight, and the user could queue up a second session-switch behind the first. New `isStartingSession` flag flips on user click for instant feedback.
|
||||
|
||||
#### Partial-result + mismatch + pinned-model banners
|
||||
|
||||
- **Partial-result banner.** When the skeleton fetch trips an SSH transport failure (rather than a clean empty result), the chat surfaces "Couldn't load full chat history — the connection to *server* timed out" through the existing `acpError` triplet, plus forces `hasMoreHistory = true` so the "Load earlier" affordance shows up. Replaces the pre-fix silent empty transcript.
|
||||
- **Model/provider mismatch banner.** [`ModelPreflight.detectMismatch`](https://github.com/awizemann/scarf/blob/main/scarf/Packages/ScarfCore/Sources/ScarfCore/Services/ModelPreflight.swift) recognizes when `model.default` carries a `<provider>/...` prefix that disagrees with `model.provider` (e.g. `anthropic/claude-sonnet-4.6` + `provider: nous` after switching OAuth via Credential Pools). Banner offers one-click fix in either direction.
|
||||
- **Pinned-model failure hint.** ACP error classifier now recognizes `model_not_found` / `404 messages` / `model is not available` and surfaces "This session was created with a model the provider no longer offers — start a new chat to use your current model" so the pinned-model failure mode has a clear recovery path.
|
||||
- **OAuth-completion provider swap.** After a successful OAuth in Credential Pools, if the just-authed provider differs from `model.provider`, surface "Switch active provider to *name*?" with [Switch] / [Keep current] instead of auto-dismissing.
|
||||
|
||||
---
|
||||
|
||||
### New Project from Scratch wizard + Keychain-backed cron secrets
|
||||
|
||||
A **third project entry point** alongside Browse Catalog and Add Existing Project: a wizard that scaffolds a Scarf-standard project skeleton (`<project>/.scarf/dashboard.json` + AGENTS.md marker block), registers it, and hands off to a chat session that auto-activates the bundled `scarf-template-author` skill. The skill drives the rest conversationally — widgets, optional config schema, optional cron — and writes the final files itself. Wizard stays minimal because the agent does configuration better than a multi-step form. The skill ships bundled inside `Scarf.app/Contents/Resources/BuiltinSkills.bundle/` and copies into `~/.hermes/skills/` on launch (idempotent + version-gated).
|
||||
|
||||
**Cron + Keychain — `$SCARF_<SLUG>_<FIELD>` env vars.** Cron prompts that referenced `secret`-typed config fields used to get the literal `keychain://...` URI back when reading `config.json`, producing 401s. v2.7 mirrors resolved Keychain values into `~/.hermes/.env` under a marker-bounded block keyed by template slug:
|
||||
|
||||
```sh
|
||||
# scarf-secrets:begin local-news-aggregator
|
||||
SCARF_LOCAL_NEWS_AGGREGATOR_API_TOKEN=actual-value
|
||||
SCARF_LOCAL_NEWS_AGGREGATOR_RSS_URL=https://example.com/feed
|
||||
# scarf-secrets:end local-news-aggregator
|
||||
```
|
||||
|
||||
Hermes already reloads `~/.hermes/.env` per cron tick, so credential rotation is automatic — just edit the value in Configuration → next tick sees it. The mirror runs at every state-change point: install, post-install Configuration save, uninstall, "Remove from List", and on app launch (reconciliation pass over registered projects). Source of truth stays in the Keychain — `config.json` keeps `keychain://` URIs unchanged. Mode 0600 enforced on `~/.hermes/.env`.
|
||||
|
||||
Cron prompts now reference these env vars directly:
|
||||
|
||||
```json
|
||||
{
|
||||
"prompt": "Use the terminal: curl -sS -H \"Authorization: Bearer $SCARF_LOCAL_NEWS_AGGREGATOR_API_TOKEN\" \"$SCARF_LOCAL_NEWS_AGGREGATOR_RSS_URL\" -o {{PROJECT_DIR}}/.scarf/feed.xml"
|
||||
}
|
||||
```
|
||||
|
||||
**Migration.** First launch of v2.7 walks the project registry and writes the managed block per schemaful project — automatic. Existing cron prompts you wrote against the old (broken) `config.json` pattern still need updating: open the cron job in Scarf's Cron sidebar and edit the prompt, or ask the agent in chat ("Update my Local News cron job's prompt to use the new env var convention") — the bundled `scarf-template-author` skill (now v1.1.0) documents the convention with worked examples.
|
||||
|
||||
Also fixes [#75](https://github.com/awizemann/scarf/issues/75) — `_NSDetectedLayoutRecursion` on the Configuration form for projects whose form transitioned between stages with different intrinsic heights.
|
||||
|
||||
---
|
||||
|
||||
### Project dashboards — file-reading widgets, sparklines, typed status
|
||||
|
||||
Five new widget types, project-wide auto-refresh, and a structured error card for unknown widgets. Backwards-compatible — every existing `dashboard.json` renders byte-identically.
|
||||
|
||||
- **Project-wide auto-refresh.** [`HermesFileWatcher`](https://github.com/awizemann/scarf/blob/main/scarf/scarf/Core/Services/HermesFileWatcher.swift) used to watch each project's `dashboard.json` specifically. v2.7 promotes that to a watch on the entire `<project>/.scarf/` directory. A `markdown_file` or `log_tail` widget pointing at `<project>/.scarf/reports/foo.md` refreshes the moment a cron job rewrites the file. **By convention, place files the dashboard reads inside `.scarf/`** so the watch picks them up.
|
||||
- **`markdown_file`** — renders a markdown file from disk through the same `MarkdownContentView` pipeline used by inline `text` widgets.
|
||||
- **`log_tail`** — last `lines` of a file (default 20, max 200), monospaced, ANSI codes stripped.
|
||||
- **`cron_status`** — last run / next run / state for one Hermes cron job by `jobId`, plus a small inline log tail. Read-only — Run/Pause/Resume controls stay on the Cron tab.
|
||||
- **`image`** — local file (`path` relative to project root) or remote `url`. Optional `height` cap. Useful for matplotlib/Plotly PNGs the cron job generates.
|
||||
- **`status_grid`** — compact NxM grid of colored cells, one per service / item, with hover labels.
|
||||
- **`stat` widget gains inline sparklines.** Optional `sparkline: [Number]` field. SVG-only render; dozens per dashboard cost nothing.
|
||||
- **Typed status badges.** `list` items and `status_grid` cells share a typed enum (`success`, `warning`, `danger`, `info`, `pending`, `done`, `neutral`) with lenient decode for synonyms (`ok`/`up` → success, `down`/`error` → danger). Unknown strings render as plain text.
|
||||
- **Structured widget error card.** Replaces the legacy "Unknown: \<type\>" placeholder with a card surfacing the title, specific reason, and a hint.
|
||||
- **Schema mirror.** The widget vocabulary lives once at [`tools/widget-schema.json`](https://github.com/awizemann/scarf/blob/main/tools/widget-schema.json); the catalog validator reads from it and enforces per-type required fields.
|
||||
|
||||
---
|
||||
|
||||
### OAuth resilience + Credential Pools
|
||||
|
||||
- **Daily OAuth keepalive cron.** Prevents Anthropic OAuth refresh tokens from expiring after weeks of inactivity. New cron job `[scarf:oauth-keepalive]` (managed by Scarf) pings Hermes on a daily cadence; the in-app Refresh All Sessions action mirrors the same path on demand.
|
||||
- **Remote re-auth.** Re-authenticating against a remote droplet's OAuth provider used to be blocked by the lack of a stdin path through SSHTransport. The OAuth flow now drives a remote `hermes auth add` correctly with stdin forwarded.
|
||||
- **OAuth remove button.** Per-provider remove action in Credential Pools (auth.json edit), with confirmation dialog. Companion auto-refresh of the view when `auth.json` changes externally (file-watcher).
|
||||
- **`resolve_provider_client` error classification.** When an auxiliary task references a provider whose credentials aren't loaded, Hermes prints `resolve_provider_client: <name> requested but <Display Name> not configured` to stderr — pre-fix this surfaced in chat as the opaque `-32603 Internal error` with no actionable detail. Now classified into a clear hint pointing at Settings → Aux Models.
|
||||
- **Aux Tab unknown-task surface.** When `config.yaml` has an `auxiliary.<task>` block for a task Scarf doesn't know about (newer Hermes added it; Scarf hasn't caught up), render it as a plain row with the raw provider/model values instead of dropping it silently.
|
||||
- **Credential Pools refresh after OAuth sheet dismiss.** Closing the OAuth sheet after a successful add now refreshes the list immediately instead of leaving the just-added pool hidden until the next file-watcher tick.
|
||||
|
||||
---
|
||||
|
||||
### ScarfMon — performance instrumentation harness
|
||||
|
||||
The diagnostic surface that drove the bulk of the v2.7 perf work. Off by default; signpost-only mode (Instruments-friendly) is free; Full mode (4096-entry in-memory ring buffer + os.Logger) is a click away in Settings → Diagnostics → Performance. Wiki: https://github.com/awizemann/scarf/wiki/Performance-Monitoring
|
||||
|
||||
- **Phases 1-3** built the core: dispatcher + ring buffer + 3 backends, chat / transport / sqlite measure points, diagnostic counters for chat-render bursts, finalize-burst dampening.
|
||||
- **Tier A + B** added per-feature instrumentation: iOS file watcher, sessions list, model catalog, dashboard widgets, image encoder, message hydration.
|
||||
- **Nous picker investigation** localized a 60s + 120s beach-ball to a specific path (Nous catalog `readCache`), then killed the 120s one with dedupe + 5s timeout.
|
||||
- **Tier C catch-up** (this release): instrumented Memory / Skills / Cron / Curator load paths so future captures show how often these tabs cost multiple sequential SFTP RTTs on remote.
|
||||
- **Per-call bytes recorded** on transport + sqlite events so captures show payload sizes alongside latencies.
|
||||
- **`mac.emptyAssistantTurn` event** documents the Nous quirk where the model returns a thought stream with no body (the bubble looks like Hermes is "still thinking" but the turn already finished).
|
||||
|
||||
Adding a new measure point is two lines. The harness covers Mac and iOS uniformly. The "Copy as JSON" button exports the ring buffer for paste-into-issue diagnosis.
|
||||
|
||||
---
|
||||
|
||||
### Other fixes + polish
|
||||
|
||||
- **Sessions sidebar reload debounce** — file-watcher deltas during streaming used to flicker the sessions list. Coalesced into one trailing fetch ~500ms after the last tick.
|
||||
- **Session-load pagination + race guard** — switching to a small chat while a larger one was mid-fetch could let the stale, larger fetch overwrite the small chat (last-write-wins). Three race-checks against `self.sessionId` prevent the stale fetch from overwriting.
|
||||
- **Sessions + previews batched** — two separate SSH calls folded into one `queryBatch` round trip, halving the round-trips for every sidebar refresh.
|
||||
- **Remote SQLite query timeout** bumped 15→30s to better tolerate slow links; in-flight query coalescing dedupes concurrent identical queries.
|
||||
- **`Thread.sleep` spin replaced** with a kernel-wait via `DispatchGroup` for `runLocal` timeout; under concurrent SSH load the old loop accumulated spin-blocked threads and produced 7-second outliers in `loadRecentSessions`.
|
||||
- **Window position + size** persists across launches.
|
||||
- **Sidebar reorder** — Projects promoted to first section; profile chip moved under server name.
|
||||
- **`stop` badge suppressed** on metadata footer for normal turn ends (it was firing for every clean completion, looking like an error).
|
||||
- **Nous picker search field** + `model-picker` filter for the long Nous overlay model list.
|
||||
- **`oauth-keepalive` cron create** — drop the `--silent` flag Hermes doesn't accept.
|
||||
- **Snapshot pipeline rewritten** — replaced the `sqlite3 .backup`-then-download pipeline with direct SSH-streamed query execution (issue [#74](https://github.com/awizemann/scarf/issues/74)). Eliminates the multi-minute snapshot wait on multi-GB state.db files. Companion fix: pre-expand `~/` in Swift via `resolvedUserHome` so sqlite3 finds the DB without depending on the remote shell's tilde expansion.
|
||||
- **Aux nested-YAML parser** — corrected the parser so the unknown-task surface works on remote (was previously dropping aux blocks whose `provider:` value lived on a separate line).
|
||||
- **`ModelPreflight` newline trim bug** — `.whitespaces` doesn't strip newlines; switched both trims to `.whitespacesAndNewlines` so a stray `\n` in a hand-edited config.yaml doesn't false-positive the mismatch banner.
|
||||
|
||||
---
|
||||
|
||||
### What's measured today
|
||||
|
||||
321 ScarfCore tests pass (302 prior + 19 new ModelPreflight). New ScarfMon events documented in the [Performance-Monitoring wiki](https://github.com/awizemann/scarf/wiki/Performance-Monitoring).
|
||||
|
||||
### Compatibility
|
||||
|
||||
- macOS 14+ (unchanged).
|
||||
- Hermes target: still **v2026.4.30 (v0.12.0)**. No new Hermes capability gates added.
|
||||
- Existing `dashboard.json` files render unchanged.
|
||||
- Existing `.scarftemplate` bundles install unchanged. Catalog manifest schemaVersion stays at 1/2/3 — no bump.
|
||||
- Existing `~/.hermes/.env` content is preserved byte-identically — Scarf only writes inside its `# scarf-secrets:begin <slug>` / `# scarf-secrets:end <slug>` regions.
|
||||
- The skeleton-then-hydrate chat loader and SSH cancellation propagation are **Mac-only** in this release; ScarfGo (iOS) keeps its existing chat path.
|
||||
|
||||
### What's deferred
|
||||
|
||||
- **Per-widget data sources + per-widget refresh granularity.** The general "widget points at a typed data source" abstraction is the next-largest win in dashboards but materially expands the model + JS mirror + validator surface. The project-wide watch covers the common cron-driven workflow without it.
|
||||
- **Cross-project health digest sidebar rollup.** Counting attention-needed projects across the registry — scoped but didn't pull its weight. The typed status enum makes it cheap to add later.
|
||||
- **Automatic cron-prompt rewriter on upgrade.** Heuristic rewrites of free-form prompts are risky; the docs + agent-assisted path ships in v2.7. Revisit a "scan + fix" UI in v2.8 if real users miss the migration.
|
||||
- **iOS New Project wizard + iOS Keychain-env mirror.** ScarfGo's project surface is read-only; the wizard's chat-handoff pattern depends on Mac-only ACP plumbing.
|
||||
- **iOS skeleton-then-hydrate loaders.** Same data-service surfaces are public, but the iOS chat lifecycle is structured differently. Defer until iOS dogfooding shows the same payload-size pain.
|
||||
- **Tier C redesigns (Memory/Skills/Cron/Curator).** Instrumented in v2.7; redesign waits for capture data showing which path actually needs the skeleton-then-hydrate treatment.
|
||||
@@ -0,0 +1,34 @@
|
||||
## What's in 2.7.1
|
||||
|
||||
A patch release covering three bug reports filed against 2.7.0, plus follow-up cleanups in the same neighborhood. No data migrations, no UI surface changes — drop-in replacement for 2.7.0 on Mac.
|
||||
|
||||
### Bug fixes
|
||||
|
||||
#### Mac
|
||||
|
||||
- **[#77](https://github.com/awizemann/scarf/issues/77) — Sessions screen renders empty even when Dashboard reports sessions exist.** v2.7.0 folded the Sessions tab's two SQL queries (sessions list + previews) into a single batched SSH round-trip for perf. The combined wire payload for any user with ~150+ sessions crossed macOS's 16–64 KB pipe-buffer threshold; without a concurrent reader draining the pipe, the remote `sqlite3 -json` blocked, the script never finished, our 30-second timeout fired, and the call returned an empty result. `SSHScriptRunner` now drains stdout/stderr concurrently with the running process via `FileHandle.readabilityHandler`, so the kernel pipe never fills. Same fix applied to the local-execution path. New regression test pushes 256 KB of synthetic output through the runner and asserts full delivery — would have wedged pre-fix.
|
||||
|
||||
- **[#78](https://github.com/awizemann/scarf/issues/78) — Skills "What's New" pill contradicts the Updates sub-tab.** The pill at the top of the Skills page was rendering on every sub-tab, including Updates. It counts **local** file deltas since the user last clicked "Mark as seen" (e.g. "18 new" = 18 skills landed on disk that you haven't acknowledged), while the Updates body runs `hermes skills check` to find skills with newer **upstream** versions available — a different concept. Two surfaces using the word "update" for two different things made the screen contradict itself. Two changes: the pill now renders only on the Installed sub-tab (Mac and ScarfGo), and its label says "X **changed** since you last looked" instead of "X updated" so the local-file vocabulary doesn't collide with upstream-update vocabulary anywhere on the page.
|
||||
|
||||
- **[#79](https://github.com/awizemann/scarf/issues/79) — Skills hub search returns nothing for terms visible in Browse.** With the source picker on "All Sources", `hermes skills search <query>` (no `--source` flag) routes through Hermes's centralized index and skips external API sources (skills-sh, github, clawhub, lobehub, well-known) — but Browse still aggregates from those sources, so a skill like `honcho` would show up in Browse and disappear in search. Same picker, same query, contradictory results. Rather than chase Hermes's index gaps, "All Sources" search now means "filter what you can already see": Scarf caches the most recent Browse payload and runs a client-side substring filter (case-insensitive against name, description, and identifier) against it, instantly. Source-specific searches still shell out to `hermes skills search --source <s>` for full upstream search semantics. Five new tests cover the filter behavior.
|
||||
|
||||
- **`hermesPIDResult()` — narrow the Hermes "is it running?" probe to the gateway.** Previously `pgrep -f hermes`, which matched any process with "hermes" in its argv: chat sessions Scarf itself spawns, `hermes -z` one-shots, log tails, even the README in an editor. The Dashboard "Hermes is running" badge could read true even when the gateway daemon was down. Tightened to a regex that matches only the gateway shape — `python -m hermes_cli.main gateway run …` and `/path/to/hermes gateway run …`. All callers (DashboardViewModel, HealthViewModel, SettingsViewModel, scarfApp, stopHermes) want the gateway PID specifically. Cherry-picked from [#76](https://github.com/awizemann/scarf/pull/76) — thanks to [@unixwzrd](https://github.com/unixwzrd) for the diagnosis and regex.
|
||||
|
||||
- **`HealthViewModel.stopDashboard()` — stop the dashboard by port, not `pkill -f`.** External-instance fallback used to be `pkill -f "hermes dashboard"`, broad enough to match shell history, log tails, README readers — anything with the substring in its argv. Now `lsof -tiTCP:<port> -sTCP:LISTEN` resolves the PID actually bound to the dashboard port and only that one process gets `SIGTERM`. Trusting the port is correct here: Scarf owns the configured port and the user-visible intent is "stop the thing on this port." Direction cherry-picked from [#76](https://github.com/awizemann/scarf/pull/76); the `-c hermes` filter from the original was dropped because Hermes installs as a Python shebang script and the kernel COMM is `python`, not `hermes` — `-c hermes` would silently miss every standard install.
|
||||
|
||||
### Documentation + tooling
|
||||
|
||||
- **`scripts/local-build.sh` + `BUILDING.md` for contributor builds.** New unsigned single-arch Debug build script for contributors without an Apple Developer account. Detects arm64 / x86_64, verifies xcode-select / xcrun / xcodebuild, probes the Metal toolchain (offers an interactive install on TTY, errors cleanly on CI), resolves Swift packages, builds Debug with signing disabled. Optional one-touch `ditto` to `/Applications/scarf.app` on explicit y/N. The canonical Release universal CLI in `README.md` is unchanged — `local-build.sh` is an alternative for contributors, not a replacement for the shipping build. Cherry-picked from [#76](https://github.com/awizemann/scarf/pull/76).
|
||||
|
||||
- **`BUILDING.md` + `CONTRIBUTING.md` — restored Sonoma compatibility messaging.** The runtime min is **macOS 14.6 (Sonoma)** — that's the `MACOSX_DEPLOYMENT_TARGET` on the main `scarf` target and is intentional. Build min is **Xcode 16.0** (needed for Swift 6 strict-concurrency features). The legacy CONTRIBUTING.md line had drifted to "Xcode 26.3+ / macOS 26.2+", which would have steered Sonoma contributors and users away from a build that actually runs on their box. Corrected, with a load-bearing callout in BUILDING.md so future doc edits don't silently raise the floor again.
|
||||
|
||||
### Migrating from 2.7.0
|
||||
|
||||
Sparkle will offer the update automatically. No config migration, no schema changes. Existing sessions, skills, and projects are untouched.
|
||||
|
||||
If you've been working around #77 by collapsing the sidebar or restarting Scarf to repopulate the Sessions list, you can stop — sessions should load reliably now.
|
||||
|
||||
### Acknowledgements
|
||||
|
||||
- [@bricelb](https://github.com/bricelb) for the three v2.7.0 bug reports ([#77](https://github.com/awizemann/scarf/issues/77), [#78](https://github.com/awizemann/scarf/issues/78), [#79](https://github.com/awizemann/scarf/issues/79)) — well-instrumented reproductions including screenshots and environment details made the diagnosis straightforward.
|
||||
- [@unixwzrd](https://github.com/unixwzrd) for [#76](https://github.com/awizemann/scarf/pull/76) — the gateway-pgrep tighten, the `pkill -f "hermes dashboard"` direction, and the `local-build.sh` contributor flow are all cherry-picked from that PR.
|
||||
@@ -0,0 +1,83 @@
|
||||
## What's in 2.7.5
|
||||
|
||||
A feature release that lifts Scarf's Kanban surface from a read-only list (the v2.6 placeholder shipped while upstream Kanban was still mid-rework) to a full drag-and-drop board with the complete Hermes v0.12 mutation surface wired up — plus per-project boards bound to a Scarf-minted tenant slug, and a read-only board on iOS for at-a-glance status from your phone. No data migrations, no schema changes; pre-v0.12 hosts gracefully hide the surface.
|
||||
|
||||
### New features
|
||||
|
||||
#### Mac
|
||||
|
||||
- **Drag-and-drop Kanban board** ([scarf/Features/Kanban/Views/KanbanBoardView.swift](scarf/scarf/Features/Kanban/Views/KanbanBoardView.swift)). Five visible columns — Triage / Up Next (`todo` + `ready`) / Running / Blocked / Done — collapsing Hermes's seven status values into a layout that doesn't waste space on `ready`, which the dispatcher only ever holds for a few seconds. Triage hides itself when empty; archived hides behind a header toggle. Drop a card onto a column and Scarf maps the gesture to the right Hermes verbs through a pure transition planner: drop-on-Running fires `kanban dispatch` (the dispatcher then spawns a worker), drop-on-Blocked opens a sheet asking for a reason and calls `kanban block`, drop-on-Done opens a result sheet and calls `kanban complete`, blocked → running chains `unblock` + `dispatch`. Forbidden transitions (anything dropped on Done; anything dragged out of Triage) reject with a red drop-target stroke and a tooltip explaining why — Done is terminal, Triage is promoted by a specifier worker, neither has a CLI verb that maps cleanly. Optimistic local updates apply on drop and revert on CLI failure with a toast, so the UI feels instant.
|
||||
|
||||
- **Side-pane inspector** ([KanbanInspectorPane.swift](scarf/scarf/Features/Kanban/Views/KanbanInspectorPane.swift)). Click a card and a 420 px pane slides in from the trailing edge. Not a modal sheet — modal would block triaging the next card after closing. Header carries the status, an inline assignee menu (more on that below), workspace kind, and tenant; below that, four tabs render `hermes kanban show <id>` data: **Comments** (with an inline composer that calls `kanban comment`), **Events** (the `task_events` log with per-kind glyphs), **Runs** (one row per attempt with outcome badge + summary + error), and **Log** — the worker's captured stdout/stderr from `hermes kanban log <id>`, polled every 2 s while the task is running with a "● streaming" indicator and auto-scroll to the latest line, snapshot-only with a refresh button when the task is in a terminal state. The action bar at the bottom has all the per-status verbs — Start (which is `claim` rebranded as a user-visible action), Complete, Block, Unblock, Archive — every one with a help tooltip explaining what it does and what Hermes verb it invokes. The "Archive" tooltip explicitly notes Hermes has no hard-delete: archived tasks remain in `~/.hermes/kanban.db` and are recoverable via the "Show archived" toggle until `hermes kanban gc` runs.
|
||||
|
||||
- **Inspector auto-refresh.** While the inspector is open, the detail (header, action buttons, comments, events, runs) re-fetches every 5 s on the same cadence as the board itself, so a worker transition (e.g. running → done elsewhere) is reflected without the user having to close + reopen. The Log tab's 2 s poll runs separately and self-cancels the moment the task transitions out of `running`.
|
||||
|
||||
- **Inline assignee picker on the inspector header.** The assignee badge is a clickable menu — set means a `.brand` (rust) chip, unassigned means a `.warning` (yellow) chip so the eye catches it instantly. Tapping opens a menu of every known profile (union of `~/.hermes/profiles/`, current task assignees, and the active local profile from `HermesProfileResolver`) plus an "Unassigned" option. Selection routes through `kanban assign` and immediately follows with `kanban dispatch` so the task gets picked up promptly. Solves the "I assigned a profile but nothing happened" gap end-to-end without the user touching a terminal.
|
||||
|
||||
- **Health banner in the inspector.** Surfaces two conditions that previously left users staring at a stuck task with no explanation. **Yellow** when the task is unassigned in `ready` / `todo`: *"Won't run automatically — Hermes's dispatcher silently skips tasks with no assignee."* The dispatcher's own `--json` output literally lists these under `skipped_unassigned`; we now surface that to the human. **Red** when the most-recently-completed run ended in a non-success outcome (`stale_lock` / `crashed` / `gave_up` / `timed_out` / `spawn_failed` / `reclaimed` / `failed`): banner displays the outcome label + the raw `error` field from the run record, so you don't have to dig into the Runs tab to discover it. The red banner is suppressed while a fresh attempt is running — once status flips back to `running`, the previous outcome is stale signal and the Log tab's live stream is the right thing to look at.
|
||||
|
||||
- **Card-level signals.** Cards in `running` get a 2 px `ScarfColor.info` left edge + a subtle title shimmer so live work is obvious at a glance. Blocked cards get a 2 px `ScarfColor.warning` left edge + a ⚠ glyph next to the title. Done cards dim to 0.7 opacity in light mode, 0.55 in dark, with a green ✓ in the title row. Cards in `ready` / `todo` with no assignee get a yellow ⚠ glyph in the title row with a tooltip explaining the dispatcher won't pick them up — same signal as the inspector banner, just at the board level so triage is one keypress away.
|
||||
|
||||
- **`Board | List` toggle at the top of the route.** The v2.6 read-only list view is preserved in `KanbanListView.swift` and surfaced via a segmented picker, so users on narrow windows or anyone who prefers a flat sortable list can opt in. Choice persists across launches via `@AppStorage`.
|
||||
|
||||
- **New Task sheet** ([KanbanCreateSheet.swift](scarf/scarf/Features/Kanban/Views/KanbanCreateSheet.swift)). Title, body (markdown supported), assignee (defaults to `HermesProfileResolver.activeProfileName()` so newly-created tasks actually run), workspace kind (segmented `Scratch / Worktree / Project Dir`; locked to Project Dir on per-project boards), priority slider, comma-separated skills with autocomplete from `~/.hermes/skills/`, optional tenant (hidden on per-project boards — the slug is implicit), and a "Send to triage" toggle. Submit fires `kanban create --json` and immediately follows with `kanban dispatch` so an assigned task transitions `ready` → `running` within seconds rather than waiting for the gateway dispatcher's internal cycle.
|
||||
|
||||
- **Kanban moved from Manage → Monitor in the sidebar.** It's runtime work-in-progress, not configuration. Sits between Activity and the rest of Manage so users see "what's happening right now" at a glance.
|
||||
|
||||
#### Per-project Kanban
|
||||
|
||||
- **`DashboardTab.kanban` on every project**, capability-gated on `HermesCapabilities.hasKanban`. Renders a project-scoped `KanbanBoardView` filtered to the project's tenant slug. Workspace defaults in the New Task sheet are pre-pinned to `dir:<project.path>`. Empty state explains the project doesn't have any tasks yet and offers a "New Task" CTA — the empty board IS the discovery surface.
|
||||
|
||||
- **Tenant minting via [KanbanTenantResolver](scarf/scarf/Core/Services/KanbanTenantResolver.swift).** Each Scarf project gets a stable `scarf:<slug>` tenant minted on first kanban interaction and persisted to `<project>/.scarf/manifest.json` (new optional `kanbanTenant` field on `ProjectTemplateManifest`). Slug rules: lowercased, hyphenated, ≤ 48 chars, `scarf:` prefix to avoid collision with hand-typed tenants. Once minted, the tenant is **immutable across rename** — tasks already on the board carry the original slug, so renaming the project doesn't orphan them. Bare projects (no manifest) get a sentinel manifest written with `id: scarf/<project-id>` + `version: 0.0.0` + just the `kanbanTenant` set; the `ProjectAgentContextService` reader recognizes the sentinel and refuses to surface it as a "Template" line in the AGENTS.md block, so the project doesn't suddenly start advertising a fake template to the agent.
|
||||
|
||||
- **Agent-side tenant injection.** [ProjectAgentContextService.renderBlock](scarf/scarf/Core/Services/ProjectAgentContextService.swift) emits a "Kanban tenant" line inside the `<!-- scarf-project -->` markers in `<project>/AGENTS.md` whenever a tenant exists, instructing the agent to pass `--tenant scarf:<slug>` on `hermes kanban create`. `ChatViewModel.startACPSession` already calls `refresh(for:)` before opening every project chat, so the agent reads a fresh tenant on every session start with no extra wiring. Agents are imperfect at flag discipline; a forgotten `--tenant` lands the task in the global "Untagged" group rather than failing — acceptable v2.7.5 behavior.
|
||||
|
||||
- **`kanban_summary` dashboard widget** ([KanbanSummaryWidgetView.swift](scarf/scarf/Features/Projects/Views/Widgets/KanbanSummaryWidgetView.swift)). New widget kind for project dashboards: shows the top three `running` / `blocked` / `todo` tasks for the project's tenant by priority, plus a glance footer (`"12 todo · 3 running · 5 blocked"`) sourced from `kanban stats`. Polls every 10 s while the dashboard is foregrounded. Widget vocabulary registered in [tools/widget-schema.json](tools/widget-schema.json) and rendered on the catalog site via [site/widgets.js](site/widgets.js); template authors can drop a `{ kind: kanban_summary, max_rows: 3 }` block into `dashboard.json`.
|
||||
|
||||
#### iOS / iPadOS
|
||||
|
||||
- **Read-only Kanban tab on `ProjectDetailView`** ([Scarf iOS/Kanban/ScarfGoKanbanView.swift](scarf/Scarf%20iOS/Kanban/ScarfGoKanbanView.swift)). Same five-column collapse rendered as a horizontally-paged segmented `Picker` of single-column lists — HIG-friendly on iPhone where a 5-column grid forces unreadable card widths. Pulls live status, assignee, workspace, skills, priority chips. Tap a card → modal `NavigationStack` detail sheet ([ScarfGoKanbanDetailSheet.swift](scarf/Scarf%20iOS/Kanban/ScarfGoKanbanDetailSheet.swift)) with the same Comments / Events / Runs tabs the Mac inspector has. Read-only in v2.7.5 — mutations + drag-drop on iPad land in v2.8 once the Mac flow is fully shaken out. Card titles use semantic `.headline` (not `ScarfFont`) so Dynamic Type works; chrome (badges) stays on `ScarfBadge` for fixed visual weight per the project's iOS conventions.
|
||||
|
||||
#### ScarfCore
|
||||
|
||||
- **`KanbanService` actor** ([Packages/ScarfCore/Sources/ScarfCore/Services/KanbanService.swift](scarf/Packages/ScarfCore/Sources/ScarfCore/Services/KanbanService.swift)) — pure-I/O Sendable actor wrapping every Hermes v0.12 verb (`list / show / runs / stats / assignees / create / assign / claim / comment / complete / block / unblock / archive / dispatch / link / unlink / log`). Dispatches each CLI invocation through `Task.detached(priority: .utility)` matching the existing concurrency conventions. Errors land in [KanbanError](scarf/Packages/ScarfCore/Sources/ScarfCore/Models/KanbanError.swift) and surface as inline banners (not modal alerts) since the board is high-frequency. The "no matching tasks" stdout sentinel is normalized to `[]` rather than thrown.
|
||||
|
||||
- **Pure transition planner.** `KanbanService.plan(for: KanbanTransition)` is a synchronous function that maps a `(from, to)` column pair to the right verb sequence — `(.upNext, .running) → [.dispatch]`, `(.blocked, .running) → [.unblock, .dispatch]`, etc. Disallowed transitions throw `KanbanError.forbiddenTransition` with a user-actionable reason. The planner is fully tested in `KanbanModelsTests.swift`. Critically: `dispatch` (not `claim`) is the verb used for Up-Next → Running. Hermes's `claim` is documented as "manual alternative to the dispatcher" and assumes the caller spawns the worker themselves — Scarf doesn't, so calling `claim` from drag-drop reserved tasks but never spawned work, and the dispatcher reclaimed them ~15 minutes later (`stale_lock`). `dispatch` is the right primitive for a GUI client.
|
||||
|
||||
- **Cross-platform [KanbanTenantReader](scarf/Packages/ScarfCore/Sources/ScarfCore/Services/KanbanTenantReader.swift).** Read-only projection over `<project>/.scarf/manifest.json`'s `kanbanTenant` field. The full `ProjectTemplateManifest` type lives in the Mac target; this lightweight reader gives iOS a way to filter the per-project board by tenant without linking the full manifest model.
|
||||
|
||||
- **Timestamp decoding tolerates both shapes.** Hermes emits `created_at` / `started_at` / `completed_at` / `last_heartbeat_at` etc. as Unix integer seconds (its SQLite columns are INTEGER), but earlier wire docs implied ISO-8601 strings. The decoder now accepts either an integer or a string and normalizes to ISO-8601 so downstream code only handles one type. Locked in by `decodeUnixIntegerTimestamps` in `KanbanModelsTests`.
|
||||
|
||||
- **`KanbanBoardViewModel` optimistic merge.** Holds `optimisticOverrides: [taskId: status]` for in-flight drags; the polled response merges with optimistic state until the server confirms the new status, so a stale poll arriving milliseconds after a drop can't snap the card back to its old column. On CLI failure the override is removed and the message lands in the inline banner.
|
||||
|
||||
### Dispatch + assignee fixes
|
||||
|
||||
A diagnostic round driving real tasks end-to-end exposed a connected bug pattern that the polish pass closed:
|
||||
|
||||
- **Hermes's dispatcher silently skips unassigned tasks** — its `kanban dispatch --json` output literally lists them under a `skipped_unassigned` key and moves on. Tasks created without an assignee sat in `ready` indefinitely and the user had no signal anything was wrong. The New Task sheet now defaults to the active Hermes profile, the inspector header shows a yellow "Unassigned" chip + warning banner, every `ready` / `todo` card without an assignee gets a ⚠ glyph + tooltip, and the inspector's inline assignee picker fixes it in one click.
|
||||
|
||||
- **Drag-to-Running used to call `claim`**, which is a manual alternative to the dispatcher. Status flipped to `running`, but no worker spawned (Scarf doesn't host workers), and 15 minutes later the dispatcher reclaimed the task with a `stale_lock` outcome. Replaced with `dispatch` end-to-end so the gateway-running dispatcher actually does the spawning.
|
||||
|
||||
- **`hermes kanban assignees` empty-state was leaking into the picker.** The CLI prints a literal sentinel `(no assignees — create a profile with hermes -p <name> setup)` when the table is empty; the parser was tokenizing it on whitespace and offering `(no` as a profile in the menu. Parser now skips the sentinel, validates each candidate against `^[a-zA-Z0-9_-]+$`, and falls back cleanly to the active local profile when the table is empty.
|
||||
|
||||
- **`spawn_failed` from "executable not found on PATH"** — most subtle of the lot. macOS GUI apps inherit a launch-services PATH (`/usr/bin:/bin:/usr/sbin:/sbin`) that doesn't include `~/.local/bin` (where pipx installs `hermes`) or `/opt/homebrew/bin`. Scarf was finding `hermes` for its own invocation via the absolute-path resolver in `HermesPathSet.hermesBinaryCandidates`, but when the dispatcher then spawned a worker process, that worker inherited Scarf's GUI PATH and couldn't find `hermes` by name — recording an `outcome=spawn_failed` run with the exact "executable not found on PATH" message. `LocalTransport` now grows an `environmentEnricher` static (mirroring `SSHTransport.environmentEnricher`) wired by `scarfApp.swift` to the same `HermesFileService.enrichedEnvironment()` login-shell probe the SSH transport uses. Every local subprocess Scarf spawns now sees the user's full PATH and credential env, so a spawned-from-Scarf hermes can spawn its children by name without reaching for absolute paths. Defense-in-depth: `subprocessEnvironment(forExecutable:)` also unconditionally prepends the executable's parent directory to PATH, so the fix works even if the enricher hasn't been wired (early startup, tests).
|
||||
|
||||
### Migrating from 2.7.1
|
||||
|
||||
Sparkle will offer the update automatically. No config migration, no schema changes — `~/.hermes/kanban.db` is shared across all Hermes clients and Scarf only reads/writes through the documented CLI surface. Existing Scarf projects pick up the new project Kanban tab on first open; the tenant slug is minted lazily on first kanban interaction inside the project, so projects with no kanban activity stay byte-identical until the user opens the tab.
|
||||
|
||||
If you have an existing project with a Scarf-managed `manifest.json`, the new optional `kanbanTenant` field is added on next mint and lives alongside any template-author config schema without touching it. Templates do not ship `kanbanTenant` (it's user-machine-scoped state); the export pipeline strips it.
|
||||
|
||||
If you've been running tasks via the v2.6 read-only list and your Hermes host already runs the gateway dispatcher, your existing kanban tasks should appear on the board automatically — there's no migration step. Tasks created without an assignee in v2.6 will now show the yellow "Unassigned" warning until you fix them through the inline picker.
|
||||
|
||||
### Known limitations
|
||||
|
||||
- **Within-column reorder is not supported.** Hermes has no `update` verb and no `position` column on the tasks table — `priority` is write-once at create time. Sort order inside each column is `priority DESC, created_at DESC`, matching the dispatcher's actual run order. We considered a client-side ordering sidecar; rejected because the on-screen order would diverge from what runs next, which is worse than no manual order. Will revisit if Hermes ships an `update --priority` verb.
|
||||
|
||||
- **No live `watch` streaming yet.** The board polls every 5 s; the inspector polls detail on the same cadence and the Log tab on a 2 s cadence while running. `hermes kanban watch --json` event streaming + reconnect-with-backoff lands in v2.8 along with iOS write surfaces.
|
||||
|
||||
- **No bulk re-tag for legacy NULL-tenant tasks.** Tasks created before this release (assignee or no assignee) appear in the global "Untagged" group on the global board. Hermes has no `tenant` mutation verb post-create, so retagging would be archive + recreate — too destructive to ship in this release.
|
||||
|
||||
### Acknowledgements
|
||||
|
||||
- Driven end-to-end against a fresh local Hermes v0.12.0 install with the gateway dispatcher running. Real bug surface mostly came from doing instead of speculating: the `claim` vs `dispatch` distinction, the silent `skipped_unassigned` behavior, the `(no` parse leak, the integer-vs-ISO timestamp shape, and the stale "Last run" banner during a fresh attempt all surfaced from driving real tasks and watching what actually happened.
|
||||
@@ -362,10 +362,17 @@ public actor ACPClient {
|
||||
#endif
|
||||
|
||||
// session/prompt streams events and can run for minutes — no hard
|
||||
// timeout. Control messages get a 30s watchdog.
|
||||
// timeout. Control messages get a 60s watchdog. Older versions
|
||||
// capped at 30s, which the field reported (#61) was tripping
|
||||
// under realistic gateway+ACP concurrency: the gateway holds
|
||||
// state.db locks for Discord sync / skill registration / cron
|
||||
// scheduling, and ACP's `initialize` / `session/new` /
|
||||
// `session/load` stall waiting for the lock. SQLite contention
|
||||
// on a healthy host clears in seconds; 60s gives that headroom
|
||||
// while still surfacing genuinely broken transports promptly.
|
||||
let timeoutTask: Task<Void, Error>? = if method != "session/prompt" {
|
||||
Task { [weak self] in
|
||||
try await Task.sleep(nanoseconds: 30 * 1_000_000_000)
|
||||
try await Task.sleep(nanoseconds: 60 * 1_000_000_000)
|
||||
await self?.timeoutRequest(id: requestId, method: method)
|
||||
}
|
||||
} else {
|
||||
@@ -586,7 +593,30 @@ public enum ACPClientError: Error, LocalizedError {
|
||||
/// human-readable hint for the chat UI. Pattern-matches the most common
|
||||
/// fresh-install failure modes. Returns nil when no known pattern matches.
|
||||
public enum ACPErrorHint {
|
||||
public static func classify(errorMessage: String, stderrTail: String) -> String? {
|
||||
/// Result of a classifier hit. `hint` is the user-facing copy; when
|
||||
/// the failure is an OAuth refresh-revocation, `oauthProvider` names
|
||||
/// the affected provider (lowercase, matching `auth.json` keys) so
|
||||
/// the UI can offer a one-click re-authenticate affordance. `nil`
|
||||
/// `oauthProvider` means "we matched a non-OAuth failure mode, or
|
||||
/// we matched OAuth but couldn't identify which provider."
|
||||
public struct Classification: Sendable, Equatable {
    /// User-facing hint copy shown in the chat UI.
    public let hint: String
    /// Lowercase provider key (matching `auth.json` keys) when the failure
    /// is an OAuth refresh-revocation; `nil` for non-OAuth failure modes or
    /// when the affected provider couldn't be identified from the error text.
    public let oauthProvider: String?

    /// - Parameters:
    ///   - hint: User-facing recovery copy.
    ///   - oauthProvider: Affected OAuth provider key, if known. Defaults
    ///     to `nil` so non-OAuth call sites stay one-argument.
    public init(hint: String, oauthProvider: String? = nil) {
        self.hint = hint
        self.oauthProvider = oauthProvider
    }
}
|
||||
|
||||
/// Known OAuth-authed providers Hermes ships. Listed lowercase to
|
||||
/// match `auth.json.providers.<key>` and the values
|
||||
/// `OAuthFlowController.start(provider:)` accepts.
|
||||
private static let oauthProviders = [
|
||||
"nous", "claude", "anthropic", "qwen", "gemini", "google", "copilot", "github",
|
||||
]
|
||||
|
||||
public static func classify(errorMessage: String, stderrTail: String) -> Classification? {
|
||||
let haystack = errorMessage + "\n" + stderrTail
|
||||
|
||||
// SSH-level failures come first — they apply only to remote
|
||||
@@ -596,30 +626,86 @@ public enum ACPErrorHint {
|
||||
// all surface as opaque "ACP process terminated" / "request
|
||||
// timed out", and the user has no idea where to look.
|
||||
if haystack.contains("Connection refused") {
|
||||
return "Couldn't reach the remote host — the SSH port is closed or the droplet is down. Check the host is running and reachable."
|
||||
return Classification(hint: "Couldn't reach the remote host — the SSH port is closed or the droplet is down. Check the host is running and reachable.")
|
||||
}
|
||||
if haystack.localizedCaseInsensitiveContains("Operation timed out")
|
||||
|| haystack.localizedCaseInsensitiveContains("Connection timed out")
|
||||
|| haystack.contains("Network is unreachable")
|
||||
|| haystack.contains("No route to host") {
|
||||
return "Couldn't reach the remote host — the network connection timed out. Check the host is running and your network is up."
|
||||
return Classification(hint: "Couldn't reach the remote host — the network connection timed out. Check the host is running and your network is up.")
|
||||
}
|
||||
if haystack.contains("Permission denied (publickey")
|
||||
|| haystack.contains("Permission denied, please try again") {
|
||||
return "SSH rejected the key. Make sure the right identity file is selected and that ssh-agent has the key loaded — open Terminal and run `ssh-add -l`."
|
||||
return Classification(hint: "SSH rejected the key. Make sure the right identity file is selected and that ssh-agent has the key loaded — open Terminal and run `ssh-add -l`.")
|
||||
}
|
||||
if haystack.contains("Host key verification failed")
|
||||
|| haystack.contains("REMOTE HOST IDENTIFICATION HAS CHANGED") {
|
||||
return "The remote host's SSH key changed. If you just rebuilt the droplet, remove the old entry with `ssh-keygen -R <host>`, then try again."
|
||||
return Classification(hint: "The remote host's SSH key changed. If you just rebuilt the droplet, remove the old entry with `ssh-keygen -R <host>`, then try again.")
|
||||
}
|
||||
if haystack.contains("Could not resolve hostname")
|
||||
|| haystack.contains("Name or service not known") {
|
||||
return "Couldn't resolve the host name. Check the host in this server's settings."
|
||||
return Classification(hint: "Couldn't resolve the host name. Check the host in this server's settings.")
|
||||
}
|
||||
if haystack.localizedCaseInsensitiveContains("command not found")
|
||||
|| haystack.contains("hermes: not found")
|
||||
|| haystack.contains("exit 127") {
|
||||
return "The remote shell couldn't find `hermes`. Either install Hermes on the remote (`pipx install hermes-agent`) or set an absolute binary path in this server's settings."
|
||||
return Classification(hint: "The remote shell couldn't find `hermes`. Either install Hermes on the remote (`pipx install hermes-agent`) or set an absolute binary path in this server's settings.")
|
||||
}
|
||||
|
||||
// OAuth refresh-token revocation. Hermes prints
|
||||
// "Refresh session has been revoked. Run `hermes model` to
|
||||
// re-authenticate." to stderr/stdout when an OAuth-authed
|
||||
// provider's refresh token can no longer mint access tokens
|
||||
// (user revoked, server rotated keys, etc.). We can't drive
|
||||
// `hermes model` interactively, but `hermes auth add <provider>
|
||||
// --type oauth` is the same code path Scarf already drives via
|
||||
// `OAuthFlowController` for first-time setup, so we surface a
|
||||
// re-authenticate affordance instead. Checked BEFORE the
|
||||
// generic "no credentials found" path because the message
|
||||
// contains the word "credentials" via the surrounding context.
|
||||
if haystack.localizedCaseInsensitiveContains("refresh session has been revoked")
|
||||
|| haystack.range(of: #"refresh.*revoked"#, options: [.regularExpression, .caseInsensitive]) != nil
|
||||
|| haystack.localizedCaseInsensitiveContains("re-authenticate")
|
||||
|| haystack.localizedCaseInsensitiveContains("reauthenticate")
|
||||
|| (haystack.contains("401") && oauthProvider(in: haystack) != nil)
|
||||
|| (haystack.localizedCaseInsensitiveContains("unauthorized") && oauthProvider(in: haystack) != nil) {
|
||||
let provider = oauthProvider(in: haystack)
|
||||
let suffix = provider.map { " (affected provider: \($0))." } ?? "."
|
||||
return Classification(
|
||||
hint: "Your OAuth session has expired or been revoked\(suffix) Click Re-authenticate below to sign in again.",
|
||||
oauthProvider: provider
|
||||
)
|
||||
}
|
||||
|
||||
// Auxiliary task references a provider that isn't authenticated.
|
||||
// Hermes prints `resolve_provider_client: <name> requested but
|
||||
// <Display Name> not configured` when an aux task (compression,
|
||||
// summarization, memory_flush, curator, vision, web_extract,
|
||||
// session_search, skills_hub) has `provider: <name>` set in
|
||||
// config.yaml but that provider's credentials aren't loaded.
|
||||
// Common after a user removes one OAuth provider while their
|
||||
// existing config.yaml still names it for an aux task. The
|
||||
// chat banner used to surface this as `-32603 Internal error`
|
||||
// with no actionable detail; surface a clear path now.
|
||||
if let match = haystack.range(
|
||||
of: #"resolve_provider_client:\s*([a-zA-Z0-9_-]+)\s+requested\s+but"#,
|
||||
options: .regularExpression
|
||||
) {
|
||||
let line = String(haystack[match])
|
||||
// Pull the captured provider name out of the matched line.
|
||||
// First word after "resolve_provider_client:" is the value.
|
||||
let provider: String = {
|
||||
let parts = line.split(whereSeparator: { $0.isWhitespace })
|
||||
if let idx = parts.firstIndex(where: { $0.contains("resolve_provider_client") }),
|
||||
parts.index(after: idx) < parts.endIndex {
|
||||
let candidate = parts[parts.index(after: idx)]
|
||||
return String(candidate)
|
||||
}
|
||||
return "an unauthenticated provider"
|
||||
}()
|
||||
return Classification(
|
||||
hint: "An auxiliary task is configured to use `\(provider)` but that provider isn't authenticated. Open Settings → Aux Models, or check `~/.hermes/config.yaml` for `auxiliary.<task>.provider: \(provider)` and switch it to your active provider (or set it to `auto`)."
|
||||
)
|
||||
}
|
||||
|
||||
if haystack.range(of: #"No\s+(Anthropic|OpenAI|OpenRouter|Gemini|Google|Groq|Mistral|XAI)?\s*credentials\s+found"#,
|
||||
@@ -628,7 +714,7 @@ public enum ACPErrorHint {
|
||||
|| haystack.contains("ANTHROPIC_TOKEN")
|
||||
|| haystack.contains("claude setup-token")
|
||||
|| haystack.contains("claude /login") {
|
||||
return "Hermes can't find your AI provider credentials. Set `ANTHROPIC_API_KEY` (or similar) in `~/.hermes/.env` or your shell profile, then restart Scarf."
|
||||
return Classification(hint: "Hermes can't find your AI provider credentials. Set `ANTHROPIC_API_KEY` (or similar) in `~/.hermes/.env` or your shell profile, then restart Scarf.")
|
||||
}
|
||||
if let match = haystack.range(of: #"No such file or directory:\s*'([^']+)'"#,
|
||||
options: .regularExpression) {
|
||||
@@ -636,13 +722,47 @@ public enum ACPErrorHint {
|
||||
if let nameStart = matched.range(of: "'"),
|
||||
let nameEnd = matched.range(of: "'", range: nameStart.upperBound..<matched.endIndex) {
|
||||
let name = String(matched[nameStart.upperBound..<nameEnd.lowerBound])
|
||||
return "Hermes couldn't find `\(name)` on PATH. If you use nvm/asdf/mise, make sure it's exported in `~/.zprofile` (not only `~/.zshrc`), then restart Scarf."
|
||||
return Classification(hint: "Hermes couldn't find `\(name)` on PATH. If you use nvm/asdf/mise, make sure it's exported in `~/.zprofile` (not only `~/.zshrc`), then restart Scarf.")
|
||||
}
|
||||
return "Hermes couldn't find a required binary on PATH. Check that your shell's PATH is exported in `~/.zprofile`, then restart Scarf."
|
||||
return Classification(hint: "Hermes couldn't find a required binary on PATH. Check that your shell's PATH is exported in `~/.zprofile`, then restart Scarf.")
|
||||
}
|
||||
if haystack.localizedCaseInsensitiveContains("rate limit")
|
||||
|| haystack.localizedCaseInsensitiveContains("429") {
|
||||
return "Your AI provider returned a rate-limit error. Try again in a moment."
|
||||
return Classification(hint: "Your AI provider returned a rate-limit error. Try again in a moment.")
|
||||
}
|
||||
// Model-availability failure. Hermes pins each session to the
|
||||
// model that opened it, so resuming an old session whose model
|
||||
// is no longer available (provider deprecation, OAuth swapped
|
||||
// to a different provider, model name changed) returns a 404
|
||||
// / model_not_found from the upstream provider — surfaced as
|
||||
// an opaque "-32603 Internal error" in chat. v2.8 surfaces a
|
||||
// clear "session is pinned" hint with the recovery path.
|
||||
if haystack.localizedCaseInsensitiveContains("model_not_found")
|
||||
|| haystack.localizedCaseInsensitiveContains("model not found")
|
||||
|| haystack.localizedCaseInsensitiveContains("invalid_model")
|
||||
|| haystack.localizedCaseInsensitiveContains("model is not available")
|
||||
|| haystack.localizedCaseInsensitiveContains("unknown model")
|
||||
|| (haystack.contains("404") && (haystack.localizedCaseInsensitiveContains("model")
|
||||
|| haystack.localizedCaseInsensitiveContains("messages"))) {
|
||||
return Classification(hint: "This session was created with a model the provider no longer offers. Hermes pins each session to its original model — start a new chat to use your current model, or run `hermes sessions clone` in Terminal to copy this conversation onto the new model.")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/// Best-effort extraction of an OAuth provider name from raw error text.
/// Returns the lowercase provider key (`"nous"`, `"claude"`, etc.) when a
/// known OAuth provider appears in `haystack` as a whole word. The first
/// provider in `oauthProviders` that matches wins — Hermes typically logs
/// the active provider name once, near the failure.
private static func oauthProvider(in haystack: String) -> String? {
    let loweredText = haystack.lowercased()
    return oauthProviders.first { key in
        // \b anchors keep substrings like "anthropicapi" from
        // false-triggering on "anthropic".
        let wordPattern = "\\b" + NSRegularExpression.escapedPattern(for: key) + "\\b"
        return loweredText.range(of: wordPattern, options: .regularExpression) != nil
    }
}
|
||||
|
||||
@@ -0,0 +1,277 @@
|
||||
import Foundation
|
||||
#if canImport(os)
|
||||
import os
|
||||
import os.signpost
|
||||
#endif
|
||||
|
||||
/// Lightweight performance instrumentation for the Scarf app family.
|
||||
///
|
||||
/// Three primitives — `measure(...)`, `measureAsync(...)`, `event(...)` — drop
|
||||
/// timing samples through whatever set of backends is currently active.
|
||||
/// Backends are pluggable: an always-on `os_signpost` backend (free outside
|
||||
/// Instruments), an in-memory ring buffer (drives the in-app panel), and an
|
||||
/// `os.Logger` debug backend (off by default).
|
||||
///
|
||||
/// **Cost when off.** When no backends are registered, every entry point is
|
||||
/// `@inline(__always)` and short-circuits to the body call without taking the
|
||||
/// `ContinuousClock.now` reading. Open source build defaults to "signpost
|
||||
/// only" — that backend pays one signpost emit per call, which Apple's runtime
|
||||
/// elides when no Instruments session is recording.
|
||||
///
|
||||
/// **Privacy.** Names are `StaticString` so we cannot accidentally pass user
|
||||
/// content through a metric tag. Optional `bytes:` field on `event` tracks
|
||||
/// payload size, never payload contents. The ring buffer never leaves the
|
||||
/// device unless the user explicitly hits "Copy as JSON" in the Diagnostics
|
||||
/// panel.
|
||||
public enum ScarfMon {

    // MARK: - Public API

    /// Times a synchronous body. The body's return value (and any thrown
    /// error) passes through untouched; the elapsed time plus
    /// `(category, name)` is delivered to every installed backend.
    @inline(__always)
    public static func measure<T>(
        _ category: Category,
        _ name: StaticString,
        _ body: () throws -> T
    ) rethrows -> T {
        guard isActive else { return try body() }
        let began = ContinuousClock.now
        defer { deliverInterval(category, name, began: began, ended: ContinuousClock.now) }
        return try body()
    }

    /// Async counterpart of `measure`. The `defer` fires whether the body
    /// returns or throws, so cancelled / failed work still records its
    /// duration.
    @inline(__always)
    public static func measureAsync<T>(
        _ category: Category,
        _ name: StaticString,
        _ body: () async throws -> T
    ) async rethrows -> T {
        guard isActive else { return try await body() }
        let began = ContinuousClock.now
        defer { deliverInterval(category, name, began: began, ended: ContinuousClock.now) }
        return try await body()
    }

    /// Records a single timestamped event (token arrival, buffer flush, …)
    /// where a count plus an optional payload size is the useful signal
    /// rather than a duration.
    @inline(__always)
    public static func event(
        _ category: Category,
        _ name: StaticString,
        count: Int = 1,
        bytes: Int? = nil
    ) {
        if isActive {
            deliverEvent(category, name, count: count, bytes: bytes)
        }
    }

    // MARK: - Backend management

    /// Atomically replaces the installed backend set. Call once at app
    /// boot; safe to call again when the user toggles a setting.
    public static func install(_ backends: [ScarfMonBackend]) {
        lock.lock()
        installed = backends
        cachedActive = !backends.isEmpty
        lock.unlock()
    }

    /// Currently-installed backends. Test-only — production code should
    /// not iterate this.
    public static var currentBackends: [ScarfMonBackend] {
        lock.lock()
        defer { lock.unlock() }
        return installed
    }

    /// Cheap "is anything recording?" check. The flag is refreshed only
    /// inside `install(_:)`, so the hot path never touches the lock.
    @inline(__always)
    public static var isActive: Bool { cachedActive }

    // MARK: - Internals

    private static let lock = ScarfMonLock()
    nonisolated(unsafe) private static var installed: [ScarfMonBackend] = []
    nonisolated(unsafe) private static var cachedActive = false

    /// Builds an interval sample and fans it out to every backend.
    @inline(__always)
    private static func deliverInterval(
        _ category: Category,
        _ name: StaticString,
        began: ContinuousClock.Instant,
        ended: ContinuousClock.Instant
    ) {
        let sample = Sample(
            category: category,
            name: name,
            kind: .interval,
            timestamp: Date(),
            durationNanos: wholeNanoseconds(ended - began),
            count: 1,
            bytes: nil
        )
        for sink in snapshotBackends() {
            sink.record(sample)
        }
    }

    /// Builds an event sample and fans it out to every backend.
    @inline(__always)
    private static func deliverEvent(
        _ category: Category,
        _ name: StaticString,
        count: Int,
        bytes: Int?
    ) {
        let sample = Sample(
            category: category,
            name: name,
            kind: .event,
            timestamp: Date(),
            durationNanos: 0,
            count: count,
            bytes: bytes
        )
        for sink in snapshotBackends() {
            sink.record(sample)
        }
    }

    /// Copies the backend array under the lock so `record` calls run
    /// without holding it.
    private static func snapshotBackends() -> [ScarfMonBackend] {
        lock.lock()
        defer { lock.unlock() }
        return installed
    }

    /// Converts a `Duration` to whole nanoseconds using integer math —
    /// routing the seconds term through `Double` would lose precision on
    /// long intervals. Negative components clamp to zero.
    private static func wholeNanoseconds(_ duration: Duration) -> UInt64 {
        let parts = duration.components
        let fromSeconds = UInt64(max(0, parts.seconds)) &* 1_000_000_000
        let fromAttoseconds = UInt64(max(0, parts.attoseconds) / 1_000_000_000)
        return fromSeconds &+ fromAttoseconds
    }
}
|
||||
|
||||
// MARK: - Categories
|
||||
|
||||
extension ScarfMon {
    /// Stable category vocabulary. Add cases here when new subsystems get
    /// instrumented; renames are breaking changes for any saved JSON dumps
    /// users have shared, so prefer adding over renaming.
    ///
    /// The `String` raw value is the wire format — it appears verbatim in
    /// `os.Logger` debug lines and in the "Copy as JSON" export.
    public enum Category: String, CaseIterable, Sendable, Codable {
        case chatRender
        case chatStream
        case sessionLoad
        case transport
        case sqlite
        case diskIO
        case render
        case other  // catch-all for sites that don't fit a named subsystem — TODO confirm intended use
    }
}
|
||||
|
||||
// MARK: - Sample
|
||||
|
||||
/// One recorded sample, shuttled from the instrumented call site to each
/// backend. Every field is a value type, so the struct crosses backend
/// queues `Sendable`-cleanly without locks.
public struct ScarfMonSample: Sendable, Hashable {
    /// Whether the sample carries a duration (`interval`) or just a
    /// count / optional size (`event`).
    public enum Kind: String, Sendable, Codable {
        case interval
        case event
    }

    public let category: ScarfMon.Category
    /// Call-site name. Kept as `StaticString` (not `String`) so the
    /// compiler proves no runtime user data can flow through this field.
    public let name: StaticString
    public let kind: Kind
    /// Wall-clock time the sample was recorded.
    public let timestamp: Date
    /// Elapsed nanoseconds for `.interval` samples; `ScarfMon.event`
    /// records events with 0 here.
    public let durationNanos: UInt64
    public let count: Int
    /// Optional payload size in bytes — size only, never contents.
    public let bytes: Int?

    public init(
        category: ScarfMon.Category,
        name: StaticString,
        kind: Kind,
        timestamp: Date,
        durationNanos: UInt64,
        count: Int,
        bytes: Int?
    ) {
        self.category = category
        self.name = name
        self.kind = kind
        self.timestamp = timestamp
        self.durationNanos = durationNanos
        self.count = count
        self.bytes = bytes
    }

    // `StaticString` does not conform to `Hashable` (it promises no stable
    // hash), so Equatable/Hashable are hand-rolled with the name compared
    // through its UTF-8 text (`description`): two samples with the same
    // source-literal name compare equal.

    public static func == (lhs: ScarfMonSample, rhs: ScarfMonSample) -> Bool {
        guard lhs.name.description == rhs.name.description else { return false }
        return (lhs.category, lhs.kind, lhs.timestamp)
            == (rhs.category, rhs.kind, rhs.timestamp)
            && (lhs.durationNanos, lhs.count, lhs.bytes)
            == (rhs.durationNanos, rhs.count, rhs.bytes)
    }

    public func hash(into hasher: inout Hasher) {
        hasher.combine(category)
        hasher.combine(kind)
        hasher.combine(timestamp)
        hasher.combine(durationNanos)
        hasher.combine(count)
        hasher.combine(bytes)
        hasher.combine(name.description)
    }
}
|
||||
|
||||
extension ScarfMon {
    /// Shorthand so call sites and backends can spell the sample type as
    /// `ScarfMon.Sample` instead of the top-level `ScarfMonSample`.
    public typealias Sample = ScarfMonSample
}
|
||||
|
||||
// MARK: - Backend protocol
|
||||
|
||||
/// One sink for samples. Implementations must be cheap on the hot path —
|
||||
/// callers hold no lock while invoking `record`, but the hot path runs from
|
||||
/// every instrumented site, so allocations and disk I/O are off-limits here.
|
||||
public protocol ScarfMonBackend: Sendable {
    /// Deliver one sample to this sink. Invoked directly from the
    /// instrumented call site with no lock held; runs on every
    /// instrumented path, so implementations must stay cheap —
    /// allocations and disk I/O are off-limits here.
    func record(_ sample: ScarfMon.Sample)
}
|
||||
|
||||
// MARK: - Lock
|
||||
|
||||
/// Tiny `os_unfair_lock` wrapper. CLAUDE.md says "Use os_unfair_lock (not
/// NSLock) for simple boolean flags accessed from multiple threads."
///
/// On platforms without the Darwin `os` module (Linux CI, etc.) this falls
/// back to `NSLock`, matching the `#if canImport(os)` guards the sibling
/// backends already use — previously this type alone broke the non-Apple
/// build.
@usableFromInline
final class ScarfMonLock: @unchecked Sendable {
    #if canImport(os)
    // os_unfair_lock must stay at a stable address for its whole lifetime;
    // a heap allocation guarantees Swift's value semantics never move it.
    private let _lock: UnsafeMutablePointer<os_unfair_lock>

    init() {
        _lock = .allocate(capacity: 1)
        _lock.initialize(to: os_unfair_lock())
    }
    deinit {
        _lock.deinitialize(count: 1)
        _lock.deallocate()
    }
    @usableFromInline func lock() { os_unfair_lock_lock(_lock) }
    @usableFromInline func unlock() { os_unfair_lock_unlock(_lock) }
    #else
    // Portable fallback — slower than an unfair lock but behaviorally
    // identical for this mutual-exclusion use.
    private let _fallback = NSLock()

    @usableFromInline func lock() { _fallback.lock() }
    @usableFromInline func unlock() { _fallback.unlock() }
    #endif
}
|
||||
@@ -0,0 +1,76 @@
|
||||
import Foundation
|
||||
|
||||
/// Boot-time wiring for ScarfMon. Both app targets call
/// `ScarfMonBoot.configure(...)` at launch and again whenever the user
/// flips the Diagnostics → Performance toggle.
///
/// Modes:
/// - `.off` — nothing recorded; the hot path is one branch and a return.
/// - `.signpostOnly` — Instruments-only signposts. Default in the
///   open-source build; free outside an Instruments session.
/// - `.full` — signposts + in-memory ring buffer + `os.Logger` debug
///   stream. Drives the in-app panel and the "Copy as JSON" button.
///   Opt-in.
public enum ScarfMonBoot {
    public enum Mode: String, Sendable, CaseIterable {
        case off
        case signpostOnly
        case full
    }

    /// User-defaults key for the persisted toggle — the same key on iOS
    /// and Mac, so `defaults read com.scarf.app ScarfMonMode` works on
    /// either platform.
    public static let userDefaultsKey = "ScarfMonMode"

    /// The persisted mode, falling back to `.signpostOnly` so users keep
    /// Instruments-visible signposts unless they explicitly opt out.
    public static func currentMode(_ defaults: UserDefaults = .standard) -> Mode {
        defaults.string(forKey: userDefaultsKey)
            .flatMap(Mode.init(rawValue:)) ?? .signpostOnly
    }

    /// Persists `mode` and immediately reinstalls the matching backends.
    public static func setMode(_ mode: Mode, _ defaults: UserDefaults = .standard) {
        defaults.set(mode.rawValue, forKey: userDefaultsKey)
        configure(mode: mode)
    }

    /// Installs the backend set for `mode`. Returns the active ring
    /// buffer (if any) so the in-app Diagnostics panel can read from it.
    @discardableResult
    public static func configure(mode: Mode) -> ScarfMonRingBuffer? {
        switch mode {
        case .off:
            ScarfMon.install([])
            sharedRingBuffer = nil
            return nil

        case .signpostOnly:
            ScarfMon.install([ScarfMonSignpostBackend()])
            sharedRingBuffer = nil
            return nil

        case .full:
            let buffer = ScarfMonRingBuffer()
            // Publish the buffer before install so panel readers never see
            // a full-mode backend set with a nil shared buffer.
            sharedRingBuffer = buffer
            ScarfMon.install([
                ScarfMonSignpostBackend(),
                buffer,
                ScarfMonLoggerBackend()
            ])
            return buffer
        }
    }

    /// Process-wide ring buffer while in `.full` mode; nil otherwise.
    /// Read by the Diagnostics panel; writes flow through the backend
    /// dispatcher, so externally this is read-only.
    ///
    /// `nonisolated(unsafe)`: only `configure(...)` mutates it (single
    /// writer in practice — the boot helper / settings toggle), and the
    /// panel reads it on the main actor, so a lock here would add
    /// overhead without a real safety win.
    nonisolated(unsafe) public private(set) static var sharedRingBuffer: ScarfMonRingBuffer?
}
|
||||
@@ -0,0 +1,41 @@
|
||||
import Foundation
|
||||
#if canImport(os)
|
||||
import os
|
||||
#endif
|
||||
|
||||
/// `os.Logger`-backed sink. Off by default — opt-in via the Diagnostics
|
||||
/// settings toggle. Writes one `.debug` line per sample at the
|
||||
/// `com.scarf.mon` subsystem, so users can stream the output via
|
||||
/// `log stream --predicate 'subsystem == "com.scarf.mon"'` without
|
||||
/// enabling private-data redaction overrides.
|
||||
///
|
||||
/// Only meaningful for users running their own debug build or with the
|
||||
/// "verbose performance logging" toggle on.
|
||||
public final class ScarfMonLoggerBackend: ScarfMonBackend, @unchecked Sendable {
    #if canImport(os)
    private let logger: Logger

    /// - Parameter category: `os.Logger` category under the fixed
    ///   `com.scarf.mon` subsystem. Defaults to "perf".
    public init(category: String = "perf") {
        self.logger = Logger(subsystem: "com.scarf.mon", category: category)
    }

    /// Emits one `.debug` line per sample.
    public func record(_ sample: ScarfMon.Sample) {
        switch sample.kind {
        case .interval:
            // Every interpolation is marked `privacy: .public`: the name
            // is a compile-time StaticString and the other fields are
            // numbers, so no user content can reach this channel and
            // nothing gets redacted in `log stream` output.
            logger.debug(
                "\(sample.category.rawValue, privacy: .public) \(sample.name.description, privacy: .public) ms=\(Double(sample.durationNanos) / 1_000_000.0, privacy: .public)"
            )
        case .event:
            // `bytes ?? -1`: -1 stands in for "size not supplied".
            logger.debug(
                "\(sample.category.rawValue, privacy: .public) \(sample.name.description, privacy: .public) count=\(sample.count, privacy: .public) bytes=\(sample.bytes ?? -1, privacy: .public)"
            )
        }
    }
    #else
    // Non-Apple platforms have no os.Logger; keep the type compiling as a
    // no-op so callers don't need their own #if.
    public init(category: String = "perf") {}
    public func record(_ sample: ScarfMon.Sample) { /* no-op off-Apple */ }
    #endif
}
|
||||
@@ -0,0 +1,176 @@
|
||||
import Foundation
|
||||
|
||||
/// Fixed-size, lock-protected ring of recent samples. Drives the in-app
|
||||
/// Diagnostics panel and the export-as-JSON button.
|
||||
///
|
||||
/// Capacity is a compile-time choice; 4096 entries × ~80 bytes per sample =
|
||||
/// ~320 KB resident. That's enough for several minutes of streaming-chat
|
||||
/// activity at 200 samples/s without overwriting interesting context.
|
||||
///
|
||||
/// The hot path takes one `os_unfair_lock` per `record`. Aggregation (the
|
||||
/// `summary(...)` reader) builds a fresh dictionary each call — only invoked
|
||||
/// from the panel UI, which polls at a human cadence.
|
||||
public final class ScarfMonRingBuffer: ScarfMonBackend, @unchecked Sendable {
|
||||
public let capacity: Int
|
||||
|
||||
private let lock = ScarfMonLock()
|
||||
private var storage: [ScarfMon.Sample?]
|
||||
/// Next write index. Wraps around `capacity` so the buffer never grows.
|
||||
private var head: Int = 0
|
||||
/// True once we've wrapped at least once — switches the read order from
|
||||
/// `[0..<head]` to `[head..<capacity] + [0..<head]`.
|
||||
private var didWrap: Bool = false
|
||||
|
||||
public init(capacity: Int = 4096) {
|
||||
precondition(capacity > 0, "ring buffer needs a positive capacity")
|
||||
self.capacity = capacity
|
||||
self.storage = Array(repeating: nil, count: capacity)
|
||||
}
|
||||
|
||||
public func record(_ sample: ScarfMon.Sample) {
|
||||
lock.lock()
|
||||
defer { lock.unlock() }
|
||||
storage[head] = sample
|
||||
head += 1
|
||||
if head >= capacity {
|
||||
head = 0
|
||||
didWrap = true
|
||||
}
|
||||
}
|
||||
|
||||
/// Snapshot of all currently-resident samples in chronological order.
|
||||
public func samples() -> [ScarfMon.Sample] {
|
||||
lock.lock()
|
||||
defer { lock.unlock() }
|
||||
if !didWrap {
|
||||
return storage[0..<head].compactMap { $0 }
|
||||
}
|
||||
let tail = storage[head..<capacity].compactMap { $0 }
|
||||
let leading = storage[0..<head].compactMap { $0 }
|
||||
return tail + leading
|
||||
}
|
||||
|
||||
/// Wipe the buffer. Used by the "Reset" button in the Diagnostics
|
||||
/// panel and at the top of every test case.
|
||||
public func reset() {
|
||||
lock.lock()
|
||||
defer { lock.unlock() }
|
||||
for i in 0..<capacity { storage[i] = nil }
|
||||
head = 0
|
||||
didWrap = false
|
||||
}
|
||||
|
||||
/// Aggregated stats over the current buffer. Buckets by
|
||||
/// `(category, name)`; computes count, total nanos, mean, p50, p95.
|
||||
public func summary() -> [ScarfMonStat] {
|
||||
let snapshot = samples()
|
||||
var buckets: [BucketKey: [UInt64]] = [:]
|
||||
var counts: [BucketKey: Int] = [:]
|
||||
var byteTotals: [BucketKey: Int] = [:]
|
||||
var kinds: [BucketKey: ScarfMon.Sample.Kind] = [:]
|
||||
|
||||
for sample in snapshot {
|
||||
let key = BucketKey(category: sample.category, name: sample.name.description)
|
||||
kinds[key] = sample.kind
|
||||
counts[key, default: 0] += sample.count
|
||||
if let b = sample.bytes { byteTotals[key, default: 0] += b }
|
||||
if sample.kind == .interval {
|
||||
buckets[key, default: []].append(sample.durationNanos)
|
||||
}
|
||||
}
|
||||
|
||||
var stats: [ScarfMonStat] = []
|
||||
for (key, _) in counts {
|
||||
let durations = buckets[key] ?? []
|
||||
let kind = kinds[key] ?? .event
|
||||
stats.append(ScarfMonStat(
|
||||
category: key.category,
|
||||
name: key.name,
|
||||
kind: kind,
|
||||
count: counts[key] ?? 0,
|
||||
totalNanos: durations.reduce(0, &+),
|
||||
p50Nanos: percentile(durations, 0.50),
|
||||
p95Nanos: percentile(durations, 0.95),
|
||||
maxNanos: durations.max() ?? 0,
|
||||
totalBytes: byteTotals[key] ?? 0
|
||||
))
|
||||
}
|
||||
stats.sort { $0.p95Nanos > $1.p95Nanos }
|
||||
return stats
|
||||
}
|
||||
|
||||
/// Dictionary key used by `summary()` to group samples by
/// `(category, name)`.
private struct BucketKey: Hashable {
    let category: ScarfMon.Category
    let name: String
}
|
||||
|
||||
/// Nearest-rank percentile over `values`; returns 0 for an empty input.
/// Good enough for triage and avoids interpolation edge cases on tiny
/// sample sets.
private func percentile(_ values: [UInt64], _ p: Double) -> UInt64 {
    if values.isEmpty { return 0 }
    let ordered = values.sorted()
    // Nearest-rank: ceil(p * n), clamped into 1...n so an out-of-range
    // `p` can never index out of bounds.
    let ceilRank = Int((p * Double(ordered.count)).rounded(.up))
    let rank = min(max(ceilRank, 1), ordered.count)
    return ordered[rank - 1]
}
|
||||
}
|
||||
|
||||
/// Per-bucket stats surfaced to the in-app panel. Populated by
/// `ScarfMonRingBuffer.summary()`.
public struct ScarfMonStat: Sendable, Hashable, Codable {
    public let category: ScarfMon.Category
    public let name: String
    public let kind: ScarfMon.Sample.Kind
    /// Sum of per-sample `count` values folded into this bucket.
    public let count: Int
    /// Sum (wrapping add) of interval durations; 0 for event-only buckets.
    public let totalNanos: UInt64
    /// Nearest-rank percentiles over the bucket's interval durations.
    public let p50Nanos: UInt64
    public let p95Nanos: UInt64
    public let maxNanos: UInt64
    /// Sum of per-sample `bytes` where present.
    public let totalBytes: Int

    // Millisecond conveniences for display; nanoseconds remain the
    // canonical stored unit.
    public var totalMs: Double { Double(totalNanos) / 1_000_000.0 }
    public var p50Ms: Double { Double(p50Nanos) / 1_000_000.0 }
    public var p95Ms: Double { Double(p95Nanos) / 1_000_000.0 }
    public var maxMs: Double { Double(maxNanos) / 1_000_000.0 }
}
|
||||
|
||||
// MARK: - JSON export

extension ScarfMonRingBuffer {
    /// Compact JSON dump for the "Copy as JSON" button. One line per sample
    /// keeps the output greppable when the user pastes it into a feedback
    /// thread.
    ///
    /// Fix: comma placement is now derived from the successfully encoded
    /// elements rather than the snapshot index. Previously, a sample that
    /// failed to encode produced a dangling or missing comma (invalid
    /// JSON) because the suffix keyed off `i == snapshot.count - 1`.
    public func exportJSON() -> String {
        /// Flat wire shape — one object per sample.
        struct Wire: Codable {
            let category: String
            let name: String
            let kind: String
            let timestampMs: Double
            let durationNanos: UInt64
            let count: Int
            let bytes: Int?
        }
        let snapshot = samples()
        let encoder = JSONEncoder()
        // Stable key order keeps the dump diff- and grep-friendly.
        encoder.outputFormatting = [.sortedKeys]

        var elements: [String] = []
        elements.reserveCapacity(snapshot.count)
        for s in snapshot {
            let wire = Wire(
                category: s.category.rawValue,
                name: s.name.description,
                kind: s.kind.rawValue,
                timestampMs: s.timestamp.timeIntervalSince1970 * 1000,
                durationNanos: s.durationNanos,
                count: s.count,
                bytes: s.bytes
            )
            // Encoding a struct of plain fields shouldn't fail; skip
            // (rather than abort the whole export) on the off chance
            // it does.
            guard let data = try? encoder.encode(wire),
                  let line = String(data: data, encoding: .utf8) else { continue }
            elements.append(" " + line)
        }
        if elements.isEmpty { return "[\n]" }
        return "[\n" + elements.joined(separator: ",\n") + "\n]"
    }
}
|
||||
@@ -0,0 +1,54 @@
|
||||
import Foundation
|
||||
#if canImport(os)
|
||||
import os
|
||||
import os.signpost
|
||||
#endif
|
||||
|
||||
/// Always-on signpost backend. Emits an `os_signpost` event per sample so
/// users can attach Instruments and see Scarf's instrumentation in the
/// Points of Interest track without a debug build.
///
/// `os_signpost` is elided by the runtime when no Instruments session is
/// recording the relevant subsystem — the backend pays the cost of one
/// `OSLog` lookup per emit and nothing else.
public final class ScarfMonSignpostBackend: ScarfMonBackend, @unchecked Sendable {
    #if canImport(os)
    // Immutable after init; the class holds no other state, which is why
    // `@unchecked Sendable` is safe here.
    private let log: OSLog

    /// - Parameter subsystem: reverse-DNS subsystem shown in Instruments;
    ///   defaults to Scarf's own.
    public init(subsystem: String = "com.scarf.mon") {
        self.log = OSLog(subsystem: subsystem, category: .pointsOfInterest)
    }

    /// Emits one point event per sample. Note both kinds use signpost
    /// type `.event` (not begin/end pairs) — intervals carry their
    /// duration as a formatted argument instead.
    public func record(_ sample: ScarfMon.Sample) {
        // Signposts want a `StaticString` name — we already require
        // exactly that on the API. Format string is also static; the
        // dynamic values flow as printf-style args, so no allocations
        // for the event name itself.
        switch sample.kind {
        case .interval:
            os_signpost(
                .event,
                log: log,
                name: sample.name,
                "category=%{public}@ ms=%{public}.3f count=%d",
                sample.category.rawValue,
                Double(sample.durationNanos) / 1_000_000.0,
                sample.count
            )
        case .event:
            os_signpost(
                .event,
                log: log,
                name: sample.name,
                // -1 is the "no byte count" sentinel; `bytes` is optional
                // on the sample and printf args can't be nil.
                "category=%{public}@ count=%d bytes=%d",
                sample.category.rawValue,
                sample.count,
                sample.bytes ?? -1
            )
        }
    }
    #else
    // Non-Apple platforms: keep the same API surface, do nothing.
    public init(subsystem: String = "com.scarf.mon") {}
    public func record(_ sample: ScarfMon.Sample) { /* no-op off-Apple */ }
    #endif
}
|
||||
@@ -0,0 +1,34 @@
|
||||
import Foundation
|
||||
|
||||
/// Errors thrown by `CuratorService`. Each case carries enough detail
/// to render a user-actionable message — the view model surfaces these
/// inline as a banner above the leaderboard rather than blocking with a
/// modal alert.
public enum CuratorError: Error, LocalizedError, Sendable {
    /// The `hermes` binary couldn't be located.
    case cliMissing
    /// Subprocess exited non-zero. `stderr` may carry a synthetic
    /// message when the transport itself failed.
    case nonZeroExit(verb: String, code: Int32, stderr: String)
    /// JSON decoding failed; the underlying message is wrapped for
    /// diagnostics.
    case decoding(verb: String, message: String)
    /// Generic transport failure — process couldn't start, IO failed, etc.
    case transport(message: String)

    public var errorDescription: String? {
        switch self {
        case .cliMissing:
            return "Hermes CLI couldn't be found. Install Hermes v0.13+ and ensure it's on your PATH."
        case let .nonZeroExit(verb, code, stderr):
            // Prefer the subprocess's own words; fall back to a generic
            // exit-code message when stderr was blank.
            let detail = stderr.trimmingCharacters(in: .whitespacesAndNewlines)
            return detail.isEmpty
                ? "`hermes curator \(verb)` exited with code \(code)."
                : detail
        case let .decoding(verb, message):
            return "Couldn't decode `hermes curator \(verb)` output: \(message)"
        case let .transport(message):
            return message
        }
    }
}
|
||||
@@ -32,10 +32,21 @@ public enum QueryDefaults: Sendable {
|
||||
/// consistent budget — and so we have one knob to retune if perf
|
||||
/// concerns shift.
|
||||
public enum HistoryPageSize: Sendable {
|
||||
/// Initial chat-history load: covers the vast majority of
|
||||
/// sessions in one fetch while keeping the snapshot read bounded
|
||||
/// for the rare 1000+-message session.
|
||||
public nonisolated static let initial = 200
|
||||
/// Initial chat-history load. **Sized to fit the SSH wire payload
|
||||
/// inside a 30-second `RemoteSQLiteBackend.queryTimeout`.** A
|
||||
/// 157-message session at 200-row page size produced enough
|
||||
/// JSON (with `reasoning_content` for thinking models) to time
|
||||
/// out at exactly 30 s on a 420 ms-RTT remote. Dropped to 50,
|
||||
/// then to 25 in v2.7 after a 160-message session still timed
|
||||
/// out at 50 — `reasoning_content` for thinking-model turns can
|
||||
/// run 20+ KB per row, so 50 rows × 30 KB = 1.5 MB JSON which
|
||||
/// over a slow SSH channel still trips the 30s budget. Pair
|
||||
/// with `messageColumnsLight` (excludes `reasoning_content`)
|
||||
/// so the on-wire payload is small even at this size; the
|
||||
/// inspector pane lazy-loads via `fetchReasoningContent(for:)`
|
||||
/// when the user expands a disclosure. The "Load earlier"
|
||||
/// affordance pages back through older messages on demand.
|
||||
public nonisolated static let initial = 25
|
||||
/// Reconnection reconcile against the DB. 200 rows is plenty —
|
||||
/// disconnects don't generate hundreds of unseen messages.
|
||||
public nonisolated static let reconcile = 200
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
import Foundation
|
||||
|
||||
/// One entry in the `hermes curator list-archived` output. Decoded
/// tolerantly via `decodeIfPresent` so a stripped-down host (or a future
/// Hermes that drops one of the optional columns) doesn't crash the view.
///
/// Only `name` is required — every other field is optional and the
/// computed `*Label` accessors render `"—"` for missing values.
public struct HermesCuratorArchivedSkill: Sendable, Equatable, Identifiable, Codable {
    /// Identity is the skill name itself.
    public var id: String { name }
    public let name: String
    public let category: String?
    public let archivedAt: String?
    public let reason: String?
    public let sizeBytes: Int?
    public let path: String?

    public init(
        name: String,
        category: String? = nil,
        archivedAt: String? = nil,
        reason: String? = nil,
        sizeBytes: Int? = nil,
        path: String? = nil
    ) {
        self.name = name
        self.category = category
        self.archivedAt = archivedAt
        self.reason = reason
        self.sizeBytes = sizeBytes
        self.path = path
    }

    private enum CodingKeys: String, CodingKey {
        case name, category, reason, path
        case archivedAt = "archived_at"
        case sizeBytes = "size_bytes"
    }

    /// Tolerant decoder: every column except `name` may be absent.
    public init(from decoder: Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        self.init(
            name: try container.decode(String.self, forKey: .name),
            category: try container.decodeIfPresent(String.self, forKey: .category),
            archivedAt: try container.decodeIfPresent(String.self, forKey: .archivedAt),
            reason: try container.decodeIfPresent(String.self, forKey: .reason),
            sizeBytes: try container.decodeIfPresent(Int.self, forKey: .sizeBytes),
            path: try container.decodeIfPresent(String.self, forKey: .path)
        )
    }

    /// Nil optionals are omitted on the wire rather than encoded as null.
    public func encode(to encoder: Encoder) throws {
        var container = encoder.container(keyedBy: CodingKeys.self)
        try container.encode(name, forKey: .name)
        try container.encodeIfPresent(category, forKey: .category)
        try container.encodeIfPresent(archivedAt, forKey: .archivedAt)
        try container.encodeIfPresent(reason, forKey: .reason)
        try container.encodeIfPresent(sizeBytes, forKey: .sizeBytes)
        try container.encodeIfPresent(path, forKey: .path)
    }

    /// "4.4 KB" / "1.2 MB" / "—" for nil. Uses the SI byte formatter so
    /// the labels match what Finder shows.
    public var sizeLabel: String {
        guard let byteCount = sizeBytes else { return "—" }
        let formatter = ByteCountFormatter()
        formatter.allowedUnits = [.useAll]
        formatter.countStyle = .file
        return formatter.string(fromByteCount: Int64(byteCount))
    }

    /// `2026-04-22` (ISO date prefix) / "—". Hermes returns full ISO
    /// timestamps with seconds + Z; the date prefix is what the user
    /// actually wants in the archived list.
    public var archivedAtLabel: String {
        guard let stamp = archivedAt, !stamp.isEmpty else { return "—" }
        // Everything before the first "T"; a bare date passes through
        // unchanged.
        return String(stamp.prefix(while: { $0 != "T" }))
    }
}
|
||||
|
||||
/// Result of `hermes curator prune --dry-run` — what would be removed
/// if the user confirms. The view derives `totalCount` from
/// `wouldRemove.count` so the wire shape stays flat.
public struct CuratorPruneSummary: Sendable, Equatable, Codable {
    public let wouldRemove: [HermesCuratorArchivedSkill]
    public let totalBytes: Int
    /// Number of entries slated for removal.
    public var totalCount: Int { wouldRemove.count }

    public init(wouldRemove: [HermesCuratorArchivedSkill], totalBytes: Int) {
        self.wouldRemove = wouldRemove
        self.totalBytes = totalBytes
    }

    private enum CodingKeys: String, CodingKey {
        case wouldRemove = "would_remove"
        case totalBytes = "total_bytes"
    }

    /// Tolerant decoder: both columns default when absent.
    public init(from decoder: Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        let removals = try container.decodeIfPresent([HermesCuratorArchivedSkill].self, forKey: .wouldRemove)
        let bytes = try container.decodeIfPresent(Int.self, forKey: .totalBytes)
        self.init(wouldRemove: removals ?? [], totalBytes: bytes ?? 0)
    }

    public func encode(to encoder: Encoder) throws {
        var container = encoder.container(keyedBy: CodingKeys.self)
        try container.encode(wouldRemove, forKey: .wouldRemove)
        try container.encode(totalBytes, forKey: .totalBytes)
    }

    /// "12.3 KB" / "—" for empty. Convenience for the confirm sheet header.
    public var totalBytesLabel: String {
        if totalBytes <= 0 { return "—" }
        let formatter = ByteCountFormatter()
        formatter.allowedUnits = [.useAll]
        formatter.countStyle = .file
        return formatter.string(fromByteCount: Int64(totalBytes))
    }
}
|
||||
@@ -0,0 +1,32 @@
|
||||
import Foundation
|
||||
|
||||
/// One row from `hermes kanban assignees --json`. The output is the
/// union of profiles configured on the host (`~/.hermes/profiles/`)
/// and any names appearing in the live board's `assignee` column —
/// covers the case where a profile was renamed but historical tasks
/// still reference the old name.
public struct HermesKanbanAssignee: Sendable, Equatable, Identifiable, Codable {
    /// Identity is the profile name itself.
    public var id: String { profile }
    public let profile: String
    public let activeCount: Int
    public let totalCount: Int

    public init(profile: String, activeCount: Int = 0, totalCount: Int = 0) {
        self.profile = profile
        self.activeCount = activeCount
        self.totalCount = totalCount
    }

    enum CodingKeys: String, CodingKey {
        case profile
        case activeCount = "active"
        case totalCount = "total"
    }

    /// Tolerant decoder: count columns default to 0 when absent.
    public init(from decoder: any Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        let name = try container.decode(String.self, forKey: .profile)
        let active = try container.decodeIfPresent(Int.self, forKey: .activeCount) ?? 0
        let total = try container.decodeIfPresent(Int.self, forKey: .totalCount) ?? 0
        self.init(profile: name, activeCount: active, totalCount: total)
    }
}
|
||||
@@ -0,0 +1,51 @@
|
||||
import Foundation
|
||||
|
||||
/// One comment from `hermes kanban show <id> --json` or appended via
/// `hermes kanban comment <id> <text>`. Comments are append-only — there's
/// no edit/delete verb.
public struct HermesKanbanComment: Sendable, Equatable, Identifiable, Codable {
    public let id: Int
    public let taskId: String
    public let author: String
    public let body: String
    public let createdAt: String

    public init(
        id: Int,
        taskId: String,
        author: String,
        body: String,
        createdAt: String
    ) {
        self.id = id
        self.taskId = taskId
        self.author = author
        self.body = body
        self.createdAt = createdAt
    }

    enum CodingKeys: String, CodingKey {
        case id
        case taskId = "task_id"
        case author
        case body
        case createdAt = "created_at"
    }

    /// Tolerant decoder: only `id` is required; string columns default
    /// to "".
    public init(from decoder: any Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        // Hermes emits Unix integer timestamps from its SQLite columns;
        // accept both ints and ISO strings.
        let stamp: String
        if let epoch = try? container.decodeIfPresent(Double.self, forKey: .createdAt) {
            let formatter = ISO8601DateFormatter()
            formatter.formatOptions = [.withInternetDateTime]
            stamp = formatter.string(from: Date(timeIntervalSince1970: epoch))
        } else {
            stamp = (try? container.decodeIfPresent(String.self, forKey: .createdAt)) ?? ""
        }
        self.init(
            id: try container.decode(Int.self, forKey: .id),
            taskId: try container.decodeIfPresent(String.self, forKey: .taskId) ?? "",
            author: try container.decodeIfPresent(String.self, forKey: .author) ?? "",
            body: try container.decodeIfPresent(String.self, forKey: .body) ?? "",
            createdAt: stamp
        )
    }
}
|
||||
@@ -0,0 +1,175 @@
|
||||
import Foundation
|
||||
|
||||
/// One event from the `task_events` log — emitted by `hermes kanban show`
/// (within a `HermesKanbanTaskDetail`) and streamed live by
/// `hermes kanban watch --json`. Event kinds are open-ended on the Hermes
/// side; v0.12 emits a small known set listed in `KanbanEventKind`. Unknown
/// kinds map to `.unknown` so new Hermes builds don't break decoding.
public struct HermesKanbanEvent: Sendable, Equatable, Identifiable, Codable {
    public let id: Int
    public let taskId: String
    public let runId: Int?
    /// Wire string for the event kind. Use `kindEnum` to interpret.
    public let kind: String
    public let createdAt: String
    /// Opaque diagnostics payload from the `task_events.payload` column.
    /// Stored as a JSON string so callers that don't need it pay no
    /// decoding cost; callers that do can re-parse.
    public let payloadJSON: String?

    /// Memberwise initializer.
    public init(
        id: Int,
        taskId: String,
        runId: Int? = nil,
        kind: String,
        createdAt: String,
        payloadJSON: String? = nil
    ) {
        self.id = id
        self.taskId = taskId
        self.runId = runId
        self.kind = kind
        self.createdAt = createdAt
        self.payloadJSON = payloadJSON
    }

    /// Typed view of `kind`; unrecognized wire strings become `.unknown`.
    public var kindEnum: KanbanEventKind { KanbanEventKind.from(kind) }

    enum CodingKeys: String, CodingKey {
        case id
        case taskId = "task_id"
        case runId = "run_id"
        case kind
        case createdAt = "created_at"
        case payload
    }

    /// Tolerant decoder: `id`/`task_id`/`kind` fall back to defaults
    /// rather than throwing; `created_at` accepts either a Unix-epoch
    /// number (normalized to an ISO-8601 string) or a string.
    public init(from decoder: any Decoder) throws {
        let c = try decoder.container(keyedBy: CodingKeys.self)
        self.id = try c.decodeIfPresent(Int.self, forKey: .id) ?? 0
        self.taskId = try c.decodeIfPresent(String.self, forKey: .taskId) ?? ""
        self.runId = try c.decodeIfPresent(Int.self, forKey: .runId)
        self.kind = try c.decodeIfPresent(String.self, forKey: .kind) ?? "unknown"
        // Numeric branch first: a Double decode throws for string values,
        // which routes ISO timestamps to the else branch.
        if let unix = try? c.decodeIfPresent(Double.self, forKey: .createdAt) {
            let f = ISO8601DateFormatter()
            f.formatOptions = [.withInternetDateTime]
            self.createdAt = f.string(from: Date(timeIntervalSince1970: unix))
        } else {
            self.createdAt = (try? c.decodeIfPresent(String.self, forKey: .createdAt)) ?? ""
        }

        // payload may be absent, a JSON object, or already a string.
        if let raw = try? c.decodeIfPresent(String.self, forKey: .payload) {
            self.payloadJSON = raw
        } else if c.contains(.payload) {
            // Re-encode arbitrary JSON into a string so we can carry it
            // around without committing to a typed shape.
            // NOTE(review): a JSON `null` payload routes through this
            // branch and re-encodes as a top-level fragment — confirm the
            // minimum supported toolchain's JSONEncoder accepts that.
            let nested = try c.decode(JSONAny.self, forKey: .payload)
            let data = try JSONEncoder().encode(nested)
            self.payloadJSON = String(data: data, encoding: .utf8)
        } else {
            self.payloadJSON = nil
        }
    }

    /// Mirror of the decoder: `payloadJSON` is written back under the
    /// `payload` key as its raw string form.
    public func encode(to encoder: any Encoder) throws {
        var c = encoder.container(keyedBy: CodingKeys.self)
        try c.encode(id, forKey: .id)
        try c.encode(taskId, forKey: .taskId)
        try c.encodeIfPresent(runId, forKey: .runId)
        try c.encode(kind, forKey: .kind)
        try c.encode(createdAt, forKey: .createdAt)
        try c.encodeIfPresent(payloadJSON, forKey: .payload)
    }
}
|
||||
|
||||
/// Known event kinds emitted by Hermes v0.12+. New kinds are surfaced
/// as `.unknown` until the model catches up; UI defaults to a generic
/// rendering for those.
public enum KanbanEventKind: String, Sendable, CaseIterable {
    // Lifecycle
    case created, claimed, released, started, completed
    // Flow control
    case blocked, unblocked, commented, archived, heartbeat
    case statusChange = "status_change"
    // Failure modes
    case error, crashed
    case timedOut = "timed_out"
    case spawnFailed = "spawn_failed"
    // Catch-all for wire strings this build doesn't know yet.
    case unknown

    /// Case-insensitive lookup; anything unrecognized maps to `.unknown`.
    public static func from(_ raw: String) -> KanbanEventKind {
        if let known = KanbanEventKind(rawValue: raw.lowercased()) {
            return known
        }
        return .unknown
    }
}
|
||||
|
||||
// MARK: - JSON-any helper

/// Minimal type-erased JSON wrapper used for opaque event payloads. We
/// don't commit to a typed shape because Hermes treats payload as
/// diagnostics and may evolve it freely. Used only inside Codable
/// init/encode (a single decode→re-encode→string pass), so the `Any`
/// payload never crosses an actor boundary — `@unchecked Sendable`
/// is the appropriate seal here.
struct JSONAny: Codable, @unchecked Sendable {
    // Holds one of: NSNull, Bool, Int64, Double, String, [Any],
    // [String: Any] — exactly the cases produced by init(from:) below.
    let raw: Any

    init(from decoder: any Decoder) throws {
        let container = try decoder.singleValueContainer()
        // The cascade order is load-bearing: Bool before the numerics,
        // and Int64 before Double so whole numbers keep integer identity;
        // Double then catches fractional (and out-of-Int64-range) values.
        if container.decodeNil() {
            self.raw = NSNull()
        } else if let b = try? container.decode(Bool.self) {
            self.raw = b
        } else if let i = try? container.decode(Int64.self) {
            self.raw = i
        } else if let d = try? container.decode(Double.self) {
            self.raw = d
        } else if let s = try? container.decode(String.self) {
            self.raw = s
        } else if let arr = try? container.decode([JSONAny].self) {
            self.raw = arr.map(\.raw)
        } else if let dict = try? container.decode([String: JSONAny].self) {
            self.raw = dict.mapValues(\.raw)
        } else {
            throw DecodingError.dataCorruptedError(
                in: container,
                debugDescription: "Unsupported JSON value"
            )
        }
    }

    func encode(to encoder: any Encoder) throws {
        var c = encoder.singleValueContainer()
        switch raw {
        case is NSNull:
            try c.encodeNil()
        case let b as Bool:
            try c.encode(b)
        case let i as Int64:
            try c.encode(i)
        // `Int` isn't produced by init(from:) but may appear when a
        // caller constructs nested values; normalize it to Int64.
        case let i as Int:
            try c.encode(Int64(i))
        case let d as Double:
            try c.encode(d)
        case let s as String:
            try c.encode(s)
        case let arr as [Any]:
            try c.encode(arr.map { JSONAny(unsafeRaw: $0) })
        case let dict as [String: Any]:
            try c.encode(dict.mapValues { JSONAny(unsafeRaw: $0) })
        default:
            throw EncodingError.invalidValue(
                raw,
                EncodingError.Context(codingPath: encoder.codingPath, debugDescription: "Unsupported")
            )
        }
    }

    // Private because `raw` is only valid for the closed set of JSON
    // shapes above; decode(from:) is the supported entry point.
    private init(unsafeRaw: Any) { self.raw = unsafeRaw }
}
|
||||
@@ -0,0 +1,144 @@
|
||||
import Foundation
|
||||
|
||||
/// One attempt to execute a kanban task — `hermes kanban runs <id> --json`
/// returns an array of these per task. Each run records the worker
/// profile that claimed the task, the outcome, and a structured
/// metadata blob the worker handed back.
public struct HermesKanbanRun: Sendable, Equatable, Identifiable, Codable {
    public let id: Int
    public let taskId: String
    public let profile: String?
    public let stepKey: String?
    public let status: String // running | done | blocked | crashed | timed_out | failed | released
    public let claimLock: String? // "host:pid" at spawn time
    public let claimExpires: Int?
    public let workerPid: Int?
    public let maxRuntimeSeconds: Int?
    public let lastHeartbeatAt: String?
    public let startedAt: String
    public let endedAt: String?
    public let outcome: String? // completed | blocked | crashed | timed_out | spawn_failed | gave_up | reclaimed
    public let summary: String?
    public let error: String?
    /// `metadata` is an opaque JSON dict from the worker. Carried as a
    /// raw string so we don't lock the typed shape.
    public let metadataJSON: String?

    /// Memberwise initializer.
    public init(
        id: Int,
        taskId: String,
        profile: String? = nil,
        stepKey: String? = nil,
        status: String,
        claimLock: String? = nil,
        claimExpires: Int? = nil,
        workerPid: Int? = nil,
        maxRuntimeSeconds: Int? = nil,
        lastHeartbeatAt: String? = nil,
        startedAt: String,
        endedAt: String? = nil,
        outcome: String? = nil,
        summary: String? = nil,
        error: String? = nil,
        metadataJSON: String? = nil
    ) {
        self.id = id
        self.taskId = taskId
        self.profile = profile
        self.stepKey = stepKey
        self.status = status
        self.claimLock = claimLock
        self.claimExpires = claimExpires
        self.workerPid = workerPid
        self.maxRuntimeSeconds = maxRuntimeSeconds
        self.lastHeartbeatAt = lastHeartbeatAt
        self.startedAt = startedAt
        self.endedAt = endedAt
        self.outcome = outcome
        self.summary = summary
        self.error = error
        self.metadataJSON = metadataJSON
    }

    enum CodingKeys: String, CodingKey {
        case id
        case taskId = "task_id"
        case profile
        case stepKey = "step_key"
        case status
        case claimLock = "claim_lock"
        case claimExpires = "claim_expires"
        case workerPid = "worker_pid"
        case maxRuntimeSeconds = "max_runtime_seconds"
        case lastHeartbeatAt = "last_heartbeat_at"
        case startedAt = "started_at"
        case endedAt = "ended_at"
        case outcome
        case summary
        case error
        case metadata
    }

    /// Tolerant decoder: `id`/`task_id`/`status` fall back to defaults
    /// rather than throwing. The three timestamp columns each accept
    /// either a Unix-epoch number (normalized to an ISO-8601 string)
    /// or a string.
    public init(from decoder: any Decoder) throws {
        let c = try decoder.container(keyedBy: CodingKeys.self)
        self.id = try c.decodeIfPresent(Int.self, forKey: .id) ?? 0
        self.taskId = try c.decodeIfPresent(String.self, forKey: .taskId) ?? ""
        self.profile = try c.decodeIfPresent(String.self, forKey: .profile)
        self.stepKey = try c.decodeIfPresent(String.self, forKey: .stepKey)
        self.status = try c.decodeIfPresent(String.self, forKey: .status) ?? "unknown"
        self.claimLock = try c.decodeIfPresent(String.self, forKey: .claimLock)
        self.claimExpires = try c.decodeIfPresent(Int.self, forKey: .claimExpires)
        self.workerPid = try c.decodeIfPresent(Int.self, forKey: .workerPid)
        self.maxRuntimeSeconds = try c.decodeIfPresent(Int.self, forKey: .maxRuntimeSeconds)
        // One formatter reused across all three timestamp fields.
        let f = ISO8601DateFormatter()
        f.formatOptions = [.withInternetDateTime]
        // Numeric branch first: a Double decode throws for string
        // values, which routes ISO timestamps to the else branch.
        if let unix = try? c.decodeIfPresent(Double.self, forKey: .lastHeartbeatAt) {
            self.lastHeartbeatAt = f.string(from: Date(timeIntervalSince1970: unix))
        } else {
            self.lastHeartbeatAt = try c.decodeIfPresent(String.self, forKey: .lastHeartbeatAt)
        }
        if let unix = try? c.decodeIfPresent(Double.self, forKey: .startedAt) {
            self.startedAt = f.string(from: Date(timeIntervalSince1970: unix))
        } else {
            self.startedAt = (try? c.decodeIfPresent(String.self, forKey: .startedAt)) ?? ""
        }
        if let unix = try? c.decodeIfPresent(Double.self, forKey: .endedAt) {
            self.endedAt = f.string(from: Date(timeIntervalSince1970: unix))
        } else {
            self.endedAt = try c.decodeIfPresent(String.self, forKey: .endedAt)
        }
        self.outcome = try c.decodeIfPresent(String.self, forKey: .outcome)
        self.summary = try c.decodeIfPresent(String.self, forKey: .summary)
        self.error = try c.decodeIfPresent(String.self, forKey: .error)

        // metadata may be absent, a JSON object, or already a string.
        if let raw = try? c.decodeIfPresent(String.self, forKey: .metadata) {
            self.metadataJSON = raw
        } else if c.contains(.metadata) {
            // Re-encode arbitrary JSON into a string so we don't commit
            // to a typed shape for the worker's metadata blob.
            let nested = try c.decode(JSONAny.self, forKey: .metadata)
            let data = try JSONEncoder().encode(nested)
            self.metadataJSON = String(data: data, encoding: .utf8)
        } else {
            self.metadataJSON = nil
        }
    }

    /// Mirror of the decoder: `metadataJSON` is written back under the
    /// `metadata` key as its raw string form.
    public func encode(to encoder: any Encoder) throws {
        var c = encoder.container(keyedBy: CodingKeys.self)
        try c.encode(id, forKey: .id)
        try c.encode(taskId, forKey: .taskId)
        try c.encodeIfPresent(profile, forKey: .profile)
        try c.encodeIfPresent(stepKey, forKey: .stepKey)
        try c.encode(status, forKey: .status)
        try c.encodeIfPresent(claimLock, forKey: .claimLock)
        try c.encodeIfPresent(claimExpires, forKey: .claimExpires)
        try c.encodeIfPresent(workerPid, forKey: .workerPid)
        try c.encodeIfPresent(maxRuntimeSeconds, forKey: .maxRuntimeSeconds)
        try c.encodeIfPresent(lastHeartbeatAt, forKey: .lastHeartbeatAt)
        try c.encode(startedAt, forKey: .startedAt)
        try c.encodeIfPresent(endedAt, forKey: .endedAt)
        try c.encodeIfPresent(outcome, forKey: .outcome)
        try c.encodeIfPresent(summary, forKey: .summary)
        try c.encodeIfPresent(error, forKey: .error)
        try c.encodeIfPresent(metadataJSON, forKey: .metadata)
    }
}
|
||||
@@ -0,0 +1,68 @@
|
||||
import Foundation
|
||||
|
||||
/// Output of `hermes kanban stats --json`. Drives the toolbar glance
/// ("12 todo · 3 running · 5 blocked"), the per-project Kanban summary
/// widget, and the column-count badges on the board header.
public struct HermesKanbanStats: Sendable, Equatable, Codable {
    public let byStatus: [String: Int]
    public let byAssignee: [String: Int]
    public let byTenant: [String: Int]
    /// Age in seconds of the oldest task currently in the `ready` status.
    /// `nil` when no tasks are ready. Helps surface a stuck dispatcher.
    public let oldestReadyAgeSeconds: Double?

    public init(
        byStatus: [String: Int],
        byAssignee: [String: Int] = [:],
        byTenant: [String: Int] = [:],
        oldestReadyAgeSeconds: Double? = nil
    ) {
        self.byStatus = byStatus
        self.byAssignee = byAssignee
        self.byTenant = byTenant
        self.oldestReadyAgeSeconds = oldestReadyAgeSeconds
    }

    /// Stats with every histogram empty.
    public static let empty = HermesKanbanStats(byStatus: [:])

    enum CodingKeys: String, CodingKey {
        case byStatus = "by_status"
        case byAssignee = "by_assignee"
        case byTenant = "by_tenant"
        case oldestReadyAgeSeconds = "oldest_ready_age_seconds"
    }

    /// Tolerant decoder: each histogram defaults to empty when absent.
    public init(from decoder: any Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        self.init(
            byStatus: try container.decodeIfPresent([String: Int].self, forKey: .byStatus) ?? [:],
            byAssignee: try container.decodeIfPresent([String: Int].self, forKey: .byAssignee) ?? [:],
            byTenant: try container.decodeIfPresent([String: Int].self, forKey: .byTenant) ?? [:],
            oldestReadyAgeSeconds: try container.decodeIfPresent(Double.self, forKey: .oldestReadyAgeSeconds)
        )
    }

    /// "12 todo · 3 running · 5 blocked" formatted glance string. Skips
    /// empty buckets and never includes archived. Returns an empty
    /// string when there's nothing to show so callers can hide chrome.
    public var glanceString: String {
        var parts: [String] = []
        for status in ["todo", "ready", "running", "blocked", "done"] {
            let n = byStatus[status] ?? 0
            guard n > 0 else { continue }
            parts.append("\(n) \(status)")
        }
        return parts.joined(separator: " · ")
    }

    /// Active task count across the board (everything except archived
    /// and done). Used as a badge on the sidebar / project tab.
    public var activeCount: Int {
        let live = ["triage", "todo", "ready", "running", "blocked"]
        return live.reduce(0) { $0 + (byStatus[$1] ?? 0) }
    }
}
|
||||
@@ -2,11 +2,15 @@ import Foundation
|
||||
|
||||
/// One task from `hermes kanban list --json` (v0.12+).
|
||||
///
|
||||
/// Hermes ships a SQLite-backed task board under `~/.hermes/kanban.db`
|
||||
/// — multi-profile collaboration was reverted upstream while the
|
||||
/// design is reworked, so Scarf v2.6 surfaces this as a read-only
|
||||
/// list. Create / claim / dispatch / dependency-link UI is deferred
|
||||
/// until upstream stabilizes.
|
||||
/// Hermes ships a SQLite-backed task board under `~/.hermes/kanban.db`.
|
||||
/// v2.6 surfaced this as a read-only list; v2.7.5 lifts it to a full
|
||||
/// drag-and-drop board with the complete write surface (`create`,
|
||||
/// `claim`, `complete`, `block`, `unblock`, `archive`, `assign`,
|
||||
/// `link`/`unlink`, `comment`, `dispatch`).
|
||||
///
|
||||
/// Hermes has no `update` verb — `priority` / `title` / `body` /
|
||||
/// `tenant` are write-once at create time. Mutations after that are
|
||||
/// expressed as state transitions (status, assignee) or new comments.
|
||||
public struct HermesKanbanTask: Sendable, Equatable, Identifiable, Codable {
|
||||
public let id: String
|
||||
public let title: String
|
||||
@@ -24,6 +28,12 @@ public struct HermesKanbanTask: Sendable, Equatable, Identifiable, Codable {
|
||||
public let result: String?
|
||||
public let skills: [String]
|
||||
|
||||
// v2.7.5 fields exposed by `kanban show --json` and `kanban watch`.
|
||||
public let idempotencyKey: String?
|
||||
public let lastHeartbeatAt: String?
|
||||
public let maxRuntimeSeconds: Int?
|
||||
public let currentRunId: Int?
|
||||
|
||||
public init(
|
||||
id: String,
|
||||
title: String,
|
||||
@@ -39,7 +49,11 @@ public struct HermesKanbanTask: Sendable, Equatable, Identifiable, Codable {
|
||||
startedAt: String? = nil,
|
||||
completedAt: String? = nil,
|
||||
result: String? = nil,
|
||||
skills: [String] = []
|
||||
skills: [String] = [],
|
||||
idempotencyKey: String? = nil,
|
||||
lastHeartbeatAt: String? = nil,
|
||||
maxRuntimeSeconds: Int? = nil,
|
||||
currentRunId: Int? = nil
|
||||
) {
|
||||
self.id = id
|
||||
self.title = title
|
||||
@@ -56,6 +70,10 @@ public struct HermesKanbanTask: Sendable, Equatable, Identifiable, Codable {
|
||||
self.completedAt = completedAt
|
||||
self.result = result
|
||||
self.skills = skills
|
||||
self.idempotencyKey = idempotencyKey
|
||||
self.lastHeartbeatAt = lastHeartbeatAt
|
||||
self.maxRuntimeSeconds = maxRuntimeSeconds
|
||||
self.currentRunId = currentRunId
|
||||
}
|
||||
|
||||
enum CodingKeys: String, CodingKey {
|
||||
@@ -67,6 +85,10 @@ public struct HermesKanbanTask: Sendable, Equatable, Identifiable, Codable {
|
||||
case startedAt = "started_at"
|
||||
case completedAt = "completed_at"
|
||||
case result, skills
|
||||
case idempotencyKey = "idempotency_key"
|
||||
case lastHeartbeatAt = "last_heartbeat_at"
|
||||
case maxRuntimeSeconds = "max_runtime_seconds"
|
||||
case currentRunId = "current_run_id"
|
||||
}
|
||||
|
||||
public init(from decoder: any Decoder) throws {
|
||||
@@ -81,10 +103,109 @@ public struct HermesKanbanTask: Sendable, Equatable, Identifiable, Codable {
|
||||
self.workspaceKind = try c.decodeIfPresent(String.self, forKey: .workspaceKind)
|
||||
self.workspacePath = try c.decodeIfPresent(String.self, forKey: .workspacePath)
|
||||
self.createdBy = try c.decodeIfPresent(String.self, forKey: .createdBy)
|
||||
self.createdAt = try c.decodeIfPresent(String.self, forKey: .createdAt)
|
||||
self.startedAt = try c.decodeIfPresent(String.self, forKey: .startedAt)
|
||||
self.completedAt = try c.decodeIfPresent(String.self, forKey: .completedAt)
|
||||
// Hermes emits timestamps as Unix integer seconds for tasks
|
||||
// returned from `create`/`show`/`list` (its SQLite columns are
|
||||
// INTEGER) but ISO-8601 strings in some other paths. Normalize
|
||||
// both shapes into ISO-8601 strings so UI code only deals with
|
||||
// one type.
|
||||
self.createdAt = try Self.decodeFlexibleTimestamp(c, forKey: .createdAt)
|
||||
self.startedAt = try Self.decodeFlexibleTimestamp(c, forKey: .startedAt)
|
||||
self.completedAt = try Self.decodeFlexibleTimestamp(c, forKey: .completedAt)
|
||||
self.result = try c.decodeIfPresent(String.self, forKey: .result)
|
||||
self.skills = try c.decodeIfPresent([String].self, forKey: .skills) ?? []
|
||||
self.idempotencyKey = try c.decodeIfPresent(String.self, forKey: .idempotencyKey)
|
||||
self.lastHeartbeatAt = try Self.decodeFlexibleTimestamp(c, forKey: .lastHeartbeatAt)
|
||||
self.maxRuntimeSeconds = try c.decodeIfPresent(Int.self, forKey: .maxRuntimeSeconds)
|
||||
self.currentRunId = try c.decodeIfPresent(Int.self, forKey: .currentRunId)
|
||||
}
|
||||
|
||||
/// Decode a timestamp that may arrive as a Unix numeric value or an
/// ISO-8601 string. Returns the ISO-8601 string form so downstream
/// code only deals with one type.
static func decodeFlexibleTimestamp(
    _ container: KeyedDecodingContainer<CodingKeys>,
    forKey key: CodingKeys
) throws -> String? {
    guard container.contains(key) else { return nil }
    // SQLite-style numeric seconds is the common Hermes shape; anything
    // that fails to decode as Double falls through to the string path.
    guard let seconds = try? container.decodeIfPresent(Double.self, forKey: key) else {
        return try container.decodeIfPresent(String.self, forKey: key)
    }
    return Self.isoFormatter.string(from: Date(timeIntervalSince1970: seconds))
}
|
||||
|
||||
/// Shared ISO-8601 formatter used to normalize decoded timestamps.
/// Cached as a static because formatter construction is comparatively
/// expensive; `.withInternetDateTime` produces the
/// "2024-01-01T00:00:00Z" wire shape.
static let isoFormatter: ISO8601DateFormatter = {
    let f = ISO8601DateFormatter()
    f.formatOptions = [.withInternetDateTime]
    return f
}()
|
||||
}
|
||||
|
||||
// MARK: - Status enum (typed view of the wire string)

/// Typed mirror of Hermes's status enum. Models keep `status: String` for
/// forward compatibility with new statuses Hermes might add; UI code uses
/// `KanbanStatus.from(_:)` to map known values into typed categories and
/// fall back to `.unknown` for anything new.
public enum KanbanStatus: String, Sendable, CaseIterable, Identifiable {
    case triage, todo, ready, running, blocked, done, archived, unknown

    public var id: String { rawValue }

    /// Case-insensitive parse; any unrecognized wire value maps to `.unknown`.
    public static func from(_ raw: String) -> KanbanStatus {
        guard let known = KanbanStatus(rawValue: raw.lowercased()) else {
            return .unknown
        }
        return known
    }

    /// Coarse 5-column board grouping. `triage` is a column; `todo` and
    /// `ready` collapse to one ("Up Next"); everything else maps 1:1.
    /// `archived` lives outside the board (toggle).
    public var boardColumn: KanbanBoardColumn {
        switch self {
        case .triage:
            return .triage
        case .todo, .ready, .unknown:
            return .upNext
        case .running:
            return .running
        case .blocked:
            return .blocked
        case .done:
            return .done
        case .archived:
            return .archived
        }
    }
}
|
||||
|
||||
/// Board column identity for the drag-and-drop kanban layout.
public enum KanbanBoardColumn: String, Sendable, CaseIterable, Identifiable {
    case triage, upNext, running, blocked, done, archived

    public var id: String { rawValue }

    /// Human-readable column header.
    public var displayName: String {
        switch self {
        case .triage:
            return "Triage"
        case .upNext:
            return "Up Next"
        case .running:
            return "Running"
        case .blocked:
            return "Blocked"
        case .done:
            return "Done"
        case .archived:
            return "Archived"
        }
    }

    /// Visible columns in the default board layout. `archived` appears
    /// only when the "Show archived" toggle is on. `triage` is shown
    /// only when the board has at least one triage task (collapsed
    /// otherwise to keep the default layout focused).
    public static let defaultVisible: [KanbanBoardColumn] = [
        .triage, .upNext, .running, .blocked, .done
    ]
}
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
import Foundation
|
||||
|
||||
/// Output of `hermes kanban show <id> --json`. Wraps a task with its full
/// audit trail: comments + events + parent results. Loaded on-demand
/// when the user opens the inspector pane; the board itself only carries
/// the lightweight `HermesKanbanTask` rows.
public struct HermesKanbanTaskDetail: Sendable, Equatable, Codable {
    public let task: HermesKanbanTask
    public let comments: [HermesKanbanComment]
    public let events: [HermesKanbanEvent]
    /// Parent-task results keyed by parent task id. Hermes hands these
    /// to the worker as upstream context; surfacing them in the
    /// inspector is useful for understanding why a task started.
    public let parentResults: [String: String]

    public init(
        task: HermesKanbanTask,
        comments: [HermesKanbanComment] = [],
        events: [HermesKanbanEvent] = [],
        parentResults: [String: String] = [:]
    ) {
        self.task = task
        self.comments = comments
        self.events = events
        self.parentResults = parentResults
    }

    enum CodingKeys: String, CodingKey {
        case task, comments, events
        case parentResults = "parent_results"
    }

    /// Tolerant decode. Hermes emits `kanban show --json` either as a
    /// nested `{task: {...}, comments: [...], events: [...]}` object or
    /// as a flat task object with extra `comments`/`events` keys at top
    /// level. Prefer the nested form; fall back to decoding the whole
    /// payload as a task.
    public init(from decoder: any Decoder) throws {
        let keyed = try decoder.container(keyedBy: CodingKeys.self)
        if let wrapped = try? keyed.decode(HermesKanbanTask.self, forKey: .task) {
            self.task = wrapped
        } else {
            self.task = try decoder.singleValueContainer().decode(HermesKanbanTask.self)
        }
        self.comments = (try? keyed.decodeIfPresent([HermesKanbanComment].self, forKey: .comments)) ?? []
        self.events = (try? keyed.decodeIfPresent([HermesKanbanEvent].self, forKey: .events)) ?? []
        self.parentResults = (try? keyed.decodeIfPresent([String: String].self, forKey: .parentResults)) ?? [:]
    }

    /// Always encodes the nested shape — the flat fallback exists only
    /// for reading what Hermes prints, not for round-tripping it.
    public func encode(to encoder: any Encoder) throws {
        var keyed = encoder.container(keyedBy: CodingKeys.self)
        try keyed.encode(task, forKey: .task)
        try keyed.encode(comments, forKey: .comments)
        try keyed.encode(events, forKey: .events)
        try keyed.encode(parentResults, forKey: .parentResults)
    }
}
|
||||
@@ -64,6 +64,28 @@ public struct HermesMessage: Identifiable, Sendable {
|
||||
if let rc = reasoningContent, !rc.isEmpty { return rc }
|
||||
return reasoning
|
||||
}
|
||||
|
||||
/// Return a copy of this message with `toolCalls` replaced. Used
/// by the v2.8 two-phase chat loader: skeleton fetch returns
/// messages with empty `toolCalls`; the background hydrate splices
/// the parsed values in without re-fetching the conversational
/// columns.
///
/// NOTE(review): every stored property is forwarded explicitly here,
/// so a field added to `HermesMessage` must also be threaded through
/// this initializer call or hydration will silently drop it.
public func withToolCalls(_ newCalls: [HermesToolCall]) -> HermesMessage {
    HermesMessage(
        id: id,
        sessionId: sessionId,
        role: role,
        content: content,
        toolCallId: toolCallId,
        toolCalls: newCalls,
        toolName: toolName,
        timestamp: timestamp,
        tokenCount: tokenCount,
        finishReason: finishReason,
        reasoning: reasoning,
        reasoningContent: reasoningContent
    )
}
|
||||
}
|
||||
|
||||
public struct HermesToolCall: Identifiable, Sendable, Codable {
|
||||
@@ -210,3 +232,23 @@ public enum ToolKind: String, Sendable, CaseIterable {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of a `fetchMessagesOutcome` call. `transportError` is non-nil
/// only when the underlying SSH/SQLite call hit a transport-layer
/// failure (timeout, ControlMaster drop) — distinguishes a genuine
/// empty session from a silent partial-load. The chat resume path uses
/// it to surface a "couldn't load full history" banner.
public struct MessageFetchOutcome: Sendable {
    public let messages: [HermesMessage]
    public let transportError: String?

    /// True when the fetch tripped a transport failure. Distinct from
    /// `messages.isEmpty` — an empty session is a successful zero-row
    /// result, while a transport error is "we don't know what's there."
    public var didTimeOut: Bool { transportError != nil }

    public init(messages: [HermesMessage], transportError: String?) {
        self.messages = messages
        self.transportError = transportError
    }
}
|
||||
|
||||
@@ -98,6 +98,12 @@ public struct HermesPathSet: Sendable, Hashable {
|
||||
/// on user request from the model picker. Survives offline runs so
|
||||
/// the picker still has something to render.
|
||||
public nonisolated var nousModelsCache: String { scarfDir + "/nous_models_cache.json" }
|
||||
/// Cached `templates/catalog.json` from awizemann.github.io. Populated
|
||||
/// by `CatalogService` on first sheet-open and refreshed on a 24h TTL
|
||||
/// or on explicit user click. Mirrors `nousModelsCache` exactly:
|
||||
/// JSON, scarf-owned, survives offline runs so the catalog browser
|
||||
/// still has something to render. Wiped by a Hermes home reset.
|
||||
public nonisolated var catalogCache: String { scarfDir + "/catalog_cache.json" }
|
||||
public nonisolated var mcpTokensDir: String { home + "/mcp-tokens" }
|
||||
|
||||
// MARK: - Binary resolution
|
||||
|
||||
@@ -0,0 +1,120 @@
|
||||
import Foundation
|
||||
|
||||
/// Swift-side parameter struct that maps 1:1 onto `hermes kanban create`
/// flags. Constructing one then handing it to `KanbanService.create`
/// keeps the CLI argv assembly in one place — VMs build a `KanbanCreateRequest`
/// from form state and never assemble argv directly.
public struct KanbanCreateRequest: Sendable, Equatable {
    public var title: String
    public var body: String?
    public var assignee: String?
    public var parentIds: [String]
    public var workspace: KanbanWorkspaceSpec?
    public var tenant: String?
    public var priority: Int?
    public var triage: Bool
    public var idempotencyKey: String?
    public var maxRuntimeSeconds: Int?
    public var createdBy: String?
    public var skills: [String]

    public init(
        title: String,
        body: String? = nil,
        assignee: String? = nil,
        parentIds: [String] = [],
        workspace: KanbanWorkspaceSpec? = nil,
        tenant: String? = nil,
        priority: Int? = nil,
        triage: Bool = false,
        idempotencyKey: String? = nil,
        maxRuntimeSeconds: Int? = nil,
        createdBy: String? = nil,
        skills: [String] = []
    ) {
        self.title = title
        self.body = body
        self.assignee = assignee
        self.parentIds = parentIds
        self.workspace = workspace
        self.tenant = tenant
        self.priority = priority
        self.triage = triage
        self.idempotencyKey = idempotencyKey
        self.maxRuntimeSeconds = maxRuntimeSeconds
        self.createdBy = createdBy
        self.skills = skills
    }

    /// Build the argv suffix this request maps to (everything after
    /// `["kanban", "create"]`). Public for tests; consumers should
    /// call `KanbanService.create` instead of building argv directly.
    public func argv() -> [String] {
        var args: [String] = []
        // Appends "--flag value" only when the optional string is
        // present and non-empty — empty strings are treated as unset.
        func appendFlag(_ name: String, _ value: String?) {
            guard let value, !value.isEmpty else { return }
            args.append(contentsOf: [name, value])
        }
        appendFlag("--body", body)
        appendFlag("--assignee", assignee)
        for parent in parentIds {
            args.append(contentsOf: ["--parent", parent])
        }
        if let workspace {
            args.append(contentsOf: ["--workspace", workspace.cliValue])
        }
        appendFlag("--tenant", tenant)
        if let priority {
            args.append(contentsOf: ["--priority", String(priority)])
        }
        if triage {
            args.append("--triage")
        }
        appendFlag("--idempotency-key", idempotencyKey)
        if let maxRuntimeSeconds {
            args.append(contentsOf: ["--max-runtime", "\(maxRuntimeSeconds)s"])
        }
        appendFlag("--created-by", createdBy)
        for skill in skills {
            args.append(contentsOf: ["--skill", skill])
        }
        args.append("--json")
        // Title is the positional argument — appended last so flags
        // can't be confused for it.
        args.append(title)
        return args
    }
}
|
||||
|
||||
/// Typed mirror of Hermes's `--workspace` flag. `scratch` and `worktree`
/// are bare strings on the wire; `dir:<absolute path>` is a colon-prefixed
/// path. We keep them typed in Swift so callers can't typo "scrach".
public enum KanbanWorkspaceSpec: Sendable, Equatable {
    case scratch
    case worktree
    case directory(String)

    /// Exact wire value for `--workspace`. For the bare kinds this is
    /// just the kind name; directories gain the `dir:` prefix.
    public var cliValue: String {
        if case .directory(let path) = self {
            return "dir:\(path)"
        }
        return displayKind
    }

    /// "scratch" / "worktree" / "dir" — the kind segment, suitable
    /// for badge labels.
    public var displayKind: String {
        switch self {
        case .scratch:
            return "scratch"
        case .worktree:
            return "worktree"
        case .directory:
            return "dir"
        }
    }
}
|
||||
@@ -0,0 +1,52 @@
|
||||
import Foundation
|
||||
|
||||
/// Errors thrown by `KanbanService`. Each case carries enough detail
/// to render a user-actionable message — VMs surface these inline in
/// the board's error banner rather than blocking with alerts, since
/// kanban interactions are high-frequency.
public enum KanbanError: Error, LocalizedError, Sendable {
    /// `hermes` binary couldn't be located (local) or the remote
    /// `hermesBinaryHint` is unset (SSH).
    case cliMissing
    /// Subprocess returned non-zero exit. `stderr` may be empty if the
    /// transport itself failed; carries a synthetic message in that case.
    case nonZeroExit(code: Int32, stderr: String)
    /// JSON decoding failed. Underlying `Error` is wrapped for
    /// diagnostics; the user-facing message is generic.
    case decoding(message: String)
    /// `hermes kanban list --json` printed the literal string
    /// "no matching tasks" instead of `[]`. Treated as a successful
    /// empty result by callers but exposed here so VMs can distinguish
    /// it from "transport error" if they want to.
    case noMatchingTasks
    /// Verb is not supported by this Hermes version (gated upstream
    /// by `HermesCapabilities.hasKanban` + reasoned-about feature
    /// drift). Carries the verb name + a hint.
    case notSupported(verb: String, reason: String)
    /// Disallowed transition the UI tried to perform (e.g. dragging a
    /// `done` card back to `todo`). Caller surfaces a tooltip; this is
    /// thrown only when a programmatic transition is requested instead
    /// of being filtered out at the drag-target gate.
    case forbiddenTransition(from: String, to: String, reason: String)

    /// User-facing message; banner-ready, one sentence per case.
    public var errorDescription: String? {
        switch self {
        case .cliMissing:
            return "Hermes CLI couldn't be found. Install Hermes v0.12+ and ensure it's on your PATH."
        case .nonZeroExit(let code, let stderr):
            // Prefer the tool's own stderr; synthesize only when it's blank.
            let detail = stderr.trimmingCharacters(in: .whitespacesAndNewlines)
            return detail.isEmpty ? "Hermes exited with code \(code)." : detail
        case .decoding(let message):
            return "Couldn't decode Hermes output: \(message)"
        case .noMatchingTasks:
            return "No matching tasks."
        case .notSupported(let verb, let reason):
            return "`hermes kanban \(verb)` isn't available: \(reason)"
        case .forbiddenTransition(let from, let to, let reason):
            return "Can't move a \(from) task to \(to): \(reason)"
        }
    }
}
|
||||
@@ -0,0 +1,146 @@
|
||||
import Foundation
|
||||
|
||||
/// Filter options for `hermes kanban list --json`. Empty filter (default)
/// returns all non-archived tasks across all tenants.
public struct KanbanListFilter: Sendable, Equatable {
    public var status: KanbanStatus?
    public var assignee: String?
    /// `nil` = all tenants. Empty string → "untagged" (NULL tenant)
    /// — Hermes treats `--tenant ""` as "no tenant".
    public var tenant: String?
    public var includeArchived: Bool
    /// Show only my profile's tasks (`--mine`).
    public var mineOnly: Bool

    public init(
        status: KanbanStatus? = nil,
        assignee: String? = nil,
        tenant: String? = nil,
        includeArchived: Bool = false,
        mineOnly: Bool = false
    ) {
        self.status = status
        self.assignee = assignee
        self.tenant = tenant
        self.includeArchived = includeArchived
        self.mineOnly = mineOnly
    }

    public static let all = KanbanListFilter()

    /// Build the argv suffix after `["kanban", "list"]`.
    public func argv() -> [String] {
        var args = ["--json"]
        if mineOnly {
            args.append("--mine")
        }
        // `.unknown` is a Scarf-side sentinel, never a wire value.
        if let status, status != .unknown {
            args += ["--status", status.rawValue]
        }
        if let assignee, !assignee.isEmpty {
            args += ["--assignee", assignee]
        }
        // Empty string deliberately passes through: Hermes reads
        // `--tenant ""` as "untagged / NULL tenant".
        if let tenant {
            args += ["--tenant", tenant]
        }
        if includeArchived {
            args.append("--archived")
        }
        return args
    }
}
|
||||
|
||||
/// Filter options for `hermes kanban watch --json` (live event stream).
public struct KanbanWatchFilter: Sendable, Equatable {
    public var assignee: String?
    public var tenant: String?
    public var kinds: [KanbanEventKind]
    public var intervalSeconds: Double

    public init(
        assignee: String? = nil,
        tenant: String? = nil,
        kinds: [KanbanEventKind] = [],
        intervalSeconds: Double = 0.5
    ) {
        self.assignee = assignee
        self.tenant = tenant
        self.kinds = kinds
        self.intervalSeconds = intervalSeconds
    }

    public static let all = KanbanWatchFilter()

    /// Build the argv suffix after `["kanban", "watch"]`.
    public func argv() -> [String] {
        var args: [String] = []
        if let assignee, !assignee.isEmpty {
            args += ["--assignee", assignee]
        }
        if let tenant, !tenant.isEmpty {
            args += ["--tenant", tenant]
        }
        if !kinds.isEmpty {
            args += ["--kinds", kinds.map(\.rawValue).joined(separator: ",")]
        }
        // Only pass --interval when it deviates from the CLI default (0.5 s).
        if intervalSeconds > 0 && intervalSeconds != 0.5 {
            args += ["--interval", String(format: "%.2f", intervalSeconds)]
        }
        return args
    }
}
|
||||
|
||||
/// Summary of one `hermes kanban dispatch` pass. Used by the optional
/// "Dispatch now" button to show what happened.
public struct KanbanDispatchSummary: Sendable, Equatable, Codable {
    /// One row of the per-task breakdown from a dispatch pass.
    public struct DispatchedTask: Sendable, Equatable, Codable, Identifiable {
        public let taskId: String
        public let decision: String // "promoted" | "skipped" | "failed"
        public let reason: String?

        public var id: String { taskId }

        public init(taskId: String, decision: String, reason: String? = nil) {
            self.taskId = taskId
            self.decision = decision
            self.reason = reason
        }

        enum CodingKeys: String, CodingKey {
            case taskId = "task_id"
            case decision, reason
        }
    }

    public let promoted: Int
    public let failed: Int
    public let dryRun: Bool
    public let perTask: [DispatchedTask]

    public init(
        promoted: Int = 0,
        failed: Int = 0,
        dryRun: Bool = false,
        perTask: [DispatchedTask] = []
    ) {
        self.promoted = promoted
        self.failed = failed
        self.dryRun = dryRun
        self.perTask = perTask
    }

    enum CodingKeys: String, CodingKey {
        case promoted, failed
        case dryRun = "dry_run"
        case perTask = "per_task"
    }

    /// Lenient decode: every field is optional on the wire and falls
    /// back to its zero value, so shorter payloads still parse.
    public init(from decoder: any Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        self.promoted = try container.decodeIfPresent(Int.self, forKey: .promoted) ?? 0
        self.failed = try container.decodeIfPresent(Int.self, forKey: .failed) ?? 0
        self.dryRun = try container.decodeIfPresent(Bool.self, forKey: .dryRun) ?? false
        self.perTask = try container.decodeIfPresent([DispatchedTask].self, forKey: .perTask) ?? []
    }
}
|
||||
@@ -39,6 +39,13 @@ public struct ProjectEntry: Codable, Sendable, Identifiable, Hashable {
|
||||
|
||||
public var dashboardPath: String { path + "/.scarf/dashboard.json" }
|
||||
|
||||
/// Directory holding the project's Scarf-managed sidecar files
|
||||
/// (dashboard.json, manifest.json, template.lock.json, config.json,
|
||||
/// plus any cron-job-written reports the dashboard widgets reference).
|
||||
/// Watched as a unit by `HermesFileWatcher` so any file added /
|
||||
/// removed / renamed inside refreshes the dashboard automatically.
|
||||
public var scarfDir: String { path + "/.scarf" }
|
||||
|
||||
// MARK: - Codable (custom for backward compat)
|
||||
|
||||
private enum CodingKeys: String, CodingKey {
|
||||
@@ -152,29 +159,54 @@ public struct DashboardWidget: Codable, Sendable, Identifiable {
|
||||
// List
|
||||
public let items: [ListItem]?
|
||||
|
||||
// Webview
|
||||
// Webview / Image (image reuses `url` for remote, `path` for local)
|
||||
public let url: String?
|
||||
public let height: Double?
|
||||
|
||||
// v2.7 — file-reading widgets (markdown_file, log_tail, image-local).
|
||||
// `path` is resolved relative to the project root (the directory that
|
||||
// contains `.scarf/`). Renderers must reject `..` segments after
|
||||
// normalization to prevent escape from the project boundary.
|
||||
public let path: String?
|
||||
public let lines: Int?
|
||||
|
||||
// v2.7 — cron_status widget; `jobId` matches HermesCronJob.id.
|
||||
public let jobId: String?
|
||||
|
||||
// v2.7 — status_grid widget; `cells` carries label + status per square,
|
||||
// `gridColumns` overrides the auto-fit column count (keep distinct
|
||||
// from `columns` which is the table-widget header list).
|
||||
public let cells: [StatusGridCell]?
|
||||
public let gridColumns: Int?
|
||||
|
||||
// v2.7 — optional sparkline trend on `stat` widgets.
|
||||
public let sparkline: [Double]?
|
||||
|
||||
public init(
|
||||
type: String,
|
||||
title: String,
|
||||
value: WidgetValue?,
|
||||
icon: String?,
|
||||
color: String?,
|
||||
subtitle: String?,
|
||||
label: String?,
|
||||
content: String?,
|
||||
format: String?,
|
||||
columns: [String]?,
|
||||
rows: [[String]]?,
|
||||
chartType: String?,
|
||||
xLabel: String?,
|
||||
yLabel: String?,
|
||||
series: [ChartSeries]?,
|
||||
items: [ListItem]?,
|
||||
url: String?,
|
||||
height: Double?
|
||||
value: WidgetValue? = nil,
|
||||
icon: String? = nil,
|
||||
color: String? = nil,
|
||||
subtitle: String? = nil,
|
||||
label: String? = nil,
|
||||
content: String? = nil,
|
||||
format: String? = nil,
|
||||
columns: [String]? = nil,
|
||||
rows: [[String]]? = nil,
|
||||
chartType: String? = nil,
|
||||
xLabel: String? = nil,
|
||||
yLabel: String? = nil,
|
||||
series: [ChartSeries]? = nil,
|
||||
items: [ListItem]? = nil,
|
||||
url: String? = nil,
|
||||
height: Double? = nil,
|
||||
path: String? = nil,
|
||||
lines: Int? = nil,
|
||||
jobId: String? = nil,
|
||||
cells: [StatusGridCell]? = nil,
|
||||
gridColumns: Int? = nil,
|
||||
sparkline: [Double]? = nil
|
||||
) {
|
||||
self.type = type
|
||||
self.title = title
|
||||
@@ -194,6 +226,29 @@ public struct DashboardWidget: Codable, Sendable, Identifiable {
|
||||
self.items = items
|
||||
self.url = url
|
||||
self.height = height
|
||||
self.path = path
|
||||
self.lines = lines
|
||||
self.jobId = jobId
|
||||
self.cells = cells
|
||||
self.gridColumns = gridColumns
|
||||
self.sparkline = sparkline
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Status Grid Data (v2.7)
|
||||
|
||||
/// One cell of a `status_grid` widget. Status semantics match `ListItem.status`
/// — parsed via `ListItemStatus(raw:)` so the same vocabulary + synonyms apply.
public struct StatusGridCell: Codable, Sendable, Identifiable, Hashable {
    public let label: String
    public let status: String?
    public let tooltip: String?

    /// Identity is the label — dashboards are expected to keep cell
    /// labels unique within one widget.
    public var id: String { label }

    public init(label: String, status: String? = nil, tooltip: String? = nil) {
        self.label = label
        self.status = status
        self.tooltip = tooltip
    }
}
|
||||
|
||||
@@ -284,3 +339,47 @@ public struct ListItem: Codable, Sendable, Identifiable {
|
||||
self.status = status
|
||||
}
|
||||
}
|
||||
|
||||
/// Typed semantic status for `ListItem` (and `status_grid` cells in v2.7+).
///
/// Wire format stays a free `String?` on `ListItem` for backwards compatibility —
/// pre-existing dashboards never break. Renderers call `ListItemStatus(raw:)`
/// to map known values + synonyms to a canonical case; unknown values return
/// `nil` and render as plain neutral text.
public enum ListItemStatus: String, Sendable, Hashable, CaseIterable {
    case success
    case warning
    case danger
    case info
    case pending
    case done
    case neutral

    /// Canonical names + real-world synonyms → case lookup table
    /// (`ok`/`up` → success, `down`/`error`/`failed` → danger,
    /// `active` → info, and so on).
    private static let synonyms: [String: ListItemStatus] = [
        "success": .success, "ok": .success, "up": .success,
        "green": .success, "passing": .success,
        "warning": .warning, "warn": .warning, "yellow": .warning,
        "degraded": .warning,
        "danger": .danger, "down": .danger, "error": .danger,
        "failed": .danger, "failure": .danger, "red": .danger,
        "critical": .danger,
        "info": .info, "active": .info, "blue": .info,
        "pending": .pending, "queued": .pending, "waiting": .pending,
        "scheduled": .pending,
        "done": .done, "complete": .done, "completed": .done,
        "finished": .done,
        "neutral": .neutral, "muted": .neutral, "gray": .neutral,
    ]

    /// Lenient parse — trims whitespace and lowercases before lookup.
    /// Returns `nil` for empty or unrecognized strings so the renderer
    /// can fall back to plain text.
    public init?(raw: String?) {
        guard let cleaned = raw?.trimmingCharacters(in: .whitespaces).lowercased(),
              !cleaned.isEmpty,
              let match = Self.synonyms[cleaned] else {
            return nil
        }
        self = match
    }
}
|
||||
|
||||
+113
@@ -0,0 +1,113 @@
|
||||
import Foundation
|
||||
|
||||
/// Pluggable query engine for `HermesDataService`. Two implementations
/// today:
///
/// * `LocalSQLiteBackend` — opens the local `~/.hermes/state.db` via
///   libsqlite3 and runs queries in-process. Microseconds per query.
/// * `RemoteSQLiteBackend` — invokes `sqlite3 -readonly -json` over an
///   SSH session (ControlMaster keeps the channel warm), parses the
///   JSON response into `Row`s. ~50–100 ms per query.
///
/// The data service picks one based on `ServerContext.isRemote`. View
/// models are oblivious — they keep calling `await dataService.fetch…`
/// like before.
///
/// **Why a protocol, not a class hierarchy.** Backends have very
/// different internals (libsqlite3 handles vs. SSH script piping) but
/// the call-site shape is identical. A protocol lets us hand the data
/// service either backend through one stored property without
/// abstract-class ceremony, and keeps the test mock (see
/// `MockHermesQueryBackend` in tests) free of inheritance baggage.
///
/// **Sendable.** Concrete impls are actors, so they're trivially
/// `Sendable`. The protocol conforms to `Sendable` to satisfy Swift 6
/// strict-concurrency for the data-service stored property.
public protocol HermesQueryBackend: Sendable {

    /// True iff the connected DB has the v0.7 columns (`reasoning_tokens`,
    /// `actual_cost_usd`, `cost_status`, `billing_provider` on
    /// `sessions` plus `reasoning` on `messages`). Detected once at
    /// `open()` time; refreshed by `refresh(forceFresh: true)`.
    var hasV07Schema: Bool { get async }

    /// True iff the connected DB has the v0.11 columns
    /// (`api_call_count` on `sessions`, `reasoning_content` on
    /// `messages`). Belt-and-braces: BOTH must be present (a
    /// partially-migrated DB stays on the v0.7 path to avoid "no such
    /// column" failures).
    var hasV011Schema: Bool { get async }

    /// User-presentable error from the most recent `open()` (or the
    /// most recent failed query for the remote backend's
    /// connectivity-loss codepath). `nil` means everything is healthy.
    var lastOpenError: String? { get async }

    /// One-time setup. Local: `sqlite3_open_v2` + `PRAGMA table_info`
    /// schema detection. Remote: one SSH round-trip running
    /// `sqlite3 --version` plus the two PRAGMA queries.
    ///
    /// Returns `false` on any failure; detail is in `lastOpenError`.
    /// Calling `open()` on an already-open backend is a no-op that
    /// returns `true`.
    func open() async -> Bool

    /// Re-validate the backend's view of the database; returns whether
    /// the backend is usable afterwards.
    ///
    /// Local backend: `close()` then `open()` — re-pulls the SQLite
    /// handle so a Hermes-side migration becomes visible.
    /// Remote backend: a no-op when `forceFresh: false` (every query
    /// is already fresh — there's nothing to refresh). `forceFresh:
    /// true` re-runs the schema preflight, covering the rare "user
    /// upgraded Hermes on the remote, my schema flags are stale" case.
    @discardableResult
    func refresh(forceFresh: Bool) async -> Bool

    /// Drop any persistent resources. Idempotent.
    func close() async

    /// Run a single SQL statement and collect every row before
    /// returning. SQL uses `?` placeholders; `params` is bound
    /// positionally (one entry per `?`).
    ///
    /// Local backend: `sqlite3_prepare_v2` + `sqlite3_bind_*` +
    /// `sqlite3_step` loop, materialising each row into a `Row`.
    /// Remote backend: inlines params via `SQLValueInliner` to produce
    /// a final SQL string, runs `sqlite3 -readonly -json` over SSH,
    /// parses the resulting JSON array.
    ///
    /// Throws `BackendError` on any failure. The data-service façade
    /// generally catches and returns empty results to preserve the
    /// existing "show empty UI on error" behaviour.
    func query(_ sql: String, params: [SQLValue]) async throws -> [Row]

    /// Run several statements in one round-trip, returning each
    /// statement's row set in order (one `[Row]` per input statement).
    /// Lets multi-query view loads (Dashboard's 4-query pattern,
    /// Insights' 5-query pattern) amortise the SSH/sqlite3 cold-start
    /// cost.
    ///
    /// Each `(sql, params)` pair has the same shape as `query` —
    /// `?` placeholders bound positionally per pair.
    func queryBatch(_ statements: [(sql: String, params: [SQLValue])]) async throws -> [[Row]]
}
|
||||
|
||||
/// Errors that backends raise. Mapped into user-facing messages by the
/// `humanize` helper that lives alongside `HermesDataService`.
/// `Equatable` so callers and tests can compare failures directly.
public enum BackendError: Error, Sendable, Equatable {
    /// Backend is not open — caller should `open()` first.
    case notOpen

    /// Connectivity failure (SSH down, ControlMaster dead, transport
    /// can't reach the host). Carries a short human-readable reason.
    /// Triggers the data-service's `lastOpenError` populate path.
    case transport(String)

    /// sqlite3 itself reported an error — non-zero exit, parse failure,
    /// schema mismatch. `exitCode` is the sqlite3 process exit (or
    /// libsqlite3 result code on the local backend); `stderr` is the
    /// sqlite3-emitted message (already user-readable in most cases).
    case sqlite(exitCode: Int32, stderr: String)

    /// JSON-parsing failed on remote-backend output. Indicates either a
    /// sqlite3 binary that didn't honour `-json`, or output corruption
    /// (rare). Carries the first 200 bytes of stdout for diagnostics.
    case parseFailure(stdoutHead: String)
}
|
||||
+254
@@ -0,0 +1,254 @@
|
||||
// MARK: - Platform gate
|
||||
//
|
||||
// libsqlite3 is a system module on macOS/iOS but not on swift-corelibs
|
||||
// foundation. Gate the entire backend so ScarfCore still compiles for
|
||||
// any future Linux target. Apple platforms — the runtime targets — get
|
||||
// the full implementation.
|
||||
#if canImport(SQLite3)
|
||||
|
||||
import Foundation
|
||||
import SQLite3
|
||||
#if canImport(os)
|
||||
import os
|
||||
#endif
|
||||
|
||||
/// `HermesQueryBackend` that opens a local SQLite file via libsqlite3
/// and runs queries in-process. Microseconds per query.
///
/// Used for `ServerContext.local` (the user's own `~/.hermes/state.db`)
/// — the previous behaviour of `HermesDataService` lifted out unchanged.
/// For `.ssh` contexts the data service constructs `RemoteSQLiteBackend`
/// instead.
///
/// Actor isolation matches the parent `HermesDataService` actor: queries
/// serialise on this backend's executor, and the data service hops once
/// (`await backend.query…`) per public method call.
public actor LocalSQLiteBackend: HermesQueryBackend {

    #if canImport(os)
    private static let logger = Logger(subsystem: "com.scarf", category: "LocalSQLiteBackend")
    #endif

    /// libsqlite3 connection handle; `nil` whenever the backend is closed.
    private var db: OpaquePointer?
    /// Path `db` was opened at — kept for diagnostics only.
    private var openedAtPath: String?
    private(set) public var hasV07Schema = false
    private(set) public var hasV011Schema = false
    private(set) public var lastOpenError: String?

    private let context: ServerContext

    public init(context: ServerContext) {
        self.context = context
    }

    // MARK: - Lifecycle

    /// Open `context.paths.stateDB` read-only and detect its schema.
    /// Returns `false` (with `lastOpenError` populated) when the file is
    /// missing or libsqlite3 refuses to open it. No-op returning `true`
    /// when already open.
    public func open() async -> Bool {
        if db != nil { return true }
        let path = context.paths.stateDB
        guard FileManager.default.fileExists(atPath: path) else {
            lastOpenError = "Hermes state database not found at \(path)."
            return false
        }
        let flags: Int32 = SQLITE_OPEN_READONLY | SQLITE_OPEN_NOMUTEX
        let rc = sqlite3_open_v2(path, &db, flags, nil)
        guard rc == SQLITE_OK else {
            let msg: String
            if let db {
                msg = String(cString: sqlite3_errmsg(db))
            } else {
                msg = "sqlite3_open_v2 returned \(rc)"
            }
            lastOpenError = "Couldn't open state.db: \(msg)"
            #if canImport(os)
            Self.logger.warning("sqlite3_open_v2 failed (\(rc)) at \(path, privacy: .public): \(msg, privacy: .public)")
            #endif
            // Per the SQLite docs, a connection handle is usually
            // allocated even when sqlite3_open_v2 FAILS, and it must
            // still be passed to sqlite3_close() — nil-ing it without
            // closing leaks the handle.
            if let db {
                sqlite3_close(db)
            }
            db = nil
            return false
        }
        openedAtPath = path
        lastOpenError = nil
        detectSchema()
        return true
    }

    /// Close and re-open the handle so a Hermes-side file swap or schema
    /// migration becomes visible. `forceFresh` is irrelevant locally;
    /// included for protocol parity with the remote backend.
    @discardableResult
    public func refresh(forceFresh: Bool) async -> Bool {
        await close()
        return await open()
    }

    /// Release the libsqlite3 handle. Idempotent.
    public func close() async {
        if let db {
            sqlite3_close(db)
        }
        db = nil
        openedAtPath = nil
        // The schema flags describe the handle we just dropped; clear
        // them so a subsequent failed open() can't advertise a schema
        // the backend no longer has.
        hasV07Schema = false
        hasV011Schema = false
    }

    // MARK: - Schema detection

    /// Populate `hasV07Schema` / `hasV011Schema` from `PRAGMA table_info`
    /// on the open handle. Safe to call repeatedly (refresh path).
    private func detectSchema() {
        // Reset before re-detection so a refresh() against a swapped or
        // rolled-back DB can't leave stale `true` flags behind —
        // previously the flags were only ever set, never cleared.
        hasV07Schema = false
        hasV011Schema = false
        guard let db else { return }

        // sessions schema: column 1 of PRAGMA table_info is the name.
        var stmt: OpaquePointer?
        if sqlite3_prepare_v2(db, "PRAGMA table_info(sessions)", -1, &stmt, nil) == SQLITE_OK {
            defer { sqlite3_finalize(stmt) }
            while sqlite3_step(stmt) == SQLITE_ROW {
                if let name = sqlite3_column_text(stmt, 1) {
                    let column = String(cString: name)
                    if column == "reasoning_tokens" {
                        hasV07Schema = true
                    }
                    if column == "api_call_count" {
                        hasV011Schema = true
                    }
                }
            }
        }

        // messages schema — confirm `reasoning_content` is present too.
        // Belt-and-braces: a partially-migrated DB (sessions migrated,
        // messages not) shouldn't blow up reads with "no such column".
        if hasV011Schema {
            var msgStmt: OpaquePointer?
            var sawReasoningContent = false
            if sqlite3_prepare_v2(db, "PRAGMA table_info(messages)", -1, &msgStmt, nil) == SQLITE_OK {
                defer { sqlite3_finalize(msgStmt) }
                while sqlite3_step(msgStmt) == SQLITE_ROW {
                    if let name = sqlite3_column_text(msgStmt, 1),
                       String(cString: name) == "reasoning_content" {
                        sawReasoningContent = true
                        break
                    }
                }
            }
            if !sawReasoningContent {
                hasV011Schema = false
            }
        }
    }

    // MARK: - Queries

    /// Run one statement with positional `?` binding; throws
    /// `BackendError.notOpen` / `.sqlite` on failure.
    public func query(_ sql: String, params: [SQLValue]) async throws -> [Row] {
        guard let db else { throw BackendError.notOpen }
        return try executeOne(db: db, sql: sql, params: params)
    }

    /// Protocol-parity batch. Local backend has no SSH/process
    /// round-trip cost — running sequentially against the open handle is
    /// exactly equivalent to running each via `query`; the batch method
    /// exists for remote-backend amortisation.
    public func queryBatch(_ statements: [(sql: String, params: [SQLValue])]) async throws -> [[Row]] {
        guard let db else { throw BackendError.notOpen }
        var out: [[Row]] = []
        out.reserveCapacity(statements.count)
        for (sql, params) in statements {
            out.append(try executeOne(db: db, sql: sql, params: params))
        }
        return out
    }

    // MARK: - Internals

    /// Prepare, bind, step, and materialise one statement's result set.
    /// Error codes from libsqlite3 are surfaced in
    /// `BackendError.sqlite(exitCode:stderr:)`.
    private func executeOne(db: OpaquePointer, sql: String, params: [SQLValue]) throws -> [Row] {
        var stmt: OpaquePointer?
        let prepRC = sqlite3_prepare_v2(db, sql, -1, &stmt, nil)
        guard prepRC == SQLITE_OK, let stmt else {
            let msg = String(cString: sqlite3_errmsg(db))
            throw BackendError.sqlite(exitCode: prepRC, stderr: msg)
        }
        defer { sqlite3_finalize(stmt) }

        // Positional binding: sqlite parameter indices are 1-based.
        for (i, value) in params.enumerated() {
            let col = Int32(i + 1)
            let rc: Int32
            switch value {
            case .null:
                rc = sqlite3_bind_null(stmt, col)
            case .integer(let n):
                rc = sqlite3_bind_int64(stmt, col, n)
            case .real(let d):
                rc = sqlite3_bind_double(stmt, col, d)
            case .text(let s):
                rc = sqlite3_bind_text(stmt, col, s, -1, sqliteTransient)
            case .blob(let d):
                rc = d.withUnsafeBytes { buf -> Int32 in
                    guard let base = buf.baseAddress else {
                        // Empty Data has no base address; bind a 0-byte blob.
                        return sqlite3_bind_zeroblob(stmt, col, 0)
                    }
                    return sqlite3_bind_blob(stmt, col, base, Int32(buf.count), sqliteTransient)
                }
            }
            if rc != SQLITE_OK {
                let msg = String(cString: sqlite3_errmsg(db))
                throw BackendError.sqlite(exitCode: rc, stderr: msg)
            }
        }

        // Build the column-name → index map once per result set —
        // sqlite3_column_name needs only the prepared stmt, so this is
        // valid even for a 0-row result.
        let columnCount = Int(sqlite3_column_count(stmt))
        var columnIndex: [String: Int] = [:]
        columnIndex.reserveCapacity(columnCount)
        for i in 0..<columnCount {
            if let cstr = sqlite3_column_name(stmt, Int32(i)) {
                columnIndex[String(cString: cstr)] = i
            }
        }

        var rows: [Row] = []
        while true {
            let stepRC = sqlite3_step(stmt)
            if stepRC == SQLITE_DONE { break }
            if stepRC != SQLITE_ROW {
                let msg = String(cString: sqlite3_errmsg(db))
                throw BackendError.sqlite(exitCode: stepRC, stderr: msg)
            }
            var values: [SQLValue] = []
            values.reserveCapacity(columnCount)
            for i in 0..<columnCount {
                let col = Int32(i)
                let type = sqlite3_column_type(stmt, col)
                switch type {
                case SQLITE_NULL:
                    values.append(.null)
                case SQLITE_INTEGER:
                    values.append(.integer(sqlite3_column_int64(stmt, col)))
                case SQLITE_FLOAT:
                    values.append(.real(sqlite3_column_double(stmt, col)))
                case SQLITE_TEXT:
                    if let cstr = sqlite3_column_text(stmt, col) {
                        values.append(.text(String(cString: cstr)))
                    } else {
                        values.append(.text(""))
                    }
                case SQLITE_BLOB:
                    let n = Int(sqlite3_column_bytes(stmt, col))
                    if n > 0, let p = sqlite3_column_blob(stmt, col) {
                        values.append(.blob(Data(bytes: p, count: n)))
                    } else {
                        values.append(.blob(Data()))
                    }
                default:
                    values.append(.null)
                }
            }
            rows.append(Row(values: values, columnIndex: columnIndex))
        }
        return rows
    }
}
|
||||
|
||||
#endif // canImport(SQLite3)
|
||||
+651
@@ -0,0 +1,651 @@
|
||||
#if canImport(SQLite3)
|
||||
|
||||
import Foundation
|
||||
#if canImport(os)
|
||||
import os
|
||||
#endif
|
||||
|
||||
/// `HermesQueryBackend` that runs `sqlite3 -readonly -json` over an
|
||||
/// SSH session per query. Replaces the old snapshot-then-open pipeline
|
||||
/// (issue #74): no full-DB transfers, no local cache, every query
|
||||
/// against the live remote DB.
|
||||
///
|
||||
/// **Why one round-trip per query is OK.** ControlMaster keeps the SSH
|
||||
/// session warm — first connect spins up the master socket; subsequent
|
||||
/// queries reuse it at ~5 ms overhead. sqlite3 cold-start is ~30–50 ms,
|
||||
/// query execution is sub-millisecond for indexed queries, JSON
|
||||
/// serialisation is small. End-to-end ~50–100 ms per query, dominated
|
||||
/// by sqlite3 process spawn. Multi-query view loads (Dashboard,
|
||||
/// Insights) batch via `queryBatch` — one cold-start, all statements
|
||||
/// in a single sqlite3 invocation, ~80–100 ms total.
|
||||
///
|
||||
/// **Result format**. `sqlite3 -json` emits one JSON array per
|
||||
/// statement that returns rows: `[{"col":val,...}, ...]`. Multi-statement
|
||||
/// scripts emit each array on its own. We separate batched queries
|
||||
/// with a `SELECT '__SCARF_RS_BEGIN__N' AS marker;` synthesised line so
|
||||
/// the parser can split on the markers — sqlite3's marker rows
|
||||
/// preserve order and let us pair each result-set with the originating
|
||||
/// statement index.
|
||||
public actor RemoteSQLiteBackend: HermesQueryBackend {
|
||||
|
||||
    #if canImport(os)
    private static let logger = Logger(subsystem: "com.scarf", category: "RemoteSQLiteBackend")
    #endif

    /// Remote server description (paths, `$HOME` probe).
    private let context: ServerContext
    /// SSH transport that runs the sqlite3 scripts.
    private let transport: any ServerTransport
    private(set) public var hasV07Schema = false
    private(set) public var hasV011Schema = false
    private(set) public var lastOpenError: String?
    /// Set by a successful `open()` preflight; `query`/`queryBatch`
    /// throw `BackendError.notOpen` until then.
    private var isOpen = false
    /// Captured `sqlite3 --version` line from the most recent preflight.
    /// Stashed for diagnostic logs and a future "remote sqlite3 too old"
    /// error path.
    private var sqliteVersion: String?
    /// Resolved absolute remote `$HOME`, populated on `open()` via
    /// `context.resolvedUserHome()` so that `~/` paths can be expanded
    /// in Swift up front rather than relying on shell expansion across
    /// the streamScript pipeline. The base64 + pipe path through
    /// Citadel does not reliably propagate `$HOME` into the inner
    /// `/bin/sh` on every host — keeping this client-side avoids the
    /// issue (and matches how `RemoteBackupService.expandTilde` already
    /// handles the same problem). `nil` only when the probe failed,
    /// in which case `quoteForRemoteShell` falls back to `"$HOME/..."`
    /// shell expansion.
    private var resolvedHome: String?

    /// In-flight query coalescing — keyed on the inlined SQL text,
    /// value is the Task currently fetching that exact result set.
    /// When two concurrent callers ask for the same query (common
    /// pattern: file watcher tick + chat-finalize debounce both
    /// firing `loadRecentSessions` within ~100 ms), the second
    /// caller awaits the first call's task instead of spawning a
    /// fresh SSH subprocess. Cleared on task completion. Drops
    /// duplicate `mac.loadRecentSessions` traces observed at
    /// t=960450 / t=960584 in the perf capture (two parallel 3-s
    /// loads for the same data, finishing 134 ms apart).
    ///
    /// Coalescing is *only* applied to single `query` calls, not
    /// `queryBatch` — batches are larger payloads with caller-
    /// specific timeout scaling, and concurrent callers wanting
    /// "the same batch" is rare in practice. Keep coalescing
    /// surgical so we don't accidentally serialize independent
    /// work that just happens to match.
    private var inFlightQueries: [String: Task<[Row], Error>] = [:]

    /// Per-query timeout for `query`. Healthy local queries are
    /// <100 ms; remote ones over 420 ms-RTT SSH amortize one round
    /// trip per call PLUS the wire payload time. A `fetchMessages`
    /// over a 157-message session (~50KB JSON encoded) exceeded
    /// the previous 15 s ceiling, silently returned 0 rows, and the
    /// chat appeared empty — a worse failure than the wait it was
    /// guarding against. Bumped to 30 s; the `streamScript`
    /// transport-level timeout still fires on truly wedged hosts.
    private let queryTimeout: TimeInterval = 30

    /// Preflight timeout. First SSH round-trip may include cold
    /// ControlMaster establishment (~1–3 s) plus the schema PRAGMA
    /// queries; 30 s is generous.
    private let preflightTimeout: TimeInterval = 30

    /// Marker prefix used to split `queryBatch` result sets. Picked to
    /// be very unlikely to collide with a real session_id, role string,
    /// or content fragment.
    private static let batchMarkerPrefix = "__SCARF_RS_BEGIN__"
|
||||
|
||||
    /// - Parameters:
    ///   - context: Server description; supplies `paths.stateDB` and the
    ///     `resolvedUserHome()` probe used at `open()` time.
    ///   - transport: Transport that executes scripts on the remote host.
    ///     Injected rather than constructed here so callers control the
    ///     SSH session's lifetime.
    public init(context: ServerContext, transport: any ServerTransport) {
        self.context = context
        self.transport = transport
    }
|
||||
|
||||
// MARK: - Lifecycle
|
||||
|
||||
    /// Preflight the remote host in one SSH round-trip: resolve `$HOME`,
    /// confirm `sqlite3` runs, and detect the DB schema via two
    /// `PRAGMA table_info` queries. Sets `isOpen` on success; on any
    /// failure returns `false` with `lastOpenError` populated. Calling
    /// on an already-open backend is a no-op returning `true`.
    public func open() async -> Bool {
        if isOpen { return true }
        // Resolve remote $HOME once (cached process-wide via
        // ServerContext.UserHomeCache so concurrent backends share
        // the probe result). Lets us hand sqlite3 absolute paths and
        // skip the unreliable nested-shell expansion altogether. A
        // probe failure leaves `resolvedHome == nil` and falls back
        // to "$HOME/..."-quoted args; the data-service open() will
        // surface whatever sqlite3 errors out with.
        let probedHome = await context.resolvedUserHome()
        if probedHome != "~" && !probedHome.isEmpty {
            resolvedHome = probedHome
        }
        let dbPath = context.paths.stateDB
        // One SSH round-trip running:
        //   1. sqlite3 --version (sanity + capture for diagnostics)
        //   2. PRAGMA table_info(sessions) — sessions schema
        //   3. PRAGMA table_info(messages) — messages schema
        // sqlite3 -json emits two arrays back-to-back for the two PRAGMA
        // statements; we parse them as separate result sets.
        let preflight = """
        set -e
        sqlite3 --version
        sqlite3 -readonly -json \(quoteForRemoteShell(dbPath)) "PRAGMA table_info(sessions); PRAGMA table_info(messages);"
        """

        do {
            let result = try await transport.streamScript(preflight, timeout: preflightTimeout)
            if result.exitCode != 0 {
                lastOpenError = errorMessage(stderr: result.stderrString, stdout: result.stdoutString, exitCode: result.exitCode)
                #if canImport(os)
                Self.logger.warning("Remote preflight failed (exit \(result.exitCode)): \(self.lastOpenError ?? "", privacy: .public)")
                #endif
                return false
            }
            try parsePreflightOutput(result.stdoutString)
            lastOpenError = nil
            isOpen = true
            #if canImport(os)
            Self.logger.info("Remote SQLite backend ready: sqlite3=\(self.sqliteVersion ?? "?", privacy: .public), v0.7=\(self.hasV07Schema), v0.11=\(self.hasV011Schema)")
            #endif
            return true
        } catch {
            // Transport-level failure (SSH unreachable, timeout, …).
            lastOpenError = error.localizedDescription
            #if canImport(os)
            Self.logger.warning("Remote preflight transport error: \(error.localizedDescription, privacy: .public)")
            #endif
            return false
        }
    }
|
||||
|
||||
    /// Returns whether the backend is usable after the refresh.
    ///
    /// Streaming queries are always fresh. The watcher tick still
    /// fires `dataService.refresh()` on every observed file change
    /// — locally that re-opens the SQLite handle; here it's a
    /// no-op. `forceFresh: true` is the escape hatch for when the
    /// user explicitly wants a re-preflight (e.g. they upgraded
    /// Hermes on the remote): drop the open state and re-run `open()`.
    @discardableResult
    public func refresh(forceFresh: Bool) async -> Bool {
        if forceFresh {
            isOpen = false
            return await open()
        }
        return isOpen ? true : await open()
    }
|
||||
|
||||
    /// No persistent remote resources to release — each query spawns and
    /// reaps its own sqlite3 process — so closing just drops the open
    /// flag. Idempotent.
    public func close() async {
        isOpen = false
    }
|
||||
|
||||
// MARK: - Queries
|
||||
|
||||
public func query(_ sql: String, params: [SQLValue]) async throws -> [Row] {
|
||||
guard isOpen else { throw BackendError.notOpen }
|
||||
let inlined = SQLValueInliner.inline(sql, params: params)
|
||||
// In-flight coalescing — if a query with the exact same
|
||||
// inlined SQL is already pending, await its task instead
|
||||
// of spawning a new SSH subprocess. Surfaces in ScarfMon as
|
||||
// a `sqlite.query.coalesced` event so we can see how often
|
||||
// the dedup actually fires in the wild.
|
||||
if let existing = inFlightQueries[inlined] {
|
||||
ScarfMon.event(.sqlite, "query.coalesced", count: 1)
|
||||
return try await withTaskCancellationHandler(
|
||||
operation: { try await existing.value },
|
||||
onCancel: { existing.cancel() }
|
||||
)
|
||||
}
|
||||
let task = Task<[Row], Error> { [self] in
|
||||
try await ScarfMon.measureAsync(.sqlite, "query") {
|
||||
let dbPath = context.paths.stateDB
|
||||
let script = """
|
||||
sqlite3 -readonly -json \(quoteForRemoteShell(dbPath)) <<'__SCARF_SQL__'
|
||||
\(inlined)
|
||||
__SCARF_SQL__
|
||||
"""
|
||||
let result: ProcessResult
|
||||
do {
|
||||
result = try await transport.streamScript(script, timeout: queryTimeout)
|
||||
} catch {
|
||||
throw BackendError.transport(error.localizedDescription)
|
||||
}
|
||||
if result.exitCode != 0 {
|
||||
throw BackendError.sqlite(exitCode: result.exitCode, stderr: result.stderrString)
|
||||
}
|
||||
let rows = try parseSingleResultSet(result.stdoutString)
|
||||
ScarfMon.event(.sqlite, "query.rows", count: rows.count, bytes: result.stdout.count)
|
||||
return rows
|
||||
}
|
||||
}
|
||||
inFlightQueries[inlined] = task
|
||||
defer { inFlightQueries[inlined] = nil }
|
||||
// v2.8 — propagate parent task cancellation INTO the
|
||||
// unstructured `task`. `Task<...>{ ... }` doesn't inherit
|
||||
// cancellation from the awaiting context, so without this a
|
||||
// cancelled chat-hydration / dashboard-refresh would keep
|
||||
// the ssh subprocess alive for the full 30s queryTimeout
|
||||
// — pinning a remote sqlite query and a ControlMaster
|
||||
// session slot. With the bridge, the inner task's awaits
|
||||
// see a cancelled parent and `SSHScriptRunner.run`'s own
|
||||
// cancellation handler (v2.8) kills the ssh process inside
|
||||
// the next 100ms poll.
|
||||
return try await withTaskCancellationHandler(
|
||||
operation: { try await task.value },
|
||||
onCancel: { task.cancel() }
|
||||
)
|
||||
}
|
||||
|
||||
    /// Run several statements in one sqlite3 invocation; thin wrapper
    /// that adds ScarfMon timing around `_queryBatchImpl`.
    public func queryBatch(_ statements: [(sql: String, params: [SQLValue])]) async throws -> [[Row]] {
        try await ScarfMon.measureAsync(.sqlite, "queryBatch") {
            try await _queryBatchImpl(statements)
        }
    }
|
||||
|
||||
    /// Build one sqlite3 heredoc script containing every statement,
    /// separated by sentinel `SELECT '__SCARF_RS_BEGIN__N'` markers, run
    /// it in a single SSH round-trip, and split the output back into one
    /// `[Row]` per input statement (in order).
    private func _queryBatchImpl(_ statements: [(sql: String, params: [SQLValue])]) async throws -> [[Row]] {
        guard isOpen else { throw BackendError.notOpen }
        if statements.isEmpty { return [] }
        // Build one sqlite3 invocation with marker SELECTs separating
        // each statement's result set. `SELECT '__SCARF_RS_BEGIN__N'`
        // emits a one-row JSON array we use as a sentinel.
        var sqlBlocks: [String] = []
        for (i, stmt) in statements.enumerated() {
            let inlined = SQLValueInliner.inline(stmt.sql, params: stmt.params)
            // Marker first (so we know which result-set follows even
            // if a query returns zero rows — sqlite3 -json prints
            // nothing for empty result sets, which would otherwise
            // make the parser drift).
            sqlBlocks.append("SELECT '\(Self.batchMarkerPrefix)\(i)' AS marker;")
            sqlBlocks.append(ensureTrailingSemicolon(inlined))
        }
        let combined = sqlBlocks.joined(separator: "\n")
        let dbPath = context.paths.stateDB
        let script = """
        sqlite3 -readonly -json \(quoteForRemoteShell(dbPath)) <<'__SCARF_SQL__'
        \(combined)
        __SCARF_SQL__
        """
        let result: ProcessResult
        do {
            // Batched timeout: scale with statement count, capped at
            // a comfortable 30 s. Most batches are 4–5 statements.
            // NOTE(review): with `queryTimeout == 30` this `min` always
            // evaluates to 30 — the per-statement scaling never takes
            // effect. Presumably the base term was meant to be smaller
            // than the cap; confirm intent before changing.
            let timeout = min(30, queryTimeout + Double(statements.count) * 2)
            result = try await transport.streamScript(script, timeout: timeout)
        } catch {
            throw BackendError.transport(error.localizedDescription)
        }
        if result.exitCode != 0 {
            throw BackendError.sqlite(exitCode: result.exitCode, stderr: result.stderrString)
        }
        return try parseBatchResultSets(result.stdoutString, expectedCount: statements.count)
    }
|
||||
|
||||
// MARK: - Preflight parsing
|
||||
|
||||
    /// Parse the preflight script's stdout: first line is the
    /// `sqlite3 --version` banner, followed by two JSON arrays (the
    /// sessions and messages `PRAGMA table_info` results). Sets
    /// `sqliteVersion`, `hasV07Schema`, and `hasV011Schema`; throws
    /// `BackendError.parseFailure` when the output doesn't match.
    private func parsePreflightOutput(_ stdout: String) throws {
        // Expected output:
        //   <sqlite3 version line>
        //   [<sessions PRAGMA result>]
        //   [<messages PRAGMA result>]
        let lines = stdout.split(separator: "\n", omittingEmptySubsequences: false)
        guard let firstLine = lines.first, !firstLine.isEmpty else {
            throw BackendError.parseFailure(stdoutHead: String(stdout.prefix(200)))
        }
        sqliteVersion = String(firstLine).trimmingCharacters(in: .whitespacesAndNewlines)

        // The remaining lines should contain two JSON arrays. sqlite3
        // -json emits each on its own — though it can wrap long arrays
        // across multiple lines. We split on `][` boundaries to be
        // robust. Walk the stream looking for two top-level arrays.
        let rest = lines.dropFirst().joined(separator: "\n")
        let arrays = splitTopLevelJSONArrays(rest)
        guard arrays.count >= 2 else {
            throw BackendError.parseFailure(stdoutHead: String(stdout.prefix(200)))
        }
        let sessionsTable = try parseTableInfo(arrays[0])
        let messagesTable = try parseTableInfo(arrays[1])

        // v0.7: sessions has `reasoning_tokens`.
        hasV07Schema = sessionsTable.contains("reasoning_tokens")
        // v0.11: BOTH sessions has `api_call_count` AND messages has
        // `reasoning_content`. Belt-and-braces against partial migrations.
        let sessionsHasV011 = sessionsTable.contains("api_call_count")
        let messagesHasV011 = messagesTable.contains("reasoning_content")
        hasV011Schema = sessionsHasV011 && messagesHasV011
    }
|
||||
|
||||
/// Extract column names from a `PRAGMA table_info(...)` result set.
|
||||
private func parseTableInfo(_ json: String) throws -> Set<String> {
|
||||
guard let data = json.data(using: .utf8),
|
||||
let arr = try? JSONSerialization.jsonObject(with: data) as? [[String: Any]] else {
|
||||
throw BackendError.parseFailure(stdoutHead: String(json.prefix(200)))
|
||||
}
|
||||
var names: Set<String> = []
|
||||
for row in arr {
|
||||
if let name = row["name"] as? String {
|
||||
names.insert(name)
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// MARK: - Result-set parsing
|
||||
|
||||
private func parseSingleResultSet(_ stdout: String) throws -> [Row] {
|
||||
// sqlite3 -json prints nothing for empty result sets, so an
|
||||
// empty stdout is valid and means "0 rows".
|
||||
let trimmed = stdout.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
if trimmed.isEmpty { return [] }
|
||||
return try rowsFromJSONArray(trimmed)
|
||||
}
|
||||
|
||||
    /// Split a batched sqlite3 run's stdout back into per-statement row
    /// sets. The output is a sequence of top-level JSON arrays in which
    /// every statement is preceded by a one-row marker array
    /// `[{"marker":"__SCARF_RS_BEGIN__N"}]`; a marker immediately
    /// followed by another marker means statement N returned zero rows.
    /// Statements whose marker never appears stay `[]` in the result.
    private func parseBatchResultSets(_ stdout: String, expectedCount: Int) throws -> [[Row]] {
        // Scan the output as a sequence of JSON arrays. Each marker
        // SELECT emits a one-row array `[{"marker":"__SCARF_RS_BEGIN__N"}]`;
        // the following array (if present) is statement N's result set.
        let arrays = splitTopLevelJSONArrays(stdout)
        var result: [[Row]] = Array(repeating: [], count: expectedCount)
        var i = 0
        while i < arrays.count {
            let chunk = arrays[i]
            // Try to read this chunk as a marker. A marker row is one
            // object with exactly the `marker` field. Anything else
            // is a real result set (which we attribute to the most
            // recent marker we saw).
            if let idx = markerIndex(in: chunk) {
                // Next array (if any) is this statement's result set.
                // If the next array is ALSO a marker, the current
                // statement returned zero rows.
                let next = i + 1
                if next < arrays.count, markerIndex(in: arrays[next]) == nil {
                    result[idx] = try rowsFromJSONArray(arrays[next])
                    i = next + 1
                } else {
                    // Empty result set for this statement.
                    i = next
                }
            } else {
                // Stray array (no preceding marker). Skip — shouldn't
                // happen in practice given how we build the script.
                i += 1
            }
        }
        return result
    }
|
||||
|
||||
/// If the array's single row is a marker `{"marker":"__SCARF_RS_BEGIN__N"}`,
|
||||
/// return N. Otherwise nil.
|
||||
private func markerIndex(in json: String) -> Int? {
|
||||
guard let data = json.data(using: .utf8),
|
||||
let arr = try? JSONSerialization.jsonObject(with: data) as? [[String: Any]],
|
||||
arr.count == 1,
|
||||
let marker = arr[0]["marker"] as? String,
|
||||
marker.hasPrefix(Self.batchMarkerPrefix) else { return nil }
|
||||
let suffix = marker.dropFirst(Self.batchMarkerPrefix.count)
|
||||
return Int(suffix)
|
||||
}
|
||||
|
||||
/// Decode one JSON array of row-objects into `[Row]`.
///
/// `[String: Any]` does NOT preserve insertion order on macOS
/// (NSDictionary backing). The data-service row parsers depend on the
/// SELECT column order (`row.string(at: 0)` for `id`, etc.), so the key
/// order is recovered from the FIRST object's raw JSON bytes; every
/// subsequent row reuses that key list to look values up by name.
///
/// - Throws: `BackendError.parseFailure` when the input is not a JSON
///   array of objects.
private func rowsFromJSONArray(_ json: String) throws -> [Row] {
    guard let data = json.data(using: .utf8),
          let objects = try? JSONSerialization.jsonObject(with: data) as? [[String: Any]] else {
        throw BackendError.parseFailure(stdoutHead: String(json.prefix(200)))
    }
    guard !objects.isEmpty else { return [] }

    // Recover column order from the raw bytes of the first object;
    // fall back to (unordered) dictionary keys if extraction fails.
    let orderedKeys: [String]
    if let rawFirstObject = extractFirstJSONObject(from: json) {
        orderedKeys = extractKeysInOrder(rawFirstObject)
    } else {
        orderedKeys = Array(objects[0].keys)
    }

    // Name → position map, built once and shared by every Row.
    var columnIndex: [String: Int] = [:]
    columnIndex.reserveCapacity(orderedKeys.count)
    for (position, key) in orderedKeys.enumerated() {
        columnIndex[key] = position
    }

    return objects.map { object in
        let values = orderedKeys.map { decode(object[$0]) }
        return Row(values: values, columnIndex: columnIndex)
    }
}
|
||||
|
||||
/// Return the substring spanning the first `{...}` object inside a JSON
/// string, or `nil` when no `{` appears or the object never closes.
///
/// Used to scan the first row's keys in their original order before
/// NSJSONSerialization's hash-table conversion strips the ordering.
/// Nested objects/arrays are handled via depth tracking; braces inside
/// string literals (including escaped quotes) are ignored.
private func extractFirstJSONObject(from json: String) -> String? {
    guard let start = json.firstIndex(of: "{") else { return nil }
    var nesting = 0
    var insideString = false
    var pendingEscape = false
    var cursor = start
    while cursor < json.endIndex {
        let ch = json[cursor]
        if insideString {
            // Inside a string literal: only an unescaped `"` ends it.
            if pendingEscape {
                pendingEscape = false
            } else if ch == "\\" {
                pendingEscape = true
            } else if ch == "\"" {
                insideString = false
            }
        } else if ch == "\"" {
            insideString = true
        } else if ch == "{" {
            nesting += 1
        } else if ch == "}" {
            nesting -= 1
            if nesting == 0 {
                // Closing brace of the outermost object — done.
                return String(json[start...cursor])
            }
        }
        cursor = json.index(after: cursor)
    }
    return nil
}
|
||||
|
||||
/// Walk an object literal `{"k1": v1, "k2": v2, ...}` and return
/// the keys in their literal order. Doesn't decode the values —
/// that's what NSJSONSerialization handles. Just extracts
/// `["k1", "k2", ...]` so we know the column ordering.
///
/// - Parameter objectJSON: A single JSON object's raw text (as produced
///   by `extractFirstJSONObject`); text before the first `{` is skipped.
/// - Returns: The top-level keys in source order; `[]` when none found.
private func extractKeysInOrder(_ objectJSON: String) -> [String] {
    var keys: [String] = []
    var i = objectJSON.startIndex
    // Skip past the leading `{`.
    while i < objectJSON.endIndex, objectJSON[i] != "{" {
        i = objectJSON.index(after: i)
    }
    // Step over the `{` itself, if one was found.
    if i < objectJSON.endIndex { i = objectJSON.index(after: i) }
    // `depth` counts nesting INSIDE the object body (0 = top level of
    // this object). `keyStart` marks the first char after an opening
    // quote of a candidate key string.
    var depth = 0
    var inString = false
    var escape = false
    var keyStart: String.Index?
    // We're at the start of object body. Looking for `"key":` patterns
    // at depth 0. Toggle `expectingKey` after each `:`/`,`.
    var expectingKey = true
    while i < objectJSON.endIndex {
        let c = objectJSON[i]
        if inString {
            // Escaped characters (incl. `\"`) never terminate the string.
            if escape {
                escape = false
            } else if c == "\\" {
                escape = true
            } else if c == "\"" {
                inString = false
                // Closing quote of a key string → record the key.
                if expectingKey && depth == 0, let start = keyStart {
                    keys.append(String(objectJSON[start..<i]))
                    expectingKey = false
                    keyStart = nil
                }
            }
            i = objectJSON.index(after: i)
            continue
        }
        switch c {
        case "\"":
            inString = true
            // Only top-level strings in key position are candidate keys;
            // value strings and nested-object keys are ignored.
            if expectingKey && depth == 0 {
                keyStart = objectJSON.index(after: i)
            }
        case "{", "[":
            depth += 1
        case "}", "]":
            if depth == 0 { return keys } // end of outer object
            depth -= 1
        case ",":
            if depth == 0 { expectingKey = true }
        case ":":
            if depth == 0 { expectingKey = false }
        default:
            break
        }
        i = objectJSON.index(after: i)
    }
    // Unterminated object: return whatever keys were seen so far.
    return keys
}
|
||||
|
||||
/// Map one JSON-decoded value (from NSJSONSerialization) to a `SQLValue`.
///
/// NSJSONSerialization decodes both ints and doubles into NSNumber.
/// A value that round-trips through Int64 unchanged is binned as
/// `.integer` — matching `sqlite3 -json`, which prints integral values
/// as `1`, not `1.0` — otherwise it becomes `.real`. Missing keys and
/// JSON nulls both map to `.null`; anything unexpected is stringified
/// so no data is lost silently (SQLite -json doesn't emit booleans or
/// nested objects from our PRAGMA / SELECT usage).
private func decode(_ v: Any?) -> SQLValue {
    guard let raw = v, !(raw is NSNull) else { return .null }
    switch raw {
    case let number as NSNumber:
        let integral = number.int64Value
        if Double(integral) == number.doubleValue {
            return .integer(integral)
        }
        return .real(number.doubleValue)
    case let text as String:
        return .text(text)
    default:
        // Fall-through: stringify rather than drop.
        return .text(String(describing: raw))
    }
}
|
||||
|
||||
// MARK: - JSON helpers
|
||||
|
||||
/// Split a string containing one or more concatenated top-level JSON
/// arrays (sqlite3 -json's batched output) into one substring per
/// array. Whitespace/newlines between arrays are tolerated; brackets
/// inside string literals (including escaped quotes) are ignored.
private func splitTopLevelJSONArrays(_ s: String) -> [String] {
    var pieces: [String] = []
    var nesting = 0
    var insideString = false
    var pendingEscape = false
    var arrayStart: String.Index?
    var cursor = s.startIndex
    while cursor < s.endIndex {
        let ch = s[cursor]
        if insideString {
            // Only an unescaped `"` terminates a string literal.
            if pendingEscape {
                pendingEscape = false
            } else if ch == "\\" {
                pendingEscape = true
            } else if ch == "\"" {
                insideString = false
            }
        } else {
            switch ch {
            case "\"":
                insideString = true
            case "[":
                if nesting == 0 { arrayStart = cursor }
                nesting += 1
            case "]":
                nesting -= 1
                if nesting == 0, let begin = arrayStart {
                    // Closed a top-level array — emit it.
                    pieces.append(String(s[begin...cursor]))
                    arrayStart = nil
                }
            default:
                break
            }
        }
        cursor = s.index(after: cursor)
    }
    return pieces
}
|
||||
|
||||
/// Trim surrounding whitespace/newlines and guarantee the SQL text ends
/// with a single `;` (appending one only when missing).
private func ensureTrailingSemicolon(_ sql: String) -> String {
    let trimmed = sql.trimmingCharacters(in: .whitespacesAndNewlines)
    return trimmed.hasSuffix(";") ? trimmed : trimmed + ";"
}
|
||||
|
||||
// MARK: - Quoting + error mapping
|
||||
|
||||
/// Build the shell argument the remote `sh -c` will see for the SQLite
/// path. sqlite3 does not expand `~` itself (that's a shell affordance),
/// so a default-config remote with `paths.stateDB == "~/.hermes/state.db"`
/// would otherwise fail with `unable to open database` — issue reported
/// on iOS Citadel against `127.0.0.1`. Three cases, in priority order:
///
/// 1. `~`-prefixed AND `resolvedHome` known — pre-expand to an absolute
///    path in Swift, then single-quote. No remote expansion needed.
/// 2. `~`-prefixed AND no `resolvedHome` (probe failed) — emit
///    `"$HOME/..."` and rely on the remote shell. Works on the Mac
///    SSHTransport; less reliable through Citadel's exec-channel +
///    base64 + inner-`/bin/sh` pipeline on iOS, which is why case 1 is
///    preferred.
/// 3. Absolute path — single-quote with the standard sh escape for any
///    embedded single quote.
private func quoteForRemoteShell(_ path: String) -> String {
    // Standard POSIX single-quoting: close, escaped quote, reopen.
    func singleQuoted(_ p: String) -> String {
        "'" + p.replacingOccurrences(of: "'", with: "'\\''") + "'"
    }

    if let home = resolvedHome {
        // Case 1: expand `~` locally, then quote the literal path.
        if path == "~" {
            return singleQuoted(home)
        }
        if path.hasPrefix("~/") {
            return singleQuoted(home + "/" + String(path.dropFirst(2)))
        }
        return singleQuoted(path)
    }

    // Case 2: probe failed — lean on the remote shell's $HOME expansion.
    if path == "~" {
        return "\"$HOME\""
    }
    if path.hasPrefix("~/") {
        // Escape double-quote metacharacters; backslash must go first.
        let tail = String(path.dropFirst(2))
            .replacingOccurrences(of: "\\", with: "\\\\")
            .replacingOccurrences(of: "\"", with: "\\\"")
            .replacingOccurrences(of: "$", with: "\\$")
            .replacingOccurrences(of: "`", with: "\\`")
        return "\"$HOME/\(tail)\""
    }
    // Case 3: absolute (or relative) literal path.
    return singleQuoted(path)
}
|
||||
|
||||
/// Translate a non-zero sqlite3 exit into a user-presentable message,
/// preferring stderr and falling back to stdout. Keeps the substrings
/// that `HermesDataService.humanize` keys off so the existing dashboard
/// banner renders correctly.
private func errorMessage(stderr: String, stdout: String, exitCode: Int32) -> String {
    let source = stderr.isEmpty ? stdout : stderr
    let message = source.trimmingCharacters(in: .whitespacesAndNewlines)
    return message.isEmpty ? "sqlite3 exited \(exitCode) with no output" : message
}
|
||||
}
|
||||
|
||||
#endif // canImport(SQLite3)
|
||||
@@ -0,0 +1,136 @@
|
||||
import Foundation
|
||||
|
||||
/// A typed SQLite column value, mirroring SQLite's storage classes
/// (`SQLITE_NULL`, `SQLITE_INTEGER`, `SQLITE_FLOAT`, `SQLITE_TEXT`,
/// `SQLITE_BLOB`) so both backends — libsqlite3 (`LocalSQLiteBackend`)
/// and remote `sqlite3 -json` parsing (`RemoteSQLiteBackend`) — produce
/// and consume the same `Row` shape.
///
/// Two roles:
/// 1. **Bound parameters** — callers pass `[SQLValue]` to
///    `HermesQueryBackend.query(_:params:)`; the local backend feeds
///    them to `sqlite3_bind_*`, the remote backend inlines them as
///    SQLite literals via `SQLValueInliner.inline(_:into:)`.
/// 2. **Result columns** — each `Row.values` entry is one of these;
///    parsers (`sessionFromRow`, `messageFromRow` in HermesDataService)
///    read positional accessors like `row.string(at: 3)`.
public enum SQLValue: Sendable, Equatable {
    case null
    case integer(Int64)
    case real(Double)
    case text(String)
    case blob(Data)
}
|
||||
|
||||
/// One result row from a query, indexable both by position (matching the
/// libsqlite3 `sqlite3_column_*` ergonomics that `HermesDataService`'s
/// existing parsers expect) and by column name.
///
/// `columnIndex` is built once per result set by the backend and shared
/// across all rows in the set, so the per-row overhead is just the
/// `[SQLValue]` allocation. Out-of-range positions and unknown names
/// resolve to `.null` rather than trapping.
public struct Row: Sendable {
    /// Ordered column values, positioned as in the underlying SELECT.
    public let values: [SQLValue]

    /// Column-name → position map; case-sensitive (SQLite's default).
    public let columnIndex: [String: Int]

    public init(values: [SQLValue], columnIndex: [String: Int]) {
        self.values = values
        self.columnIndex = columnIndex
    }

    public subscript(_ position: Int) -> SQLValue {
        values.indices.contains(position) ? values[position] : .null
    }

    public subscript(_ name: String) -> SQLValue {
        columnIndex[name].map { values[$0] } ?? .null
    }

    // MARK: - Typed positional accessors
    //
    // These mirror the `columnText(stmt, i)` / `columnDate(stmt, i)`
    // helpers that lived in HermesDataService so row-parser migrations
    // from `OpaquePointer` to `Row` stay line-for-line.

    /// TEXT column as String; `""` for any other storage class.
    public func string(at i: Int) -> String {
        guard case .text(let s) = self[i] else { return "" }
        return s
    }

    /// TEXT column as String; `nil` for NULL or any other class.
    public func optionalString(at i: Int) -> String? {
        guard case .text(let s) = self[i] else { return nil }
        return s
    }

    /// Int coercion: integer/real/parsable text; `0` otherwise.
    public func int(at i: Int) -> Int {
        optionalInt(at: i) ?? 0
    }

    public func optionalInt(at i: Int) -> Int? {
        switch self[i] {
        case .integer(let n): return Int(n)
        case .real(let d): return Int(d)
        case .text(let s): return Int(s)
        default: return nil
        }
    }

    /// Int64 coercion: integer/real/parsable text; `0` otherwise.
    public func int64(at i: Int) -> Int64 {
        switch self[i] {
        case .integer(let n): return n
        case .real(let d): return Int64(d)
        case .text(let s): return Int64(s) ?? 0
        default: return 0
        }
    }

    /// Double coercion: real/integer/parsable text; `0` otherwise.
    public func double(at i: Int) -> Double {
        optionalDouble(at: i) ?? 0
    }

    public func optionalDouble(at i: Int) -> Double? {
        switch self[i] {
        case .real(let d): return d
        case .integer(let n): return Double(n)
        case .text(let s): return Double(s)
        default: return nil
        }
    }

    /// Interpret the column as a Unix-epoch timestamp (seconds,
    /// fractional allowed); `nil` when NULL or unparseable. Mirrors the
    /// pre-migration `columnDate` helper exactly.
    public func date(at i: Int) -> Date? {
        optionalDouble(at: i).map(Date.init(timeIntervalSince1970:))
    }

    public func isNull(at i: Int) -> Bool {
        self[i] == .null
    }
}
|
||||
@@ -0,0 +1,107 @@
|
||||
import Foundation
|
||||
|
||||
/// Replaces `?` placeholders in a SQL string with SQLite-escaped
/// literal values, in order. Used by `RemoteSQLiteBackend` because
/// the `sqlite3` CLI doesn't accept `?`-bound parameters on the
/// command line — it would need stdin `.parameter set @name` dot-
/// commands, which require a multi-line script for every query and
/// add round-trip overhead with no upside for our use case.
///
/// **Trust model.** This is a literal-encoder for in-tree, trusted
/// callers — every current param source is either an integer (`limit`,
/// `before`, `since.timeIntervalSince1970`), a Hermes-internal ID
/// (UUID-shaped session/tool IDs that come back from the same DB), or
/// a search query that already passes through `sanitizeFTSQuery` in
/// HermesDataService. It is **NOT** a general SQL-injection defense.
/// Don't extend the data-service surface with methods that accept raw
/// untrusted user input as a `.text` param without first validating
/// upstream. The local backend skips inlining entirely (uses
/// `sqlite3_bind_*`) so this only affects the remote path.
///
/// Escape rules mirror SQLite's literal syntax:
/// * `.null` → `NULL`
/// * `.integer(n)` → `<n>` (no quoting)
/// * `.real(d)` → `%.17g`-formatted (round-trips Double via decimal)
/// * `.text(s)` → `'<s with single-quotes doubled>'`
/// * `.blob(d)` → `X'<hex>'`
public enum SQLValueInliner {

    /// Walk `sql`, replacing each `?` (outside SQL string literals) with
    /// the corresponding `params` entry's encoded form. Throws via
    /// fatalError if the placeholder count doesn't match `params.count`
    /// — a programmer error, not a runtime condition.
    ///
    /// `?` inside string literals (e.g. `WHERE name = '?'`) is preserved
    /// unchanged. We track quote state with a tiny scanner so existing
    /// SQL with literal `?` chars in strings doesn't get mis-bound.
    ///
    /// - Returns: The SQL text with all placeholders inlined.
    public static func inline(_ sql: String, params: [SQLValue]) -> String {
        var out = ""
        // Rough pre-size: original text plus ~16 chars per inlined value.
        out.reserveCapacity(sql.count + params.count * 16)
        var paramIndex = 0
        var inSingleQuote = false
        var inDoubleQuote = false
        var i = sql.startIndex
        while i < sql.endIndex {
            let c = sql[i]
            if c == "'" && !inDoubleQuote {
                // Check for SQL's `''` escape (a doubled single-quote
                // INSIDE a string literal stays inside; we don't toggle
                // out). The next char being another `'` keeps us in.
                let next = sql.index(after: i)
                if inSingleQuote && next < sql.endIndex && sql[next] == "'" {
                    out.append("'")
                    out.append("'")
                    i = sql.index(after: next)
                    continue
                }
                inSingleQuote.toggle()
                out.append(c)
                i = sql.index(after: i)
                continue
            }
            if c == "\"" && !inSingleQuote {
                // Double quotes delimit identifiers/strings too; a `?`
                // inside them must likewise be preserved verbatim.
                inDoubleQuote.toggle()
                out.append(c)
                i = sql.index(after: i)
                continue
            }
            if c == "?" && !inSingleQuote && !inDoubleQuote {
                // Bind placeholder.
                if paramIndex >= params.count {
                    fatalError("SQLValueInliner: more `?` placeholders in SQL than provided params (\(params.count)). SQL: \(sql)")
                }
                out.append(encode(params[paramIndex]))
                paramIndex += 1
                i = sql.index(after: i)
                continue
            }
            out.append(c)
            i = sql.index(after: i)
        }
        // Surplus params are also a programmer error — fail loudly.
        if paramIndex != params.count {
            fatalError("SQLValueInliner: \(params.count) params provided but only \(paramIndex) `?` placeholders consumed. SQL: \(sql)")
        }
        return out
    }

    /// Encode a single value as a SQLite literal. Public so callers
    /// that build SQL strings by hand (rare — prefer `inline`) can
    /// reuse the same escape rules.
    public static func encode(_ value: SQLValue) -> String {
        switch value {
        case .null:
            return "NULL"
        case .integer(let n):
            return String(n)
        case .real(let d):
            // %.17g round-trips a Double precisely as a decimal.
            return String(format: "%.17g", d)
        case .text(let s):
            return "'" + s.replacingOccurrences(of: "'", with: "''") + "'"
        case .blob(let d):
            // SQLite blob literal: X'<hex>' (case-insensitive prefix).
            let hex = d.map { String(format: "%02x", $0) }.joined()
            return "X'\(hex)'"
        }
    }
}
|
||||
@@ -0,0 +1,358 @@
|
||||
import Foundation
|
||||
#if canImport(os)
|
||||
import os
|
||||
#endif
|
||||
|
||||
/// Async, transport-aware client for `hermes curator …`. Wraps the v0.12
|
||||
/// verbs (`status / run / pause / resume / pin / unpin / restore`) plus
|
||||
/// the v0.13 archive surface (`archive / prune / list-archived` and a
|
||||
/// synchronous-blocking `run`).
|
||||
///
|
||||
/// **Concurrency.** Pure-I/O `actor` — no UI state. View models hold a
|
||||
/// service reference and `await` methods. Each public method dispatches
|
||||
/// the underlying CLI invocation through `Task.detached(priority:
|
||||
/// .utility)` so two concurrent reads from the VM don't queue end-to-end
|
||||
/// on a single thread. Mirrors `KanbanService` shape exactly.
|
||||
///
|
||||
/// **Capability gating happens at the call site, not in the service.**
|
||||
/// `runNow(synchronous:timeout:)` takes a flag from the VM (the VM reads
|
||||
/// `HermesCapabilities.hasCuratorArchive` to decide). The service stays
|
||||
/// version-agnostic — only the timeout differs in practice.
|
||||
public actor CuratorService {
|
||||
#if canImport(os)
|
||||
private static let logger = Logger(subsystem: "com.scarf", category: "CuratorService")
|
||||
#endif
|
||||
|
||||
private let context: ServerContext
|
||||
|
||||
/// - Parameter context: The server context whose transport and paths
///   every `hermes curator` invocation in this service is routed through.
public init(context: ServerContext) {
    self.context = context
}
|
||||
|
||||
// MARK: - Reads
|
||||
|
||||
/// Run `hermes curator status`, parse stdout via
/// `HermesCuratorStatusParser`, and merge in the on-disk
/// `.curator_state` JSON for richer last-run metadata. Never throws —
/// a transport failure degrades to the parser's fallback so the view
/// always has something to render.
public func status() async -> HermesCuratorStatus {
    let ctx = self.context
    return await Task.detached(priority: .utility) { () -> HermesCuratorStatus in
        let cli = Self.runHermesSync(context: ctx, args: ["curator", "status"], timeout: 30)
        let stateJSON = ctx.readData(ctx.paths.curatorStateFile)
        return HermesCuratorStatusParser.parse(text: cli.output, stateFileJSON: stateJSON)
    }.value
}
|
||||
|
||||
/// `hermes curator list-archived [--json]`. Prefers JSON output,
/// retries without `--json` when the flag is rejected, and falls back
/// to a defensive text parse. Empty output and the "no archived
/// skills" sentinel both fold to `[]`.
public func listArchived() async throws -> [HermesCuratorArchivedSkill] {
    // TODO(WS-4-Q2): confirm `--json` is supported on v0.13
    // `list-archived`. If not, drop the flag and rely on the text
    // parser path. Until then we pass `--json` and parse tolerantly.
    let (code, stdout, stderr) = await runHermes(args: ["curator", "list-archived", "--json"], timeout: 30)

    if code != 0 {
        // An unsupported flag typically shows up as "unrecognized
        // arguments: --json" (or similar) with a non-zero exit. Retry
        // bare and parse the text output.
        let diagnostics = (stderr + stdout).lowercased()
        let flagRejected = diagnostics.contains("unrecognized")
            || diagnostics.contains("unknown")
            || diagnostics.contains("no such option")
        if flagRejected {
            let (retryCode, retryOut, retryErr) = await runHermes(args: ["curator", "list-archived"], timeout: 30)
            try ensureSuccess(code: retryCode, stdout: retryOut, stderr: retryErr, verb: "list-archived")
            return Self.parseListArchivedText(retryOut)
        }
        try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "list-archived")
    }

    let trimmed = stdout.trimmingCharacters(in: .whitespacesAndNewlines)
    guard !trimmed.isEmpty, !trimmed.lowercased().contains("no archived skills") else {
        return []
    }

    // JSON first — bare array, then the `{"archived": [...]}` envelope
    // some builds wrap it in.
    let payload = Data(trimmed.utf8)
    if let rows = try? JSONDecoder().decode([HermesCuratorArchivedSkill].self, from: payload) {
        return rows
    }
    struct Wrapper: Decodable { let archived: [HermesCuratorArchivedSkill] }
    if let wrapped = try? JSONDecoder().decode(Wrapper.self, from: payload) {
        return wrapped.archived
    }
    // Hermes may have ignored `--json` entirely — defensive text parse.
    return Self.parseListArchivedText(stdout)
}
|
||||
|
||||
// MARK: - Writes (legacy v0.12 verbs; service form)
|
||||
|
||||
/// `hermes curator run`. A synchronous (blocking) run uses the
/// caller-supplied timeout; fire-and-forget runs are capped at 30s.
public func runNow(synchronous: Bool, timeout: TimeInterval) async throws {
    // TODO(WS-4-Q4): default 600s for v0.13 sync runs. No Cancel
    // button in v2.8 (transport.cancel parity not guaranteed across
    // LocalTransport / SSHTransport).
    let effectiveTimeout: TimeInterval = synchronous ? timeout : 30
    let (code, stdout, stderr) = await runHermes(args: ["curator", "run"], timeout: effectiveTimeout)
    try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "run")
}
|
||||
|
||||
/// `hermes curator pause`; a non-zero exit surfaces via `ensureSuccess`.
public func pause() async throws {
    let outcome = await runHermes(args: ["curator", "pause"], timeout: 15)
    try ensureSuccess(code: outcome.0, stdout: outcome.1, stderr: outcome.2, verb: "pause")
}
|
||||
|
||||
/// `hermes curator resume`; a non-zero exit surfaces via `ensureSuccess`.
public func resume() async throws {
    let outcome = await runHermes(args: ["curator", "resume"], timeout: 15)
    try ensureSuccess(code: outcome.0, stdout: outcome.1, stderr: outcome.2, verb: "resume")
}
|
||||
|
||||
/// `hermes curator pin <name>`; a non-zero exit surfaces via `ensureSuccess`.
public func pin(_ name: String) async throws {
    let outcome = await runHermes(args: ["curator", "pin", name], timeout: 15)
    try ensureSuccess(code: outcome.0, stdout: outcome.1, stderr: outcome.2, verb: "pin")
}
|
||||
|
||||
/// `hermes curator unpin <name>`; a non-zero exit surfaces via `ensureSuccess`.
public func unpin(_ name: String) async throws {
    let outcome = await runHermes(args: ["curator", "unpin", name], timeout: 15)
    try ensureSuccess(code: outcome.0, stdout: outcome.1, stderr: outcome.2, verb: "unpin")
}
|
||||
|
||||
/// `hermes curator restore <name>`; a non-zero exit surfaces via `ensureSuccess`.
public func restore(_ name: String) async throws {
    let outcome = await runHermes(args: ["curator", "restore", name], timeout: 30)
    try ensureSuccess(code: outcome.0, stdout: outcome.1, stderr: outcome.2, verb: "restore")
}
|
||||
|
||||
// MARK: - Writes (new in v0.13)
|
||||
|
||||
/// `hermes curator archive <name>` — non-destructive; moves the skill
/// from the active set to the archived set. No `--json` is expected;
/// the verb's success channel is the exit code.
public func archive(_ name: String) async throws {
    let outcome = await runHermes(args: ["curator", "archive", name], timeout: 30)
    try ensureSuccess(code: outcome.0, stdout: outcome.1, stderr: outcome.2, verb: "archive")
}
|
||||
|
||||
/// `hermes curator prune [--dry-run]`. Destructive when `dryRun` is
/// `false` — removes everything currently archived from disk. Returns
/// a `CuratorPruneSummary` describing what was (or would be) removed.
/// On `dryRun=false` the wire shape may not include the `would_remove`
/// list — callers must not depend on it; the archived list is empty
/// after a successful destructive prune.
@discardableResult
public func prune(dryRun: Bool) async throws -> CuratorPruneSummary {
    // TODO(WS-4-Q1): confirm v0.13 ships `--dry-run`. If not, fall
    // back to enumerating via `list-archived` and treat any prune call
    // as destructive. The retry path below covers the "unrecognized
    // argument" case automatically.
    var args = ["curator", "prune"]
    if dryRun {
        // `--json` only matters on the dry run, where we parse the
        // would-remove list; destructive mode needs just the exit code.
        args += ["--dry-run", "--json"]
    }

    let (code, stdout, stderr) = await runHermes(args: args, timeout: 60)

    if code != 0 {
        // Detect "unrecognized --dry-run" / "unknown --json" gracefully.
        let diagnostics = (stderr + stdout).lowercased()
        let flagRejected = diagnostics.contains("unrecognized")
            || diagnostics.contains("unknown")
            || diagnostics.contains("no such option")
        if dryRun && flagRejected {
            // Q1 fallback: synthesize the summary from `list-archived`;
            // the caller still uses it for confirm-sheet display.
            let archived = try await listArchived()
            let totalBytes = archived.compactMap(\.sizeBytes).reduce(0, +)
            return CuratorPruneSummary(wouldRemove: archived, totalBytes: totalBytes)
        }
        try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "prune")
    }

    return dryRun
        ? Self.parsePruneDryRun(stdout)
        : CuratorPruneSummary(wouldRemove: [], totalBytes: 0)
}
|
||||
|
||||
// MARK: - Pure parsers (nonisolated; safe to call from VMs without awaits)
|
||||
|
||||
/// Parse a `list-archived --json` payload. Tolerates the bare-array
/// shape, the `{"archived": [...]}` envelope, and the "no archived
/// skills" / empty-string sentinels (all empty cases return `[]`).
/// Throws `CuratorError.decoding` only when the input is non-empty and
/// neither JSON nor a recognisable text table.
public nonisolated static func parseListArchived(stdout: String) throws -> [HermesCuratorArchivedSkill] {
    let trimmed = stdout.trimmingCharacters(in: .whitespacesAndNewlines)
    guard !trimmed.isEmpty, !trimmed.lowercased().contains("no archived skills") else {
        return []
    }
    guard let data = trimmed.data(using: .utf8) else {
        throw CuratorError.decoding(verb: "list-archived", message: "non-UTF8 stdout")
    }
    let decoder = JSONDecoder()
    if let rows = try? decoder.decode([HermesCuratorArchivedSkill].self, from: data) {
        return rows
    }
    struct Wrapper: Decodable { let archived: [HermesCuratorArchivedSkill] }
    if let wrapped = try? decoder.decode(Wrapper.self, from: data) {
        return wrapped.archived
    }
    // Last resort: text fallback; an empty parse means we understood
    // nothing and should say so.
    let textRows = parseListArchivedText(stdout)
    guard textRows.isEmpty else { return textRows }
    throw CuratorError.decoding(verb: "list-archived", message: "stdout was neither JSON nor a recognised text list")
}
|
||||
|
||||
/// Defensive text parser for `list-archived` output when `--json`
/// isn't supported. Format inferred from `curator status`: one row
/// per non-blank line, leading whitespace, name in column 1, then
/// optional `archived=YYYY-MM-DD`, `size=NNNN`, `reason=...` k/v
/// pairs. Blank lines, header lines, and the empty-state sentinel
/// are skipped.
///
/// NOTE(review): k=v values are split on whitespace, so a value
/// containing spaces (e.g. a multi-word `reason=`) is truncated at the
/// first space — confirm against real CLI output.
public nonisolated static func parseListArchivedText(_ text: String) -> [HermesCuratorArchivedSkill] {
    var rows: [HermesCuratorArchivedSkill] = []
    for raw in text.split(separator: "\n") {
        let line = raw.trimmingCharacters(in: .whitespaces)
        if line.isEmpty { continue }
        let lower = line.lowercased()
        // Skip header / sentinel lines.
        if lower.hasPrefix("name") && lower.contains("archived") { continue }
        if lower.contains("no archived skills") { continue }
        // Skip box-drawing separator rows (U+2500 horizontal bars).
        if line.unicodeScalars.allSatisfy({ $0.value == 0x2500 || $0.properties.isWhitespace }) {
            continue
        }
        // Skip lines that look like JSON / non-row chrome — `{`,
        // `}`, `[`, `]` at the start or quotes / colons mean we're
        // parsing a malformed JSON dump, not a row table.
        if let first = line.first, "{[}]\":,".contains(first) {
            continue
        }
        // Find the first whitespace-separated token as the name; if
        // the name carries an `=` it's a header chip we should skip.
        let parts = line.split(whereSeparator: { $0 == "\t" || $0 == " " }).map(String.init)
        guard let name = parts.first, !name.contains("=") else { continue }
        // Reject names that look like punctuation / JSON fragments.
        if name.contains("\"") || name.contains(":") || name.contains("{") || name.contains("}") || name.contains("[") || name.contains("]") {
            continue
        }
        // Pull k=v pairs from the remainder; unknown keys are ignored.
        var archivedAt: String?
        var sizeBytes: Int?
        var reason: String?
        var category: String?
        var path: String?
        for token in parts.dropFirst() {
            guard let eq = token.firstIndex(of: "=") else { continue }
            let key = String(token[..<eq])
            let value = String(token[token.index(after: eq)...])
            switch key {
            case "archived", "archived_at":
                archivedAt = value
            case "size", "size_bytes":
                sizeBytes = Int(value)
            case "reason":
                reason = value
            case "category":
                category = value
            case "path":
                path = value
            default:
                continue
            }
        }
        rows.append(
            HermesCuratorArchivedSkill(
                name: name,
                category: category,
                archivedAt: archivedAt,
                reason: reason,
                sizeBytes: sizeBytes,
                path: path
            )
        )
    }
    return rows
}
|
||||
|
||||
/// Parse a `prune --dry-run --json` payload. Tolerates an empty
|
||||
/// payload (returns a zero summary) and the `{would_remove: [],
|
||||
/// total_bytes: N}` shape.
|
||||
public nonisolated static func parsePruneDryRun(_ stdout: String) -> CuratorPruneSummary {
|
||||
let trimmed = stdout.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else {
|
||||
return CuratorPruneSummary(wouldRemove: [], totalBytes: 0)
|
||||
}
|
||||
if let data = trimmed.data(using: .utf8),
|
||||
let summary = try? JSONDecoder().decode(CuratorPruneSummary.self, from: data) {
|
||||
return summary
|
||||
}
|
||||
// Tolerate a bare-array fallback (some Hermes builds may print
|
||||
// just the would-remove list when --json is missing the wrapper).
|
||||
if let data = trimmed.data(using: .utf8),
|
||||
let arr = try? JSONDecoder().decode([HermesCuratorArchivedSkill].self, from: data) {
|
||||
let total = arr.compactMap { $0.sizeBytes }.reduce(0, +)
|
||||
return CuratorPruneSummary(wouldRemove: arr, totalBytes: total)
|
||||
}
|
||||
// Last-resort text parse for "would remove N skills (X bytes)".
|
||||
return CuratorPruneSummary(wouldRemove: [], totalBytes: 0)
|
||||
}
|
||||
|
||||
// MARK: - CLI invocation
|
||||
|
||||
private nonisolated func runHermes(
|
||||
args: [String],
|
||||
timeout: TimeInterval
|
||||
) async -> (exitCode: Int32, stdout: String, stderr: String) {
|
||||
let context = self.context
|
||||
return await Task.detached(priority: .utility) { () -> (Int32, String, String) in
|
||||
let result = Self.runHermesSync(context: context, args: args, timeout: timeout)
|
||||
return (result.exitCode, result.output, result.stderr)
|
||||
}.value
|
||||
}
|
||||
|
||||
/// Synchronous, transport-level invocation. `output` is stdout; the
|
||||
/// caller usually only reads `output` for parser input but sometimes
|
||||
/// needs `stderr` (e.g. to detect "unrecognized argument" patterns).
|
||||
private nonisolated static func runHermesSync(
|
||||
context: ServerContext,
|
||||
args: [String],
|
||||
timeout: TimeInterval
|
||||
) -> (exitCode: Int32, output: String, stderr: String) {
|
||||
let transport = context.makeTransport()
|
||||
do {
|
||||
let result = try transport.runProcess(
|
||||
executable: context.paths.hermesBinary,
|
||||
args: args,
|
||||
stdin: nil,
|
||||
timeout: timeout
|
||||
)
|
||||
return (result.exitCode, result.stdoutString, result.stderrString)
|
||||
} catch let error as TransportError {
|
||||
let message = error.diagnosticStderr.isEmpty
|
||||
? (error.errorDescription ?? "transport error")
|
||||
: error.diagnosticStderr
|
||||
return (-1, "", message)
|
||||
} catch {
|
||||
return (-1, "", error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
private nonisolated func ensureSuccess(
|
||||
code: Int32,
|
||||
stdout: String,
|
||||
stderr: String,
|
||||
verb: String
|
||||
) throws {
|
||||
guard code != 0 else { return }
|
||||
if code == -1 && stderr.lowercased().contains("hermes binary not found") {
|
||||
throw CuratorError.cliMissing
|
||||
}
|
||||
let combined = stderr.isEmpty ? stdout : stderr
|
||||
#if canImport(os)
|
||||
Self.logger.warning("curator \(verb) exit=\(code, privacy: .public) stderr=\(combined, privacy: .public)")
|
||||
#endif
|
||||
throw CuratorError.nonZeroExit(verb: verb, code: code, stderr: combined)
|
||||
}
|
||||
}
|
||||
@@ -8,9 +8,13 @@ import os
|
||||
///
|
||||
/// Scarf tracks Hermes feature releases by date-version + semver. v0.12 added
|
||||
/// a dozen surfaces (Curator, Kanban, multimodal ACP, ...) and removed a few
|
||||
/// (`flush_memories` aux task). UI that branches on these surfaces calls
|
||||
/// the boolean accessors here so older Hermes installs degrade silently
|
||||
/// instead of throwing on an unknown CLI subcommand.
|
||||
/// (`flush_memories` aux task); v0.13 added Persistent Goals, ACP `/queue`,
|
||||
/// Kanban diagnostics + recovery UX, Curator archive/prune, Google Chat (20th
|
||||
/// platform), cross-platform allowlists, MCP SSE transport, Cron `no_agent`
|
||||
/// mode, Web Tools per-capability backends, Profiles `--no-skills`, and a
|
||||
/// handful of UX additions. UI that branches on these surfaces calls the
|
||||
/// boolean accessors here so older Hermes installs degrade silently instead
|
||||
/// of throwing on an unknown CLI subcommand.
|
||||
///
|
||||
/// Pure value type — no side effects. The async detection lives in
|
||||
/// `HermesCapabilitiesStore`.
|
||||
@@ -45,8 +49,11 @@ public struct HermesCapabilities: Sendable, Equatable {
|
||||
// MARK: - Capability flags
|
||||
//
|
||||
// Add a new flag here when Scarf gains UI that conditionally branches on
|
||||
// a Hermes capability. Keep the comparison conservative: `>= 0.12.0`
|
||||
// covers users still on the 0.12 line who haven't upgraded to 0.13 yet.
|
||||
// a Hermes capability. Keep the comparison conservative: a flag introduced
|
||||
// in v0.13.0 should gate on `>= 0.13.0`, not `>= 0.13.5`, so users on
|
||||
// an early 0.13 patch still see the surface.
|
||||
|
||||
// MARK: v0.12 (v2026.4.30) flags
|
||||
|
||||
/// `hermes curator` autonomous skill maintenance (v0.12+).
|
||||
public var hasCurator: Bool { atLeastSemver(0, 12, 0) }
|
||||
@@ -96,9 +103,123 @@ public struct HermesCapabilities: Sendable, Equatable {
|
||||
public var hasPromptCacheTTL: Bool { atLeastSemver(0, 12, 0) }
|
||||
|
||||
/// `redaction.enabled` is now off by default in v0.12 — Scarf surfaces
|
||||
/// the toggle so users can flip it back on.
|
||||
/// the toggle so users can flip it back on. v0.13 flips the server-side
|
||||
/// default back to ON; the toggle remains so users on v0.13 can opt out.
|
||||
public var hasRedactionToggle: Bool { atLeastSemver(0, 12, 0) }
|
||||
|
||||
// MARK: v0.13 (v2026.5.7) flags
|
||||
|
||||
/// `/goal` slash command + Persistent Goals + Checkpoints v2 single-store
|
||||
/// (v0.13+). Used by RichChatViewModel to add `/goal` to the
|
||||
/// non-interruptive command list and to render the "Goal locked" pill in
|
||||
/// the chat header.
|
||||
public var hasGoals: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `/queue` slash command in the ACP adapter (v0.13+). Queues a prompt
|
||||
/// to run after the current turn completes without interrupting.
|
||||
public var hasACPQueue: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `/steer` runs as a regular prompt on idle ACP sessions (v0.13+). Pre-
|
||||
/// v0.13 hosts silently no-op `/steer` when no turn is in flight; with
|
||||
/// this flag on, Scarf can surface `/steer` even when the agent isn't
|
||||
/// mid-turn without confusing UX.
|
||||
public var hasACPSteerOnIdle: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Kanban v0.13 reliability surface: hallucination gate on worker-created
|
||||
/// cards, generic diagnostics engine, per-task `max_retries`, multiline
|
||||
/// title/body create, `auto_blocked_reason` on blocked tasks, darwin
|
||||
/// zombie detection. All read through the `kanban show` JSON surface.
|
||||
public var hasKanbanDiagnostics: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `hermes curator archive`, `prune`, and `list-archived` subcommands
|
||||
/// (v0.13+). The synchronous manual `hermes curator run` lives behind
|
||||
/// this flag too — pre-v0.13 `run` returns immediately and the work
|
||||
/// happens in the background.
|
||||
public var hasCuratorArchive: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Google Chat — 20th messaging-gateway platform (v0.13+).
|
||||
public var hasGoogleChatPlatform: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Cross-platform allowlist keys: `allowed_channels` (Slack / Mattermost
|
||||
/// / Google Chat), `allowed_chats` (Telegram / WhatsApp), `allowed_rooms`
|
||||
/// (Matrix / DingTalk). Settable per platform in `config.yaml` (v0.13+).
|
||||
public var hasGatewayAllowlists: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `busy_ack_enabled` config to suppress per-message "agent is working…"
|
||||
/// acks across platforms (v0.13+).
|
||||
public var hasGatewayBusyAckToggle: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Per-platform `gateway_restart_notification` flag controls whether the
|
||||
/// platform posts a "Gateway restarted" notice on boot (v0.13+).
|
||||
public var hasGatewayRestartNotification: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `hermes gateway list` cross-profile status verb (v0.13+). Lets Scarf
|
||||
/// show which profile is currently running which platform.
|
||||
public var hasGatewayList: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// MCP servers can use SSE transport (v0.13+). Adds an `sse_read_timeout`
|
||||
/// knob alongside the existing stdio/pipe transports.
|
||||
public var hasMCPSSETransport: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Cron `--no-agent` mode for script-only watchdog jobs (v0.13+). Skips
|
||||
/// the AI call entirely — useful for keep-alive / periodic-check jobs.
|
||||
public var hasCronNoAgent: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Web Tools split into per-capability backend selection: `web_search`
|
||||
/// and `web_extract` can now use distinct backends (v0.13+). SearXNG
|
||||
/// joined as a search-only backend.
|
||||
public var hasWebToolsBackendSplit: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `hermes profile create --no-skills` flag for empty profiles (v0.13+).
|
||||
public var hasProfileNoSkills: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// Context compression count surfaced in the status feed (v0.13+). Scarf
|
||||
/// renders it next to the token count in the chat status bar.
|
||||
public var hasContextCompressionCount: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `/new` slash command accepts an optional session-name argument (v0.13+).
|
||||
public var hasNewWithSessionName: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `hermes update --yes` / `-y` skips interactive prompts (v0.13+). Used
|
||||
/// by Scarf's "Update Hermes" affordance to run unattended.
|
||||
public var hasUpdateNonInteractive: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// OpenRouter response caching toggle in `config.yaml` (v0.13+).
|
||||
public var hasOpenRouterResponseCache: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `image_gen.model` honored from `config.yaml` (v0.13+). Pre-v0.13 the
|
||||
/// value was advertised but ignored at runtime.
|
||||
public var hasImageGenModel: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `display.language` config key for static-message translation: zh / ja /
|
||||
/// de / es / fr / uk / tr (v0.13+).
|
||||
public var hasDisplayLanguage: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// xAI Custom Voices — voice cloning support (v0.13+). Exposed in Scarf
|
||||
/// as a "Cloning supported" badge next to the xAI TTS provider entry.
|
||||
public var hasXAIVoiceCloning: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `video_analyze` tool — native video understanding on Gemini and
|
||||
/// compatible models (v0.13+). Hermes handles this transparently inside
|
||||
/// the agent loop; Scarf has no UI surface yet, but the flag lets future
|
||||
/// dashboards / activity views light up video-tool annotations.
|
||||
public var hasVideoAnalyze: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
/// `transform_llm_output` plugin hook for shaping LLM output before the
|
||||
/// conversation receives it (v0.13+). Plugin-author concern; Scarf's
|
||||
/// PluginsView surfaces it as a documented hook in plugin metadata.
|
||||
public var hasTransformLLMOutputHook: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
// MARK: Convenience predicates
|
||||
|
||||
/// Whether the connected host is on the v0.13 line or newer. Convenience
|
||||
/// for UI copy that needs to switch on the v0.12 → v0.13 boundary without
|
||||
/// proxying through a feature-specific flag (e.g. "v0.13 features active"
|
||||
/// badges, redaction default-state hints). Equivalent to any individual
|
||||
/// v0.13 flag; prefer this when the call site isn't actually about a
|
||||
/// specific feature.
|
||||
public var isV013OrLater: Bool { atLeastSemver(0, 13, 0) }
|
||||
|
||||
private func atLeastSemver(_ major: Int, _ minor: Int, _ patch: Int) -> Bool {
|
||||
guard let s = semver else { return false }
|
||||
return s >= SemVer(major: major, minor: minor, patch: patch)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -51,7 +51,19 @@ public enum HermesProfileResolver {
|
||||
/// Returns the default `~/.hermes` when no profile is active OR when
|
||||
/// the configured profile is invalid (logged) — so the worst-case
|
||||
/// failure mode is "Scarf shows what it always showed before."
|
||||
///
|
||||
/// **Test override.** Setting `SCARF_HERMES_HOME` in the environment
|
||||
/// pins this resolver to the supplied absolute path and bypasses both
|
||||
/// the cache and the `active_profile` lookup. Used by the E2E test
|
||||
/// harness (`TemplateE2ETests`, `TemplateInstallUITests`) to drive
|
||||
/// Scarf against an isolated tmpdir Hermes home so the user's real
|
||||
/// `~/.hermes` is never touched. Read on every call (cheap; a single
|
||||
/// `ProcessInfo` lookup) so tests can flip it across test methods
|
||||
/// without stale-cache surprises.
|
||||
public static func resolveLocalHome() -> String {
|
||||
if let override = scarfHermesHomeOverride() {
|
||||
return override
|
||||
}
|
||||
return refreshIfNeeded().home
|
||||
}
|
||||
|
||||
@@ -60,9 +72,55 @@ public enum HermesProfileResolver {
|
||||
/// reading from (issue #50 follow-up: prevents the next variant
|
||||
/// of "where's my data — wrong profile" by making it visible).
|
||||
public static func activeProfileName() -> String {
|
||||
if scarfHermesHomeOverride() != nil {
|
||||
return "test-override"
|
||||
}
|
||||
return refreshIfNeeded().name
|
||||
}
|
||||
|
||||
/// Sentinel filename that the override path MUST contain for the
|
||||
/// override to be honored. Without it, production code refuses to
|
||||
/// pivot off the user's real `~/.hermes` even if the env var is
|
||||
/// set. This is the "even if a test leaks the env var, even if
|
||||
/// some non-test process inherits it, the user's data is safe"
|
||||
/// belt-and-braces guard. Tests create this marker before
|
||||
/// `setenv("SCARF_HERMES_HOME", ...)`.
|
||||
public static let testHomeMarkerFilename = ".scarf-test-home-marker"
|
||||
|
||||
/// Read `SCARF_HERMES_HOME` from the environment. Returns `nil` when
|
||||
/// unset or empty so production callers fall through to the profile
|
||||
/// resolver. The override must:
|
||||
/// 1. Be an absolute path — relative paths are rejected (they'd
|
||||
/// land relative to the cwd of whatever process happened to
|
||||
/// invoke the resolver, which is not what tests want).
|
||||
/// 2. Contain the sentinel marker file
|
||||
/// `<path>/<testHomeMarkerFilename>`. Without the marker we
|
||||
/// treat the env var as untrusted and ignore it. This protects
|
||||
/// the user's real `~/.hermes/` from any code path that
|
||||
/// accidentally exports `SCARF_HERMES_HOME` to the wrong value
|
||||
/// (e.g. a test crashed mid-teardown, an env var inherited
|
||||
/// from a parent shell, a misconfigured launchctl plist).
|
||||
/// Both checks are cheap — `FileManager.fileExists` against a
|
||||
/// known path is microseconds. The override is hot but not
|
||||
/// hot-hot, so an extra stat per call is negligible.
|
||||
private static func scarfHermesHomeOverride() -> String? {
|
||||
guard let raw = ProcessInfo.processInfo.environment["SCARF_HERMES_HOME"] else {
|
||||
return nil
|
||||
}
|
||||
let trimmed = raw.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { return nil }
|
||||
guard trimmed.hasPrefix("/") else {
|
||||
logger.warning("SCARF_HERMES_HOME=\(trimmed, privacy: .public) is not absolute; ignoring.")
|
||||
return nil
|
||||
}
|
||||
let markerPath = trimmed + "/" + testHomeMarkerFilename
|
||||
guard FileManager.default.fileExists(atPath: markerPath) else {
|
||||
logger.warning("SCARF_HERMES_HOME=\(trimmed, privacy: .public) lacks sentinel marker (\(testHomeMarkerFilename, privacy: .public)); ignoring to protect real ~/.hermes.")
|
||||
return nil
|
||||
}
|
||||
return trimmed
|
||||
}
|
||||
|
||||
/// Force a re-read on the next call, regardless of TTL. Test helper.
|
||||
public static func invalidateCache() {
|
||||
lock.withLock { $0.resolvedAt = .distantPast }
|
||||
@@ -95,15 +153,20 @@ public enum HermesProfileResolver {
|
||||
let defaultHome = defaultRootHome()
|
||||
let activeFile = defaultHome + "/active_profile"
|
||||
|
||||
// Absent file → default profile. This is the common case for users
|
||||
// who haven't run `hermes profile use ...` and shouldn't generate
|
||||
// any log noise.
|
||||
// Absent file → default profile. Common case for users who
|
||||
// haven't run `hermes profile use ...`. We still log at
|
||||
// `.info` (key=value, not warning) so support requests can
|
||||
// pull `log show … | grep ProfileResolver` and confirm the
|
||||
// resolver IS running and IS resolving to the default —
|
||||
// distinguishing "feature didn't fire" from "feature fired
|
||||
// and chose default" (issue #70).
|
||||
guard FileManager.default.fileExists(atPath: activeFile) else {
|
||||
logger.info("Resolved active Hermes profile: name=default, home=\(defaultHome, privacy: .public), source=default-no-file")
|
||||
return ("default", defaultHome)
|
||||
}
|
||||
|
||||
guard let raw = try? String(contentsOfFile: activeFile, encoding: .utf8) else {
|
||||
logger.warning("Found active_profile but could not read it; falling back to default profile.")
|
||||
logger.warning("Found active_profile but could not read it; falling back to default. home=\(defaultHome, privacy: .public)")
|
||||
return ("default", defaultHome)
|
||||
}
|
||||
|
||||
@@ -111,6 +174,7 @@ public enum HermesProfileResolver {
|
||||
|
||||
// Empty file or explicit "default" → default profile.
|
||||
if trimmed.isEmpty || trimmed == "default" {
|
||||
logger.info("Resolved active Hermes profile: name=default, home=\(defaultHome, privacy: .public), source=file-default")
|
||||
return ("default", defaultHome)
|
||||
}
|
||||
|
||||
@@ -129,7 +193,7 @@ public enum HermesProfileResolver {
|
||||
return ("default", defaultHome)
|
||||
}
|
||||
|
||||
logger.info("Resolved active Hermes profile to \(trimmed, privacy: .public) at \(profileHome, privacy: .public).")
|
||||
logger.info("Resolved active Hermes profile: name=\(trimmed, privacy: .public), home=\(profileHome, privacy: .public), source=file")
|
||||
return (trimmed, profileHome)
|
||||
}
|
||||
|
||||
|
||||
@@ -65,13 +65,15 @@ public struct ImageEncoder: Sendable {
|
||||
sourceFilename: String? = nil
|
||||
) throws -> ChatImageAttachment {
|
||||
guard !rawBytes.isEmpty else { throw EncoderError.empty }
|
||||
|
||||
ScarfMon.event(.render, "imageEncoder.input.bytes", count: 1, bytes: rawBytes.count)
|
||||
return try ScarfMon.measure(.render, "imageEncoder.downsample") {
|
||||
#if canImport(AppKit)
|
||||
guard let nsImage = NSImage(data: rawBytes) else { throw EncoderError.decodeFailed }
|
||||
let targetSize = Self.fittedSize(for: nsImage.size, maxLongEdge: Self.maxLongEdge)
|
||||
let mainData = try Self.jpegBytes(from: nsImage, size: targetSize)
|
||||
let thumbSize = Self.fittedSize(for: nsImage.size, maxLongEdge: Self.thumbnailLongEdge)
|
||||
let thumbData = try? Self.jpegBytes(from: nsImage, size: thumbSize)
|
||||
ScarfMon.event(.render, "imageEncoder.bytes", count: 1, bytes: mainData.count)
|
||||
return ChatImageAttachment(
|
||||
mimeType: "image/jpeg",
|
||||
base64Data: mainData.base64EncodedString(),
|
||||
@@ -86,6 +88,7 @@ public struct ImageEncoder: Sendable {
|
||||
let mainData = try Self.jpegBytes(from: uiImage, size: targetSize)
|
||||
let thumbSize = Self.fittedSize(for: uiImage.size, maxLongEdge: Self.thumbnailLongEdge)
|
||||
let thumbData = try? Self.jpegBytes(from: uiImage, size: thumbSize)
|
||||
ScarfMon.event(.render, "imageEncoder.bytes", count: 1, bytes: mainData.count)
|
||||
return ChatImageAttachment(
|
||||
mimeType: "image/jpeg",
|
||||
base64Data: mainData.base64EncodedString(),
|
||||
@@ -99,6 +102,7 @@ public struct ImageEncoder: Sendable {
|
||||
// input already looks like a JPEG, else refuse. Keeps the
|
||||
// package compiling without a hard AppKit/UIKit dep.
|
||||
if rawBytes.starts(with: [0xFF, 0xD8]) {
|
||||
ScarfMon.event(.render, "imageEncoder.bytes", count: 1, bytes: rawBytes.count)
|
||||
return ChatImageAttachment(
|
||||
mimeType: "image/jpeg",
|
||||
base64Data: rawBytes.base64EncodedString(),
|
||||
@@ -109,6 +113,7 @@ public struct ImageEncoder: Sendable {
|
||||
}
|
||||
throw EncoderError.unsupportedFormat
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
nonisolated private static func fittedSize(for source: CGSize, maxLongEdge: CGFloat) -> CGSize {
|
||||
|
||||
@@ -0,0 +1,503 @@
|
||||
import Foundation
|
||||
#if canImport(os)
|
||||
import os
|
||||
#endif
|
||||
|
||||
/// Async, transport-aware client for `hermes kanban …`. Wraps every CLI
|
||||
/// verb the v0.12 board exposes in a typed Swift surface.
|
||||
///
|
||||
/// **Concurrency.** This is a pure-I/O `actor` — no UI state. View models
|
||||
/// (`@MainActor` `@Observable`) hold a service reference and `await`
|
||||
/// methods. Each public method serializes through the actor, but the
|
||||
/// underlying CLI invocation runs on a `Task.detached(priority: .utility)`
|
||||
/// so two concurrent reads from different VMs don't queue end-to-end on
|
||||
/// a single thread.
|
||||
///
|
||||
/// **Hermes constraints surfaced as Swift constraints:**
|
||||
/// - There is no `update` verb, so there's no `update(taskId:title:body:)`.
|
||||
/// Mutations after create are state transitions (assign / claim /
|
||||
/// complete / block / unblock / archive / comment) or new comments.
|
||||
/// - The board is global with optional `tenant` namespacing — pass a
|
||||
/// tenant via `KanbanListFilter.tenant` for project-scoped views.
|
||||
/// - The CLI prints `"no matching tasks"` instead of `[]` when nothing
|
||||
/// matches a filter. We fold that into `[]` rather than throwing.
|
||||
public actor KanbanService {
|
||||
#if canImport(os)
|
||||
private static let logger = Logger(subsystem: "com.scarf", category: "KanbanService")
|
||||
#endif
|
||||
|
||||
private let context: ServerContext
|
||||
|
||||
public init(context: ServerContext) {
|
||||
self.context = context
|
||||
}
|
||||
|
||||
// MARK: - Reads
|
||||
|
||||
public func list(_ filter: KanbanListFilter = .all) async throws -> [HermesKanbanTask] {
|
||||
var args = ["kanban", "list"]
|
||||
args.append(contentsOf: filter.argv())
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 20)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "list")
|
||||
|
||||
// Empty filter on an empty board prints "no matching tasks" instead
|
||||
// of `[]`. Treat as empty rather than letting the JSON decode fail.
|
||||
if stdout.contains("no matching tasks") {
|
||||
return []
|
||||
}
|
||||
guard let data = stdout.data(using: .utf8) else {
|
||||
throw KanbanError.decoding(message: "non-UTF8 stdout")
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode([HermesKanbanTask].self, from: data)
|
||||
} catch {
|
||||
throw KanbanError.decoding(message: error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
public func show(taskId: String) async throws -> HermesKanbanTaskDetail {
|
||||
let args = ["kanban", "show", taskId, "--json"]
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "show")
|
||||
guard let data = stdout.data(using: .utf8) else {
|
||||
throw KanbanError.decoding(message: "non-UTF8 stdout")
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode(HermesKanbanTaskDetail.self, from: data)
|
||||
} catch {
|
||||
throw KanbanError.decoding(message: error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
public func runs(taskId: String) async throws -> [HermesKanbanRun] {
|
||||
let args = ["kanban", "runs", taskId, "--json"]
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "runs")
|
||||
guard let data = stdout.data(using: .utf8) else {
|
||||
throw KanbanError.decoding(message: "non-UTF8 stdout")
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode([HermesKanbanRun].self, from: data)
|
||||
} catch {
|
||||
// Some Hermes builds emit a `{"runs": [...]}` envelope.
|
||||
struct Wrapper: Decodable { let runs: [HermesKanbanRun] }
|
||||
if let wrapped = try? JSONDecoder().decode(Wrapper.self, from: data) {
|
||||
return wrapped.runs
|
||||
}
|
||||
throw KanbanError.decoding(message: error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
public func stats() async throws -> HermesKanbanStats {
|
||||
let args = ["kanban", "stats", "--json"]
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "stats")
|
||||
guard let data = stdout.data(using: .utf8) else {
|
||||
throw KanbanError.decoding(message: "non-UTF8 stdout")
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode(HermesKanbanStats.self, from: data)
|
||||
} catch {
|
||||
throw KanbanError.decoding(message: error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
/// Print the captured worker log for a task — `hermes kanban log
|
||||
/// <id>`. Returns whatever `$HERMES_HOME/kanban/logs/<id>` contains.
|
||||
/// Empty string when the worker hasn't written anything yet (or
|
||||
/// the task has never been claimed). Pass `tailBytes` to cap the
|
||||
/// returned size (useful when polling at high cadence).
|
||||
public func log(taskId: String, tailBytes: Int? = nil) async throws -> String {
|
||||
var args = ["kanban", "log"]
|
||||
if let tailBytes {
|
||||
args.append(contentsOf: ["--tail", String(tailBytes)])
|
||||
}
|
||||
args.append(taskId)
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 15)
|
||||
// `kanban log` exits with code 0 even when no log file exists —
|
||||
// it just prints "No log file." or similar to stdout. Tolerate
|
||||
// non-zero codes too: some Hermes versions emit a warning to
|
||||
// stderr and exit 1 when the log dir is missing.
|
||||
if code != 0 {
|
||||
let combined = stderr.isEmpty ? stdout : stderr
|
||||
// Treat "no log" sentinels as empty rather than as errors.
|
||||
let lower = combined.lowercased()
|
||||
if lower.contains("no log") || lower.contains("not found") {
|
||||
return ""
|
||||
}
|
||||
throw KanbanError.nonZeroExit(code: code, stderr: combined)
|
||||
}
|
||||
return stdout
|
||||
}
|
||||
|
||||
public func assignees() async throws -> [HermesKanbanAssignee] {
|
||||
// The `assignees` verb doesn't take `--json` consistently across
|
||||
// 0.12.x — pass it anyway and fall back to a tab-delimited parse
|
||||
// if Hermes printed a human table.
|
||||
let args = ["kanban", "assignees"]
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "assignees")
|
||||
|
||||
if let data = stdout.data(using: .utf8),
|
||||
let arr = try? JSONDecoder().decode([HermesKanbanAssignee].self, from: data) {
|
||||
return arr
|
||||
}
|
||||
|
||||
// Fallback: each non-blank line of the form
|
||||
// "<profile>\t<active>\t<total>"
|
||||
// OR "<profile> <active> <total>" (whitespace separated).
|
||||
return parseAssigneeTable(stdout)
|
||||
}
|
||||
|
||||
private nonisolated func parseAssigneeTable(_ text: String) -> [HermesKanbanAssignee] {
|
||||
var result: [HermesKanbanAssignee] = []
|
||||
// Profile names follow the same convention as `hermes -p <name>`
|
||||
// — letters, digits, hyphen, underscore. Anything else is
|
||||
// chrome (header rows, Rich box-drawing, fallback messages
|
||||
// like "(no assignees — create a profile with `hermes -p
|
||||
// <name> setup`)") and gets skipped.
|
||||
for raw in text.split(separator: "\n") {
|
||||
let line = raw.trimmingCharacters(in: .whitespaces)
|
||||
if line.isEmpty { continue }
|
||||
// Skip the column header row.
|
||||
if line.lowercased().hasPrefix("profile") { continue }
|
||||
// Skip the empty-state sentinel without trying to tokenize
|
||||
// it (used to leak "(no" into the picker).
|
||||
if line.lowercased().contains("no assignees") { continue }
|
||||
// Skip Rich box-drawing separators (only ─ + whitespace).
|
||||
if line.unicodeScalars.allSatisfy({ $0.value == 0x2500 || $0.properties.isWhitespace }) {
|
||||
continue
|
||||
}
|
||||
// Strip the active marker `◆` (U+25C6) some `hermes`
|
||||
// commands prefix to the active profile.
|
||||
var working = line
|
||||
if working.hasPrefix("◆") {
|
||||
working = String(working.dropFirst()).trimmingCharacters(in: .whitespaces)
|
||||
}
|
||||
let parts = working
|
||||
.split(whereSeparator: { $0 == "\t" || $0 == " " })
|
||||
.map { String($0) }
|
||||
.filter { !$0.isEmpty }
|
||||
guard let profile = parts.first else { continue }
|
||||
// Validate: must look like a real profile slug, not a word
|
||||
// out of an English sentence.
|
||||
guard profile.range(of: "^[a-zA-Z0-9_-]+$", options: .regularExpression) != nil else {
|
||||
continue
|
||||
}
|
||||
let active = (parts.count > 1) ? Int(parts[1]) ?? 0 : 0
|
||||
let total = (parts.count > 2) ? Int(parts[2]) ?? 0 : active
|
||||
result.append(HermesKanbanAssignee(profile: profile, activeCount: active, totalCount: total))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// MARK: - Writes
|
||||
|
||||
public func create(_ request: KanbanCreateRequest) async throws -> HermesKanbanTask {
|
||||
var args = ["kanban", "create"]
|
||||
args.append(contentsOf: request.argv())
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 30)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "create")
|
||||
guard let data = stdout.data(using: .utf8) else {
|
||||
throw KanbanError.decoding(message: "non-UTF8 stdout")
|
||||
}
|
||||
// Hermes returns the full task object when --json is set.
|
||||
do {
|
||||
return try JSONDecoder().decode(HermesKanbanTask.self, from: data)
|
||||
} catch {
|
||||
// Some builds emit just the new id on stdout. Fall back to a
|
||||
// follow-up `show` so the caller always gets a typed task.
|
||||
let trimmed = stdout.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
if !trimmed.isEmpty, !trimmed.contains("\n"), !trimmed.contains("{") {
|
||||
let detail = try await show(taskId: trimmed)
|
||||
return detail.task
|
||||
}
|
||||
throw KanbanError.decoding(message: error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
public func assign(taskId: String, profile: String?) async throws {
|
||||
let target = (profile?.isEmpty ?? true) ? "none" : profile!
|
||||
let args = ["kanban", "assign", taskId, target]
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "assign")
|
||||
}
|
||||
|
||||
@discardableResult
|
||||
public func claim(taskId: String, ttlSeconds: Int = 900) async throws -> String {
|
||||
let args = ["kanban", "claim", taskId, "--ttl", String(ttlSeconds)]
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 20)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "claim")
|
||||
// claim prints the resolved workspace path on stdout.
|
||||
return stdout.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
}
|
||||
|
||||
public func comment(taskId: String, text: String, author: String? = nil) async throws {
|
||||
var args = ["kanban", "comment"]
|
||||
if let author, !author.isEmpty {
|
||||
args.append(contentsOf: ["--author", author])
|
||||
}
|
||||
args.append(taskId)
|
||||
args.append(text)
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "comment")
|
||||
}
|
||||
|
||||
public func complete(
|
||||
taskIds: [String],
|
||||
result: String? = nil,
|
||||
summary: String? = nil,
|
||||
metadataJSON: String? = nil
|
||||
) async throws {
|
||||
guard !taskIds.isEmpty else { return }
|
||||
var args = ["kanban", "complete"]
|
||||
if let result, !result.isEmpty {
|
||||
args.append(contentsOf: ["--result", result])
|
||||
}
|
||||
if let summary, !summary.isEmpty {
|
||||
args.append(contentsOf: ["--summary", summary])
|
||||
}
|
||||
if let metadataJSON, !metadataJSON.isEmpty {
|
||||
args.append(contentsOf: ["--metadata", metadataJSON])
|
||||
}
|
||||
args.append(contentsOf: taskIds)
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 30)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "complete")
|
||||
}
|
||||
|
||||
public func block(taskId: String, reason: String? = nil) async throws {
|
||||
var args = ["kanban", "block", taskId]
|
||||
if let reason, !reason.trimmingCharacters(in: .whitespaces).isEmpty {
|
||||
// Hermes accepts free-form trailing words as the reason.
|
||||
args.append(contentsOf: reason.split(separator: " ").map(String.init))
|
||||
}
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "block")
|
||||
}
|
||||
|
||||
public func unblock(taskIds: [String]) async throws {
|
||||
guard !taskIds.isEmpty else { return }
|
||||
var args = ["kanban", "unblock"]
|
||||
args.append(contentsOf: taskIds)
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "unblock")
|
||||
}
|
||||
|
||||
public func archive(taskIds: [String]) async throws {
|
||||
guard !taskIds.isEmpty else { return }
|
||||
var args = ["kanban", "archive"]
|
||||
args.append(contentsOf: taskIds)
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "archive")
|
||||
}
|
||||
|
||||
@discardableResult
|
||||
public func dispatch(maxTasks: Int? = nil, dryRun: Bool = false) async throws -> KanbanDispatchSummary {
|
||||
var args = ["kanban", "dispatch", "--json"]
|
||||
if dryRun { args.append("--dry-run") }
|
||||
if let maxTasks { args.append(contentsOf: ["--max", String(maxTasks)]) }
|
||||
let (code, stdout, stderr) = await runHermes(args: args, timeout: 60)
|
||||
try ensureSuccess(code: code, stdout: stdout, stderr: stderr, verb: "dispatch")
|
||||
guard let data = stdout.data(using: .utf8) else {
|
||||
throw KanbanError.decoding(message: "non-UTF8 stdout")
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode(KanbanDispatchSummary.self, from: data)
|
||||
} catch {
|
||||
// Older builds may print human output. Return a stub summary.
|
||||
return KanbanDispatchSummary(promoted: 0, failed: 0, dryRun: dryRun, perTask: [])
|
||||
}
|
||||
}
|
||||
|
||||
public func link(parent: String, child: String) async throws {
|
||||
let args = ["kanban", "link", parent, child]
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "link")
|
||||
}
|
||||
|
||||
public func unlink(parent: String, child: String) async throws {
|
||||
let args = ["kanban", "unlink", parent, child]
|
||||
let (code, _, stderr) = await runHermes(args: args, timeout: 15)
|
||||
try ensureSuccess(code: code, stdout: "", stderr: stderr, verb: "unlink")
|
||||
}
|
||||
|
||||
// MARK: - Drag-drop transition mapper
|
||||
|
||||
/// Map a board-level column transition to the right Hermes verb call.
|
||||
/// Returns the list of CLI invocations the caller should run in order.
|
||||
/// Pure — no I/O. Called from VMs to build an action plan; the VM
|
||||
/// then either prompts the user (e.g. for a block reason) or calls
|
||||
/// the matching `KanbanService` methods.
|
||||
///
|
||||
/// Forbidden transitions throw `KanbanError.forbiddenTransition`
|
||||
/// rather than returning an empty plan, so callers can surface the
|
||||
/// reason to the user.
|
||||
public nonisolated static func plan(
|
||||
for transition: KanbanTransition
|
||||
) throws -> KanbanTransitionPlan {
|
||||
let from = transition.from
|
||||
let to = transition.to
|
||||
if from == to {
|
||||
return KanbanTransitionPlan(steps: [])
|
||||
}
|
||||
|
||||
// "Done" is terminal — Hermes has no `reopen` verb.
|
||||
if from == .done {
|
||||
throw KanbanError.forbiddenTransition(
|
||||
from: from.displayName,
|
||||
to: to.displayName,
|
||||
reason: "Done is terminal — create a follow-up task to continue work."
|
||||
)
|
||||
}
|
||||
|
||||
// Triage promotion isn't a CLI verb in v0.12 — it happens via
|
||||
// a specifier worker. UI should disallow drag from triage.
|
||||
if from == .triage {
|
||||
throw KanbanError.forbiddenTransition(
|
||||
from: from.displayName,
|
||||
to: to.displayName,
|
||||
reason: "Triage tasks are promoted by a specifier agent. Use the specifier worker pipeline."
|
||||
)
|
||||
}
|
||||
|
||||
// Archive lives outside the board — only via context menu.
|
||||
if to == .archived {
|
||||
return KanbanTransitionPlan(steps: [.archive])
|
||||
}
|
||||
|
||||
switch (from, to) {
|
||||
case (.upNext, .running):
|
||||
return KanbanTransitionPlan(steps: [.dispatch])
|
||||
case (.upNext, .blocked):
|
||||
return KanbanTransitionPlan(steps: [.block(reasonRequired: true)])
|
||||
case (.upNext, .done):
|
||||
// Direct todo→done is unusual but allowed (manual checkoff).
|
||||
return KanbanTransitionPlan(steps: [.complete(resultRequired: false)])
|
||||
case (.running, .blocked):
|
||||
return KanbanTransitionPlan(steps: [.block(reasonRequired: true)])
|
||||
case (.running, .done):
|
||||
return KanbanTransitionPlan(steps: [.complete(resultRequired: false)])
|
||||
case (.running, .upNext):
|
||||
// Release back to ready — no direct verb. Closest is unblock,
|
||||
// which only works for blocked tasks. Forbid for now.
|
||||
throw KanbanError.forbiddenTransition(
|
||||
from: from.displayName,
|
||||
to: to.displayName,
|
||||
reason: "Use the inspector's Comment + Unassign actions to hand a running task back."
|
||||
)
|
||||
case (.blocked, .upNext):
|
||||
return KanbanTransitionPlan(steps: [.unblock])
|
||||
case (.blocked, .running):
|
||||
return KanbanTransitionPlan(steps: [.unblock, .dispatch])
|
||||
case (.blocked, .done):
|
||||
return KanbanTransitionPlan(steps: [.unblock, .complete(resultRequired: false)])
|
||||
default:
|
||||
throw KanbanError.forbiddenTransition(
|
||||
from: from.displayName,
|
||||
to: to.displayName,
|
||||
reason: "No CLI path exists for this transition."
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - CLI invocation
|
||||
|
||||
private nonisolated func runHermes(
|
||||
args: [String],
|
||||
timeout: TimeInterval
|
||||
) async -> (exitCode: Int32, stdout: String, stderr: String) {
|
||||
let context = self.context
|
||||
return await Task.detached(priority: .utility) { () -> (Int32, String, String) in
|
||||
let transport = context.makeTransport()
|
||||
let executable = context.paths.hermesBinary
|
||||
do {
|
||||
let result = try transport.runProcess(
|
||||
executable: executable,
|
||||
args: args,
|
||||
stdin: nil,
|
||||
timeout: timeout
|
||||
)
|
||||
return (result.exitCode, result.stdoutString, result.stderrString)
|
||||
} catch let error as TransportError {
|
||||
let message = error.diagnosticStderr.isEmpty
|
||||
? (error.errorDescription ?? "transport error")
|
||||
: error.diagnosticStderr
|
||||
return (-1, "", message)
|
||||
} catch {
|
||||
return (-1, "", error.localizedDescription)
|
||||
}
|
||||
}.value
|
||||
}
|
||||
|
||||
private nonisolated func ensureSuccess(
|
||||
code: Int32,
|
||||
stdout: String,
|
||||
stderr: String,
|
||||
verb: String
|
||||
) throws {
|
||||
guard code != 0 else { return }
|
||||
if code == -1 && stderr.lowercased().contains("hermes binary not found") {
|
||||
throw KanbanError.cliMissing
|
||||
}
|
||||
let combined = stderr.isEmpty ? stdout : stderr
|
||||
#if canImport(os)
|
||||
Self.logger.warning("kanban \(verb) exit=\(code, privacy: .public) stderr=\(combined, privacy: .public)")
|
||||
#endif
|
||||
throw KanbanError.nonZeroExit(code: code, stderr: combined)
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Transition planning
|
||||
|
||||
/// Source/destination columns for a single drag-drop. Comparable to
|
||||
/// SwiftUI's `.dropDestination` payload but kept Sendable + Hashable
|
||||
/// so it can also drive iOS context-menu "Move to…" actions.
|
||||
public struct KanbanTransition: Sendable, Hashable {
|
||||
public let from: KanbanBoardColumn
|
||||
public let to: KanbanBoardColumn
|
||||
|
||||
public init(from: KanbanBoardColumn, to: KanbanBoardColumn) {
|
||||
self.from = from
|
||||
self.to = to
|
||||
}
|
||||
}
|
||||
|
||||
/// One Hermes verb call produced by `KanbanService.plan(for:)`. The VM
|
||||
/// resolves any user-input requirements (block reason, completion
|
||||
/// result) before invoking the corresponding actor method.
|
||||
///
|
||||
/// **Why `.dispatch` and not `.claim`.** `hermes kanban claim` reserves
|
||||
/// a task atomically and prints the workspace path — but it's a
|
||||
/// "manual alternative to the dispatcher" that assumes the caller will
|
||||
/// spawn the worker themselves. Scarf is not a worker host; the
|
||||
/// gateway-running dispatcher is. Calling `claim` from drag-drop
|
||||
/// flipped status to `running` without spawning any work, and the
|
||||
/// task got reclaimed (stale_lock) ~15 minutes later. The right
|
||||
/// verb is `dispatch`, which causes the dispatcher to spawn workers
|
||||
/// for every assigned `ready` task in one pass.
|
||||
public enum KanbanTransitionStep: Sendable, Equatable {
|
||||
/// Force a dispatcher pass so the gateway spawns workers for
|
||||
/// assigned `ready` tasks. Requires the task have an assignee
|
||||
/// — the dispatcher silently skips unassigned tasks.
|
||||
case dispatch
|
||||
case unblock
|
||||
case block(reasonRequired: Bool)
|
||||
case complete(resultRequired: Bool)
|
||||
case archive
|
||||
}
|
||||
|
||||
public struct KanbanTransitionPlan: Sendable, Equatable {
|
||||
public let steps: [KanbanTransitionStep]
|
||||
|
||||
public init(steps: [KanbanTransitionStep]) {
|
||||
self.steps = steps
|
||||
}
|
||||
|
||||
public var requiresBlockReason: Bool {
|
||||
steps.contains { if case .block(true) = $0 { return true } else { return false } }
|
||||
}
|
||||
|
||||
public var requiresCompleteResult: Bool {
|
||||
steps.contains { if case .complete(true) = $0 { return true } else { return false } }
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
import Foundation
|
||||
|
||||
/// Cross-platform read-only helper for `<project>/.scarf/manifest.json`'s
|
||||
/// `kanbanTenant` field. The full `ProjectTemplateManifest` Codable
|
||||
/// type lives in the Mac app target (with all the install/export
|
||||
/// machinery); iOS doesn't link it, so this lightweight projection
|
||||
/// gives both targets a way to read just the tenant slug without
|
||||
/// duplicating the entire manifest model.
|
||||
public struct KanbanTenantReader: Sendable {
|
||||
public let context: ServerContext
|
||||
|
||||
public nonisolated init(context: ServerContext) {
|
||||
self.context = context
|
||||
}
|
||||
|
||||
/// Read the project's Kanban tenant slug, or `nil` if the manifest
|
||||
/// doesn't exist or doesn't carry one. Cheap — single JSON parse
|
||||
/// of a tiny projection.
|
||||
public nonisolated func tenant(forProjectPath projectPath: String) -> String? {
|
||||
let manifestPath = projectPath + "/.scarf/manifest.json"
|
||||
let transport = context.makeTransport()
|
||||
guard transport.fileExists(manifestPath),
|
||||
let data = try? transport.readFile(manifestPath)
|
||||
else {
|
||||
return nil
|
||||
}
|
||||
return Self.tenant(fromManifestData: data)
|
||||
}
|
||||
|
||||
/// Pure-input variant for tests + tooling that already have the
|
||||
/// JSON bytes in hand. Returns `nil` when the bytes don't decode
|
||||
/// or the field isn't present.
|
||||
public nonisolated static func tenant(fromManifestData data: Data) -> String? {
|
||||
struct Projection: Decodable {
|
||||
let kanbanTenant: String?
|
||||
}
|
||||
return (try? JSONDecoder().decode(Projection.self, from: data))?.kanbanTenant
|
||||
}
|
||||
}
|
||||
@@ -178,7 +178,11 @@ public struct ModelCatalogService: Sendable {
|
||||
/// can keep using the sync method.
|
||||
public nonisolated func loadProvidersAsync() async -> [HermesProviderInfo] {
|
||||
await Task.detached { [self] in
|
||||
self.loadProviders()
|
||||
let providers = ScarfMon.measure(.diskIO, "modelCatalog.loadProviders") {
|
||||
self.loadProviders()
|
||||
}
|
||||
ScarfMon.event(.diskIO, "modelCatalog.providers.count", count: providers.count)
|
||||
return providers
|
||||
}.value
|
||||
}
|
||||
|
||||
@@ -218,7 +222,11 @@ public struct ModelCatalogService: Sendable {
|
||||
/// Issue #59.
|
||||
public nonisolated func loadModelsAsync(for providerID: String) async -> [HermesModelInfo] {
|
||||
await Task.detached { [self] in
|
||||
self.loadModels(for: providerID)
|
||||
let models = ScarfMon.measure(.diskIO, "modelCatalog.loadModels") {
|
||||
self.loadModels(for: providerID)
|
||||
}
|
||||
ScarfMon.event(.diskIO, "modelCatalog.models.count", count: models.count)
|
||||
return models
|
||||
}.value
|
||||
}
|
||||
|
||||
@@ -335,47 +343,49 @@ public struct ModelCatalogService: Sendable {
|
||||
/// Nous's catalog has no such model and Hermes later failed with
|
||||
/// HTTP 404 at runtime. Catch that at save time, not 6 hours later.
|
||||
public func validateModel(_ modelID: String, for providerID: String) -> ModelValidation {
|
||||
let trimmed = modelID.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else {
|
||||
return .invalid(providerName: providerID, suggestions: [])
|
||||
}
|
||||
ScarfMon.measure(.diskIO, "modelCatalog.validateModel") {
|
||||
let trimmed = modelID.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else {
|
||||
return .invalid(providerName: providerID, suggestions: [])
|
||||
}
|
||||
|
||||
// Overlay-only providers (Nous Portal, OpenAI Codex, Qwen
|
||||
// OAuth, …) serve their own catalogs that aren't mirrored to
|
||||
// models.dev, so we don't have a reliable way to check model
|
||||
// IDs locally. Treat any non-empty value as provisionally
|
||||
// valid — the worst case is the runtime 404 we hit in pass-1,
|
||||
// but the UI has the error banner now (M7 #2) to surface that
|
||||
// cleanly.
|
||||
//
|
||||
// Exception: if an overlay-only provider DOES appear in the
|
||||
// models.dev cache (unlikely but possible as catalogs evolve),
|
||||
// we fall through to the real check below.
|
||||
let models = loadModels(for: providerID)
|
||||
if models.isEmpty {
|
||||
if Self.overlayOnlyProviders[providerID] != nil {
|
||||
// Overlay-only providers (Nous Portal, OpenAI Codex, Qwen
|
||||
// OAuth, …) serve their own catalogs that aren't mirrored to
|
||||
// models.dev, so we don't have a reliable way to check model
|
||||
// IDs locally. Treat any non-empty value as provisionally
|
||||
// valid — the worst case is the runtime 404 we hit in pass-1,
|
||||
// but the UI has the error banner now (M7 #2) to surface that
|
||||
// cleanly.
|
||||
//
|
||||
// Exception: if an overlay-only provider DOES appear in the
|
||||
// models.dev cache (unlikely but possible as catalogs evolve),
|
||||
// we fall through to the real check below.
|
||||
let models = loadModels(for: providerID)
|
||||
if models.isEmpty {
|
||||
if Self.overlayOnlyProviders[providerID] != nil {
|
||||
return .valid
|
||||
}
|
||||
return .unknownProvider(providerID: providerID)
|
||||
}
|
||||
|
||||
if models.contains(where: { $0.modelID == trimmed }) {
|
||||
return .valid
|
||||
}
|
||||
return .unknownProvider(providerID: providerID)
|
||||
}
|
||||
|
||||
if models.contains(where: { $0.modelID == trimmed }) {
|
||||
return .valid
|
||||
// No exact match — offer the closest names (by prefix) as
|
||||
// suggestions. Up to 5, ordered by release date (newest
|
||||
// first — already the sort order of loadModels).
|
||||
let lowerTrimmed = trimmed.lowercased()
|
||||
let byPrefix = models
|
||||
.filter { $0.modelID.lowercased().hasPrefix(String(lowerTrimmed.prefix(3))) }
|
||||
.prefix(5)
|
||||
.map(\.modelID)
|
||||
let suggestions = byPrefix.isEmpty
|
||||
? Array(models.prefix(5).map(\.modelID))
|
||||
: Array(byPrefix)
|
||||
let providerName = providerByID(providerID)?.providerName ?? providerID
|
||||
return .invalid(providerName: providerName, suggestions: suggestions)
|
||||
}
|
||||
|
||||
// No exact match — offer the closest names (by prefix) as
|
||||
// suggestions. Up to 5, ordered by release date (newest
|
||||
// first — already the sort order of loadModels).
|
||||
let lowerTrimmed = trimmed.lowercased()
|
||||
let byPrefix = models
|
||||
.filter { $0.modelID.lowercased().hasPrefix(String(lowerTrimmed.prefix(3))) }
|
||||
.prefix(5)
|
||||
.map(\.modelID)
|
||||
let suggestions = byPrefix.isEmpty
|
||||
? Array(models.prefix(5).map(\.modelID))
|
||||
: Array(byPrefix)
|
||||
let providerName = providerByID(providerID)?.providerName ?? providerID
|
||||
return .invalid(providerName: providerName, suggestions: suggestions)
|
||||
}
|
||||
|
||||
// MARK: - Decoding
|
||||
|
||||
@@ -50,7 +50,48 @@ public enum ModelPreflight: Sendable {
|
||||
}
|
||||
|
||||
private static func isUnset(_ value: String) -> Bool {
|
||||
let trimmed = value.trimmingCharacters(in: .whitespaces).lowercased()
|
||||
let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
|
||||
return trimmed.isEmpty || trimmed == "unknown"
|
||||
}
|
||||
|
||||
/// Result of a `model.default` ↔ `model.provider` mismatch check.
|
||||
/// Captures the case where `model.default` carries a `<provider>/...`
|
||||
/// prefix that doesn't match the standalone `model.provider` key —
|
||||
/// observed in 2026-05-05 dogfooding when switching OAuth providers
|
||||
/// via Credential Pools left the prior provider's model name
|
||||
/// stranded in `model.default`. Hermes can't reconcile the two and
|
||||
/// chats die with an opaque `-32603 Internal error` at first prompt.
|
||||
public struct Mismatch: Sendable, Equatable {
|
||||
/// The provider prefix found in `model.default` (e.g. `"anthropic"`).
|
||||
public let prefixProvider: String
|
||||
/// The standalone `model.provider` value (e.g. `"nous"`).
|
||||
public let activeProvider: String
|
||||
/// The full `model.default` string as configured.
|
||||
public let modelDefault: String
|
||||
/// The bare model id (with the prefix stripped) — what the user
|
||||
/// would see if Scarf rewrites `model.default` for them.
|
||||
public let bareModel: String
|
||||
}
|
||||
|
||||
/// Detect a `model.default` / `model.provider` mismatch. Returns
|
||||
/// `nil` when there's no provider prefix on `model.default`, when
|
||||
/// either field is unset, or when the prefix matches the provider.
|
||||
/// Uses case-insensitive comparison — Hermes accepts both
|
||||
/// `Anthropic/...` and `anthropic/...` casings in the wild.
|
||||
public static func detectMismatch(_ config: HermesConfig) -> Mismatch? {
|
||||
let modelDefault = config.model.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
let activeProvider = config.provider.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !isUnset(modelDefault), !isUnset(activeProvider) else { return nil }
|
||||
guard let slash = modelDefault.firstIndex(of: "/") else { return nil }
|
||||
let prefix = String(modelDefault[..<slash])
|
||||
let bare = String(modelDefault[modelDefault.index(after: slash)...])
|
||||
guard !prefix.isEmpty, !bare.isEmpty else { return nil }
|
||||
guard prefix.caseInsensitiveCompare(activeProvider) != .orderedSame else { return nil }
|
||||
return Mismatch(
|
||||
prefixProvider: prefix,
|
||||
activeProvider: activeProvider,
|
||||
modelDefault: modelDefault,
|
||||
bareModel: bare
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
+107
-41
@@ -95,22 +95,68 @@ public struct NousModelCatalogService: Sendable {
|
||||
/// cache lands on the droplet, not the user's Mac). Missing or
|
||||
/// malformed cache → nil; the loader treats that as "no cache" and
|
||||
/// kicks off a fresh fetch.
|
||||
public func readCache() -> NousModelsCache? {
|
||||
let transport = context.makeTransport()
|
||||
guard transport.fileExists(cachePath) else { return nil }
|
||||
do {
|
||||
let data = try transport.readFile(cachePath)
|
||||
let decoder = JSONDecoder()
|
||||
decoder.dateDecodingStrategy = .iso8601
|
||||
let cache = try decoder.decode(NousModelsCache.self, from: data)
|
||||
guard cache.version == NousModelsCache.currentVersion else {
|
||||
Self.logger.info("nous models cache schema mismatch (got v\(cache.version), expected v\(NousModelsCache.currentVersion)); ignoring")
|
||||
/// Race readCache against a sleep so a hung remote `cat` doesn't
|
||||
/// stall the picker for the full transport-level timeout (60 s).
|
||||
/// On timeout returns nil — the caller treats that as "no usable
|
||||
/// cache" and falls through to the network fetch.
|
||||
public func readCacheWithTimeout(seconds: TimeInterval) async -> NousModelsCache? {
|
||||
await withTaskGroup(of: NousModelsCache?.self) { group in
|
||||
group.addTask { [self] in
|
||||
// Detached because readCache is sync + does blocking
|
||||
// SSH I/O; running on the cooperative pool is fine
|
||||
// for one task but we don't want to fight executor
|
||||
// scheduling with the timer task below.
|
||||
await Task.detached { [self] in
|
||||
readCache()
|
||||
}.value
|
||||
}
|
||||
group.addTask {
|
||||
try? await Task.sleep(nanoseconds: UInt64(seconds * 1_000_000_000))
|
||||
ScarfMon.event(.diskIO, "nous.readCache.timeoutFired", count: 1)
|
||||
return nil
|
||||
}
|
||||
// First completion wins; cancel the other.
|
||||
let first = await group.next() ?? nil
|
||||
group.cancelAll()
|
||||
return first
|
||||
}
|
||||
}
|
||||
|
||||
public func readCache() -> NousModelsCache? {
|
||||
ScarfMon.measure(.diskIO, "nous.readCache") {
|
||||
let transport = context.makeTransport()
|
||||
// Split into separate measure points so the next perf
|
||||
// capture localizes the 60-second observed beach ball
|
||||
// — was it the fileExists probe, the read itself, or
|
||||
// the JSON decode? Each on its own ScarfMon row.
|
||||
let exists = ScarfMon.measure(.diskIO, "nous.readCache.fileExists") {
|
||||
transport.fileExists(cachePath)
|
||||
}
|
||||
guard exists else { return nil }
|
||||
do {
|
||||
let data = try ScarfMon.measure(.diskIO, "nous.readCache.readFile") {
|
||||
try transport.readFile(cachePath)
|
||||
}
|
||||
ScarfMon.event(.diskIO, "nous.readCache.bytes", count: 1, bytes: data.count)
|
||||
return ScarfMon.measure(.diskIO, "nous.readCache.decode") {
|
||||
let decoder = JSONDecoder()
|
||||
decoder.dateDecodingStrategy = .iso8601
|
||||
do {
|
||||
let cache = try decoder.decode(NousModelsCache.self, from: data)
|
||||
guard cache.version == NousModelsCache.currentVersion else {
|
||||
Self.logger.info("nous models cache schema mismatch (got v\(cache.version), expected v\(NousModelsCache.currentVersion)); ignoring")
|
||||
return Optional<NousModelsCache>.none
|
||||
}
|
||||
return cache
|
||||
} catch {
|
||||
Self.logger.warning("couldn't decode nous models cache: \(error.localizedDescription, privacy: .public)")
|
||||
return Optional<NousModelsCache>.none
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
Self.logger.warning("couldn't read nous models cache: \(error.localizedDescription, privacy: .public)")
|
||||
return nil
|
||||
}
|
||||
return cache
|
||||
} catch {
|
||||
Self.logger.warning("couldn't decode nous models cache: \(error.localizedDescription, privacy: .public)")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,15 +194,22 @@ public struct NousModelCatalogService: Sendable {
|
||||
// The subscription service already checks for `present`; we
|
||||
// re-read the raw token here because we need the actual string,
|
||||
// not just a Bool. Mirrors the SubscriptionService parse path.
|
||||
let transport = context.makeTransport()
|
||||
guard transport.fileExists(context.paths.authJSON) else { return nil }
|
||||
guard let data = try? transport.readFile(context.paths.authJSON) else { return nil }
|
||||
guard let root = try? JSONSerialization.jsonObject(with: data) as? [String: Any] else { return nil }
|
||||
let providers = root["providers"] as? [String: Any] ?? [:]
|
||||
let nous = providers["nous"] as? [String: Any]
|
||||
let token = nous?["access_token"] as? String
|
||||
guard let token, !token.isEmpty else { return nil }
|
||||
return token
|
||||
// ScarfMon: separate `nous.bearerToken` measure point because
|
||||
// this is the second auth.json read of the picker's open
|
||||
// sequence (subscriptionService.loadState() did the first).
|
||||
// Together with `nous.subscription.loadState`, total two SSH
|
||||
// round-trips of the same file — candidate for caching.
|
||||
ScarfMon.measure(.diskIO, "nous.bearerToken") {
|
||||
let transport = context.makeTransport()
|
||||
guard transport.fileExists(context.paths.authJSON) else { return nil }
|
||||
guard let data = try? transport.readFile(context.paths.authJSON) else { return nil }
|
||||
guard let root = try? JSONSerialization.jsonObject(with: data) as? [String: Any] else { return nil }
|
||||
let providers = root["providers"] as? [String: Any] ?? [:]
|
||||
let nous = providers["nous"] as? [String: Any]
|
||||
let token = nous?["access_token"] as? String
|
||||
guard let token, !token.isEmpty else { return nil }
|
||||
return token
|
||||
}
|
||||
}
|
||||
|
||||
/// Make the API call. Times out after `requestTimeout` so a hung
|
||||
@@ -164,25 +217,28 @@ public struct NousModelCatalogService: Sendable {
|
||||
/// `[NousModel]` on success, throws on any HTTP / decode error so
|
||||
/// the caller can log + fall back.
|
||||
public func fetchModels() async throws -> [NousModel] {
|
||||
guard let token = bearerToken() else {
|
||||
throw NousModelCatalogError.notAuthenticated
|
||||
}
|
||||
var request = URLRequest(url: Self.baseURL)
|
||||
request.httpMethod = "GET"
|
||||
request.timeoutInterval = Self.requestTimeout
|
||||
request.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
|
||||
request.setValue("application/json", forHTTPHeaderField: "Accept")
|
||||
try await ScarfMon.measureAsync(.transport, "nous.fetchModels") {
|
||||
guard let token = bearerToken() else {
|
||||
throw NousModelCatalogError.notAuthenticated
|
||||
}
|
||||
var request = URLRequest(url: Self.baseURL)
|
||||
request.httpMethod = "GET"
|
||||
request.timeoutInterval = Self.requestTimeout
|
||||
request.setValue("Bearer \(token)", forHTTPHeaderField: "Authorization")
|
||||
request.setValue("application/json", forHTTPHeaderField: "Accept")
|
||||
|
||||
let (data, response) = try await session.data(for: request)
|
||||
guard let http = response as? HTTPURLResponse else {
|
||||
throw NousModelCatalogError.transport("non-HTTP response")
|
||||
let (data, response) = try await session.data(for: request)
|
||||
guard let http = response as? HTTPURLResponse else {
|
||||
throw NousModelCatalogError.transport("non-HTTP response")
|
||||
}
|
||||
guard (200..<300).contains(http.statusCode) else {
|
||||
throw NousModelCatalogError.http(status: http.statusCode)
|
||||
}
|
||||
struct Envelope: Decodable { let data: [NousModel] }
|
||||
let envelope = try JSONDecoder().decode(Envelope.self, from: data)
|
||||
ScarfMon.event(.transport, "nous.fetchModels.bytes", count: envelope.data.count, bytes: data.count)
|
||||
return envelope.data
|
||||
}
|
||||
guard (200..<300).contains(http.statusCode) else {
|
||||
throw NousModelCatalogError.http(status: http.statusCode)
|
||||
}
|
||||
struct Envelope: Decodable { let data: [NousModel] }
|
||||
let envelope = try JSONDecoder().decode(Envelope.self, from: data)
|
||||
return envelope.data
|
||||
}
|
||||
|
||||
// MARK: - Public entry
|
||||
@@ -193,7 +249,17 @@ public struct NousModelCatalogService: Sendable {
|
||||
/// based on the case so it can show a "could not refresh" hint
|
||||
/// next to a stale-but-still-useful list.
|
||||
public func loadModels(forceRefresh: Bool = false) async -> NousModelsLoadResult {
|
||||
let cached = readCache()
|
||||
// Cache-read with a short timeout. The underlying SSH `cat`
|
||||
// can hang on a corrupted or oversized cache file (a
|
||||
// 120-second picker stall observed in the wild — two 60 s
|
||||
// timeouts stacked from a duplicated read; perf capture
|
||||
// localized to `nous.readCache.readFile`). Cache is a
|
||||
// performance hint, not a correctness requirement; if it
|
||||
// doesn't return in 5 s, fall through to the network fetch
|
||||
// and let writeCache rebuild it. The runaway `cat` keeps
|
||||
// running on its own 60 s transport timeout but no longer
|
||||
// blocks the picker.
|
||||
let cached = await readCacheWithTimeout(seconds: 5)
|
||||
|
||||
if let cached, !forceRefresh, !isCacheStale(cached) {
|
||||
return .cache(models: cached.models, fetchedAt: cached.fetchedAt, refreshError: nil)
|
||||
|
||||
@@ -15,14 +15,18 @@ public struct ProjectDashboardService: Sendable {
|
||||
// MARK: - Registry
|
||||
|
||||
public func loadRegistry() -> ProjectRegistry {
|
||||
guard let data = try? transport.readFile(context.paths.projectsRegistry) else {
|
||||
return ProjectRegistry(projects: [])
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode(ProjectRegistry.self, from: data)
|
||||
} catch {
|
||||
Self.logger.error("Failed to decode project registry: \(error.localizedDescription, privacy: .public)")
|
||||
return ProjectRegistry(projects: [])
|
||||
// Tracks time spent reading + decoding projects.json from the transport
|
||||
// (local file or SSH). Helps spot slow remote round-trips.
|
||||
ScarfMon.measure(.diskIO, "dashboard.loadRegistry") {
|
||||
guard let data = try? transport.readFile(context.paths.projectsRegistry) else {
|
||||
return ProjectRegistry(projects: [])
|
||||
}
|
||||
do {
|
||||
return try JSONDecoder().decode(ProjectRegistry.self, from: data)
|
||||
} catch {
|
||||
Self.logger.error("Failed to decode project registry: \(error.localizedDescription, privacy: .public)")
|
||||
return ProjectRegistry(projects: [])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,251 @@
|
||||
import Foundation
|
||||
|
||||
/// Pure block-splice logic for Scarf's managed regions inside
|
||||
/// `~/.hermes/.env`. Each registered project that has at least one
|
||||
/// resolved secret carries one block, bounded by:
|
||||
///
|
||||
/// ```
|
||||
/// # scarf-secrets:begin <slug>
|
||||
/// SCARF_<UPPER_SLUG>_<UPPER_FIELDKEY>=<value>
|
||||
/// ...
|
||||
/// # scarf-secrets:end <slug>
|
||||
/// ```
|
||||
///
|
||||
/// The Mac wraps this in `KeychainEnvMirror` (Keychain-aware, atomic
|
||||
/// write, mode-0600 enforcement). This file handles only the marker
|
||||
/// contract + key naming + splice — logic that's testable in isolation
|
||||
/// against an in-memory string and shared across hosts.
|
||||
///
|
||||
/// **Why `~/.hermes/.env`.** Hermes's cron scheduler reloads that file
|
||||
/// fresh on every tick (cron/scheduler.py:897-903), so values become
|
||||
/// available to the agent's tool-invoked subprocesses (terminal,
|
||||
/// code_exec) without any Hermes-side change. Per-project `.env` is
|
||||
/// not loaded at cron time today, hence we mirror into the global
|
||||
/// file with namespaced keys.
|
||||
///
|
||||
/// **Marker contract is load-bearing.** Both markers carry the slug on
|
||||
/// the same line so a multi-project file is parsed deterministically
|
||||
/// and one project's edits can't disturb another's block.
|
||||
public enum SecretsEnvBlock {
|
||||
|
||||
/// Stable across releases — entries on disk reference these
|
||||
/// strings and a marker change would orphan every existing block.
|
||||
public static let beginMarkerPrefix = "# scarf-secrets:begin "
|
||||
public static let endMarkerPrefix = "# scarf-secrets:end "
|
||||
|
||||
// MARK: - Key naming
|
||||
|
||||
/// Build the env-var name for a (slug, fieldKey) pair. Uppercases,
|
||||
/// replaces every non-alphanumeric character with `_`, prefixes
|
||||
/// `SCARF_`. Stable: rotating a value writes to the same key.
|
||||
public static func envKeyName(slug: String, fieldKey: String) -> String {
|
||||
"SCARF_" + sanitize(slug) + "_" + sanitize(fieldKey)
|
||||
}
|
||||
|
||||
private static func sanitize(_ s: String) -> String {
|
||||
var out = ""
|
||||
for scalar in s.unicodeScalars {
|
||||
let c = Character(scalar)
|
||||
let isAlpha = ("A"..."Z").contains(c) || ("a"..."z").contains(c)
|
||||
let isDigit = ("0"..."9").contains(c)
|
||||
if isAlpha || isDigit {
|
||||
out.append(Character(scalar.properties.uppercaseMapping))
|
||||
} else {
|
||||
out.append("_")
|
||||
}
|
||||
}
|
||||
// Collapse runs of underscores so `foo--bar` doesn't become
|
||||
// `FOO__BAR` (two underscores trips dotenv parsers more often
|
||||
// than one). Trim leading/trailing underscores too.
|
||||
while out.contains("__") {
|
||||
out = out.replacingOccurrences(of: "__", with: "_")
|
||||
}
|
||||
while out.hasPrefix("_") { out.removeFirst() }
|
||||
while out.hasSuffix("_") { out.removeLast() }
|
||||
return out.isEmpty ? "UNNAMED" : out
|
||||
}
|
||||
|
||||
// MARK: - Block render
|
||||
|
||||
/// Render the bounded block for a single project. Empty `entries`
|
||||
/// produces an empty string — callers should treat that as
|
||||
/// "remove the project's block" rather than "write an empty
|
||||
/// block." `entries` are emitted in stable sort order so two
|
||||
/// runs with the same input produce byte-identical output.
|
||||
public static func renderBlock(
|
||||
slug: String,
|
||||
entries: [(key: String, value: String)]
|
||||
) -> String {
|
||||
guard !entries.isEmpty else { return "" }
|
||||
let sorted = entries.sorted { $0.key < $1.key }
|
||||
var lines: [String] = []
|
||||
lines.append(beginMarkerPrefix + slug)
|
||||
for entry in sorted {
|
||||
lines.append("\(entry.key)=\(escape(entry.value))")
|
||||
}
|
||||
lines.append(endMarkerPrefix + slug)
|
||||
return lines.joined(separator: "\n")
|
||||
}
|
||||
|
||||
/// Quote values that would confuse python-dotenv: anything with
|
||||
/// whitespace, `#`, `$`, or quote characters. Single quotes around
|
||||
/// the value are dotenv-canonical and preserve `$`-style
|
||||
/// references literally (no shell expansion). Backslash-escape
|
||||
/// embedded single quotes by closing+reopening: `'foo'\''bar'`.
|
||||
private static func escape(_ value: String) -> String {
|
||||
let needsQuoting = value.contains(where: { c in
|
||||
c.isWhitespace || c == "#" || c == "$" || c == "\"" || c == "'" || c == "\\"
|
||||
})
|
||||
if !needsQuoting { return value }
|
||||
let escaped = value.replacingOccurrences(of: "'", with: "'\\''")
|
||||
return "'" + escaped + "'"
|
||||
}
|
||||
|
||||
// MARK: - Splice
|
||||
|
||||
/// Splice `block` (already-rendered, with markers) into `existing`
|
||||
/// for the named `slug`. Three cases:
|
||||
/// 1. `existing` already has a `# scarf-secrets:begin <slug>` /
|
||||
/// `# scarf-secrets:end <slug>` pair → replace the inclusive
|
||||
/// region. Other slugs' blocks are preserved byte-identically.
|
||||
/// 2. `existing` has no block for this slug → append after a
|
||||
/// blank line at the end of file.
|
||||
/// 3. `block` is empty → behave like `removeBlock`.
|
||||
///
|
||||
/// Idempotent: feeding the output of one call back through
|
||||
/// `applyBlock` with the same inputs produces the same string.
|
||||
public static func applyBlock(
|
||||
_ block: String,
|
||||
forSlug slug: String,
|
||||
to existing: String
|
||||
) -> String {
|
||||
if block.isEmpty {
|
||||
return removeBlock(forSlug: slug, from: existing)
|
||||
}
|
||||
if let region = blockRange(forSlug: slug, in: existing) {
|
||||
// Replace the inclusive region. `blockRange` covers the
|
||||
// begin marker line through the end marker line plus any
|
||||
// trailing newline so `removeBlock` doesn't leave a
|
||||
// dangling blank line — but for `applyBlock`, we need to
|
||||
// re-emit that trailing newline so a round-trip
|
||||
// (mirror→read→mirror with identical entries) produces
|
||||
// byte-identical output. Without this, the second mirror
|
||||
// would write a file shorter by one newline byte and
|
||||
// bump the file's mtime, breaking the
|
||||
// no-op-when-unchanged contract that the launch
|
||||
// reconciler relies on.
|
||||
let before = String(existing[existing.startIndex..<region.lowerBound])
|
||||
let after = String(existing[region.upperBound..<existing.endIndex])
|
||||
// Restore a trailing newline only when the consumed region
|
||||
// had one (i.e., the block wasn't at end-of-string with
|
||||
// no terminating newline).
|
||||
let consumedTrailingNewline = region.upperBound > existing.startIndex
|
||||
&& existing[existing.index(before: region.upperBound)] == "\n"
|
||||
let separator = consumedTrailingNewline ? "\n" : ""
|
||||
return before + block + separator + after
|
||||
}
|
||||
// Append at end of file, separated from preceding content by
|
||||
// a blank line. Empty-or-whitespace files just become the
|
||||
// block plus a trailing newline.
|
||||
let trimmed = existing.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
if trimmed.isEmpty {
|
||||
return block + "\n"
|
||||
}
|
||||
let normalized = trimmingRightNewlines(existing)
|
||||
return normalized + "\n\n" + block + "\n"
|
||||
}
|
||||
|
||||
/// Strip the bounded block for `slug` from `existing`. No-op when
|
||||
/// absent. Preserves all other slugs' blocks and user-authored
|
||||
/// content byte-identically.
|
||||
public static func removeBlock(forSlug slug: String, from existing: String) -> String {
|
||||
guard let region = blockRange(forSlug: slug, in: existing) else {
|
||||
return existing
|
||||
}
|
||||
let before = String(existing[existing.startIndex..<region.lowerBound])
|
||||
let after = String(existing[region.upperBound..<existing.endIndex])
|
||||
// Collapse the blank line we may have inserted at append time
|
||||
// so repeated install/uninstall cycles don't accumulate
|
||||
// blank lines. Specifically: if `before` ends in `\n\n` and
|
||||
// `after` starts with `\n`, drop one of the newlines.
|
||||
var trimmedBefore = before
|
||||
var trimmedAfter = after
|
||||
if trimmedBefore.hasSuffix("\n\n") && trimmedAfter.hasPrefix("\n") {
|
||||
trimmedAfter.removeFirst()
|
||||
} else if trimmedBefore.hasSuffix("\n\n") {
|
||||
trimmedBefore.removeLast()
|
||||
}
|
||||
return trimmedBefore + trimmedAfter
|
||||
}
|
||||
|
||||
// MARK: - Range scan
|
||||
|
||||
/// Locate the inclusive character range covering one project's
|
||||
/// block, including a trailing newline if present so removal
|
||||
/// doesn't leave a dangling empty line. Returns nil when the
|
||||
/// block isn't present.
|
||||
private static func blockRange(
|
||||
forSlug slug: String,
|
||||
in existing: String
|
||||
) -> Range<String.Index>? {
|
||||
let beginLine = beginMarkerPrefix + slug
|
||||
let endLine = endMarkerPrefix + slug
|
||||
// Match begin marker as a full line — guard against false
|
||||
// positives where a slug is a prefix of another slug
|
||||
// (e.g. "foo" vs "foo-bar"). Require the marker to be
|
||||
// followed immediately by `\n` or end-of-string.
|
||||
guard let beginRange = lineRange(of: beginLine, in: existing) else {
|
||||
return nil
|
||||
}
|
||||
// Search for the matching end marker AFTER the begin range —
|
||||
// can't use a leading-anchor scan because there may be other
|
||||
// slugs' end markers between begin and the matching end.
|
||||
let searchStart = beginRange.upperBound
|
||||
guard let endRange = lineRange(of: endLine, in: existing, startingAt: searchStart) else {
|
||||
return nil
|
||||
}
|
||||
// Include a trailing newline if the file has one immediately
|
||||
// after the end marker — keeps the file shape clean across
|
||||
// remove operations.
|
||||
var upper = endRange.upperBound
|
||||
if upper < existing.endIndex, existing[upper] == "\n" {
|
||||
upper = existing.index(after: upper)
|
||||
}
|
||||
return beginRange.lowerBound..<upper
|
||||
}
|
||||
|
||||
/// Find a substring that appears as a complete line — bounded by
|
||||
/// start-of-string or `\n` on the left and `\n` or end-of-string
|
||||
/// on the right. Returns the range of the substring itself, not
|
||||
/// including any surrounding newlines.
|
||||
private static func lineRange(
|
||||
of needle: String,
|
||||
in haystack: String,
|
||||
startingAt start: String.Index? = nil
|
||||
) -> Range<String.Index>? {
|
||||
var searchStart = start ?? haystack.startIndex
|
||||
while searchStart <= haystack.endIndex {
|
||||
guard let range = haystack.range(of: needle, range: searchStart..<haystack.endIndex) else {
|
||||
return nil
|
||||
}
|
||||
let leftOK = range.lowerBound == haystack.startIndex
|
||||
|| haystack[haystack.index(before: range.lowerBound)] == "\n"
|
||||
let rightOK = range.upperBound == haystack.endIndex
|
||||
|| haystack[range.upperBound] == "\n"
|
||||
if leftOK && rightOK {
|
||||
return range
|
||||
}
|
||||
// Advance past this false positive and keep searching.
|
||||
searchStart = range.upperBound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
private static func trimmingRightNewlines(_ s: String) -> String {
|
||||
var result = s
|
||||
while let last = result.last, last.isNewline {
|
||||
result.removeLast()
|
||||
}
|
||||
return result
|
||||
}
|
||||
}
|
||||
@@ -133,12 +133,20 @@ public struct SkillSnapshotDiff: Sendable, Equatable {
|
||||
}
|
||||
|
||||
/// Compact label for the "What's New" pill, e.g.
|
||||
/// "2 new, 4 updated since you last looked" or "1 new skill".
|
||||
/// "2 new, 4 changed since you last looked" or "1 new skill".
|
||||
///
|
||||
/// Wording note (issue #78): we used to say "X updated since you
|
||||
/// last looked" but the same screen also surfaces an "Updates"
|
||||
/// sub-tab driven by `hermes skills check` (skills with newer
|
||||
/// **upstream** versions available). Two surfaces with the word
|
||||
/// "update" meaning two different things read as a contradiction
|
||||
/// to the user. "Changed" describes the local file delta without
|
||||
/// colliding with upstream-update vocabulary.
|
||||
public var label: String {
|
||||
switch (newCount, updatedCount) {
|
||||
case (let n, 0): return n == 1 ? "1 new skill since you last looked" : "\(n) new skills since you last looked"
|
||||
case (0, let u): return u == 1 ? "1 updated skill since you last looked" : "\(u) updated skills since you last looked"
|
||||
default: return "\(newCount) new, \(updatedCount) updated since you last looked"
|
||||
case (0, let u): return u == 1 ? "1 changed skill since you last looked" : "\(u) changed skills since you last looked"
|
||||
default: return "\(newCount) new, \(updatedCount) changed since you last looked"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
import Foundation
|
||||
|
||||
/// Process-wide toggles for test-mode launches.
|
||||
///
|
||||
/// Read `CommandLine.arguments` once at first access and cache the result so
|
||||
/// any code path can ask `TestModeFlags.shared.isTestMode` without paying for
|
||||
/// a re-scan. The harness sets `--scarf-test-mode` from XCUITest's
|
||||
/// `XCUIApplication.launchArguments` and pairs it with `SCARF_HERMES_HOME`
|
||||
/// (read by `HermesProfileResolver`) to drive Scarf against an isolated
|
||||
/// Hermes home.
|
||||
///
|
||||
/// The flags themselves don't do anything on their own — they're hook points
|
||||
/// for production code paths to gate behavior. v1 lands the wiring; the
|
||||
/// gating sites (Sparkle update prompt, capability live-probe, first-run
|
||||
/// walkthrough) are added incrementally as the harness exercises them and
|
||||
/// surfaces flakes.
|
||||
public struct TestModeFlags: Sendable {
|
||||
/// True when the process was launched with `--scarf-test-mode`. Read
|
||||
/// once from `CommandLine.arguments`; never mutated.
|
||||
public let isTestMode: Bool
|
||||
|
||||
/// Default singleton — cached on first access. Production code reads
|
||||
/// this; tests that need a different shape construct their own value.
|
||||
public static let shared: TestModeFlags = TestModeFlags(
|
||||
arguments: CommandLine.arguments
|
||||
)
|
||||
|
||||
/// Constructor exposed for tests so a synthetic argv can be passed
|
||||
/// without involving the real `CommandLine`. Production callers use
|
||||
/// `.shared`.
|
||||
public init(arguments: [String]) {
|
||||
self.isTestMode = arguments.contains("--scarf-test-mode")
|
||||
}
|
||||
}
|
||||
@@ -25,6 +25,63 @@ public struct LocalTransport: ServerTransport {
|
||||
self.contextID = contextID
|
||||
}
|
||||
|
||||
// MARK: - Environment enrichment
|
||||
|
||||
/// Injection point for local-subprocess environment enrichment.
|
||||
/// Mirrors `SSHTransport.environmentEnricher` — the Mac app wires
|
||||
/// this at launch to `HermesFileService.enrichedEnvironment()`,
|
||||
/// which probes the user's login shell for PATH + credential env
|
||||
/// vars. Without it, GUI-launched Scarf hands subprocesses a
|
||||
/// stripped `/usr/bin:/bin:/usr/sbin:/sbin` PATH and child
|
||||
/// `hermes` invocations from inside spawned workers fail with
|
||||
/// `executable not found on PATH`.
|
||||
///
|
||||
/// Set once at app launch (startup is single-threaded). Tests may
|
||||
/// inject a stub. iOS leaves this `nil` because LocalTransport
|
||||
/// doesn't run subprocesses there.
|
||||
nonisolated(unsafe) public static var environmentEnricher: (@Sendable () -> [String: String])?
|
||||
|
||||
/// Build the environment dict for a single subprocess. Process
|
||||
/// env wins for keys it has; the enricher fills gaps + always
|
||||
/// owns PATH (which is the whole point of running it). The
|
||||
/// executable's parent directory is appended as a final fallback
|
||||
/// so `runProcess` works even before the enricher has been wired
|
||||
/// (during very early startup, in tests, etc.).
|
||||
nonisolated static func subprocessEnvironment(forExecutable executable: String) -> [String: String] {
|
||||
var env = ProcessInfo.processInfo.environment
|
||||
if let enricher = Self.environmentEnricher {
|
||||
let extra = enricher()
|
||||
for (key, value) in extra where !value.isEmpty {
|
||||
if key == "PATH" {
|
||||
// Enricher always wins for PATH — that's the
|
||||
// whole reason the enricher exists. The GUI
|
||||
// process PATH is the broken thing we're
|
||||
// replacing.
|
||||
env[key] = value
|
||||
} else if (env[key] ?? "").isEmpty {
|
||||
// For other keys (credential env, locale, etc.)
|
||||
// an explicit non-empty value in the GUI
|
||||
// environment wins; an empty or absent value
|
||||
// gets filled by the shell-harvested copy.
|
||||
env[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
// Always make sure the executable's own directory is on PATH —
|
||||
// covers the case where the enricher hasn't been wired (tests,
|
||||
// pre-launch helpers) but a child process still tries to spawn
|
||||
// its sibling tools by bare name.
|
||||
let dir = (executable as NSString).deletingLastPathComponent
|
||||
if !dir.isEmpty {
|
||||
let currentPATH = env["PATH"] ?? "/usr/bin:/bin:/usr/sbin:/sbin"
|
||||
let parts = currentPATH.split(separator: ":").map(String.init)
|
||||
if !parts.contains(dir) {
|
||||
env["PATH"] = "\(dir):\(currentPATH)"
|
||||
}
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
// MARK: - Files
|
||||
|
||||
public func readFile(_ path: String) throws -> Data {
|
||||
@@ -116,6 +173,17 @@ public struct LocalTransport: ServerTransport {
|
||||
let proc = Process()
|
||||
proc.executableURL = URL(fileURLWithPath: executable)
|
||||
proc.arguments = args
|
||||
// Hand subprocesses an environment that includes the user's
|
||||
// login-shell PATH. Without this, `hermes` (pipx-installed at
|
||||
// `~/.local/bin/hermes`) ends up running with macOS's GUI
|
||||
// launch-services PATH (`/usr/bin:/bin:/usr/sbin:/sbin`), and
|
||||
// when Hermes itself shells out to spawn a worker (e.g. the
|
||||
// kanban dispatcher invoking `hermes` by name from a Python
|
||||
// subprocess), it returns "executable not found on PATH" and
|
||||
// the run records `outcome=spawn_failed`. Mirrors the SSH
|
||||
// transport's environmentEnricher hook and is wired by
|
||||
// `scarfApp.swift` at launch.
|
||||
proc.environment = Self.subprocessEnvironment(forExecutable: executable)
|
||||
let stdoutPipe = Pipe()
|
||||
let stderrPipe = Pipe()
|
||||
let stdinPipe = Pipe()
|
||||
@@ -289,18 +357,35 @@ public struct LocalTransport: ServerTransport {
|
||||
#endif
|
||||
}
|
||||
|
||||
// MARK: - SQLite
|
||||
// MARK: - Script streaming
|
||||
|
||||
public func snapshotSQLite(remotePath: String) throws -> URL {
|
||||
// Local case: no copy needed. Services open the path directly.
|
||||
URL(fileURLWithPath: remotePath)
|
||||
/// Run `script` through `/bin/sh -c` locally. Local data path
|
||||
/// doesn't actually call this in production (the data service
|
||||
/// hands `LocalSQLiteBackend` the libsqlite3-direct path) — kept
|
||||
/// for protocol parity and for tooling that wants a uniform
|
||||
/// "run a script" entry on either context kind.
|
||||
public func streamScript(_ script: String, timeout: TimeInterval) async throws -> ProcessResult {
|
||||
#if os(iOS)
|
||||
throw TransportError.other(message: "LocalTransport.streamScript is unavailable on iOS")
|
||||
#else
|
||||
let outcome = await SSHScriptRunner.run(
|
||||
script: script,
|
||||
context: ServerContext(id: contextID, displayName: "Local", kind: .local),
|
||||
timeout: timeout
|
||||
)
|
||||
switch outcome {
|
||||
case .connectFailure(let reason):
|
||||
throw TransportError.other(message: reason)
|
||||
case .completed(let stdout, let stderr, let exitCode):
|
||||
return ProcessResult(
|
||||
exitCode: exitCode,
|
||||
stdout: Data(stdout.utf8),
|
||||
stderr: Data(stderr.utf8)
|
||||
)
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/// Local transport reads the live DB directly — there's no cached
|
||||
/// snapshot to fall back to (and no failure mode where falling back
|
||||
/// would help, since a missing local file is missing both ways).
|
||||
public var cachedSnapshotPath: URL? { nil }
|
||||
|
||||
// MARK: - Watching
|
||||
|
||||
#if canImport(Darwin)
|
||||
|
||||
@@ -25,6 +25,58 @@ import Foundation
|
||||
/// callers can treat both uniformly.
|
||||
public enum SSHScriptRunner {
|
||||
|
||||
/// Thread-safe boolean flag used to bridge parent-task cancellation
|
||||
/// into the detached `Task` body that owns the ssh subprocess.
|
||||
/// `Task.detached { ... }` does NOT inherit cancellation from the
|
||||
/// awaiting parent; without this flag, cancelling a chat-load /
|
||||
/// hydration / activity-fetch Task only throws `CancellationError`
|
||||
/// at the chat layer while the ssh subprocess keeps running until
|
||||
/// its 30s timeout fires — pinning a remote sqlite query (and a
|
||||
/// ControlMaster session slot) for the full deadline. v2.8 fix
|
||||
/// observed in 2026-05-05 dogfooding: rapid chat-switching left a
|
||||
/// chain of stale 30s ssh subprocesses behind, blocking the
|
||||
/// dashboard's queryBatch and producing a "spinning" load.
|
||||
private final class CancelFlag: @unchecked Sendable {
|
||||
private let lock = NSLock()
|
||||
private var _cancelled = false
|
||||
var isCancelled: Bool {
|
||||
lock.lock(); defer { lock.unlock() }
|
||||
return _cancelled
|
||||
}
|
||||
func cancel() {
|
||||
lock.lock(); defer { lock.unlock() }
|
||||
_cancelled = true
|
||||
}
|
||||
}
|
||||
|
||||
/// Lock-protected `Data` accumulator used by the stdout/stderr
|
||||
/// readability handlers below. Two of these per script run, one per
|
||||
/// stream. `@unchecked Sendable` because mutation goes through the
|
||||
/// `NSLock` — Swift can't see that.
|
||||
///
|
||||
/// Why this exists (issue #77): the previous implementation read
|
||||
/// stdout/stderr via `readToEnd()` *after* the subprocess exited.
|
||||
/// On macOS pipes default to a 16–64 KB kernel buffer; once
|
||||
/// `sqlite3 -json` writes more than that, the SSH client back-
|
||||
/// pressures over the wire, the remote sqlite3 blocks, the script
|
||||
/// never finishes, the 30 s timeout fires, and the caller sees
|
||||
/// "Script timed out" + an empty result set. v2.7's
|
||||
/// `sessionListSnapshot(limit: 500)` crossed that threshold for
|
||||
/// any user with ~150+ sessions. Draining concurrently with
|
||||
/// `readabilityHandler` removes the back-pressure.
|
||||
private final class LockedData: @unchecked Sendable {
|
||||
private let lock = NSLock()
|
||||
private var buf = Data()
|
||||
func append(_ chunk: Data) {
|
||||
lock.lock(); defer { lock.unlock() }
|
||||
buf.append(chunk)
|
||||
}
|
||||
func snapshot() -> Data {
|
||||
lock.lock(); defer { lock.unlock() }
|
||||
return buf
|
||||
}
|
||||
}
|
||||
|
||||
public enum Outcome: Sendable {
|
||||
/// Couldn't even reach the remote (process spawn failed,
|
||||
/// timeout before any output, network refused). Carries the
|
||||
@@ -46,22 +98,38 @@ public enum SSHScriptRunner {
|
||||
/// cross-platform we return a connect failure on non-macOS so
|
||||
/// the file compiles everywhere.
|
||||
public static func run(script: String, context: ServerContext, timeout: TimeInterval = 30) async -> Outcome {
|
||||
#if os(macOS)
|
||||
switch context.kind {
|
||||
case .local:
|
||||
return await runLocally(script: script, timeout: timeout)
|
||||
case .ssh(let config):
|
||||
return await runOverSSH(script: script, config: config, timeout: timeout)
|
||||
await ScarfMon.measureAsync(.transport, "ssh.run") {
|
||||
// Bridge parent cancellation into the detached subprocess
|
||||
// task. Without this, killing a chat-hydration Task on a
|
||||
// session switch only unwinds Swift state — the ssh
|
||||
// subprocess keeps holding a remote sqlite query + a
|
||||
// ControlMaster session for the full 30s timeout. v2.8.
|
||||
let cancelFlag = CancelFlag()
|
||||
return await withTaskCancellationHandler(
|
||||
operation: {
|
||||
#if os(macOS)
|
||||
switch context.kind {
|
||||
case .local:
|
||||
return await runLocally(script: script, timeout: timeout, cancelFlag: cancelFlag)
|
||||
case .ssh(let config):
|
||||
return await runOverSSH(script: script, config: config, timeout: timeout, cancelFlag: cancelFlag)
|
||||
}
|
||||
#else
|
||||
return .connectFailure("SSHScriptRunner is only available on macOS")
|
||||
#endif
|
||||
},
|
||||
onCancel: {
|
||||
cancelFlag.cancel()
|
||||
ScarfMon.event(.transport, "ssh.cancelled", count: 1)
|
||||
}
|
||||
)
|
||||
}
|
||||
#else
|
||||
return .connectFailure("SSHScriptRunner is only available on macOS")
|
||||
#endif
|
||||
}
|
||||
|
||||
// MARK: - SSH path
|
||||
|
||||
#if os(macOS)
|
||||
private static func runOverSSH(script: String, config: SSHConfig, timeout: TimeInterval) async -> Outcome {
|
||||
private static func runOverSSH(script: String, config: SSHConfig, timeout: TimeInterval, cancelFlag: CancelFlag) async -> Outcome {
|
||||
var sshArgv: [String] = [
|
||||
"-o", "ControlMaster=auto",
|
||||
"-o", "ControlPath=\(SSHTransport.controlDirPath())/%C",
|
||||
@@ -111,9 +179,35 @@ public enum SSHScriptRunner {
|
||||
proc.standardOutput = stdoutPipe
|
||||
proc.standardError = stderrPipe
|
||||
|
||||
// Drain stdout/stderr concurrently with the running process —
|
||||
// see the LockedData docstring above for the issue-#77
|
||||
// back-story. Without these handlers a >64 KB script output
|
||||
// wedges the pipe + ssh + remote sqlite3 chain and the only
|
||||
// visible symptom is a timeout.
|
||||
let outBuf = LockedData()
|
||||
let errBuf = LockedData()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = { handle in
|
||||
let chunk = handle.availableData
|
||||
if chunk.isEmpty {
|
||||
handle.readabilityHandler = nil
|
||||
} else {
|
||||
outBuf.append(chunk)
|
||||
}
|
||||
}
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = { handle in
|
||||
let chunk = handle.availableData
|
||||
if chunk.isEmpty {
|
||||
handle.readabilityHandler = nil
|
||||
} else {
|
||||
errBuf.append(chunk)
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
try proc.run()
|
||||
} catch {
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
return .connectFailure("Failed to launch ssh: \(error.localizedDescription)")
|
||||
}
|
||||
|
||||
@@ -124,14 +218,42 @@ public enum SSHScriptRunner {
|
||||
|
||||
let deadline = Date().addingTimeInterval(timeout)
|
||||
while proc.isRunning && Date() < deadline {
|
||||
// Honor BOTH the detached-task's own cancellation flag
|
||||
// (set by the parent's `withTaskCancellationHandler`)
|
||||
// and the legacy `Task.isCancelled` check in case the
|
||||
// detached body gets cancelled directly. The flag is
|
||||
// the load-bearing path; Task.isCancelled is harmless
|
||||
// belt-and-suspenders.
|
||||
if cancelFlag.isCancelled || Task.isCancelled {
|
||||
proc.terminate()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
return .connectFailure("Script cancelled")
|
||||
}
|
||||
try? await Task.sleep(nanoseconds: 100_000_000)
|
||||
}
|
||||
if proc.isRunning {
|
||||
proc.terminate()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
// Pipe fds leak otherwise — closing on the timeout branch
|
||||
// matches the success-path discipline (see CLAUDE.md
|
||||
// "Always close both fileHandleForReading and
|
||||
// fileHandleForWriting on Pipe objects").
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
return .connectFailure("Script timed out after \(Int(timeout))s")
|
||||
}
|
||||
let out = (try? stdoutPipe.fileHandleForReading.readToEnd()) ?? Data()
|
||||
let err = (try? stderrPipe.fileHandleForReading.readToEnd()) ?? Data()
|
||||
// Detach the readabilityHandlers and capture whatever the
|
||||
// accumulator has. The handler may have already seen EOF
|
||||
// (`chunk.isEmpty`) and self-cleared, but assigning nil is
|
||||
// idempotent and guards against a late tick from the queue.
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
let out = outBuf.snapshot()
|
||||
let err = errBuf.snapshot()
|
||||
// Best-effort fd close — Pipe leaks fd's otherwise.
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
@@ -145,7 +267,7 @@ public enum SSHScriptRunner {
|
||||
|
||||
// MARK: - Local path
|
||||
|
||||
private static func runLocally(script: String, timeout: TimeInterval) async -> Outcome {
|
||||
private static func runLocally(script: String, timeout: TimeInterval, cancelFlag: CancelFlag) async -> Outcome {
|
||||
return await Task.detached { () -> Outcome in
|
||||
let proc = Process()
|
||||
proc.executableURL = URL(fileURLWithPath: "/bin/sh")
|
||||
@@ -155,21 +277,61 @@ public enum SSHScriptRunner {
|
||||
let stderrPipe = Pipe()
|
||||
proc.standardOutput = stdoutPipe
|
||||
proc.standardError = stderrPipe
|
||||
|
||||
// Drain concurrently — same pipe-buffer fix as runOverSSH.
|
||||
// Local scripts can also blow past the 16–64 KB pipe buffer
|
||||
// (e.g. local `sqlite3 -json` over a fat result set) and
|
||||
// would wedge in exactly the same way.
|
||||
let outBuf = LockedData()
|
||||
let errBuf = LockedData()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = { handle in
|
||||
let chunk = handle.availableData
|
||||
if chunk.isEmpty {
|
||||
handle.readabilityHandler = nil
|
||||
} else {
|
||||
outBuf.append(chunk)
|
||||
}
|
||||
}
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = { handle in
|
||||
let chunk = handle.availableData
|
||||
if chunk.isEmpty {
|
||||
handle.readabilityHandler = nil
|
||||
} else {
|
||||
errBuf.append(chunk)
|
||||
}
|
||||
}
|
||||
|
||||
do {
|
||||
try proc.run()
|
||||
} catch {
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
return .connectFailure("Failed to launch /bin/sh: \(error.localizedDescription)")
|
||||
}
|
||||
let deadline = Date().addingTimeInterval(timeout)
|
||||
while proc.isRunning && Date() < deadline {
|
||||
if cancelFlag.isCancelled || Task.isCancelled {
|
||||
proc.terminate()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
return .connectFailure("Script cancelled")
|
||||
}
|
||||
try? await Task.sleep(nanoseconds: 100_000_000)
|
||||
}
|
||||
if proc.isRunning {
|
||||
proc.terminate()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
return .connectFailure("Script timed out after \(Int(timeout))s")
|
||||
}
|
||||
let out = (try? stdoutPipe.fileHandleForReading.readToEnd()) ?? Data()
|
||||
let err = (try? stderrPipe.fileHandleForReading.readToEnd()) ?? Data()
|
||||
stdoutPipe.fileHandleForReading.readabilityHandler = nil
|
||||
stderrPipe.fileHandleForReading.readabilityHandler = nil
|
||||
let out = outBuf.snapshot()
|
||||
let err = errBuf.snapshot()
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
return .completed(
|
||||
|
||||
@@ -620,67 +620,26 @@ public struct SSHTransport: ServerTransport {
|
||||
return env
|
||||
}
|
||||
|
||||
// MARK: - SQLite snapshot
|
||||
// MARK: - Script streaming
|
||||
|
||||
public func snapshotSQLite(remotePath: String) throws -> URL {
|
||||
try? FileManager.default.createDirectory(atPath: snapshotDir, withIntermediateDirectories: true)
|
||||
let localPath = snapshotDir + "/state.db"
|
||||
// `.backup` is WAL-safe: sqlite takes a consistent snapshot without
|
||||
// blocking writers. A plain `cp` of a WAL-mode DB could corrupt.
|
||||
let remoteTmp = "/tmp/scarf-snapshot-\(UUID().uuidString).db"
|
||||
// sqlite3's `.backup` is a dot-command, not a CLI arg. The whole
|
||||
// dot-command must be one shell argument (double-quoted) so sqlite3
|
||||
// receives it as a single command; the backup path inside it is
|
||||
// single-quoted so sqlite3 parses it correctly. The DB path is a
|
||||
// separate shell argument and goes through `remotePathArg`
|
||||
// (double-quoted, $HOME-aware) so `~/.hermes/state.db` actually
|
||||
// resolves on the remote.
|
||||
//
|
||||
// The second sqlite3 invocation flips the snapshot out of WAL mode
|
||||
// so the scp'd file is self-contained: `.backup` preserves the
|
||||
// source's journal_mode in the destination header, so without this
|
||||
// step the client would need the `-wal`/`-shm` sidecars too, and
|
||||
// every read would fail with "unable to open database file".
|
||||
//
|
||||
// Final shell command on the remote:
|
||||
// sqlite3 "$HOME/.hermes/state.db" ".backup '/tmp/scarf-snapshot-XYZ.db'" \
|
||||
// && sqlite3 '/tmp/scarf-snapshot-XYZ.db' "PRAGMA journal_mode=DELETE;"
|
||||
let backupScript = #"sqlite3 \#(Self.remotePathArg(remotePath)) ".backup '\#(remoteTmp)'" && sqlite3 '\#(remoteTmp)' "PRAGMA journal_mode=DELETE;" > /dev/null"#
|
||||
let backup = try runRemoteShell(backupScript)
|
||||
if backup.exitCode != 0 {
|
||||
throw TransportError.classifySSHFailure(host: config.host, exitCode: backup.exitCode, stderr: backup.stderrString)
|
||||
/// Pipe `script` to `/bin/sh -s` over the ControlMaster-shared SSH
|
||||
/// channel. Used by `RemoteSQLiteBackend` to invoke `sqlite3 -json`
|
||||
/// per query without the per-arg quoting that `runProcess` would
|
||||
/// apply. Delegates to `SSHScriptRunner` which already implements
|
||||
/// the ssh-stdin-pipe pattern correctly.
|
||||
public func streamScript(_ script: String, timeout: TimeInterval) async throws -> ProcessResult {
|
||||
let context = ServerContext(id: contextID, displayName: displayName, kind: .ssh(config))
|
||||
let outcome = await SSHScriptRunner.run(script: script, context: context, timeout: timeout)
|
||||
switch outcome {
|
||||
case .connectFailure(let reason):
|
||||
throw TransportError.other(message: reason)
|
||||
case .completed(let stdout, let stderr, let exitCode):
|
||||
return ProcessResult(
|
||||
exitCode: exitCode,
|
||||
stdout: Data(stdout.utf8),
|
||||
stderr: Data(stderr.utf8)
|
||||
)
|
||||
}
|
||||
// scp the backup down. scp/sftp expands `~` natively (it goes
|
||||
// through the SSH file-transfer protocol, not a remote shell), so
|
||||
// remoteTmp's `/tmp/...` absolute path round-trips as-is.
|
||||
ensureControlDir()
|
||||
var scpArgs: [String] = [
|
||||
"-o", "ControlMaster=auto",
|
||||
"-o", "ControlPath=\(controlDir)/%C",
|
||||
"-o", "ControlPersist=600",
|
||||
"-o", "StrictHostKeyChecking=accept-new",
|
||||
"-o", "LogLevel=QUIET",
|
||||
"-o", "BatchMode=yes"
|
||||
]
|
||||
if let port = config.port { scpArgs += ["-P", String(port)] }
|
||||
if let id = config.identityFile, !id.isEmpty { scpArgs += ["-i", id] }
|
||||
scpArgs.append("\(hostSpec):\(remoteTmp)")
|
||||
scpArgs.append(localPath)
|
||||
let pull = try runLocal(executable: scpBinary, args: scpArgs, stdin: nil, timeout: 120)
|
||||
// Regardless of pull outcome, try to clean up the remote tmp.
|
||||
_ = try? runRemoteShell("rm -f \(Self.remotePathArg(remoteTmp))")
|
||||
if pull.exitCode != 0 {
|
||||
throw TransportError.classifySSHFailure(host: config.host, exitCode: pull.exitCode, stderr: pull.stderrString)
|
||||
}
|
||||
return URL(fileURLWithPath: localPath)
|
||||
}
|
||||
|
||||
/// Path where the most recent successful snapshot was written —
|
||||
/// returned even when the remote is currently unreachable. The
|
||||
/// data service falls back to this when `snapshotSQLite` throws so
|
||||
/// Dashboard / Sessions / Chat-history stay viewable offline.
|
||||
public var cachedSnapshotPath: URL? {
|
||||
URL(fileURLWithPath: snapshotDir + "/state.db")
|
||||
}
|
||||
|
||||
// MARK: - Watching
|
||||
@@ -765,12 +724,28 @@ public struct SSHTransport: ServerTransport {
|
||||
try? stdinPipe.fileHandleForWriting.close()
|
||||
}
|
||||
if let timeout {
|
||||
let deadline = Date().addingTimeInterval(timeout)
|
||||
while proc.isRunning && Date() < deadline {
|
||||
Thread.sleep(forTimeInterval: 0.1)
|
||||
}
|
||||
if proc.isRunning {
|
||||
// Kernel-wait via DispatchGroup + terminationHandler instead
|
||||
// of a 100ms Thread.sleep spin loop. The old loop burned a
|
||||
// cooperative-pool thread for the full timeout duration AND
|
||||
// had 100ms granularity on the deadline; this version blocks
|
||||
// once on a semaphore that the OS wakes when the process
|
||||
// terminates (or when the timeout fires). Net effect: under
|
||||
// concurrent SSH load (sidebar reload + chat finalize +
|
||||
// watcher poll all firing together) we don't accumulate
|
||||
// multiple spin-blocked threads, which was the mechanism
|
||||
// behind the 7-second `loadRecentSessions` outliers
|
||||
// observed in remote-context perf captures.
|
||||
let waitGroup = DispatchGroup()
|
||||
waitGroup.enter()
|
||||
proc.terminationHandler = { _ in waitGroup.leave() }
|
||||
let outcome = waitGroup.wait(timeout: .now() + timeout)
|
||||
proc.terminationHandler = nil
|
||||
if outcome == .timedOut {
|
||||
proc.terminate()
|
||||
// Brief block until the kill actually lands so we can
|
||||
// collect partial stdout. terminate() is async; without
|
||||
// this wait the readToEnd below could race the close.
|
||||
proc.waitUntilExit()
|
||||
let partial = (try? stdoutPipe.fileHandleForReading.readToEnd()) ?? Data()
|
||||
try? stdoutPipe.fileHandleForReading.close()
|
||||
try? stderrPipe.fileHandleForReading.close()
|
||||
|
||||
@@ -96,27 +96,25 @@ public protocol ServerTransport: Sendable {
|
||||
args: [String]
|
||||
) -> AsyncThrowingStream<Data, Error>
|
||||
|
||||
// MARK: - SQLite
|
||||
|
||||
/// Return a local filesystem URL pointing at a fresh, consistent copy of
|
||||
/// the SQLite database at `remotePath`. For local transports this is
|
||||
/// just the remote path unchanged. For SSH transports this performs
|
||||
/// `sqlite3 .backup` on the remote side and scp's the backup into
|
||||
/// `~/Library/Caches/scarf/<serverID>/state.db`, returning that URL.
|
||||
nonisolated func snapshotSQLite(remotePath: String) throws -> URL
|
||||
|
||||
/// Local filesystem URL where this transport caches its SQLite snapshot,
|
||||
/// returned even when the remote is unreachable. Callers should
|
||||
/// `FileManager.default.fileExists(atPath:)` before reading — the
|
||||
/// transport can't atomically check existence and return the URL
|
||||
/// in one step without TOCTOU. Local transports return `nil`
|
||||
/// (their data is the live DB, not a cache).
|
||||
/// Pipe a multi-line shell script through `/bin/sh -s` on the
|
||||
/// target and return its captured output. The script travels as a
|
||||
/// single opaque byte stream — no per-line shell interpolation,
|
||||
/// no per-arg quoting — so `"$VAR"` references, here-docs, and
|
||||
/// nested quotes survive untouched.
|
||||
///
|
||||
/// Used by `HermesDataService.open()` to fall back to the last
|
||||
/// successful snapshot when a fresh `snapshotSQLite` call fails,
|
||||
/// so the app keeps showing data with a "Last updated X ago"
|
||||
/// affordance instead of a blank screen.
|
||||
nonisolated var cachedSnapshotPath: URL? { get }
|
||||
/// Replaces the old `snapshotSQLite` + scp pipeline. Used by
|
||||
/// `RemoteSQLiteBackend` to invoke `sqlite3 -readonly -json` over
|
||||
/// SSH per query (or per batch). Local transport runs the script
|
||||
/// in-process via `/bin/sh -c`. SSH transport delegates to
|
||||
/// `SSHScriptRunner` (ControlMaster-shared channel). Citadel
|
||||
/// transport (iOS) base64-encodes the script + decodes remotely
|
||||
/// to skirt Citadel's missing-stdin support.
|
||||
///
|
||||
/// Throws on transport failures (host unreachable, ssh exit 255,
|
||||
/// timeout). Returns `ProcessResult` with the script's exit code
|
||||
/// + stdout + stderr on completion — non-zero exit is NOT a
|
||||
/// throw; callers inspect `exitCode` and decide.
|
||||
nonisolated func streamScript(_ script: String, timeout: TimeInterval) async throws -> ProcessResult
|
||||
|
||||
// MARK: - Watching
|
||||
|
||||
|
||||
@@ -23,6 +23,13 @@ public final class ActivityViewModel {
|
||||
public var toolResult: String?
|
||||
public var sessionPreviews: [String: String] = [:]
|
||||
public var isLoading = true
|
||||
/// True while the Phase 2 background fill is paging through
|
||||
/// `hydrateAssistantToolCalls`. Drives a "Loading tool details…"
|
||||
/// pill in the page header so the user knows the placeholder
|
||||
/// rows on screen will fill in. v2.8.
|
||||
public var isHydratingToolCalls = false
|
||||
@ObservationIgnored
|
||||
private var hydrationTask: Task<Void, Never>?
|
||||
|
||||
public var availableSessions: [(id: String, label: String)] {
|
||||
var seen = Set<String>()
|
||||
@@ -34,8 +41,29 @@ public final class ActivityViewModel {
|
||||
}
|
||||
|
||||
public var filteredActivity: [ActivityEntry] {
|
||||
let entries = toolMessages.flatMap { message in
|
||||
message.toolCalls.map { call in
|
||||
let entries = toolMessages.flatMap { message -> [ActivityEntry] in
|
||||
// v2.8 — emit a single "Loading tool calls…" placeholder
|
||||
// entry per skeleton message (one whose tool_calls JSON
|
||||
// hasn't been hydrated yet). The user sees the timeline
|
||||
// shape immediately; real entries replace the placeholder
|
||||
// in-place when `hydrateAssistantToolCalls` returns.
|
||||
// Filtering still works (we apply the session filter
|
||||
// below) but kind filter hides placeholders since
|
||||
// .other is the placeholder's default kind.
|
||||
guard !message.toolCalls.isEmpty else {
|
||||
return [ActivityEntry(
|
||||
id: "skeleton-\(message.id)",
|
||||
sessionId: message.sessionId,
|
||||
toolName: "Loading tool details…",
|
||||
kind: .other,
|
||||
summary: "",
|
||||
arguments: "",
|
||||
messageContent: "",
|
||||
timestamp: message.timestamp,
|
||||
isPlaceholder: true
|
||||
)]
|
||||
}
|
||||
return message.toolCalls.map { call in
|
||||
ActivityEntry(
|
||||
id: call.callId,
|
||||
sessionId: message.sessionId,
|
||||
@@ -49,14 +77,34 @@ public final class ActivityViewModel {
|
||||
}
|
||||
}
|
||||
return entries.filter { entry in
|
||||
let kindOk = filterKind == nil || entry.kind == filterKind
|
||||
// Placeholders bypass the kind filter so they don't all
|
||||
// disappear when the user picks a non-`.other` filter
|
||||
// chip — they still represent rows that may resolve to
|
||||
// the matching kind once hydrated.
|
||||
let kindOk = filterKind == nil || entry.isPlaceholder || entry.kind == filterKind
|
||||
let sessionOk = filterSessionId == nil || entry.sessionId == filterSessionId
|
||||
return kindOk && sessionOk
|
||||
}
|
||||
}
|
||||
|
||||
/// Last load's transport-failure reason, if any. Activity surfaces
|
||||
/// this to the user instead of leaving the empty-state visible
|
||||
/// (which the user reads as "no activity" rather than "couldn't
|
||||
/// reach the host"). v2.8.
|
||||
public var loadError: String?
|
||||
|
||||
public func load() async {
|
||||
// Cancel any in-flight hydration from a prior load (e.g. a
|
||||
// file-watcher delta firing while the prior pass was still
|
||||
// paging). The new skeleton replaces the message set, so
|
||||
// hydrating against the old ids would just splice into rows
|
||||
// that no longer exist.
|
||||
hydrationTask?.cancel()
|
||||
hydrationTask = nil
|
||||
isHydratingToolCalls = false
|
||||
|
||||
isLoading = true
|
||||
loadError = nil
|
||||
// refresh() = close + reopen, which forces a fresh snapshot pull on
|
||||
// remote contexts. Using open() here would short-circuit after the
|
||||
// first load and show stale data for the view's lifetime. The DB
|
||||
@@ -64,12 +112,68 @@ public final class ActivityViewModel {
|
||||
// results without re-opening — cleanup() closes on disappear.
|
||||
let opened = await dataService.refresh()
|
||||
guard opened else {
|
||||
loadError = "Couldn't reach \(context.displayName) — check the SSH connection and pull-to-refresh to retry."
|
||||
isLoading = false
|
||||
return
|
||||
}
|
||||
toolMessages = await dataService.fetchRecentToolCalls(limit: 200)
|
||||
sessionPreviews = await dataService.fetchSessionPreviews(limit: 200)
|
||||
// v2.8 Phase L — skeleton-then-hydrate. Phase 1 metadata
|
||||
// fetch is bounded by 50 rows × ~50 bytes (id + session_id +
|
||||
// role + timestamp; tool_calls JSON is NULLed at the SQL
|
||||
// level) ≈ 3 KB on the wire regardless of how big the
|
||||
// underlying tool_calls blobs are. Comes back in
|
||||
// sub-second on healthy remotes; placeholder rows render
|
||||
// immediately. Phase 2 (paged hydrate) fills the real
|
||||
// tool details in via 5-id batches in the background.
|
||||
let outcome = await dataService.fetchRecentToolCallSkeleton(limit: 50)
|
||||
toolMessages = outcome.messages
|
||||
if let reason = outcome.transportError {
|
||||
loadError = "Couldn't load activity from \(context.displayName) — the connection timed out (\(reason)). Pull to refresh to retry."
|
||||
isLoading = false
|
||||
return
|
||||
}
|
||||
sessionPreviews = await dataService.fetchSessionPreviews(limit: 50)
|
||||
isLoading = false
|
||||
|
||||
// Phase 2 — background hydrate. Mirrors the chat path's
|
||||
// `startToolHydration`. Newest-first (the splice happens in
|
||||
// batch order), cancellable via `cleanup()` / next `load()`.
|
||||
startToolCallHydration()
|
||||
}
|
||||
|
||||
/// Phase 2 of the v2.8 Activity loader. Pages through
|
||||
/// `hydrateAssistantToolCalls` in batches of 5 ids and splices
|
||||
/// the parsed `[HermesToolCall]` arrays into the existing
|
||||
/// `toolMessages` skeleton. Once a message has its tool calls,
|
||||
/// `filteredActivity` swaps the placeholder entry for the real
|
||||
/// per-call entries on the next observation tick.
|
||||
private func startToolCallHydration() {
|
||||
let messageIds = toolMessages
|
||||
.filter { $0.toolCalls.isEmpty && $0.id > 0 }
|
||||
.map(\.id)
|
||||
guard !messageIds.isEmpty else {
|
||||
isHydratingToolCalls = false
|
||||
return
|
||||
}
|
||||
isHydratingToolCalls = true
|
||||
let dataService = self.dataService
|
||||
hydrationTask = Task { @MainActor [weak self] in
|
||||
defer { self?.isHydratingToolCalls = false }
|
||||
// Page in 5-id batches matching the chat path —
|
||||
// hydrateAssistantToolCalls already does the paging
|
||||
// internally; here we just hand it all the ids and
|
||||
// let it return whatever it could pull. Parent task
|
||||
// cancellation propagates down via the v2.8 SSH
|
||||
// cancellation handler we wired through SSHScriptRunner.
|
||||
let map = await dataService.hydrateAssistantToolCalls(messageIds: messageIds)
|
||||
guard let self else { return }
|
||||
if Task.isCancelled { return }
|
||||
if !map.isEmpty {
|
||||
self.toolMessages = self.toolMessages.map { msg in
|
||||
guard msg.toolCalls.isEmpty, let calls = map[msg.id] else { return msg }
|
||||
return msg.withToolCalls(calls)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func selectEntry(_ entry: ActivityEntry?) async {
|
||||
@@ -82,6 +186,9 @@ public final class ActivityViewModel {
|
||||
}
|
||||
|
||||
public func cleanup() async {
|
||||
hydrationTask?.cancel()
|
||||
hydrationTask = nil
|
||||
isHydratingToolCalls = false
|
||||
await dataService.close()
|
||||
}
|
||||
}
|
||||
@@ -95,6 +202,13 @@ public struct ActivityEntry: Identifiable, Sendable {
|
||||
public let arguments: String
|
||||
public let messageContent: String
|
||||
public let timestamp: Date?
|
||||
/// True for skeleton entries emitted while the v2.8 two-phase
|
||||
/// loader is still hydrating tool_calls JSON for the underlying
|
||||
/// message. ActivityRow renders these as greyed "Loading…" rows
|
||||
/// so the user sees the timeline shape without the per-call
|
||||
/// detail. Splice happens in-place when hydration completes —
|
||||
/// the placeholder vanishes and the real entries take its slot.
|
||||
public let isPlaceholder: Bool
|
||||
|
||||
public init(
|
||||
id: String,
|
||||
@@ -104,7 +218,8 @@ public struct ActivityEntry: Identifiable, Sendable {
|
||||
summary: String,
|
||||
arguments: String,
|
||||
messageContent: String,
|
||||
timestamp: Date?
|
||||
timestamp: Date?,
|
||||
isPlaceholder: Bool = false
|
||||
) {
|
||||
self.id = id
|
||||
self.sessionId = sessionId
|
||||
@@ -114,6 +229,7 @@ public struct ActivityEntry: Identifiable, Sendable {
|
||||
self.arguments = arguments
|
||||
self.messageContent = messageContent
|
||||
self.timestamp = timestamp
|
||||
self.isPlaceholder = isPlaceholder
|
||||
}
|
||||
|
||||
public var prettyArguments: String {
|
||||
|
||||
@@ -4,17 +4,19 @@ import Observation
|
||||
import os
|
||||
#endif
|
||||
|
||||
/// Mac + iOS view model for the v0.12 Curator surface.
|
||||
/// Mac + iOS view model for the Curator surface (v0.12 base + v0.13
|
||||
/// archive/prune additions).
|
||||
///
|
||||
/// Drives `hermes curator status / run / pause / resume / pin / unpin /
|
||||
/// restore` plus a parsed view of `~/.hermes/skills/.curator_state`
|
||||
/// JSON. The CLI doesn't ship a `--json` flag for `status`, so we
|
||||
/// text-parse stdout (HermesCuratorStatusParser) and use the state
|
||||
/// file for richer last-run metadata.
|
||||
/// restore` plus (v0.13+) `archive`, `prune`, `list-archived`. All CLI
|
||||
/// invocations route through `CuratorService` (the actor) so polling
|
||||
/// and writes share the same concurrency model and a single error path.
|
||||
///
|
||||
/// Capability-gated: callers should construct this only when
|
||||
/// `HermesCapabilities.hasCurator` is true. The view model does not
|
||||
/// gate itself — the gate happens at sidebar/tab routing time.
|
||||
/// `HermesCapabilities.hasCurator` is true. Archive-aware UI surfaces
|
||||
/// (Archive button, Archived section, Prune…) gate independently on
|
||||
/// `hasCuratorArchive`. The view model itself doesn't gate — it exposes
|
||||
/// every method and the View decides what to render.
|
||||
@Observable
|
||||
@MainActor
|
||||
public final class CuratorViewModel {
|
||||
@@ -27,76 +29,228 @@ public final class CuratorViewModel {
|
||||
public private(set) var status: HermesCuratorStatus = .empty
|
||||
public private(set) var isLoading = false
|
||||
public private(set) var lastReportMarkdown: String?
|
||||
|
||||
// Archive state (v0.13+ only — populated by `loadArchive()` on hosts
|
||||
// where `hasCuratorArchive` is true).
|
||||
public private(set) var archivedSkills: [HermesCuratorArchivedSkill] = []
|
||||
public private(set) var isLoadingArchive = false
|
||||
|
||||
// Prune state — `pruneSummary` non-nil while the confirm sheet is
|
||||
// mid-flight; `isPruning` flips during the destructive step.
|
||||
public private(set) var pruneSummary: CuratorPruneSummary?
|
||||
public private(set) var isPruning = false
|
||||
|
||||
// Track which active-skill row is currently being archived so the
|
||||
// row chrome can show an inline spinner without blocking the rest.
|
||||
public private(set) var pendingArchiveName: String?
|
||||
|
||||
/// Happy-path success toast ("Pinned X", "Resumed", "Archived
|
||||
/// legacy-helper"). Auto-clears 3s after assignment.
|
||||
public var transientMessage: String?
|
||||
|
||||
/// Failure path — populated by every CLI verb when it throws. Shown
|
||||
/// as an inline yellow banner above the status summary so users
|
||||
/// don't have to dismiss a modal alert during a high-frequency
|
||||
/// surface like the leaderboard. Manually dismissed via the View's
|
||||
/// "x" button (sets to nil).
|
||||
public var errorMessage: String?
|
||||
|
||||
@ObservationIgnored
|
||||
private let service: CuratorService
|
||||
|
||||
public init(context: ServerContext) {
|
||||
self.context = context
|
||||
self.service = CuratorService(context: context)
|
||||
}
|
||||
|
||||
// MARK: - Loads
|
||||
|
||||
public func load() async {
|
||||
isLoading = true
|
||||
defer { isLoading = false }
|
||||
let context = self.context
|
||||
let parsed = await Task.detached(priority: .userInitiated) { () -> (HermesCuratorStatus, String?) in
|
||||
let textResult = Self.runCuratorStatus(context: context)
|
||||
let stateData = context.readData(context.paths.curatorStateFile)
|
||||
let parsed = HermesCuratorStatusParser.parse(text: textResult, stateFileJSON: stateData)
|
||||
// Best-effort markdown report: the state file points at the
|
||||
// most recent <YYYYMMDD-HHMMSS>/ dir; load REPORT.md from
|
||||
// there. Missing on first run, which is fine.
|
||||
var report: String?
|
||||
if let reportDir = parsed.lastReportPath {
|
||||
let reportPath = reportDir.hasSuffix("/")
|
||||
? "\(reportDir)REPORT.md"
|
||||
: "\(reportDir)/REPORT.md"
|
||||
report = context.readText(reportPath)
|
||||
}
|
||||
return (parsed, report)
|
||||
}.value
|
||||
// v2.8 — instrumented. Curator load fires `hermes curator
|
||||
// status` (CLI subprocess) plus 1-2 file reads; on remote each
|
||||
// is a separate SSH RTT. Visibility lets future captures show
|
||||
// how often the report file is missing or oversized.
|
||||
let parsed = await ScarfMon.measureAsync(.diskIO, "curator.load") {
|
||||
await Task.detached(priority: .userInitiated) { () -> (HermesCuratorStatus, String?) in
|
||||
let textResult = Self.runCuratorStatus(context: context)
|
||||
let stateData = context.readData(context.paths.curatorStateFile)
|
||||
let parsed = HermesCuratorStatusParser.parse(text: textResult, stateFileJSON: stateData)
|
||||
// Best-effort markdown report: the state file points at the
|
||||
// most recent <YYYYMMDD-HHMMSS>/ dir; load REPORT.md from
|
||||
// there. Missing on first run, which is fine.
|
||||
var report: String?
|
||||
if let reportDir = parsed.lastReportPath {
|
||||
let reportPath = reportDir.hasSuffix("/")
|
||||
? "\(reportDir)REPORT.md"
|
||||
: "\(reportDir)/REPORT.md"
|
||||
report = context.readText(reportPath)
|
||||
}
|
||||
return (parsed, report)
|
||||
}.value
|
||||
}
|
||||
ScarfMon.event(
|
||||
.diskIO,
|
||||
"curator.load.bytes",
|
||||
count: 0,
|
||||
bytes: parsed.1?.utf8.count ?? 0
|
||||
)
|
||||
self.status = parsed.0
|
||||
self.lastReportMarkdown = parsed.1
|
||||
}
|
||||
|
||||
public func runNow() async {
|
||||
await runAndReload(args: ["curator", "run"], successMessage: "Curator run started")
|
||||
/// Refresh the archived-skills list. No-op on hosts without
|
||||
/// `hasCuratorArchive` — the caller gates the call.
|
||||
public func loadArchive() async {
|
||||
isLoadingArchive = true
|
||||
defer { isLoadingArchive = false }
|
||||
do {
|
||||
archivedSkills = try await service.listArchived()
|
||||
} catch {
|
||||
archivedSkills = []
|
||||
errorMessage = (error as? LocalizedError)?.errorDescription
|
||||
?? error.localizedDescription
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Writes (v0.12)
|
||||
|
||||
/// Run the curator manually. On v0.13+ hosts this blocks for the
|
||||
/// duration of the run (default 600s timeout); pre-v0.13 returns
|
||||
/// immediately. Caller passes the capability-decided flag.
|
||||
public func runNow(synchronous: Bool, timeout: TimeInterval = 600) async {
|
||||
await runWithReload(
|
||||
verb: "run",
|
||||
successMessage: synchronous ? "Curator run complete" : "Curator run started"
|
||||
) {
|
||||
try await self.service.runNow(synchronous: synchronous, timeout: timeout)
|
||||
}
|
||||
}
|
||||
|
||||
public func pause() async {
|
||||
await runAndReload(args: ["curator", "pause"], successMessage: "Curator paused")
|
||||
await runWithReload(verb: "pause", successMessage: "Curator paused") {
|
||||
try await self.service.pause()
|
||||
}
|
||||
}
|
||||
|
||||
public func resume() async {
|
||||
await runAndReload(args: ["curator", "resume"], successMessage: "Curator resumed")
|
||||
await runWithReload(verb: "resume", successMessage: "Curator resumed") {
|
||||
try await self.service.resume()
|
||||
}
|
||||
}
|
||||
|
||||
public func pin(_ skill: String) async {
|
||||
await runAndReload(args: ["curator", "pin", skill], successMessage: "Pinned \(skill)")
|
||||
await runWithReload(verb: "pin", successMessage: "Pinned \(skill)") {
|
||||
try await self.service.pin(skill)
|
||||
}
|
||||
}
|
||||
|
||||
public func unpin(_ skill: String) async {
|
||||
await runAndReload(args: ["curator", "unpin", skill], successMessage: "Unpinned \(skill)")
|
||||
await runWithReload(verb: "unpin", successMessage: "Unpinned \(skill)") {
|
||||
try await self.service.unpin(skill)
|
||||
}
|
||||
}
|
||||
|
||||
public func restore(_ skill: String) async {
|
||||
await runAndReload(args: ["curator", "restore", skill], successMessage: "Restored \(skill)")
|
||||
await runWithReload(verb: "restore", successMessage: "Restored \(skill)") {
|
||||
try await self.service.restore(skill)
|
||||
}
|
||||
// Restore drops the entry from the archived list — refresh it
|
||||
// so the row disappears immediately.
|
||||
await loadArchive()
|
||||
}
|
||||
|
||||
private func runAndReload(args: [String], successMessage: String) async {
|
||||
let context = self.context
|
||||
let exitCode = await Task.detached(priority: .userInitiated) {
|
||||
Self.runHermes(context: context, args: args).exitCode
|
||||
}.value
|
||||
transientMessage = exitCode == 0 ? successMessage : "Command failed"
|
||||
await load()
|
||||
// Auto-clear toast after 3s.
|
||||
// MARK: - Writes (v0.13)
|
||||
|
||||
public func archive(_ skill: String) async {
|
||||
pendingArchiveName = skill
|
||||
await runWithReload(verb: "archive", successMessage: "Archived \(skill)") {
|
||||
try await self.service.archive(skill)
|
||||
}
|
||||
pendingArchiveName = nil
|
||||
await loadArchive()
|
||||
}
|
||||
|
||||
/// Stage 1 of the bulk-prune flow. Calls `prune --dry-run` and
|
||||
/// populates `pruneSummary`; the View binds its confirm sheet to
|
||||
/// the non-nil presence of this property.
|
||||
public func planPrune() async {
|
||||
do {
|
||||
pruneSummary = try await service.prune(dryRun: true)
|
||||
} catch {
|
||||
errorMessage = (error as? LocalizedError)?.errorDescription
|
||||
?? error.localizedDescription
|
||||
pruneSummary = nil
|
||||
}
|
||||
}
|
||||
|
||||
/// Stage 2 of the bulk-prune flow. Destructive — removes everything
|
||||
/// currently archived. Clears `pruneSummary` regardless of outcome
|
||||
/// so the confirm sheet dismisses.
|
||||
public func confirmPrune() async {
|
||||
isPruning = true
|
||||
do {
|
||||
_ = try await service.prune(dryRun: false)
|
||||
transientMessage = "Pruned archived skills"
|
||||
errorMessage = nil
|
||||
await loadArchive()
|
||||
await load()
|
||||
scheduleTransientClear()
|
||||
} catch {
|
||||
errorMessage = (error as? LocalizedError)?.errorDescription
|
||||
?? error.localizedDescription
|
||||
}
|
||||
isPruning = false
|
||||
pruneSummary = nil
|
||||
}
|
||||
|
||||
/// Cancel the in-flight prune-confirm flow without running.
|
||||
public func cancelPrune() {
|
||||
pruneSummary = nil
|
||||
}
|
||||
|
||||
/// User-driven dismissal of the inline error banner.
|
||||
public func dismissError() {
|
||||
errorMessage = nil
|
||||
}
|
||||
|
||||
// MARK: - Helpers
|
||||
|
||||
/// Run a service call, route success → `transientMessage`, failure
|
||||
/// → `errorMessage`, and reload `status` either way. Mirrors the
|
||||
/// previous `runAndReload` helper but goes through the typed
|
||||
/// service surface.
|
||||
private func runWithReload(
|
||||
verb: String,
|
||||
successMessage: String,
|
||||
body: @escaping @Sendable () async throws -> Void
|
||||
) async {
|
||||
do {
|
||||
try await body()
|
||||
transientMessage = successMessage
|
||||
errorMessage = nil
|
||||
await load()
|
||||
scheduleTransientClear()
|
||||
} catch {
|
||||
let message = (error as? LocalizedError)?.errorDescription
|
||||
?? error.localizedDescription
|
||||
errorMessage = message
|
||||
transientMessage = nil
|
||||
await load()
|
||||
}
|
||||
}
|
||||
|
||||
private func scheduleTransientClear() {
|
||||
Task { @MainActor [weak self] in
|
||||
try? await Task.sleep(nanoseconds: 3_000_000_000)
|
||||
self?.transientMessage = nil
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrap the transport-level `runProcess` so the call sites don't
|
||||
/// have to reach for it directly. Combined stdout+stderr.
|
||||
// MARK: - Legacy sync helpers (kept for `load`'s detached path)
|
||||
|
||||
nonisolated private static func runHermes(
|
||||
context: ServerContext,
|
||||
args: [String]
|
||||
|
||||
@@ -29,17 +29,24 @@ public final class IOSCronViewModel {
|
||||
let ctx = context
|
||||
let path = ctx.paths.cronJobsJSON
|
||||
|
||||
let result: Result<CronJobsFile, Error> = await Task.detached {
|
||||
do {
|
||||
guard let data = ctx.readData(path) else {
|
||||
throw LoadError.missingFile(path: path)
|
||||
// v2.7 — instrumented for parity with Mac `cron.load`. iOS
|
||||
// Cron load is a single SFTP read of jobs.json so should be
|
||||
// snappy on most remotes; this measure point makes the cost
|
||||
// visible in ScarfMon traces alongside the rest of the iOS
|
||||
// load paths.
|
||||
let result: Result<CronJobsFile, Error> = await ScarfMon.measureAsync(.diskIO, "ios.cron.load") {
|
||||
await Task.detached {
|
||||
do {
|
||||
guard let data = ctx.readData(path) else {
|
||||
throw LoadError.missingFile(path: path)
|
||||
}
|
||||
let decoded = try JSONDecoder().decode(CronJobsFile.self, from: data)
|
||||
return .success(decoded)
|
||||
} catch {
|
||||
return Result<CronJobsFile, Error>.failure(error)
|
||||
}
|
||||
let decoded = try JSONDecoder().decode(CronJobsFile.self, from: data)
|
||||
return .success(decoded)
|
||||
} catch {
|
||||
return .failure(error)
|
||||
}
|
||||
}.value
|
||||
}.value
|
||||
}
|
||||
|
||||
switch result {
|
||||
case .success(let file):
|
||||
|
||||
@@ -96,15 +96,24 @@ public final class IOSMemoryViewModel {
|
||||
// Run the file read on a detached task — `readTextThrowing`
|
||||
// blocks on transport I/O, and we don't want the MainActor
|
||||
// hanging during a remote SFTP fetch.
|
||||
// v2.7 — instrumented for parity with Mac `memory.load`.
|
||||
// iOS path is one SFTP read per Memory tab open (per kind:
|
||||
// memory / user / soul); the bytes counter shows payload
|
||||
// size alongside latency.
|
||||
let ctx = context
|
||||
let path = kind.path(on: context)
|
||||
let result: Result<String?, Error> = await Task.detached {
|
||||
do {
|
||||
return .success(try ctx.readTextThrowing(path))
|
||||
} catch {
|
||||
return .failure(error)
|
||||
}
|
||||
}.value
|
||||
let result: Result<String?, Error> = await ScarfMon.measureAsync(.diskIO, "ios.memory.load") {
|
||||
await Task.detached {
|
||||
do {
|
||||
return Result<String?, Error>.success(try ctx.readTextThrowing(path))
|
||||
} catch {
|
||||
return Result<String?, Error>.failure(error)
|
||||
}
|
||||
}.value
|
||||
}
|
||||
if case .success(.some(let loaded)) = result {
|
||||
ScarfMon.event(.diskIO, "ios.memory.load.bytes", count: 0, bytes: loaded.utf8.count)
|
||||
}
|
||||
|
||||
switch result {
|
||||
case .success(.some(let loaded)):
|
||||
|
||||
@@ -117,12 +117,19 @@ public final class InsightsViewModel {
|
||||
}
|
||||
|
||||
let since = period.sinceDate
|
||||
// The four insights queries (user-message count, tool usage,
|
||||
// hourly + daily activity histograms) batch through one
|
||||
// `insightsSnapshot` round-trip. Sessions and session-previews
|
||||
// stay separate — they're large result sets and stay on their
|
||||
// own calls. For remote contexts this turns ~5 SSH round-trips
|
||||
// into 3.
|
||||
sessions = await dataService.fetchSessionsInPeriod(since: since)
|
||||
sessionPreviews = await dataService.fetchSessionPreviews(limit: 500)
|
||||
userMessageCount = await dataService.fetchUserMessageCount(since: since)
|
||||
let tools = await dataService.fetchToolUsage(since: since)
|
||||
hourlyActivity = await dataService.fetchSessionStartHours(since: since)
|
||||
dailyActivity = await dataService.fetchSessionDaysOfWeek(since: since)
|
||||
let snapshot = await dataService.insightsSnapshot(since: since)
|
||||
userMessageCount = snapshot.userMessageCount
|
||||
let tools = snapshot.toolUsage
|
||||
hourlyActivity = snapshot.startHours
|
||||
dailyActivity = snapshot.daysOfWeek
|
||||
|
||||
await dataService.close()
|
||||
|
||||
|
||||
@@ -164,6 +164,16 @@ public final class ProjectsViewModel {
|
||||
projects.map(\.dashboardPath)
|
||||
}
|
||||
|
||||
/// Per-project `.scarf/` directories — watched alongside `dashboardPaths`
|
||||
/// so that file-reading widgets (markdown_file, log_tail, image) refresh
|
||||
/// when their underlying files are added / removed / renamed inside the
|
||||
/// directory by a cron job. In-place file appends within an existing
|
||||
/// file are NOT detected here; the cron job should write atomically
|
||||
/// (write-then-rename) or `touch` dashboard.json after each run.
|
||||
public var projectScarfDirs: [String] {
|
||||
projects.map(\.scarfDir)
|
||||
}
|
||||
|
||||
private func loadDashboard(for project: ProjectEntry) {
|
||||
dashboardError = nil
|
||||
if !service.dashboardExists(for: project) {
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
|
||||
import Foundation
|
||||
import Observation
|
||||
import SwiftUI
|
||||
|
||||
public enum ChatDisplayMode: String, CaseIterable {
|
||||
case terminal
|
||||
@@ -63,6 +64,23 @@ public final class RichChatViewModel {
|
||||
public var messages: [HermesMessage] = []
|
||||
public var currentSession: HermesSession?
|
||||
public var messageGroups: [MessageGroup] = []
|
||||
/// True while the v2.8 two-phase loader's background hydration
|
||||
/// (tool_calls JSON + tool result rows) is in flight. Chat header
|
||||
/// shows "Loading tool details…" so the user knows the bare
|
||||
/// transcript they're looking at will fill in. Cleared once both
|
||||
/// hydration passes finish or the session-id changes underneath.
|
||||
public var isHydratingTools: Bool = false
|
||||
@ObservationIgnored
|
||||
private var hydrationTask: Task<Void, Never>?
|
||||
|
||||
/// UserDefaults key controlling whether the chat resume path
|
||||
/// auto-fetches the CONTENT of tool result rows (`role='tool'`) for
|
||||
/// past messages. Defaults false — a single tool result blob
|
||||
/// (file dump, stack trace) can be hundreds of KB; bulk-fetching
|
||||
/// all of them during chat resume on a slow remote can blow past
|
||||
/// the 30s SSH timeout. The Mac Settings → Display tab exposes
|
||||
/// the toggle (mirror string in `ChatDensityKeys`).
|
||||
public static let loadHistoricalToolResultsKey = "scarf.chat.loadHistoricalToolResults"
|
||||
/// True from the moment the user sends a prompt until the ACP
|
||||
/// `promptComplete` event arrives. Covers the whole round-trip
|
||||
/// including auxiliary post-processing (title generation, usage
|
||||
@@ -120,6 +138,12 @@ public final class RichChatViewModel {
|
||||
/// users can copy-paste the raw output into a bug report.
|
||||
public var acpErrorDetails: String?
|
||||
|
||||
/// Lowercase OAuth provider name (`"nous"`, `"claude"`, …) when the
|
||||
/// most recent failure was an OAuth refresh-revocation Hermes asked
|
||||
/// the user to fix via re-authentication. Drives the chat banner's
|
||||
/// "Re-authenticate" button. Nil for any other failure mode.
|
||||
public var acpErrorOAuthProvider: String?
|
||||
|
||||
/// Optional stderr-tail provider the controller can hook up when it
|
||||
/// creates the ACPClient. Used by `handlePromptComplete` to enrich
|
||||
/// the error banner on non-retryable stopReasons. The closure is
|
||||
@@ -134,6 +158,7 @@ public final class RichChatViewModel {
|
||||
acpError = nil
|
||||
acpErrorHint = nil
|
||||
acpErrorDetails = nil
|
||||
acpErrorOAuthProvider = nil
|
||||
}
|
||||
|
||||
/// Populate the error triplet from a thrown Error + the ACPClient
|
||||
@@ -154,10 +179,11 @@ public final class RichChatViewModel {
|
||||
}
|
||||
let msg = error.localizedDescription
|
||||
let stderrTail = await client?.recentStderr ?? ""
|
||||
let hint = ACPErrorHint.classify(errorMessage: msg, stderrTail: stderrTail)
|
||||
let cls = ACPErrorHint.classify(errorMessage: msg, stderrTail: stderrTail)
|
||||
acpError = msg
|
||||
acpErrorHint = hint
|
||||
acpErrorHint = cls?.hint
|
||||
acpErrorDetails = stderrTail.isEmpty ? nil : stderrTail
|
||||
acpErrorOAuthProvider = cls?.oauthProvider
|
||||
}
|
||||
|
||||
/// Populate the error triplet when `handlePromptComplete` sees a
|
||||
@@ -168,11 +194,11 @@ public final class RichChatViewModel {
|
||||
public func recordPromptStopFailure(stopReason: String, client: ACPClient?) async {
|
||||
let msg = "Prompt ended without a response (stopReason: \(stopReason))."
|
||||
let stderrTail = await client?.recentStderr ?? ""
|
||||
let hint = ACPErrorHint.classify(errorMessage: msg, stderrTail: stderrTail)
|
||||
?? Self.fallbackHint(for: stopReason)
|
||||
let cls = ACPErrorHint.classify(errorMessage: msg, stderrTail: stderrTail)
|
||||
acpError = msg
|
||||
acpErrorHint = hint
|
||||
acpErrorHint = cls?.hint ?? Self.fallbackHint(for: stopReason)
|
||||
acpErrorDetails = stderrTail.isEmpty ? nil : stderrTail
|
||||
acpErrorOAuthProvider = cls?.oauthProvider
|
||||
}
|
||||
|
||||
/// Same as `recordPromptStopFailure` but pulls stderr from the
|
||||
@@ -182,11 +208,11 @@ public final class RichChatViewModel {
|
||||
private func recordPromptStopFailureUsingProvider(stopReason: String) async {
|
||||
let msg = "Prompt ended without a response (stopReason: \(stopReason))."
|
||||
let stderrTail = await acpStderrProvider?() ?? ""
|
||||
let hint = ACPErrorHint.classify(errorMessage: msg, stderrTail: stderrTail)
|
||||
?? Self.fallbackHint(for: stopReason)
|
||||
let cls = ACPErrorHint.classify(errorMessage: msg, stderrTail: stderrTail)
|
||||
acpError = msg
|
||||
acpErrorHint = hint
|
||||
acpErrorHint = cls?.hint ?? Self.fallbackHint(for: stopReason)
|
||||
acpErrorDetails = stderrTail.isEmpty ? nil : stderrTail
|
||||
acpErrorOAuthProvider = cls?.oauthProvider
|
||||
}
|
||||
|
||||
private static func fallbackHint(for stopReason: String) -> String? {
|
||||
@@ -354,10 +380,36 @@ public final class RichChatViewModel {
|
||||
/// spinner and we don't fan out duplicate page requests.
|
||||
public private(set) var isLoadingEarlier: Bool = false
|
||||
private var nextLocalId = -1
|
||||
|
||||
/// Issue #63: locally-created user messages awaiting state.db
|
||||
/// persistence, keyed by session id. ACP roundtrips Hermes' DB
|
||||
/// write asynchronously, so a user who sends a prompt and
|
||||
/// immediately switches to another session triggers `reset()`
|
||||
/// before Hermes flushes the row — `loadSessionHistory` then reads
|
||||
/// from a DB that doesn't have the message yet, and the bubble
|
||||
/// renders blank or vanishes on return. We hold a per-session
|
||||
/// copy here that survives `reset()` so `loadSessionHistory` can
|
||||
/// re-inject anything still in flight, and clean entries out as
|
||||
/// soon as a matching DB row appears.
|
||||
private var pendingLocalUserMessages: [String: [HermesMessage]] = [:]
|
||||
|
||||
private var streamingAssistantText = ""
|
||||
private var streamingThinkingText = ""
|
||||
private var streamingToolCalls: [HermesToolCall] = []
|
||||
|
||||
/// True while a turn is in flight, has emitted thought-stream
|
||||
/// bytes, but has NOT yet produced any visible assistant text.
|
||||
/// Surfaces the user-facing "Thinking…" status promotion (the
|
||||
/// model is reasoning before answering — Hermes reasoning models
|
||||
/// commonly take 3–8 s here, which the ScarfMon `firstThoughtByte`
|
||||
/// vs `firstByte` split makes visible). Becomes false the moment
|
||||
/// the first message chunk arrives or the turn ends.
|
||||
public var isStreamingThoughtsOnly: Bool {
|
||||
currentTurnStart != nil
|
||||
&& !streamingThinkingText.isEmpty
|
||||
&& streamingAssistantText.isEmpty
|
||||
}
|
||||
|
||||
// DB polling state (used in terminal mode fallback)
|
||||
private var lastKnownFingerprint: HermesDataService.MessageFingerprint?
|
||||
private var debounceTask: Task<Void, Never>?
|
||||
@@ -388,6 +440,9 @@ public final class RichChatViewModel {
|
||||
|
||||
public func reset() {
|
||||
debounceTask?.cancel()
|
||||
hydrationTask?.cancel()
|
||||
hydrationTask = nil
|
||||
isHydratingTools = false
|
||||
stopActivePolling()
|
||||
Task { await dataService.close() }
|
||||
messages = []
|
||||
@@ -435,13 +490,15 @@ public final class RichChatViewModel {
|
||||
|
||||
/// Re-fetch session metadata from DB to pick up cost/token updates.
|
||||
public func refreshSessionFromDB() async {
|
||||
guard let sessionId else { return }
|
||||
let opened = await dataService.open()
|
||||
guard opened else { return }
|
||||
if let session = await dataService.fetchSession(id: sessionId) {
|
||||
currentSession = session
|
||||
await ScarfMon.measureAsync(.sessionLoad, "mac.refreshSessionFromDB") {
|
||||
guard let sessionId else { return }
|
||||
let opened = await dataService.open()
|
||||
guard opened else { return }
|
||||
if let session = await dataService.fetchSession(id: sessionId) {
|
||||
currentSession = session
|
||||
}
|
||||
await dataService.close()
|
||||
}
|
||||
await dataService.close()
|
||||
}
|
||||
|
||||
// MARK: - ACP Event Handling
|
||||
@@ -468,6 +525,12 @@ public final class RichChatViewModel {
|
||||
reasoning: nil
|
||||
)
|
||||
messages.append(message)
|
||||
// Track the local message in the pending-user-messages cache
|
||||
// so a reset/resume cycle on this session before Hermes
|
||||
// persists the row can still re-inject it on return (#63).
|
||||
if let sid = sessionId {
|
||||
pendingLocalUserMessages[sid, default: []].append(message)
|
||||
}
|
||||
// Per-turn stopwatch (v2.5): record the start time only when
|
||||
// we're entering a fresh agent turn. /steer-style mid-run sends
|
||||
// arrive while isAgentWorking is already true; preserve the
|
||||
@@ -614,11 +677,23 @@ public final class RichChatViewModel {
|
||||
}
|
||||
|
||||
private func appendMessageChunk(text: String) {
|
||||
// ScarfMon "first byte" — fires once per turn, on the first
|
||||
// visible message chunk. Splits "user tap → first byte"
|
||||
// (network + Hermes thinking) from "first byte → turn end"
|
||||
// (streaming + Scarf rendering) so we can attribute slow-feel
|
||||
// bugs to the right side. `bytes` carries the first chunk's
|
||||
// size, not the full turn.
|
||||
if streamingAssistantText.isEmpty && currentTurnStart != nil {
|
||||
ScarfMon.event(.chatStream, "firstByte", count: 1, bytes: text.utf8.count)
|
||||
}
|
||||
streamingAssistantText += text
|
||||
upsertStreamingMessage()
|
||||
}
|
||||
|
||||
private func appendThoughtChunk(text: String) {
|
||||
if streamingThinkingText.isEmpty && currentTurnStart != nil {
|
||||
ScarfMon.event(.chatStream, "firstThoughtByte", count: 1, bytes: text.utf8.count)
|
||||
}
|
||||
streamingThinkingText += text
|
||||
upsertStreamingMessage()
|
||||
}
|
||||
@@ -831,6 +906,12 @@ public final class RichChatViewModel {
|
||||
|
||||
/// Convert the streaming message (id=0) into a permanent message and reset streaming state.
|
||||
private func finalizeStreamingMessage() {
|
||||
ScarfMon.measure(.chatStream, "finalizeStreamingMessage") {
|
||||
_finalizeStreamingMessageImpl()
|
||||
}
|
||||
}
|
||||
|
||||
private func _finalizeStreamingMessageImpl() {
|
||||
guard let idx = messages.firstIndex(where: { $0.id == Self.streamingId }) else { return }
|
||||
|
||||
// Only finalize if there's actual content
|
||||
@@ -838,22 +919,52 @@ public final class RichChatViewModel {
|
||||
|| !streamingThinkingText.isEmpty
|
||||
|| !streamingToolCalls.isEmpty
|
||||
|
||||
// ScarfMon — surface turns that finalize with NO visible
|
||||
// assistant text. Common Nous-model failure mode: model
|
||||
// emits a few thought-stream bytes then falls silent;
|
||||
// Hermes finalizes with empty content; the user sees a
|
||||
// stuck "(°□°) deliberating..." placeholder bubble. The
|
||||
// event fires for both the all-empty case (which gets
|
||||
// removed below) and the thoughts-only case (which is
|
||||
// kept as a permanent message with empty body) — both
|
||||
// are user-visible failures worth tracking.
|
||||
if streamingAssistantText.isEmpty && streamingToolCalls.isEmpty {
|
||||
ScarfMon.event(
|
||||
.chatStream,
|
||||
"emptyAssistantTurn",
|
||||
count: 1,
|
||||
bytes: streamingThinkingText.utf8.count
|
||||
)
|
||||
}
|
||||
|
||||
if hasContent {
|
||||
let id = nextLocalId
|
||||
nextLocalId -= 1
|
||||
messages[idx] = HermesMessage(
|
||||
id: id,
|
||||
sessionId: sessionId ?? "",
|
||||
role: "assistant",
|
||||
content: streamingAssistantText,
|
||||
toolCallId: nil,
|
||||
toolCalls: streamingToolCalls,
|
||||
toolName: nil,
|
||||
timestamp: Date(),
|
||||
tokenCount: nil,
|
||||
finishReason: streamingToolCalls.isEmpty ? "stop" : nil,
|
||||
reasoning: streamingThinkingText.isEmpty ? nil : streamingThinkingText
|
||||
)
|
||||
// Wrap the streaming-id rewrite in a no-animation
|
||||
// transaction. Without this SwiftUI sees an identity
|
||||
// change for the streaming ForEach element (id 0 → new
|
||||
// permanent id) and runs an animated diff against
|
||||
// adjacent elements, which costs ~5–8 RichMessageBubble
|
||||
// body re-evaluations per turn-end (visible in the
|
||||
// ScarfMon ring as a 1–2 ms burst right after every
|
||||
// `finalizeStreamingMessage` interval). The new message
|
||||
// is content-equal to the streaming one — there is no
|
||||
// animation worth running.
|
||||
withTransaction(Transaction(animation: nil)) {
|
||||
messages[idx] = HermesMessage(
|
||||
id: id,
|
||||
sessionId: sessionId ?? "",
|
||||
role: "assistant",
|
||||
content: streamingAssistantText,
|
||||
toolCallId: nil,
|
||||
toolCalls: streamingToolCalls,
|
||||
toolName: nil,
|
||||
timestamp: Date(),
|
||||
tokenCount: nil,
|
||||
finishReason: streamingToolCalls.isEmpty ? "stop" : nil,
|
||||
reasoning: streamingThinkingText.isEmpty ? nil : streamingThinkingText
|
||||
)
|
||||
}
|
||||
// Capture per-turn duration so the chat UI can render the
|
||||
// stopwatch pill (v2.5). Skips assistants we don't have a
|
||||
// start time for — e.g., the .promptComplete fired but the
|
||||
@@ -864,8 +975,12 @@ public final class RichChatViewModel {
|
||||
currentTurnStart = nil
|
||||
}
|
||||
} else {
|
||||
// Remove empty streaming placeholder
|
||||
messages.remove(at: idx)
|
||||
// Remove empty streaming placeholder. Same no-animation
|
||||
// transaction pattern — empty-finalize used to ripple the
|
||||
// ForEach diff to every following bubble.
|
||||
withTransaction(Transaction(animation: nil)) {
|
||||
messages.remove(at: idx)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset streaming state for next chunk
|
||||
@@ -940,7 +1055,20 @@ public final class RichChatViewModel {
|
||||
/// Load message history from the DB, optionally combining an origin session
|
||||
/// (e.g., CLI session) with the current ACP session.
|
||||
public func loadSessionHistory(sessionId: String, acpSessionId: String? = nil) async {
|
||||
await ScarfMon.measureAsync(.sessionLoad, "mac.hydrateMessages") {
|
||||
self.sessionId = sessionId
|
||||
// Capture the session-id we're loading FOR so we can verify
|
||||
// it's still the active one before assigning to `messages`.
|
||||
// Without this guard, switching to a small chat while a
|
||||
// larger one is mid-fetch can result in last-write-wins:
|
||||
// the slow fetch finishes after the small chat's, drops
|
||||
// the user back into the big chat's transcript, and the
|
||||
// user has to reselect the small one. Observed in remote
|
||||
// perf captures (parallel fetchMessages calls, one timing
|
||||
// out at 30s for a 157-message session, the other 2-message
|
||||
// chat completing in 425ms; the 30s one's assignment
|
||||
// overwrote the small chat).
|
||||
let loadingForSession = sessionId
|
||||
// Force a fresh snapshot pull on remote contexts. An earlier open()
|
||||
// would have cached a stale copy — on resume we need whatever
|
||||
// Hermes has actually persisted since then, or the resumed session
|
||||
@@ -950,9 +1078,30 @@ public final class RichChatViewModel {
|
||||
// messages the agent streamed during the user's offline window.
|
||||
let opened = await dataService.refresh(forceFresh: true)
|
||||
guard opened else { return }
|
||||
// Race-check #1: session id may have changed during refresh.
|
||||
guard self.sessionId == loadingForSession else {
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateMessages.dropped", count: 1)
|
||||
return
|
||||
}
|
||||
|
||||
// v2.8 two-phase loader. Phase 1 — skeleton: user + assistant
|
||||
// rows only, no tool_calls JSON, no reasoning, no
|
||||
// reasoning_content. Wire payload bounded by conversational
|
||||
// text alone so chats with multi-page tool result blobs (the
|
||||
// 30s-timeout case) come up in seconds. Phase 2 (kicked off
|
||||
// below in a Task.detached) fills tool calls + tool results in
|
||||
// the background — the chat is usable while it runs.
|
||||
let pageSize = HistoryPageSize.initial
|
||||
var allMessages = await dataService.fetchMessages(sessionId: sessionId, limit: pageSize)
|
||||
let originOutcome = await dataService.fetchSkeletonMessages(sessionId: sessionId, limit: pageSize)
|
||||
var allMessages = originOutcome.messages
|
||||
var transportFailure: String? = originOutcome.transportError
|
||||
// Race-check #2: session id may have changed during the
|
||||
// long fetch (the most common race — a 30s timeout on a
|
||||
// big session lets the user switch to a small one and back).
|
||||
guard self.sessionId == loadingForSession else {
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateMessages.dropped", count: 1)
|
||||
return
|
||||
}
|
||||
// The DB has more on-disk history when the initial fetch
|
||||
// saturated the limit. The "Load earlier" affordance reads
|
||||
// this flag.
|
||||
@@ -964,17 +1113,63 @@ public final class RichChatViewModel {
|
||||
if let acpId = acpSessionId, acpId != sessionId {
|
||||
originSessionId = sessionId
|
||||
self.sessionId = acpId
|
||||
let acpMessages = await dataService.fetchMessages(sessionId: acpId, limit: pageSize)
|
||||
if !acpMessages.isEmpty {
|
||||
allMessages.append(contentsOf: acpMessages)
|
||||
let acpOutcome = await dataService.fetchSkeletonMessages(sessionId: acpId, limit: pageSize)
|
||||
// Race-check #3: same guard, after the second fetch.
|
||||
guard self.sessionId == acpId else {
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateMessages.dropped", count: 1)
|
||||
return
|
||||
}
|
||||
if let acpErr = acpOutcome.transportError, transportFailure == nil {
|
||||
transportFailure = acpErr
|
||||
}
|
||||
if !acpOutcome.messages.isEmpty {
|
||||
allMessages.append(contentsOf: acpOutcome.messages)
|
||||
allMessages.sort { ($0.timestamp ?? .distantPast) < ($1.timestamp ?? .distantPast) }
|
||||
moreHistory = moreHistory || acpMessages.count >= pageSize
|
||||
moreHistory = moreHistory || acpOutcome.messages.count >= pageSize
|
||||
}
|
||||
}
|
||||
|
||||
messages = allMessages
|
||||
// Issue #63 — re-inject any locally-created user messages
|
||||
// we still have on file for this session that haven't yet
|
||||
// shown up in state.db. Covers two paths:
|
||||
// 1. The user just sent a prompt then resumed a different
|
||||
// session before Hermes persisted the row. `reset()` had
|
||||
// cleared `messages` but the per-session pending cache
|
||||
// survived; restore the row here so the bubble doesn't
|
||||
// come back blank.
|
||||
// 2. The DB-resume path on first load — a previously-pending
|
||||
// message Hermes is still mid-write may not appear in
|
||||
// this fetch. We merge it in, and drop it from the cache
|
||||
// as soon as a matching DB row (same content, persisted
|
||||
// id ≥ 0) shows up.
|
||||
let pendingForSession = pendingLocalUserMessages[sessionId] ?? []
|
||||
if pendingForSession.isEmpty {
|
||||
messages = allMessages
|
||||
} else {
|
||||
var merged = allMessages
|
||||
var stillPending: [HermesMessage] = []
|
||||
for local in pendingForSession {
|
||||
let persisted = merged.contains { msg in
|
||||
msg.isUser && msg.id >= 0 && msg.content == local.content
|
||||
}
|
||||
if persisted {
|
||||
continue // DB caught up — drop the local copy
|
||||
}
|
||||
if !merged.contains(where: { $0.id == local.id }) {
|
||||
merged.append(local)
|
||||
}
|
||||
stillPending.append(local)
|
||||
}
|
||||
merged.sort { ($0.timestamp ?? .distantPast) < ($1.timestamp ?? .distantPast) }
|
||||
messages = merged
|
||||
if stillPending.isEmpty {
|
||||
pendingLocalUserMessages.removeValue(forKey: sessionId)
|
||||
} else {
|
||||
pendingLocalUserMessages[sessionId] = stillPending
|
||||
}
|
||||
}
|
||||
currentSession = session
|
||||
let minId = allMessages.map(\.id).min() ?? 0
|
||||
let minId = messages.map(\.id).min() ?? 0
|
||||
nextLocalId = min(minId - 1, -1)
|
||||
// Track the oldest loaded id from THIS session (not the merged
|
||||
// origin) so `loadEarlier()` pages back through the live ACP
|
||||
@@ -987,7 +1182,182 @@ public final class RichChatViewModel {
|
||||
.map(\.id)
|
||||
.min()
|
||||
hasMoreHistory = moreHistory
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateMessages.rows", count: messages.count)
|
||||
buildMessageGroups()
|
||||
|
||||
// Partial-result detection — if a fetch tripped a transport
|
||||
// failure (SSH timeout / ControlMaster drop) the user is now
|
||||
// looking at zero or near-zero messages with no idea why. The
|
||||
// pre-v2.8 behavior was a silent empty transcript. Surface a
|
||||
// banner via the existing acpError triplet so the user sees
|
||||
// "couldn't load full history — connection slow." We assume
|
||||
// more history exists (so the "Load earlier" affordance is
|
||||
// honest about the gap) — caller can retry by reopening the
|
||||
// session.
|
||||
if let reason = transportFailure {
|
||||
acpError = "Couldn't load full chat history — the connection to \(dataService.context.displayName) timed out."
|
||||
acpErrorHint = "Reopen the session to retry, or check the SSH link if this keeps happening."
|
||||
acpErrorDetails = reason
|
||||
acpErrorOAuthProvider = nil
|
||||
hasMoreHistory = true
|
||||
} else {
|
||||
// v2.8 — kick off background hydration of tool_calls JSON
|
||||
// and tool result rows for the just-loaded skeleton.
|
||||
// Non-blocking on the main load path (chat is usable).
|
||||
startToolHydration(loadingForSession: self.sessionId ?? sessionId)
|
||||
}
|
||||
} // end measureAsync(.sessionLoad, "mac.hydrateMessages")
|
||||
}
|
||||
|
||||
/// Phase 2 of the two-phase chat loader. Pulls `tool_calls` JSON
|
||||
/// for the loaded assistant rows, then fetches `role='tool'` rows
|
||||
/// in the loaded id range and splices both into `messages` /
|
||||
/// `messageGroups` without disturbing what the user is already
|
||||
/// reading. Cancellable — restarting (a session switch, a
|
||||
/// `reset()`) drops any in-flight pass.
|
||||
///
|
||||
/// Tool calls go in first because they live ON the existing
|
||||
/// assistant message and surface the most-visible UI affordance
|
||||
/// (the tool card chips). Tool result content rows go in second
|
||||
/// because they're the heaviest payload and the UI degrades
|
||||
/// gracefully without them (the cards still show "running" /
|
||||
/// "complete" state; only the result body is missing).
|
||||
private func startToolHydration(loadingForSession: String) {
|
||||
hydrationTask?.cancel()
|
||||
let sessionForLoad = loadingForSession
|
||||
let dataService = self.dataService
|
||||
hydrationTask = Task { @MainActor [weak self] in
|
||||
guard let self else { return }
|
||||
self.isHydratingTools = true
|
||||
defer { self.isHydratingTools = false }
|
||||
|
||||
// Snapshot the assistant ids + id range from the messages
|
||||
// we just loaded. Doing this on MainActor keeps us in step
|
||||
// with the observable view of `messages`; the actual
|
||||
// SQL calls happen in `await` slots that release the actor.
|
||||
let assistantIds = self.messages
|
||||
.filter { $0.isAssistant && $0.id > 0 }
|
||||
.map(\.id)
|
||||
guard let minId = self.messages.map(\.id).min(),
|
||||
let maxId = self.messages.map(\.id).max(),
|
||||
!assistantIds.isEmpty || minId < maxId else {
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 2a — tool_calls JSON. Splice parsed values into
|
||||
// each assistant message that has them.
|
||||
let toolCallMap = await dataService.hydrateAssistantToolCalls(messageIds: assistantIds)
|
||||
if Task.isCancelled || self.sessionId != sessionForLoad {
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateTools.dropped", count: 1)
|
||||
return
|
||||
}
|
||||
if !toolCallMap.isEmpty {
|
||||
self.messages = self.messages.map { msg in
|
||||
guard msg.isAssistant, let calls = toolCallMap[msg.id] else { return msg }
|
||||
return msg.withToolCalls(calls)
|
||||
}
|
||||
self.buildMessageGroups()
|
||||
}
|
||||
|
||||
// Phase 2b — tool result rows. Default OFF (v2.8). A
|
||||
// single tool result blob (file dump, stack trace) can run
|
||||
// hundreds of KB; bulk-fetching all of them during chat
|
||||
// resume on a slow remote was the cause of the 30s timeout
|
||||
// observed in 2026-05-05 dogfooding. Users can opt in via
|
||||
// Settings → Display → "Load tool results in past chats"
|
||||
// when bandwidth is plentiful. Tool call CARDS still
|
||||
// render either way (`tool_calls` JSON loads in Phase 2a);
|
||||
// only the inspector pane's "Output" section is empty
|
||||
// until the user opens a card, at which point a per-call
|
||||
// lazy fetch fills it in.
|
||||
let loadResults = UserDefaults.standard.bool(
|
||||
forKey: Self.loadHistoricalToolResultsKey
|
||||
)
|
||||
guard loadResults else {
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateTools.skippedToolResults", count: 1)
|
||||
return
|
||||
}
|
||||
let toolResults = await dataService.fetchToolResultsInRange(
|
||||
sessionId: sessionForLoad,
|
||||
minId: minId,
|
||||
maxId: maxId
|
||||
)
|
||||
if Task.isCancelled || self.sessionId != sessionForLoad {
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateTools.dropped", count: 1)
|
||||
return
|
||||
}
|
||||
if !toolResults.isEmpty {
|
||||
var merged = self.messages
|
||||
let existingIds = Set(merged.map(\.id))
|
||||
for tr in toolResults where !existingIds.contains(tr.id) {
|
||||
merged.append(tr)
|
||||
}
|
||||
merged.sort { lhs, rhs in
|
||||
let lt = lhs.timestamp ?? .distantPast
|
||||
let rt = rhs.timestamp ?? .distantPast
|
||||
if lt != rt { return lt < rt }
|
||||
return lhs.id < rhs.id
|
||||
}
|
||||
self.messages = merged
|
||||
self.buildMessageGroups()
|
||||
}
|
||||
ScarfMon.event(.sessionLoad, "mac.hydrateTools.complete", count: 1)
|
||||
}
|
||||
}
|
||||
|
||||
/// Lazy-load the content of a single tool result by call id and
|
||||
/// splice it into `messages` / `messageGroups` as a synthetic
|
||||
/// `role='tool'` row. Used by `ChatInspectorPane` when the user
|
||||
/// opens a tool call card whose result hasn't been hydrated yet
|
||||
/// (auto-hydrate is opt-in via `loadHistoricalToolResultsKey`).
|
||||
/// No-op when the result is already present in the transcript or
|
||||
/// the session id has changed underneath us.
|
||||
@MainActor
|
||||
public func loadToolResultIfMissing(callId: String) async {
|
||||
guard let sessionForLoad = sessionId else { return }
|
||||
// Already in the transcript? Done.
|
||||
if messages.contains(where: { $0.toolCallId == callId && $0.isToolResult }) {
|
||||
return
|
||||
}
|
||||
guard let content = await dataService.fetchToolResult(callId: callId) else {
|
||||
return
|
||||
}
|
||||
guard self.sessionId == sessionForLoad else { return }
|
||||
// Build a synthetic tool result row. We don't have the original
|
||||
// row id (would need a second SELECT) so we use a negative
|
||||
// local id that won't collide with persisted rows. The bubble
|
||||
// and inspector both key on `toolCallId`, not `id`, for tool
|
||||
// results — so this is enough to render correctly.
|
||||
let placeholderId = nextLocalId
|
||||
nextLocalId -= 1
|
||||
let synthetic = HermesMessage(
|
||||
id: placeholderId,
|
||||
sessionId: sessionForLoad,
|
||||
role: "tool",
|
||||
content: content,
|
||||
toolCallId: callId,
|
||||
toolCalls: [],
|
||||
toolName: nil,
|
||||
timestamp: Date(),
|
||||
tokenCount: nil,
|
||||
finishReason: nil,
|
||||
reasoning: nil,
|
||||
reasoningContent: nil
|
||||
)
|
||||
messages.append(synthetic)
|
||||
// Re-sort so the tool result lands next to its assistant
|
||||
// parent. ID-based ordering preserves the chronological order
|
||||
// of all the persisted rows; the synthetic placeholder uses a
|
||||
// negative id so it slots in last — fine for inspector display
|
||||
// since the inspector keys on toolCallId.
|
||||
messages.sort { lhs, rhs in
|
||||
let lt = lhs.timestamp ?? .distantPast
|
||||
let rt = rhs.timestamp ?? .distantPast
|
||||
if lt != rt { return lt < rt }
|
||||
return lhs.id < rhs.id
|
||||
}
|
||||
buildMessageGroups()
|
||||
ScarfMon.event(.sessionLoad, "mac.lazyToolResult.fetched", count: 1)
|
||||
}
|
||||
|
||||
// MARK: - Load Earlier (pagination)
|
||||
|
||||
@@ -49,6 +49,18 @@ public final class SkillsViewModel {
|
||||
public var hubMessage: String?
|
||||
public var hubSource: String = "all"
|
||||
|
||||
/// Last successful `browseHub` payload, kept around so that the
|
||||
/// "All Sources" search path can filter client-side (issue #79).
|
||||
/// `hermes skills search` with no `--source` flag routes through
|
||||
/// the centralized `hermes-index` source which can miss skills
|
||||
/// that are visible in browse — we'd rather give the user the
|
||||
/// canonical "type-to-filter" UX than chase Hermes's index gaps.
|
||||
/// Source-specific searches still shell out to the CLI for full
|
||||
/// upstream semantics. Setter is `internal` so the in-tree test
|
||||
/// suite can seed the cache without invoking the live CLI;
|
||||
/// out-of-module callers can still only read.
|
||||
public internal(set) var lastBrowseResults: [HermesHubSkill] = []
|
||||
|
||||
public let hubSources = ["all", "official", "skills-sh", "well-known", "github", "clawhub", "lobehub"]
|
||||
|
||||
public var filteredCategories: [HermesSkillCategory] {
|
||||
@@ -82,16 +94,23 @@ public final class SkillsViewModel {
|
||||
let ctx = context
|
||||
let xport = transport
|
||||
let pins = pinnedNames
|
||||
let cats: [HermesSkillCategory] = await Task.detached {
|
||||
let disabled = Self.readDisabledSkillNames(context: ctx)
|
||||
let pinned = pins ?? Self.readPinnedSkillNames(context: ctx)
|
||||
return SkillsScanner.scan(
|
||||
context: ctx,
|
||||
transport: xport,
|
||||
disabledNames: disabled,
|
||||
pinnedNames: pinned
|
||||
)
|
||||
}.value
|
||||
// v2.8 — instrumented so future captures show how many SSH
|
||||
// RTTs the SkillsScanner walk costs on remote (it stats
|
||||
// every ~/.hermes/skills/* directory + reads SKILL.md per).
|
||||
let cats: [HermesSkillCategory] = await ScarfMon.measureAsync(.diskIO, "skills.load") {
|
||||
await Task.detached {
|
||||
let disabled = Self.readDisabledSkillNames(context: ctx)
|
||||
let pinned = pins ?? Self.readPinnedSkillNames(context: ctx)
|
||||
return SkillsScanner.scan(
|
||||
context: ctx,
|
||||
transport: xport,
|
||||
disabledNames: disabled,
|
||||
pinnedNames: pinned
|
||||
)
|
||||
}.value
|
||||
}
|
||||
let totalSkills = cats.reduce(0) { $0 + $1.skills.count }
|
||||
ScarfMon.event(.diskIO, "skills.load.count", count: totalSkills)
|
||||
categories = cats
|
||||
isLoading = false
|
||||
}
|
||||
@@ -253,14 +272,34 @@ public final class SkillsViewModel {
|
||||
browseHub()
|
||||
return
|
||||
}
|
||||
let source = hubSource
|
||||
let query = hubQuery
|
||||
// Issue #79 — for "All Sources", filter the cached browse list
|
||||
// client-side instead of shelling out. Hermes's all-source
|
||||
// search routes through its centralized index which can miss
|
||||
// skills (e.g. honcho) that browse surfaces from non-indexed
|
||||
// registries. Specific-source searches keep the CLI path so
|
||||
// power users still get full upstream search semantics.
|
||||
if source == "all" {
|
||||
if lastBrowseResults.isEmpty {
|
||||
// No cache yet — kick off a browse, then filter on
|
||||
// completion. The chained call lets the user type a
|
||||
// query before ever clicking Browse.
|
||||
browseHubThenFilter(query: query)
|
||||
} else {
|
||||
// Pure in-memory filter — runs synchronously on the
|
||||
// calling actor (UI invocations are already on
|
||||
// MainActor) so the user sees the narrowed list
|
||||
// without a render-tick gap.
|
||||
applyClientSideFilter(query: query, against: lastBrowseResults)
|
||||
}
|
||||
return
|
||||
}
|
||||
isHubLoading = true
|
||||
let bin = context.paths.hermesBinary
|
||||
let xport = transport
|
||||
let source = hubSource
|
||||
let query = hubQuery
|
||||
Task.detached { [weak self] in
|
||||
var args = ["skills", "search", query, "--limit", "40"]
|
||||
if source != "all" { args += ["--source", source] }
|
||||
let args = ["skills", "search", query, "--limit", "40", "--source", source]
|
||||
let result = Self.runHermes(executable: bin, args: args, transport: xport, timeout: 30)
|
||||
let parsed = HermesSkillsHubParser.parseHubList(result.output)
|
||||
await self?.finishBrowse(
|
||||
@@ -272,6 +311,66 @@ public final class SkillsViewModel {
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a browse fetch and then immediately apply a client-side
|
||||
/// filter. Used by `searchHub` when the user types into search
|
||||
/// before any browse has cached results.
|
||||
private func browseHubThenFilter(query: String) {
|
||||
isHubLoading = true
|
||||
let bin = context.paths.hermesBinary
|
||||
let xport = transport
|
||||
Task.detached { [weak self] in
|
||||
let args = ["skills", "browse", "--size", "40"]
|
||||
let result = Self.runHermes(executable: bin, args: args, transport: xport, timeout: 30)
|
||||
let parsed = HermesSkillsHubParser.parseHubList(result.output)
|
||||
await self?.finishBrowseThenFilter(
|
||||
browseResults: parsed,
|
||||
query: query,
|
||||
exitCode: result.exitCode,
|
||||
rawOutput: result.output
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@MainActor
|
||||
private func finishBrowseThenFilter(
|
||||
browseResults: [HermesHubSkill],
|
||||
query: String,
|
||||
exitCode: Int32,
|
||||
rawOutput: String
|
||||
) async {
|
||||
if exitCode == 0 {
|
||||
lastBrowseResults = browseResults
|
||||
applyClientSideFilter(query: query, against: browseResults)
|
||||
} else {
|
||||
// Surface the underlying browse failure rather than a
|
||||
// blank "no matches" state — the user typed a query, not
|
||||
// a browse request, but the cache was empty so we tried.
|
||||
isHubLoading = false
|
||||
hubResults = []
|
||||
let detail = Self.firstSignificantLine(rawOutput)
|
||||
hubMessage = detail.isEmpty
|
||||
? "Search failed (exit \(exitCode))"
|
||||
: "Search failed: \(detail)"
|
||||
}
|
||||
}
|
||||
|
||||
private func applyClientSideFilter(query: String, against pool: [HermesHubSkill]) {
|
||||
let needle = query.trimmingCharacters(in: .whitespaces)
|
||||
let matches: [HermesHubSkill]
|
||||
if needle.isEmpty {
|
||||
matches = pool
|
||||
} else {
|
||||
matches = pool.filter { skill in
|
||||
skill.name.localizedCaseInsensitiveContains(needle)
|
||||
|| skill.description.localizedCaseInsensitiveContains(needle)
|
||||
|| skill.identifier.localizedCaseInsensitiveContains(needle)
|
||||
}
|
||||
}
|
||||
isHubLoading = false
|
||||
hubResults = matches
|
||||
hubMessage = matches.isEmpty ? "No matches" : nil
|
||||
}
|
||||
|
||||
public func installHubSkill(_ skill: HermesHubSkill) {
|
||||
isHubLoading = true
|
||||
hubMessage = "Installing \(skill.identifier)…"
|
||||
@@ -414,6 +513,13 @@ public final class SkillsViewModel {
|
||||
) async {
|
||||
isHubLoading = false
|
||||
hubResults = results
|
||||
// Cache the fresh browse payload so the "All Sources" search
|
||||
// path can filter client-side (issue #79). Search results are
|
||||
// not cached — they're already filtered by the user's query
|
||||
// and would poison the filter pool.
|
||||
if !isSearch && exitCode == 0 {
|
||||
lastBrowseResults = results
|
||||
}
|
||||
if results.isEmpty {
|
||||
if exitCode == 0 {
|
||||
hubMessage = isSearch ? "No matches" : "No results"
|
||||
|
||||
@@ -0,0 +1,150 @@
|
||||
#if canImport(SQLite3)
|
||||
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Test double for `HermesQueryBackend`. Lets the data-service-façade
|
||||
/// tests assert which SQL gets emitted, with which params, and feed
|
||||
/// scripted result rows back.
|
||||
///
|
||||
/// Implemented as an `actor` to satisfy the protocol's `Sendable`
|
||||
/// requirement and to mirror how the real backends serialize state.
|
||||
/// Marked `final` to prevent accidental subclassing — Swift Testing
|
||||
/// instances are short-lived per-`@Test`, but a stray subclass could
|
||||
/// hide override quirks.
|
||||
final actor MockHermesQueryBackend: HermesQueryBackend {
|
||||
|
||||
// MARK: - Knobs
|
||||
|
||||
var openShouldSucceed: Bool = true
|
||||
var hasV07Schema: Bool = false
|
||||
var hasV011Schema: Bool = false
|
||||
var lastOpenError: String? = nil
|
||||
|
||||
/// Map of SQL prefix → rows. Lookup picks the longest matching
|
||||
/// prefix, so callers can register both broad ("SELECT") and
|
||||
/// narrow ("SELECT id, source FROM sessions") matchers without
|
||||
/// the broad one swallowing the narrow one.
|
||||
private var scriptedResults: [String: [Row]] = [:]
|
||||
|
||||
/// Map of SQL prefix → backend error to throw instead of returning
|
||||
/// rows. Used to test the data-service's error-swallowing paths.
|
||||
private var scriptedFailures: [String: BackendError] = [:]
|
||||
|
||||
/// Every `query(_:params:)` call lands here in order — assertion
|
||||
/// material for "did the façade emit the SQL we expected".
|
||||
private(set) var queryLog: [(sql: String, params: [SQLValue])] = []
|
||||
|
||||
/// Every `queryBatch` call lands here in order, one outer entry
|
||||
/// per call, inner entries for each statement in that batch.
|
||||
private(set) var batchLog: [[(sql: String, params: [SQLValue])]] = []
|
||||
|
||||
/// Track open/refresh/close lifecycle for a couple of tests that
|
||||
/// want to assert "façade really did call open()".
|
||||
private(set) var openCallCount = 0
|
||||
private(set) var refreshCallCount = 0
|
||||
private(set) var closeCallCount = 0
|
||||
|
||||
// MARK: - Knob mutators (called from tests)
|
||||
|
||||
func setOpenShouldSucceed(_ value: Bool) { openShouldSucceed = value }
|
||||
func setHasV07Schema(_ value: Bool) { hasV07Schema = value }
|
||||
func setHasV011Schema(_ value: Bool) { hasV011Schema = value }
|
||||
func setLastOpenError(_ value: String?) { lastOpenError = value }
|
||||
|
||||
/// Build a one-row result keyed on `prefix`. `columns` is the
|
||||
/// column-name → position map; `values` must be the same length.
|
||||
func _seedRow(forSQLPrefix prefix: String, columns: [String: Int], values: [SQLValue]) {
|
||||
let row = Row(values: values, columnIndex: columns)
|
||||
scriptedResults[prefix] = [row]
|
||||
}
|
||||
|
||||
/// Seed an arbitrary row sequence for queries that share `prefix`.
|
||||
func _seedRows(forSQLPrefix prefix: String, _ rows: [Row]) {
|
||||
scriptedResults[prefix] = rows
|
||||
}
|
||||
|
||||
/// Make `query` throw the specified `error` whenever it sees a SQL
|
||||
/// that begins with `prefix`.
|
||||
func _seedFailure(forSQLPrefix prefix: String, error: BackendError) {
|
||||
scriptedFailures[prefix] = error
|
||||
}
|
||||
|
||||
// MARK: - HermesQueryBackend conformance
|
||||
|
||||
func open() async -> Bool {
|
||||
openCallCount += 1
|
||||
return openShouldSucceed
|
||||
}
|
||||
|
||||
@discardableResult
|
||||
func refresh(forceFresh: Bool) async -> Bool {
|
||||
refreshCallCount += 1
|
||||
return openShouldSucceed
|
||||
}
|
||||
|
||||
func close() async {
|
||||
closeCallCount += 1
|
||||
}
|
||||
|
||||
func query(_ sql: String, params: [SQLValue]) async throws -> [Row] {
|
||||
queryLog.append((sql: sql, params: params))
|
||||
if let failure = longestMatchingFailure(for: sql) {
|
||||
throw failure
|
||||
}
|
||||
return longestMatchingRows(for: sql) ?? []
|
||||
}
|
||||
|
||||
func queryBatch(_ statements: [(sql: String, params: [SQLValue])]) async throws -> [[Row]] {
|
||||
batchLog.append(statements)
|
||||
var out: [[Row]] = []
|
||||
out.reserveCapacity(statements.count)
|
||||
for stmt in statements {
|
||||
if let failure = longestMatchingFailure(for: stmt.sql) {
|
||||
throw failure
|
||||
}
|
||||
out.append(longestMatchingRows(for: stmt.sql) ?? [])
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// MARK: - Internals
|
||||
|
||||
/// Pick the longest registered prefix that `sql` starts with.
|
||||
/// Ties go to whichever ordering Dictionary iteration produced —
|
||||
/// callers should not register two equal-length matchers for the
|
||||
/// same SQL because the resolution order is undefined.
|
||||
private func longestMatchingRows(for sql: String) -> [Row]? {
|
||||
var bestMatch: (key: String, rows: [Row])?
|
||||
for (prefix, rows) in scriptedResults {
|
||||
if sql.hasPrefix(prefix) {
|
||||
if let current = bestMatch {
|
||||
if prefix.count > current.key.count {
|
||||
bestMatch = (prefix, rows)
|
||||
}
|
||||
} else {
|
||||
bestMatch = (prefix, rows)
|
||||
}
|
||||
}
|
||||
}
|
||||
return bestMatch?.rows
|
||||
}
|
||||
|
||||
private func longestMatchingFailure(for sql: String) -> BackendError? {
|
||||
var bestMatch: (key: String, error: BackendError)?
|
||||
for (prefix, error) in scriptedFailures {
|
||||
if sql.hasPrefix(prefix) {
|
||||
if let current = bestMatch {
|
||||
if prefix.count > current.key.count {
|
||||
bestMatch = (prefix, error)
|
||||
}
|
||||
} else {
|
||||
bestMatch = (prefix, error)
|
||||
}
|
||||
}
|
||||
}
|
||||
return bestMatch?.error
|
||||
}
|
||||
}
|
||||
|
||||
#endif // canImport(SQLite3)
|
||||
@@ -9,6 +9,13 @@ import Foundation
|
||||
|
||||
// MARK: - Version line parsing
|
||||
|
||||
@Test func parseV013ReleaseLine() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.13.0 (2026.5.7)")
|
||||
#expect(caps.semver == HermesCapabilities.SemVer(major: 0, minor: 13, patch: 0))
|
||||
#expect(caps.dateVersion == HermesCapabilities.DateVersion(year: 2026, month: 5, day: 7))
|
||||
#expect(caps.detected)
|
||||
}
|
||||
|
||||
@Test func parseV012ReleaseLine() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.12.0 (2026.4.30)")
|
||||
#expect(caps.semver == HermesCapabilities.SemVer(major: 0, minor: 12, patch: 0))
|
||||
@@ -75,8 +82,42 @@ import Foundation
|
||||
|
||||
// MARK: - Capability flags
|
||||
|
||||
@Test func v013FlagsAllOn() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.13.0 (2026.5.7)")
|
||||
// v0.12 surfaces remain on.
|
||||
#expect(caps.hasCurator)
|
||||
#expect(caps.hasKanban)
|
||||
#expect(caps.hasACPImagePrompts)
|
||||
#expect(!caps.hasFlushMemoriesAux)
|
||||
// v0.13 surfaces light up.
|
||||
#expect(caps.hasGoals)
|
||||
#expect(caps.hasACPQueue)
|
||||
#expect(caps.hasACPSteerOnIdle)
|
||||
#expect(caps.hasKanbanDiagnostics)
|
||||
#expect(caps.hasCuratorArchive)
|
||||
#expect(caps.hasGoogleChatPlatform)
|
||||
#expect(caps.hasGatewayAllowlists)
|
||||
#expect(caps.hasGatewayBusyAckToggle)
|
||||
#expect(caps.hasGatewayRestartNotification)
|
||||
#expect(caps.hasGatewayList)
|
||||
#expect(caps.hasMCPSSETransport)
|
||||
#expect(caps.hasCronNoAgent)
|
||||
#expect(caps.hasWebToolsBackendSplit)
|
||||
#expect(caps.hasProfileNoSkills)
|
||||
#expect(caps.hasContextCompressionCount)
|
||||
#expect(caps.hasNewWithSessionName)
|
||||
#expect(caps.hasUpdateNonInteractive)
|
||||
#expect(caps.hasOpenRouterResponseCache)
|
||||
#expect(caps.hasImageGenModel)
|
||||
#expect(caps.hasDisplayLanguage)
|
||||
#expect(caps.hasXAIVoiceCloning)
|
||||
#expect(caps.hasVideoAnalyze)
|
||||
#expect(caps.hasTransformLLMOutputHook)
|
||||
}
|
||||
|
||||
@Test func v012FlagsAllOn() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.12.0 (2026.4.30)")
|
||||
// v0.12 surfaces on.
|
||||
#expect(caps.hasCurator)
|
||||
#expect(caps.hasFallbackCommand)
|
||||
#expect(caps.hasKanban)
|
||||
@@ -94,6 +135,22 @@ import Foundation
|
||||
#expect(caps.hasRedactionToggle)
|
||||
// flush_memories was REMOVED in v0.12 — flag inverts.
|
||||
#expect(!caps.hasFlushMemoriesAux)
|
||||
// v0.13 surfaces stay off on a v0.12 host.
|
||||
#expect(!caps.hasGoals)
|
||||
#expect(!caps.hasACPQueue)
|
||||
#expect(!caps.hasKanbanDiagnostics)
|
||||
#expect(!caps.hasCuratorArchive)
|
||||
#expect(!caps.hasGoogleChatPlatform)
|
||||
#expect(!caps.hasGatewayAllowlists)
|
||||
#expect(!caps.hasMCPSSETransport)
|
||||
#expect(!caps.hasCronNoAgent)
|
||||
#expect(!caps.hasWebToolsBackendSplit)
|
||||
#expect(!caps.hasProfileNoSkills)
|
||||
#expect(!caps.hasContextCompressionCount)
|
||||
#expect(!caps.hasOpenRouterResponseCache)
|
||||
#expect(!caps.hasImageGenModel)
|
||||
#expect(!caps.hasDisplayLanguage)
|
||||
#expect(!caps.hasXAIVoiceCloning)
|
||||
}
|
||||
|
||||
@Test func v011FlagsAllOff() {
|
||||
@@ -126,11 +183,45 @@ import Foundation
|
||||
}
|
||||
|
||||
@Test func futureVersionRetainsCapabilities() {
|
||||
// A v0.13 (hypothetical) should still see all v0.12 capabilities on.
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.13.0 (2026.6.1)")
|
||||
// A v0.14 (hypothetical) should still see all v0.12 + v0.13 capabilities on.
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.14.0 (2026.7.1)")
|
||||
#expect(caps.hasCurator)
|
||||
#expect(caps.hasACPImagePrompts)
|
||||
#expect(caps.hasGoals)
|
||||
#expect(caps.hasKanbanDiagnostics)
|
||||
#expect(caps.hasCuratorArchive)
|
||||
// And flush_memories stays gone.
|
||||
#expect(!caps.hasFlushMemoriesAux)
|
||||
}
|
||||
|
||||
@Test func v0_13_patchReleaseStillEnablesAllFlags() {
|
||||
// A v0.13.4 patch release should still enable every v0.13 flag.
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.13.4 (2026.5.20)")
|
||||
#expect(caps.hasGoals)
|
||||
#expect(caps.hasACPQueue)
|
||||
#expect(caps.hasKanbanDiagnostics)
|
||||
#expect(caps.hasGoogleChatPlatform)
|
||||
}
|
||||
|
||||
// MARK: - isV013OrLater convenience predicate
|
||||
|
||||
@Test func isV013OrLater_v013HostTrue() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.13.0 (2026.5.7)")
|
||||
#expect(caps.isV013OrLater)
|
||||
}
|
||||
|
||||
@Test func isV013OrLater_v012HostFalse() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.12.0 (2026.4.30)")
|
||||
#expect(!caps.isV013OrLater)
|
||||
}
|
||||
|
||||
@Test func isV013OrLater_emptyFalse() {
|
||||
let caps = HermesCapabilities.empty
|
||||
#expect(!caps.isV013OrLater)
|
||||
}
|
||||
|
||||
@Test func isV013OrLater_v014HostTrue() {
|
||||
let caps = HermesCapabilities.parseLine("Hermes Agent v0.14.0 (2026.7.1)")
|
||||
#expect(caps.isV013OrLater)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -151,4 +151,169 @@ import Foundation
|
||||
#expect(parsed?.patchCount == 2)
|
||||
#expect(parsed?.lastActivityLabel == "2026-04-25")
|
||||
}
|
||||
|
||||
// MARK: - v0.13 list-archived / prune fixtures (WS-4)
|
||||
|
||||
/// Empty JSON array → `[]`. Locks in the happy-path no-archives shape.
|
||||
@Test func listArchivedEmpty() throws {
|
||||
let result = try CuratorService.parseListArchived(stdout: "[]")
|
||||
#expect(result.isEmpty)
|
||||
}
|
||||
|
||||
/// Three archives with full optional fields. Asserts each
|
||||
/// optional value decodes through `decodeIfPresent` and that
|
||||
/// the computed labels resolve.
|
||||
@Test func listArchivedThreeSkills() throws {
|
||||
let json = """
|
||||
[
|
||||
{
|
||||
"name": "legacy-helper",
|
||||
"category": "templates",
|
||||
"archived_at": "2026-04-22T03:14:09Z",
|
||||
"reason": "stale: 91d unused",
|
||||
"size_bytes": 4521,
|
||||
"path": "/Users/u/.hermes/skills/.archived/legacy-helper"
|
||||
},
|
||||
{
|
||||
"name": "old-translator",
|
||||
"category": "user",
|
||||
"archived_at": "2026-04-23T10:00:00Z",
|
||||
"reason": "consolidated with translator",
|
||||
"size_bytes": 8192
|
||||
},
|
||||
{
|
||||
"name": "minimal"
|
||||
}
|
||||
]
|
||||
"""
|
||||
let result = try CuratorService.parseListArchived(stdout: json)
|
||||
#expect(result.count == 3)
|
||||
#expect(result[0].name == "legacy-helper")
|
||||
#expect(result[0].category == "templates")
|
||||
#expect(result[0].reason == "stale: 91d unused")
|
||||
#expect(result[0].sizeBytes == 4521)
|
||||
#expect(result[0].archivedAtLabel == "2026-04-22")
|
||||
#expect(result[0].path == "/Users/u/.hermes/skills/.archived/legacy-helper")
|
||||
|
||||
// Tolerant: only `name` set on the third row.
|
||||
#expect(result[2].name == "minimal")
|
||||
#expect(result[2].category == nil)
|
||||
#expect(result[2].reason == nil)
|
||||
#expect(result[2].archivedAtLabel == "—")
|
||||
#expect(result[2].sizeLabel == "—")
|
||||
}
|
||||
|
||||
/// `{"archived": [...]}` envelope is also accepted.
|
||||
@Test func listArchivedEnvelope() throws {
|
||||
let json = """
|
||||
{"archived": [
|
||||
{"name": "envelope-skill", "size_bytes": 1024}
|
||||
]}
|
||||
"""
|
||||
let result = try CuratorService.parseListArchived(stdout: json)
|
||||
#expect(result.count == 1)
|
||||
#expect(result[0].name == "envelope-skill")
|
||||
}
|
||||
|
||||
/// Text fallback when `--json` isn't supported. Each row carries
|
||||
/// the name in column 1 plus k=v chips for the optional fields.
|
||||
@Test func listArchivedTextFallback() {
|
||||
let text = """
|
||||
legacy-helper archived=2026-04-22 size=4521 reason=stale
|
||||
old-translator archived=2026-04-23 size=8192
|
||||
minimal-row
|
||||
"""
|
||||
let result = CuratorService.parseListArchivedText(text)
|
||||
#expect(result.count == 3)
|
||||
#expect(result[0].name == "legacy-helper")
|
||||
#expect(result[0].archivedAt == "2026-04-22")
|
||||
#expect(result[0].sizeBytes == 4521)
|
||||
#expect(result[0].reason == "stale")
|
||||
#expect(result[2].name == "minimal-row")
|
||||
#expect(result[2].sizeBytes == nil)
|
||||
}
|
||||
|
||||
/// Empty-state sentinel folds to `[]` (parallel to KanbanService's
|
||||
/// `"no matching tasks"` handling).
|
||||
@Test func listArchivedNoArchivedSentinel() throws {
|
||||
let result = try CuratorService.parseListArchived(stdout: "no archived skills\n")
|
||||
#expect(result.isEmpty)
|
||||
}
|
||||
|
||||
/// Whitespace-only stdout also folds to empty.
|
||||
@Test func listArchivedWhitespaceFoldsToEmpty() throws {
|
||||
let result = try CuratorService.parseListArchived(stdout: " \n\n")
|
||||
#expect(result.isEmpty)
|
||||
}
|
||||
|
||||
/// Decode failure (clearly non-JSON, non-text) throws. We accept
|
||||
/// JSON, the envelope, the empty sentinel, or text rows; anything
|
||||
/// else surfaces as a `CuratorError.decoding`.
|
||||
@Test func listArchivedNonsenseThrows() throws {
|
||||
do {
|
||||
_ = try CuratorService.parseListArchived(stdout: "{garbage")
|
||||
Issue.record("expected decoding throw")
|
||||
} catch let error as CuratorError {
|
||||
if case .decoding = error {
|
||||
// expected
|
||||
} else {
|
||||
Issue.record("unexpected error \(error)")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Prune-dry-run JSON with `would_remove` + `total_bytes`.
|
||||
@Test func pruneDryRunHappyPath() {
|
||||
let json = """
|
||||
{
|
||||
"would_remove": [
|
||||
{"name": "stale-a", "size_bytes": 1000},
|
||||
{"name": "stale-b", "size_bytes": 2000}
|
||||
],
|
||||
"total_bytes": 3000
|
||||
}
|
||||
"""
|
||||
let summary = CuratorService.parsePruneDryRun(json)
|
||||
#expect(summary.totalCount == 2)
|
||||
#expect(summary.totalBytes == 3000)
|
||||
#expect(summary.wouldRemove.first?.name == "stale-a")
|
||||
}
|
||||
|
||||
/// Zero-skill prune is a valid dry-run (no archives).
|
||||
@Test func pruneDryRunZeroSkills() {
|
||||
let json = """
|
||||
{"would_remove": [], "total_bytes": 0}
|
||||
"""
|
||||
let summary = CuratorService.parsePruneDryRun(json)
|
||||
#expect(summary.totalCount == 0)
|
||||
#expect(summary.totalBytes == 0)
|
||||
#expect(summary.totalBytesLabel == "—")
|
||||
}
|
||||
|
||||
/// Bare-array fallback: some Hermes builds may print just the
|
||||
/// would-remove list when the wrapper is missing.
|
||||
@Test func pruneDryRunBareArrayFallback() {
|
||||
let json = """
|
||||
[{"name": "lonely", "size_bytes": 500}]
|
||||
"""
|
||||
let summary = CuratorService.parsePruneDryRun(json)
|
||||
#expect(summary.totalCount == 1)
|
||||
#expect(summary.totalBytes == 500)
|
||||
}
|
||||
|
||||
/// Empty / whitespace stdout → zero summary (no decoding throw).
|
||||
@Test func pruneDryRunEmptyStaysSafe() {
|
||||
let summary = CuratorService.parsePruneDryRun(" \n")
|
||||
#expect(summary.totalCount == 0)
|
||||
#expect(summary.totalBytes == 0)
|
||||
}
|
||||
|
||||
/// Verify the size label uses the byte formatter (not raw bytes).
|
||||
@Test func archivedSkillSizeLabelFormats() {
|
||||
let big = HermesCuratorArchivedSkill(name: "x", sizeBytes: 1_500_000)
|
||||
// ByteCountFormatter produces a localized label; just verify
|
||||
// it's non-empty and not raw "1500000".
|
||||
#expect(!big.sizeLabel.isEmpty)
|
||||
#expect(big.sizeLabel != "1500000")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,338 @@
|
||||
#if canImport(SQLite3)
|
||||
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Exercises the `HermesDataService` façade against a `MockHermesQueryBackend`
|
||||
/// via the `internal init(context:backend:)` test seam. Focus is the SQL
|
||||
/// the façade emits + how it consumes the rows that come back.
|
||||
@Suite struct HermesDataServiceBackendTests {
|
||||
|
||||
// MARK: - Helpers
|
||||
|
||||
/// Build a `Row` from `(name, value)` pairs in column order.
|
||||
/// Mirrors the shape `LocalSQLiteBackend.executeOne` produces.
|
||||
private func makeRow(_ pairs: [(String, SQLValue)]) -> Row {
|
||||
var values: [SQLValue] = []
|
||||
var columnIndex: [String: Int] = [:]
|
||||
values.reserveCapacity(pairs.count)
|
||||
for (i, pair) in pairs.enumerated() {
|
||||
values.append(pair.1)
|
||||
columnIndex[pair.0] = i
|
||||
}
|
||||
return Row(values: values, columnIndex: columnIndex)
|
||||
}
|
||||
|
||||
/// Default 16-column session row matching `sessionColumns` for
|
||||
/// the bare base schema. Uses `.text("s1")` for id by default.
|
||||
private func makeBaseSessionRow(id: String = "s1") -> Row {
|
||||
makeRow([
|
||||
("id", .text(id)),
|
||||
("source", .text("acp")),
|
||||
("user_id", .null),
|
||||
("model", .text("gpt-5")),
|
||||
("title", .text("hello")),
|
||||
("parent_session_id", .null),
|
||||
("started_at", .real(1_700_000_000.0)),
|
||||
("ended_at", .null),
|
||||
("end_reason", .null),
|
||||
("message_count", .integer(5)),
|
||||
("tool_call_count", .integer(2)),
|
||||
("input_tokens", .integer(100)),
|
||||
("output_tokens", .integer(200)),
|
||||
("cache_read_tokens", .integer(0)),
|
||||
("cache_write_tokens", .integer(0)),
|
||||
("estimated_cost_usd", .real(0.05))
|
||||
])
|
||||
}
|
||||
|
||||
/// 10-column message row matching `messageColumns` for the bare base schema.
|
||||
private func makeBaseMessageRow(id: Int, sessionId: String = "s1", timestamp: Double = 1_700_000_001.0) -> Row {
|
||||
makeRow([
|
||||
("id", .integer(Int64(id))),
|
||||
("session_id", .text(sessionId)),
|
||||
("role", .text("user")),
|
||||
("content", .text("hi #\(id)")),
|
||||
("tool_call_id", .null),
|
||||
("tool_calls", .null),
|
||||
("tool_name", .null),
|
||||
("timestamp", .real(timestamp)),
|
||||
("token_count", .integer(10)),
|
||||
("finish_reason", .null)
|
||||
])
|
||||
}
|
||||
|
||||
/// Use a real `ServerContext.local` so the data service has a
|
||||
/// transport to construct (it's never used by these tests — every
|
||||
/// I/O path goes through the injected backend).
|
||||
private let context: ServerContext = .local
|
||||
|
||||
// MARK: - fetchSessions
|
||||
|
||||
@Test func fetchSessionsEmitsExpectedSQLPrefixAndDefaultLimit() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
|
||||
_ = await service.fetchSessions()
|
||||
|
||||
let log = await mock.queryLog
|
||||
#expect(log.count == 1)
|
||||
let first = log[0]
|
||||
#expect(first.sql.hasPrefix("SELECT id, source"))
|
||||
#expect(first.sql.contains("FROM sessions WHERE parent_session_id IS NULL ORDER BY started_at DESC LIMIT ?"))
|
||||
// QueryDefaults.sessionLimit == 100.
|
||||
#expect(first.params == [.integer(100)])
|
||||
}
|
||||
|
||||
@Test func fetchSessionsBareSchemaUsesBaseColumnList() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
// Both schema flags off — neither v0.7 nor v0.11 columns selected.
|
||||
await mock.setHasV07Schema(false)
|
||||
await mock.setHasV011Schema(false)
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
_ = await service.fetchSessions()
|
||||
|
||||
let sql = await mock.queryLog[0].sql
|
||||
#expect(!sql.contains("reasoning_tokens"))
|
||||
#expect(!sql.contains("api_call_count"))
|
||||
// Sanity: base columns are still all there.
|
||||
#expect(sql.contains("estimated_cost_usd"))
|
||||
}
|
||||
|
||||
@Test func fetchSessionsWithV07SchemaIncludesReasoningTokens() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
await mock.setHasV07Schema(true)
|
||||
await mock.setHasV011Schema(false)
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
_ = await service.fetchSessions()
|
||||
|
||||
let sql = await mock.queryLog[0].sql
|
||||
#expect(sql.contains("reasoning_tokens"))
|
||||
#expect(sql.contains("actual_cost_usd"))
|
||||
#expect(sql.contains("cost_status"))
|
||||
#expect(sql.contains("billing_provider"))
|
||||
#expect(!sql.contains("api_call_count"))
|
||||
}
|
||||
|
||||
@Test func fetchSessionsWithV011SchemaIncludesApiCallCount() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
await mock.setHasV07Schema(true)
|
||||
await mock.setHasV011Schema(true)
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
_ = await service.fetchSessions()
|
||||
|
||||
let sql = await mock.queryLog[0].sql
|
||||
#expect(sql.contains("reasoning_tokens"))
|
||||
#expect(sql.contains("api_call_count"))
|
||||
}
|
||||
|
||||
// MARK: - fetchSession(id:)
|
||||
|
||||
@Test func fetchSessionByIdBindsTextParam() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
|
||||
await mock._seedRow(
|
||||
forSQLPrefix: "SELECT id, source",
|
||||
columns: makeBaseSessionRow().columnIndex,
|
||||
values: makeBaseSessionRow().values
|
||||
)
|
||||
|
||||
let session = await service.fetchSession(id: "abc-123")
|
||||
#expect(session?.id == "s1") // From the seeded row.
|
||||
|
||||
let log = await mock.queryLog
|
||||
#expect(log.count == 1)
|
||||
#expect(log[0].sql.contains("FROM sessions WHERE id = ? LIMIT 1"))
|
||||
#expect(log[0].params == [.text("abc-123")])
|
||||
}
|
||||
|
||||
// MARK: - fetchMessages
|
||||
|
||||
@Test func fetchMessagesWithoutBeforeBindsSessionAndLimit() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
|
||||
_ = await service.fetchMessages(sessionId: "s1", limit: 25, before: nil)
|
||||
|
||||
let log = await mock.queryLog
|
||||
#expect(log.count == 1)
|
||||
#expect(!log[0].sql.contains("id < ?"))
|
||||
#expect(log[0].sql.contains("WHERE session_id = ? ORDER BY id DESC LIMIT ?"))
|
||||
#expect(log[0].params == [.text("s1"), .integer(25)])
|
||||
}
|
||||
|
||||
@Test func fetchMessagesWithBeforeIncludesIdLessThanClause() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
|
||||
_ = await service.fetchMessages(sessionId: "s1", limit: 25, before: 999)
|
||||
|
||||
let log = await mock.queryLog
|
||||
#expect(log.count == 1)
|
||||
#expect(log[0].sql.contains("WHERE session_id = ? AND id < ? ORDER BY id DESC LIMIT ?"))
|
||||
#expect(log[0].params == [.text("s1"), .integer(999), .integer(25)])
|
||||
}
|
||||
|
||||
@Test func fetchMessagesReversesDescResultsToChronological() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
|
||||
// Backend returns DESC (newest first); service should reverse to
|
||||
// chronological (oldest first) for display.
|
||||
let row3 = makeBaseMessageRow(id: 3, timestamp: 1_700_000_003.0)
|
||||
let row2 = makeBaseMessageRow(id: 2, timestamp: 1_700_000_002.0)
|
||||
let row1 = makeBaseMessageRow(id: 1, timestamp: 1_700_000_001.0)
|
||||
await mock._seedRows(forSQLPrefix: "SELECT id, session_id", [row3, row2, row1])
|
||||
|
||||
let result = await service.fetchMessages(sessionId: "s1", limit: 10, before: nil)
|
||||
#expect(result.count == 3)
|
||||
#expect(result.map { $0.id } == [1, 2, 3])
|
||||
}
|
||||
|
||||
// MARK: - dashboardSnapshot
|
||||
|
||||
@Test func dashboardSnapshotUsesQueryBatchNotIndividualQueries() async {
|
||||
let mock = MockHermesQueryBackend()
|
||||
let service = HermesDataService(context: context, backend: mock)
|
||||
_ = await service.open()
|
||||
|
||||
_ = await service.dashboardSnapshot()
|
||||
|
||||
let queries = await mock.queryLog
|
||||
let batches = await mock.batchLog
|
||||
#expect(queries.isEmpty)
|
||||
#expect(batches.count == 1)
|
||||
#expect(batches[0].count == 4)
|
||||
}
|
||||
|
||||
/// Pins the positional contract of the dashboard batch: slot 0 stats,
/// slot 1 recent sessions, slot 2 previews, slot 3 recent tool calls.
@Test func dashboardSnapshotBatchOrderIsStatsRecentSessionsPreviewsToolCalls() async {
    let backend = MockHermesQueryBackend()
    let dataService = HermesDataService(context: context, backend: backend)
    _ = await dataService.open()

    _ = await dataService.dashboardSnapshot()

    let batches = await backend.batchLog
    #expect(batches.count == 1)
    let sql = batches[0].map(\.sql)
    // Slot 0: aggregate stats — COUNT(*)/SUM(...) over sessions.
    #expect(sql[0].contains("COUNT(*)"))
    #expect(sql[0].contains("FROM sessions"))
    // Slot 1: recent sessions — session columns with a LIMIT param.
    #expect(sql[1].hasPrefix("SELECT id, source"))
    #expect(sql[1].contains("ORDER BY started_at DESC LIMIT ?"))
    // Slot 2: session previews — joins messages with first user message.
    #expect(sql[2].contains("INNER JOIN"))
    #expect(sql[2].contains("MIN(id)"))
    // Slot 3: recent tool calls — messages WHERE tool_calls IS NOT NULL.
    #expect(sql[3].contains("WHERE tool_calls IS NOT NULL"))
}
|
||||
|
||||
/// End-to-end assembly: seed all four batch result sets and verify the
/// snapshot stitches them into stats, sessions, previews, and tool calls.
@Test func dashboardSnapshotAssemblesDataFromFourResultSets() async {
    let backend = MockHermesQueryBackend()
    let dataService = HermesDataService(context: context, backend: backend)
    _ = await dataService.open()

    // Result set 0 — aggregate stats (6 cols on the bare schema).
    let statsRow = makeRow([
        ("c0", .integer(7)),      // totalSessions
        ("c1", .integer(50)),     // totalMessages
        ("c2", .integer(12)),     // totalToolCalls
        ("c3", .integer(1000)),   // totalInputTokens
        ("c4", .integer(2000)),   // totalOutputTokens
        ("c5", .real(1.25))       // totalCostUSD
    ])
    await backend._seedRow(forSQLPrefix: "SELECT COUNT(*),", columns: statsRow.columnIndex, values: statsRow.values)

    // Result set 1 — recent sessions: one base session row.
    await backend._seedRows(forSQLPrefix: "SELECT id, source", [makeBaseSessionRow(id: "sess-A")])

    // Result set 2 — previews: (session_id, content slice) pairs.
    let previewRow = makeRow([("session_id", .text("sess-A")), ("preview", .text("first user msg"))])
    await backend._seedRows(forSQLPrefix: "SELECT m.session_id", [previewRow])

    // Result set 3 — recent tool calls: a message row whose tool_calls
    // column (index 5) is rewritten to a non-null, non-empty payload.
    let template = makeBaseMessageRow(id: 99, sessionId: "sess-A")
    let toolCallValues: [SQLValue] = [
        .integer(99), .text("sess-A"), .text("assistant"), .text("Calling tool"),
        .null, .text("[{\"id\":\"t1\",\"name\":\"bash\"}]"), .text("bash"),
        .real(1_700_000_010.0), .integer(15), .text("stop")
    ]
    let toolCallRow = Row(values: toolCallValues, columnIndex: template.columnIndex)
    // Both `fetchRecentToolCalls` and the dashboard batch slot start with
    // the same `messageColumns` prefix; match on a shorter common
    // substring that's whitespace-stable across the two SQL builders.
    await backend._seedRows(forSQLPrefix: "SELECT id, session_id, role, content, tool_call_id, tool_calls,\ntool_name", [toolCallRow])

    let snapshot = await dataService.dashboardSnapshot()
    #expect(snapshot.stats.totalSessions == 7)
    #expect(snapshot.stats.totalMessages == 50)
    #expect(snapshot.recentSessions.map(\.id) == ["sess-A"])
    #expect(snapshot.sessionPreviews["sess-A"] == "first user msg")
    #expect(snapshot.recentToolCalls.count == 1)
    #expect(snapshot.recentToolCalls[0].id == 99)
}
|
||||
|
||||
// MARK: - searchMessages
|
||||
|
||||
/// Whitespace-only search input short-circuits: empty result, and no
/// SQL ever reaches the backend.
@Test func searchMessagesEmptyInputReturnsEmptyAndSkipsBackend() async {
    let backend = MockHermesQueryBackend()
    let dataService = HermesDataService(context: context, backend: backend)
    _ = await dataService.open()

    let hits = await dataService.searchMessages(query: " ")
    #expect(hits.isEmpty)

    let issuedQueries = await backend.queryLog
    #expect(issuedQueries.isEmpty)
}
|
||||
|
||||
/// Each whitespace-delimited token in the search query is wrapped in
/// double quotes and the tokens are re-joined with single spaces before
/// being bound as the first FTS parameter.
@Test func searchMessagesWrapsTokensInDoubleQuotes() async {
    let backend = MockHermesQueryBackend()
    let dataService = HermesDataService(context: context, backend: backend)
    _ = await dataService.open()

    _ = await dataService.searchMessages(query: "config.yaml v0.7.0")

    let issuedQueries = await backend.queryLog
    #expect(issuedQueries.count == 1)
    // The FTS expression travels as the first bound param.
    guard case .text(let ftsExpression) = issuedQueries[0].params[0] else {
        Issue.record("Expected first FTS search param to be .text")
        return
    }
    #expect(ftsExpression == "\"config.yaml\" \"v0.7.0\"")
}
|
||||
|
||||
// MARK: - Error swallowing
|
||||
|
||||
/// A backend transport failure is swallowed into an empty result set
/// rather than propagating to the caller.
@Test func fetchSessionsReturnsEmptyOnBackendTransportError() async {
    let backend = MockHermesQueryBackend()
    let dataService = HermesDataService(context: context, backend: backend)
    _ = await dataService.open()
    await backend._seedFailure(forSQLPrefix: "SELECT id, source", error: .transport("ssh dropped"))

    let sessions = await dataService.fetchSessions()
    #expect(sessions.isEmpty)

    // Sanity: the call really did reach the backend before failing.
    let issuedQueries = await backend.queryLog
    #expect(issuedQueries.count == 1)
}
|
||||
}
|
||||
|
||||
#endif // canImport(SQLite3)
|
||||
@@ -0,0 +1,119 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Exercises the `SCARF_HERMES_HOME` test-mode override on `HermesProfileResolver`.
/// The override is the seam every E2E test relies on — without it, tests would
/// touch the user's real `~/.hermes`. Serialized because we mutate process-wide
/// environment.
///
/// **Marker file requirement.** As of v2.8 the override only activates when the
/// path contains the sentinel `HermesProfileResolver.testHomeMarkerFilename`.
/// Tests that want the override active drop the marker before `setenv`. Tests
/// that want to verify the override is rejected (relative path, missing
/// marker, empty value) skip the marker. The hardening prevents a leaked env
/// var from ever pivoting Scarf off the user's real `~/.hermes`.
@Suite(.serialized)
struct HermesProfileResolverOverrideTests {

    private static let envKey = "SCARF_HERMES_HOME"

    // MARK: - Helpers

    /// Creates a unique temporary directory named `<prefix>-<uuid>` and,
    /// when `marked` is true, drops the resolver's sentinel marker file
    /// inside it so the override is eligible to activate.
    /// - Returns: The absolute path of the created directory; the caller
    ///   is responsible for removing it.
    /// - Throws: Any `FileManager` / `Data.write` error during setup.
    private func makeTempHome(prefix: String, marked: Bool) throws -> String {
        let path = NSTemporaryDirectory().appending("\(prefix)-\(UUID().uuidString)")
        try FileManager.default.createDirectory(atPath: path, withIntermediateDirectories: true)
        if marked {
            try Data().write(to: URL(fileURLWithPath: path + "/" + HermesProfileResolver.testHomeMarkerFilename))
        }
        return path
    }

    /// Restores the saved env-var value (or unsets it) and invalidates
    /// the resolver cache so later tests see a clean slate.
    private func restore(_ saved: String?) {
        if let saved {
            setenv(Self.envKey, saved, 1)
        } else {
            unsetenv(Self.envKey)
        }
        HermesProfileResolver.invalidateCache()
    }

    // MARK: - Tests

    @Test func absoluteOverrideTakesPrecedenceWhenMarkerPresent() throws {
        let saved = ProcessInfo.processInfo.environment[Self.envKey]
        defer { restore(saved) }

        let tmp = try makeTempHome(prefix: "scarf-test-home", marked: true)
        defer { try? FileManager.default.removeItem(atPath: tmp) }
        setenv(Self.envKey, tmp, 1)

        #expect(HermesProfileResolver.resolveLocalHome() == tmp)
        #expect(HermesProfileResolver.activeProfileName() == "test-override")
    }

    @Test func overrideIsIgnoredWhenMarkerMissing() throws {
        let saved = ProcessInfo.processInfo.environment[Self.envKey]
        defer { restore(saved) }

        // Real-looking dir, no marker — exactly the shape a leaked env
        // var or misconfigured launchctl plist would produce. Must NOT
        // override; must fall through to the real resolver.
        let tmp = try makeTempHome(prefix: "scarf-no-marker", marked: false)
        defer { try? FileManager.default.removeItem(atPath: tmp) }
        setenv(Self.envKey, tmp, 1)
        HermesProfileResolver.invalidateCache()

        let resolved = HermesProfileResolver.resolveLocalHome()
        #expect(resolved != tmp)
        #expect(resolved.hasSuffix("/.hermes") || resolved.contains("/.hermes/profiles/"))
    }

    @Test func emptyOverrideFallsThrough() {
        let saved = ProcessInfo.processInfo.environment[Self.envKey]
        defer { restore(saved) }

        setenv(Self.envKey, "", 1)
        HermesProfileResolver.invalidateCache()

        let resolved = HermesProfileResolver.resolveLocalHome()
        #expect(!resolved.isEmpty)
        #expect(resolved.hasSuffix("/.hermes") || resolved.contains("/.hermes/profiles/"))
    }

    @Test func relativeOverrideIsRejected() {
        let saved = ProcessInfo.processInfo.environment[Self.envKey]
        defer { restore(saved) }

        setenv(Self.envKey, "relative/path", 1)
        HermesProfileResolver.invalidateCache()

        let resolved = HermesProfileResolver.resolveLocalHome()
        #expect(!resolved.hasSuffix("relative/path"))
    }

    @Test func unsetOverrideUsesProfileResolver() {
        let saved = ProcessInfo.processInfo.environment[Self.envKey]
        defer { restore(saved) }

        unsetenv(Self.envKey)
        HermesProfileResolver.invalidateCache()

        let resolved = HermesProfileResolver.resolveLocalHome()
        #expect(!resolved.isEmpty)
    }

    @Test func overrideBypassesCacheWhenMarkerPresent() throws {
        let saved = ProcessInfo.processInfo.environment[Self.envKey]
        defer { restore(saved) }

        let first = try makeTempHome(prefix: "scarf-cache-bypass-1", marked: true)
        let second = try makeTempHome(prefix: "scarf-cache-bypass-2", marked: true)
        defer {
            try? FileManager.default.removeItem(atPath: first)
            try? FileManager.default.removeItem(atPath: second)
        }

        setenv(Self.envKey, first, 1)
        #expect(HermesProfileResolver.resolveLocalHome() == first)

        // Flip env var without invalidating the cache. Override is read
        // fresh on every call, so the new value takes effect immediately.
        setenv(Self.envKey, second, 1)
        #expect(HermesProfileResolver.resolveLocalHome() == second)
    }
}
|
||||
@@ -0,0 +1,330 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Pure-logic tests for the v2.7.5 Kanban model layer. The actor-based
/// `KanbanService` is exercised separately under integration tests
/// since it spawns `hermes kanban …` subprocesses; this suite covers
/// the wire-shape contracts and the synchronous transition planner.
@Suite struct KanbanModelsTests {

    // MARK: - HermesKanbanTask decoding

    /// A fully-populated `hermes kanban list --json` row decodes with
    /// every snake_case field mapped to its camelCase property.
    @Test func decodeListRow() throws {
        let json = """
        {
          "id": "t_9f2a",
          "title": "Investigate flaky test",
          "body": "Repro on CI but not local.",
          "assignee": "researcher",
          "status": "running",
          "priority": 50,
          "tenant": "scarf:demo",
          "workspace_kind": "scratch",
          "workspace_path": "/Users/alan/.hermes/kanban/workspaces/t_9f2a",
          "created_by": "user",
          "created_at": "2026-05-06T12:00:00Z",
          "started_at": "2026-05-06T12:01:00Z",
          "skills": ["debugging"],
          "idempotency_key": "abc",
          "last_heartbeat_at": "2026-05-06T12:05:00Z",
          "max_runtime_seconds": 1800,
          "current_run_id": 1
        }
        """
        let task = try JSONDecoder().decode(HermesKanbanTask.self, from: Data(json.utf8))
        #expect(task.id == "t_9f2a")
        #expect(task.assignee == "researcher")
        #expect(task.status == "running")
        #expect(task.tenant == "scarf:demo")
        #expect(task.workspaceKind == "scratch")
        #expect(task.skills == ["debugging"])
        #expect(task.idempotencyKey == "abc")
        #expect(task.maxRuntimeSeconds == 1800)
        #expect(task.currentRunId == 1)
    }

    // MARK: - Assignee table parsing
    //
    // `hermes kanban assignees` prints either a JSON array (when
    // `--json` is honored) OR a Rich-style human table OR an
    // empty-state sentinel — "(no assignees — create a profile with
    // `hermes -p <name> setup`)". The first iteration of the parser
    // tokenized the sentinel and emitted `(no` as a profile name,
    // which surfaced in the Mac inspector's assignee dropdown.

    // MARK: - LocalTransport subprocess environment

    @Test func localTransportSubprocessEnvIncludesExecutableDir() {
        // GUI-launched Scarf would otherwise hand subprocesses
        // `/usr/bin:/bin:/usr/sbin:/sbin`, which doesn't include
        // `~/.local/bin` — so when Hermes's kanban dispatcher
        // spawns a worker by bare name, it fails with
        // `executable not found on PATH` and the run records
        // `outcome=spawn_failed`. Unblock by always making sure
        // the directory of the executable we're launching is on
        // PATH for the child.
        let previous = LocalTransport.environmentEnricher
        defer { LocalTransport.environmentEnricher = previous }
        LocalTransport.environmentEnricher = nil

        let env = LocalTransport.subprocessEnvironment(
            forExecutable: "/Users/alanwizemann/.local/bin/hermes"
        )
        let path = env["PATH"] ?? ""
        #expect(path.contains("/Users/alanwizemann/.local/bin"))
    }

    @Test func localTransportSubprocessEnvLetsEnricherWinPATH() {
        let previous = LocalTransport.environmentEnricher
        defer { LocalTransport.environmentEnricher = previous }
        LocalTransport.environmentEnricher = {
            // Simulate a login-shell probe returning a fuller PATH +
            // some credential env. The enricher's PATH must override
            // the GUI-process PATH.
            return [
                "PATH": "/opt/homebrew/bin:/usr/local/bin:/Users/me/.local/bin",
                "ANTHROPIC_API_KEY": "sk-test-fake"
            ]
        }
        let env = LocalTransport.subprocessEnvironment(
            forExecutable: "/usr/local/bin/hermes"
        )
        // Enricher's PATH wins (PATH is the whole point of running it).
        #expect(env["PATH"]?.contains("/opt/homebrew/bin") == true)
        // Credential env is forwarded (process env didn't have it).
        #expect(env["ANTHROPIC_API_KEY"] == "sk-test-fake")
    }

    @Test func parseAssigneeTableSkipsNoAssigneesSentinel() {
        // Use the same parser via its public stand-in: round-trip
        // through a fixture that decodes via JSON would skip the
        // table parser, so we test the fallback indirectly by
        // constructing the same decoder pipeline. The parser is
        // private to KanbanService; this test asserts the visible
        // contract (no garbage profile names appear in the picker)
        // by verifying the decode path on the real CLI fixture
        // returns an empty array rather than a `(no` row.
        let fixture = "(no assignees — create a profile with `hermes -p <name> setup`)"
        // Through the public surface: we know `KanbanService.assignees`
        // would consume this stdout when --json fails. The validator
        // we care about is the regex check; reproduce inline:
        let pattern = "^[a-zA-Z0-9_-]+$"
        let firstToken = fixture
            .split(whereSeparator: { $0 == "\t" || $0 == " " })
            .first.map(String.init) ?? ""
        // Confirms the parser's regex would reject "(no".
        #expect(firstToken.range(of: pattern, options: .regularExpression) == nil)
    }

    /// Real `hermes kanban create --json` output uses Unix integer
    /// seconds for created_at / started_at — its SQLite columns are
    /// INTEGER. The decoder must normalize them into ISO-8601 strings
    /// so downstream code works with one type.
    @Test func decodeUnixIntegerTimestamps() throws {
        let json = """
        {
          "id": "t_2a0be199",
          "title": "smoke",
          "status": "ready",
          "priority": 50,
          "created_at": 1778160614,
          "started_at": null,
          "skills": []
        }
        """
        let task = try JSONDecoder().decode(HermesKanbanTask.self, from: Data(json.utf8))
        #expect(task.id == "t_2a0be199")
        // Should have been converted from Unix int to an ISO-8601 string
        // — exact format is platform-stable.
        #expect(task.createdAt?.contains("2026") == true)
        #expect(task.startedAt == nil)
    }

    /// Hermes emits a minimal task object when many fields are
    /// absent; the decoder must tolerate it.
    @Test func decodeMissingOptionalsBecomesNil() throws {
        let json = """
        { "id": "t_x", "title": "ok", "status": "todo" }
        """
        let task = try JSONDecoder().decode(HermesKanbanTask.self, from: Data(json.utf8))
        #expect(task.id == "t_x")
        #expect(task.assignee == nil)
        #expect(task.priority == nil)
        #expect(task.tenant == nil)
        #expect(task.skills.isEmpty)
    }

    // MARK: - Status / column projection

    @Test func statusToColumnMapping() {
        #expect(KanbanStatus.from("triage").boardColumn == .triage)
        #expect(KanbanStatus.from("todo").boardColumn == .upNext)
        #expect(KanbanStatus.from("ready").boardColumn == .upNext)
        #expect(KanbanStatus.from("running").boardColumn == .running)
        #expect(KanbanStatus.from("blocked").boardColumn == .blocked)
        #expect(KanbanStatus.from("done").boardColumn == .done)
        #expect(KanbanStatus.from("archived").boardColumn == .archived)
        #expect(KanbanStatus.from("WHATEVER").boardColumn == .upNext) // unknown → upNext
    }

    // MARK: - KanbanCreateRequest argv assembly

    @Test func createRequestArgvIncludesAllFields() {
        let req = KanbanCreateRequest(
            title: "Translate doc",
            body: "Spanish, please",
            assignee: "researcher",
            parentIds: ["t_parent"],
            workspace: .directory("/tmp/proj"),
            tenant: "scarf:demo",
            priority: 75,
            triage: true,
            idempotencyKey: "key-1",
            maxRuntimeSeconds: 1800,
            createdBy: "alan",
            skills: ["translation", "github-code-review"]
        )
        let argv = req.argv()
        #expect(argv.contains("--body"))
        #expect(argv.contains("--assignee"))
        #expect(argv.contains("--parent"))
        #expect(argv.contains("--workspace"))
        #expect(argv.contains("dir:/tmp/proj"))
        #expect(argv.contains("--tenant"))
        #expect(argv.contains("scarf:demo"))
        #expect(argv.contains("--priority"))
        #expect(argv.contains("75"))
        #expect(argv.contains("--triage"))
        #expect(argv.contains("--idempotency-key"))
        #expect(argv.contains("--max-runtime"))
        #expect(argv.contains("--created-by"))
        #expect(argv.contains("--skill"))
        #expect(argv.last == "Translate doc") // positional title is last
        #expect(argv.contains("--json"))
    }

    @Test func createRequestArgvOmitsAbsent() {
        let req = KanbanCreateRequest(title: "minimal")
        let argv = req.argv()
        #expect(argv.contains("--json"))
        #expect(argv.last == "minimal")
        #expect(!argv.contains("--body"))
        #expect(!argv.contains("--assignee"))
        #expect(!argv.contains("--triage"))
    }

    // MARK: - KanbanListFilter argv

    @Test func listFilterEmptyOnlyJSON() {
        let argv = KanbanListFilter.all.argv()
        #expect(argv == ["--json"])
    }

    @Test func listFilterStatusFlag() {
        let argv = KanbanListFilter(status: .running).argv()
        #expect(argv.contains("--status"))
        #expect(argv.contains("running"))
    }

    @Test func listFilterTenantPasses() {
        let argv = KanbanListFilter(tenant: "scarf:demo").argv()
        #expect(argv.contains("--tenant"))
        #expect(argv.contains("scarf:demo"))
    }

    @Test func listFilterArchivedAndMine() {
        let argv = KanbanListFilter(includeArchived: true, mineOnly: true).argv()
        #expect(argv.contains("--mine"))
        #expect(argv.contains("--archived"))
    }

    // MARK: - Transition planning

    /// Shared assertion: planning `transition` must throw
    /// `KanbanError.forbiddenTransition` (and nothing else).
    private func expectForbiddenTransition(_ transition: KanbanTransition) {
        do {
            _ = try KanbanService.plan(for: transition)
            Issue.record("expected error")
        } catch let err as KanbanError {
            if case .forbiddenTransition = err {
                // ok
            } else {
                Issue.record("wrong error: \(err)")
            }
        } catch {
            Issue.record("unexpected error: \(error)")
        }
    }

    @Test func planUpNextToRunningDispatches() throws {
        // `dispatch`, not `claim`. See KanbanTransitionStep doc for the
        // rationale — claim doesn't spawn a worker; the dispatcher does.
        let plan = try KanbanService.plan(
            for: KanbanTransition(from: .upNext, to: .running)
        )
        #expect(plan.steps == [.dispatch])
    }

    @Test func planRunningToBlockedRequiresReason() throws {
        let plan = try KanbanService.plan(
            for: KanbanTransition(from: .running, to: .blocked)
        )
        #expect(plan.requiresBlockReason)
    }

    @Test func planBlockedToRunningChainsTwoVerbs() throws {
        let plan = try KanbanService.plan(
            for: KanbanTransition(from: .blocked, to: .running)
        )
        // unblock then dispatch
        #expect(plan.steps.count == 2)
        if case .unblock = plan.steps.first {} else {
            Issue.record("expected first step .unblock, got \(plan.steps)")
        }
        if case .dispatch = plan.steps.last {} else {
            Issue.record("expected last step .dispatch, got \(plan.steps)")
        }
    }

    @Test func planDoneToAnythingForbidden() {
        expectForbiddenTransition(KanbanTransition(from: .done, to: .upNext))
    }

    @Test func planTriageToUpNextForbidden() {
        expectForbiddenTransition(KanbanTransition(from: .triage, to: .upNext))
    }

    @Test func planNoOpProducesEmptyPlan() throws {
        let plan = try KanbanService.plan(
            for: KanbanTransition(from: .running, to: .running)
        )
        #expect(plan.steps.isEmpty)
    }

    // MARK: - Stats glance

    @Test func glanceStringJoinsNonEmptyBuckets() {
        let stats = HermesKanbanStats(
            byStatus: ["todo": 12, "running": 3, "blocked": 5, "done": 0]
        )
        #expect(stats.glanceString == "12 todo · 3 running · 5 blocked")
        #expect(stats.activeCount == 12 + 3 + 5)
    }

    @Test func glanceStringEmptyWhenZero() {
        let stats = HermesKanbanStats(byStatus: [:])
        #expect(stats.glanceString.isEmpty)
        #expect(stats.activeCount == 0)
    }
}
|
||||
@@ -0,0 +1,48 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Verifies the lenient `ListItemStatus(raw:)` parser. Real dashboards on
/// disk use a mix of canonical names + synonyms (`done`, `info`, `ok`,
/// `pending`, `up` are seen on the dev's machine today) — the parser must
/// fold those onto the canonical case set without throwing or returning nil
/// for the common synonyms. Unknown strings → nil so the renderer can fall
/// back to plain text without losing the original.
@Suite struct ListItemStatusTests {
    /// Every canonical raw value round-trips to its own case.
    @Test func canonicalNamesParse() {
        ListItemStatus.allCases.forEach { status in
            #expect(ListItemStatus(raw: status.rawValue) == status)
        }
    }

    /// Synonyms fold onto canonical cases, case-insensitively and with
    /// surrounding whitespace trimmed.
    @Test func synonymsCollapseToCanonical() {
        let mappings: [(raw: String, expected: ListItemStatus)] = [
            ("ok", .success),
            ("OK", .success),       // case-insensitive
            (" up ", .success),     // whitespace trim
            ("down", .danger),
            ("error", .danger),
            ("failed", .danger),
            ("warn", .warning),
            ("degraded", .warning),
            ("active", .info),
            ("queued", .pending),
            ("complete", .done)
        ]
        for (raw, expected) in mappings {
            #expect(ListItemStatus(raw: raw) == expected)
        }
    }

    /// Unknown, empty, blank, and nil inputs all yield nil — never a throw.
    @Test func unknownReturnsNilNotThrows() {
        let unknowns: [String?] = ["hologram", "", nil, " "]
        for raw in unknowns {
            #expect(ListItemStatus(raw: raw) == nil)
        }
    }

    @Test func listItemStillDecodesUnknownStatusString() throws {
        // Backwards-compat invariant: `ListItem.status` stays a free String? on
        // the wire. Decoding a v2.6 dashboard with a non-canonical status must
        // succeed and preserve the original string (renderer falls back).
        let payload = #"{"text":"foo","status":"weird"}"#
        let item = try JSONDecoder().decode(ListItem.self, from: Data(payload.utf8))
        #expect(item.status == "weird")
        #expect(ListItemStatus(raw: item.status) == nil)
    }
}
|
||||
@@ -219,12 +219,6 @@ import Foundation
|
||||
try transport.removeFile(tmp.path)
|
||||
}
|
||||
|
||||
/// Local transport has nothing to copy: snapshotting a SQLite database
/// simply wraps the given path in a file URL.
@Test func localTransportSnapshotSQLiteReturnsPathUnchanged() throws {
    let snapshotURL = try LocalTransport().snapshotSQLite(remotePath: "/tmp/some/state.db")
    #expect(snapshotURL.path == "/tmp/some/state.db")
}
|
||||
|
||||
/// The Mac target wires `SSHTransport.environmentEnricher` at launch to
|
||||
/// `HermesFileService.enrichedEnvironment()` so SSH subprocesses
|
||||
/// inherit SSH_AUTH_SOCK from the user's login shell (1Password /
|
||||
|
||||
@@ -265,19 +265,20 @@ import Foundation
|
||||
errorMessage: "No Anthropic credentials found",
|
||||
stderrTail: ""
|
||||
)
|
||||
#expect(noCreds?.contains("ANTHROPIC_API_KEY") == true)
|
||||
#expect(noCreds?.hint.contains("ANTHROPIC_API_KEY") == true)
|
||||
#expect(noCreds?.oauthProvider == nil)
|
||||
|
||||
let missingBinary = ACPErrorHint.classify(
|
||||
errorMessage: "",
|
||||
stderrTail: "No such file or directory: 'npx'"
|
||||
)
|
||||
#expect(missingBinary?.contains("npx") == true)
|
||||
#expect(missingBinary?.hint.contains("npx") == true)
|
||||
|
||||
let rateLimit = ACPErrorHint.classify(
|
||||
errorMessage: "",
|
||||
stderrTail: "HTTP 429 Too Many Requests: rate limit"
|
||||
)
|
||||
#expect(rateLimit?.contains("rate-limit") == true)
|
||||
#expect(rateLimit?.hint.contains("rate-limit") == true)
|
||||
|
||||
let unknown = ACPErrorHint.classify(
|
||||
errorMessage: "weird thing",
|
||||
@@ -286,6 +287,53 @@ import Foundation
|
||||
#expect(unknown == nil)
|
||||
}
|
||||
|
||||
/// Classifier coverage for OAuth refresh-token revocation hints.
@Test func errorHintsClassifyOAuthRefreshRevoked() {
    // Primary trigger — Hermes's verbatim message when an OAuth
    // refresh token can't mint a new access token.
    let bareRevocation = ACPErrorHint.classify(
        errorMessage: "",
        stderrTail: "Refresh session has been revoked. Run `hermes model` to re-authenticate."
    )
    #expect(bareRevocation?.hint.contains("Re-authenticate") == true)

    // Same message with provider context — the classifier should
    // extract the provider name so the chat banner can offer a
    // one-click re-auth targeting the right OAuth flow.
    let providerRevocation = ACPErrorHint.classify(
        errorMessage: "",
        stderrTail: "Provider claude: Refresh session has been revoked. Run `hermes model` to re-authenticate."
    )
    #expect(providerRevocation?.oauthProvider == "claude")

    // 401 + OAuth provider name — broader catchall for providers
    // that don't print the verbatim "revoked" string.
    let oauth401 = ACPErrorHint.classify(
        errorMessage: "",
        stderrTail: "HTTP 401 Unauthorized from nous portal"
    )
    #expect(oauth401?.oauthProvider == "nous")
    #expect(oauth401?.hint.contains("OAuth") == true)

    // Unauthorized on a non-OAuth (API-key based) provider should NOT
    // classify as OAuth revocation — there's no `oauthProvider` to
    // dispatch the re-auth flow against.
    let apiKey401 = ACPErrorHint.classify(
        errorMessage: "",
        stderrTail: "HTTP 401 Unauthorized for groq"
    )
    #expect(apiKey401?.oauthProvider == nil)

    // Word-boundary check — "anthropicapi" must not false-trigger on
    // "anthropic"; without boundaries the match catches wrong cases.
    let boundaryProbe = ACPErrorHint.classify(
        errorMessage: "",
        stderrTail: "401 unauthorized: anthropicapi.example.com"
    )
    #expect(boundaryProbe?.oauthProvider != "anthropic")
}
|
||||
|
||||
// MARK: - Helpers
|
||||
|
||||
/// Poll `predicate` every ~20ms up to `timeout` seconds. Fails if
|
||||
|
||||
@@ -455,8 +455,9 @@ import Foundation
|
||||
}
|
||||
}
|
||||
}
|
||||
func snapshotSQLite(remotePath: String) throws -> URL { URL(fileURLWithPath: remotePath) }
|
||||
var cachedSnapshotPath: URL? { nil }
|
||||
func streamScript(_ script: String, timeout: TimeInterval) async throws -> ProcessResult {
|
||||
ProcessResult(exitCode: 0, stdout: Data(), stderr: Data())
|
||||
}
|
||||
func watchPaths(_ paths: [String]) -> AsyncStream<WatchEvent> {
|
||||
AsyncStream { $0.finish() }
|
||||
}
|
||||
|
||||
@@ -0,0 +1,182 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Pure tests for `ModelPreflight` — both the `check(_:)` configured-vs-
/// missing classifier and the v2.8 `detectMismatch(_:)` provider/prefix
/// reconciliation. The mismatch path is what surfaces the orange
/// "Model/provider mismatch in config.yaml" banner in ChatView when the
/// user switches OAuth providers via Credential Pools and `model.default`
/// is left carrying the old provider's prefix.
@Suite struct ModelPreflightTests {

    /// Builds a config fixture from the `HermesConfig.empty` baseline.
    /// A `nil` override leaves the baseline sentinel ("unknown") in place.
    private func fixture(model: String? = nil, provider: String? = nil) -> HermesConfig {
        var cfg = HermesConfig.empty
        if let model { cfg.model = model }
        if let provider { cfg.provider = provider }
        return cfg
    }

    // MARK: - check(_:) — missing-field classifier

    @Test func bothModelAndProviderEmptyReportsMissingBoth() {
        let result = ModelPreflight.check(fixture(model: "", provider: ""))
        #expect(result == .missingBoth)
    }

    @Test func bothModelAndProviderUnknownReportsMissingBoth() {
        // `HermesConfig.empty` defaults model/provider to the literal
        // "unknown" — the classifier must treat that the same as "".
        #expect(ModelPreflight.check(fixture()) == .missingBoth)
    }

    @Test func providerSetButModelEmptyReportsMissingModel() {
        let cfg = fixture(model: "", provider: "anthropic")
        #expect(ModelPreflight.check(cfg) == .missingModel)
    }

    @Test func modelSetButProviderEmptyReportsMissingProvider() {
        let cfg = fixture(model: "claude-sonnet-4.6", provider: "")
        #expect(ModelPreflight.check(cfg) == .missingProvider)
    }

    @Test func bothSetReportsConfigured() {
        let cfg = fixture(model: "claude-sonnet-4.6", provider: "anthropic")
        #expect(ModelPreflight.check(cfg) == .configured)
    }

    @Test func whitespaceTreatedAsUnsetForBothFields() {
        // Whitespace-only values must classify exactly like empty strings.
        let cfg = fixture(model: " ", provider: "\n")
        #expect(ModelPreflight.check(cfg) == .missingBoth)
    }

    @Test func resultIsConfiguredOnlyForConfiguredCase() {
        #expect(ModelPreflight.Result.configured.isConfigured)
        let notConfigured: [ModelPreflight.Result] = [.missingBoth, .missingModel, .missingProvider]
        for result in notConfigured {
            #expect(!result.isConfigured)
        }
    }

    // MARK: - detectMismatch(_:)

    @Test func detectMismatchReturnsNilWhenNoPrefixOnModelDefault() {
        let cfg = fixture(model: "claude-sonnet-4.6", provider: "anthropic")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchReturnsNilWhenPrefixMatchesProvider() {
        let cfg = fixture(model: "anthropic/claude-sonnet-4.6", provider: "anthropic")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchReturnsNilWhenModelDefaultIsUnset() {
        let cfg = fixture(model: "", provider: "nous")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchReturnsNilWhenProviderIsUnset() {
        let cfg = fixture(model: "anthropic/claude-sonnet-4.6", provider: "")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchReturnsNilWhenBothUnknown() {
        // The literal "unknown" sentinel from the YAML parser fallback
        // counts as unset on both sides — no mismatch to report.
        #expect(ModelPreflight.detectMismatch(fixture()) == nil)
    }

    @Test func detectMismatchSurfacesPrefixVsActiveProvider() {
        // The dogfooding scenario: an Anthropic-prefixed model left in
        // config.yaml after the user OAuth'd into Nous via Credential
        // Pools. Hermes can't reconcile and chats die with -32603 at
        // first prompt; the banner offers a one-click fix in either
        // direction. This test pins the data the banner reads.
        let cfg = fixture(model: "anthropic/claude-sonnet-4.6", provider: "nous")
        let mismatch = ModelPreflight.detectMismatch(cfg)
        #expect(mismatch != nil)
        #expect(mismatch?.prefixProvider == "anthropic")
        #expect(mismatch?.activeProvider == "nous")
        #expect(mismatch?.modelDefault == "anthropic/claude-sonnet-4.6")
        #expect(mismatch?.bareModel == "claude-sonnet-4.6")
    }

    @Test func detectMismatchIsCaseInsensitiveOnPrefixMatch() {
        // Hermes accepts both `Anthropic/...` and `anthropic/...` casings
        // in the wild — a case-only difference must NOT surface as a
        // mismatch (that would be a false-positive banner).
        let cfg = fixture(model: "Anthropic/claude-sonnet-4.6", provider: "anthropic")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchHandlesNonAnthropicProviders() {
        // The banner must work for any provider pair, not just the
        // dogfooding case — pin the openai+nous shape.
        let mismatch = ModelPreflight.detectMismatch(
            fixture(model: "openai/gpt-5", provider: "nous")
        )
        #expect(mismatch?.prefixProvider == "openai")
        #expect(mismatch?.activeProvider == "nous")
        #expect(mismatch?.bareModel == "gpt-5")
    }

    @Test func detectMismatchReturnsNilForEmptyBareModel() {
        // Pathological "anthropic/" with nothing after the slash: there
        // is no bare model to write back, so refusing to surface the
        // mismatch beats emitting a useless fix button.
        let cfg = fixture(model: "anthropic/", provider: "nous")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchReturnsNilForEmptyPrefix() {
        // Symmetric pathological case — leading slash, no provider
        // prefix. Don't fire.
        let cfg = fixture(model: "/claude-sonnet-4.6", provider: "nous")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }

    @Test func detectMismatchHandlesModelsWithMultipleSlashes() {
        // Path-style model strings (e.g. an OpenRouter-style path) can
        // carry extra slashes: only the FIRST slash splits prefix from
        // bare model; everything after it is the bare model verbatim.
        let mismatch = ModelPreflight.detectMismatch(
            fixture(model: "openrouter/anthropic/claude-sonnet-4.6", provider: "anthropic")
        )
        #expect(mismatch?.prefixProvider == "openrouter")
        #expect(mismatch?.activeProvider == "anthropic")
        #expect(mismatch?.bareModel == "anthropic/claude-sonnet-4.6")
    }

    @Test func detectMismatchTrimsWhitespaceBeforeComparing() {
        // A stray newline in a hand-edited config.yaml shouldn't read
        // as a mismatch when the trimmed values agree.
        let cfg = fixture(model: "anthropic/claude-sonnet-4.6 ", provider: " anthropic\n")
        #expect(ModelPreflight.detectMismatch(cfg) == nil)
    }
}
|
||||
@@ -0,0 +1,565 @@
|
||||
#if canImport(SQLite3)
|
||||
|
||||
import Testing
|
||||
import Foundation
|
||||
import SQLite3
|
||||
@testable import ScarfCore
|
||||
|
||||
// MARK: - LocalSQLite3Transport
|
||||
|
||||
/// Test-only transport that runs the script through `/bin/sh -c` on the
/// local machine. Lets `RemoteSQLiteBackend`'s production codepath
/// (which calls `transport.streamScript`) drive a real local sqlite3
/// invocation against a tmp fixture DB. No SSH, no Citadel — the
/// backend doesn't care how `streamScript` gets its bytes.
private struct LocalSQLite3Transport: ServerTransport {
    let contextID: ServerID
    let isRemote: Bool = false

    init(contextID: ServerID = ServerContext.local.id) {
        self.contextID = contextID
    }

    // MARK: - File operations (plain FileManager passthroughs)

    func readFile(_ path: String) throws -> Data {
        try Data(contentsOf: URL(fileURLWithPath: path))
    }
    func writeFile(_ path: String, data: Data) throws {
        try data.write(to: URL(fileURLWithPath: path), options: .atomic)
    }
    func fileExists(_ path: String) -> Bool {
        FileManager.default.fileExists(atPath: path)
    }
    func stat(_ path: String) -> FileStat? {
        guard let attrs = try? FileManager.default.attributesOfItem(atPath: path) else { return nil }
        // .size may surface as Int64 or Int depending on platform bridging.
        let size = (attrs[.size] as? Int64) ?? Int64((attrs[.size] as? Int) ?? 0)
        let mtime = (attrs[.modificationDate] as? Date) ?? Date(timeIntervalSince1970: 0)
        let isDir = (attrs[.type] as? FileAttributeType) == .typeDirectory
        return FileStat(size: size, mtime: mtime, isDirectory: isDir)
    }
    func listDirectory(_ path: String) throws -> [String] {
        try FileManager.default.contentsOfDirectory(atPath: path)
    }
    func createDirectory(_ path: String) throws {
        try FileManager.default.createDirectory(atPath: path, withIntermediateDirectories: true)
    }
    func removeFile(_ path: String) throws {
        // Missing file is a no-op, matching remote `rm -f` semantics.
        guard FileManager.default.fileExists(atPath: path) else { return }
        try FileManager.default.removeItem(atPath: path)
    }

    func runProcess(executable: String, args: [String], stdin: Data?, timeout: TimeInterval?) throws -> ProcessResult {
        throw TransportError.other(message: "LocalSQLite3Transport.runProcess unused in tests")
    }

    #if !os(iOS)
    func makeProcess(executable: String, args: [String]) -> Process {
        let p = Process()
        p.executableURL = URL(fileURLWithPath: executable)
        p.arguments = args
        return p
    }
    #endif

    func streamLines(executable: String, args: [String]) -> AsyncThrowingStream<String, Error> {
        AsyncThrowingStream { $0.finish() }
    }

    /// The actual workhorse: feed the script to `/bin/sh -c` so heredocs
    /// and command substitution behave exactly as they would on the
    /// remote end of an SSH session. Capture stdout / stderr / exit
    /// code into a `ProcessResult`.
    ///
    /// `timeout` is intentionally ignored by this local test transport —
    /// the scripts under test complete in milliseconds.
    ///
    /// Bug fix: both pipes are now drained to EOF *before*
    /// `waitUntilExit()`. The previous order (wait, then read) deadlocks
    /// whenever the child emits more than the pipe buffer (~64 KB) holds:
    /// the child blocks on a full pipe and never exits, so
    /// `waitUntilExit()` never returns.
    func streamScript(_ script: String, timeout: TimeInterval) async throws -> ProcessResult {
        // Mutable box for the concurrently-drained stderr bytes; the
        // DispatchGroup wait below provides the happens-before edge.
        final class ByteBox: @unchecked Sendable { var data = Data() }
        return try await withCheckedThrowingContinuation { continuation in
            DispatchQueue.global().async {
                let proc = Process()
                proc.executableURL = URL(fileURLWithPath: "/bin/sh")
                proc.arguments = ["-c", script]
                let outPipe = Pipe()
                let errPipe = Pipe()
                proc.standardOutput = outPipe
                proc.standardError = errPipe
                do {
                    try proc.run()
                } catch {
                    continuation.resume(throwing: TransportError.other(
                        message: "Failed to launch /bin/sh: \(error.localizedDescription)"
                    ))
                    return
                }
                // Close our copies of the write ends so the read side
                // sees EOF once the child closes its own.
                try? outPipe.fileHandleForWriting.close()
                try? errPipe.fileHandleForWriting.close()
                // Drain stderr on another queue while this thread drains
                // stdout, so neither stream can fill its buffer and
                // stall the child. Only then wait for exit.
                let errBox = ByteBox()
                let group = DispatchGroup()
                group.enter()
                DispatchQueue.global().async {
                    errBox.data = (try? errPipe.fileHandleForReading.readToEnd()) ?? Data()
                    group.leave()
                }
                let stdoutData = (try? outPipe.fileHandleForReading.readToEnd()) ?? Data()
                group.wait()
                proc.waitUntilExit()
                try? outPipe.fileHandleForReading.close()
                try? errPipe.fileHandleForReading.close()
                continuation.resume(returning: ProcessResult(
                    exitCode: proc.terminationStatus,
                    stdout: stdoutData,
                    stderr: errBox.data
                ))
            }
        }
    }

    func watchPaths(_ paths: [String]) -> AsyncStream<WatchEvent> {
        AsyncStream { $0.finish() }
    }
}
|
||||
|
||||
// MARK: - Suite
|
||||
|
||||
/// Integration tests for `RemoteSQLiteBackend`. Drives the real backend
/// against a local sqlite3 binary (via `LocalSQLite3Transport`) and a
/// per-test fixture state.db on disk.
@Suite struct RemoteSQLiteBackendTests {

    // MARK: - Fixture builders

    /// Build a minimal v0.6 baseline state.db (no v0.7, no v0.11 columns).
    /// Each test takes ownership of cleanup via `defer { removeFixture(...) }`.
    ///
    /// - Parameters:
    ///   - addV07Columns: add the v0.7 cost/billing columns to `sessions`.
    ///   - addV011SessionsColumn: add `api_call_count` to `sessions`.
    ///   - addV011MessagesColumn: add `reasoning_content` to `messages`.
    /// - Returns: URL of the fixture DB inside its own per-test directory.
    private func makeFixtureStateDB(
        addV07Columns: Bool = false,
        addV011SessionsColumn: Bool = false,
        addV011MessagesColumn: Bool = false
    ) throws -> URL {
        // Each test gets its own isolated parent dir. We can't dump the
        // fixture directly into `temporaryDirectory` because the symlink
        // we create alongside (`<parent>/state.db`) would clobber a
        // sibling test's symlink when the suite runs in parallel.
        let testDir = FileManager.default.temporaryDirectory
            .appendingPathComponent("scarf-test-\(UUID().uuidString)", isDirectory: true)
        try FileManager.default.createDirectory(at: testDir, withIntermediateDirectories: true)
        let url = testDir.appendingPathComponent("fixture.db")
        var db: OpaquePointer?
        guard sqlite3_open_v2(url.path, &db, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, nil) == SQLITE_OK else {
            throw TransportError.other(message: "sqlite3_open_v2 failed")
        }
        defer { sqlite3_close(db) }

        // Optional columns are appended at the tail of each CREATE TABLE
        // so the baseline schema stays stable across variants.
        var sessionsExtra = ""
        if addV07Columns {
            sessionsExtra += ", reasoning_tokens INTEGER, actual_cost_usd REAL, cost_status TEXT, billing_provider TEXT"
        }
        if addV011SessionsColumn {
            sessionsExtra += ", api_call_count INTEGER"
        }
        var messagesExtra = ""
        if addV011MessagesColumn {
            messagesExtra += ", reasoning_content TEXT"
        }

        let schema = """
        CREATE TABLE sessions (
            id TEXT PRIMARY KEY,
            source TEXT,
            user_id TEXT,
            model TEXT,
            title TEXT,
            parent_session_id TEXT,
            started_at REAL,
            ended_at REAL,
            end_reason TEXT,
            message_count INTEGER,
            tool_call_count INTEGER,
            input_tokens INTEGER,
            output_tokens INTEGER,
            cache_read_tokens INTEGER,
            cache_write_tokens INTEGER,
            estimated_cost_usd REAL\(sessionsExtra)
        );
        INSERT INTO sessions (id, source, user_id, model, title, parent_session_id, started_at, ended_at, end_reason, message_count, tool_call_count, input_tokens, output_tokens, cache_read_tokens, cache_write_tokens, estimated_cost_usd)
        VALUES ('s1', 'acp', 'u1', 'gpt-5', 'Test', NULL, 1700000000.0, NULL, NULL, 5, 2, 100, 200, 0, 0, 0.05);
        CREATE TABLE messages (
            id INTEGER PRIMARY KEY,
            session_id TEXT,
            role TEXT,
            content TEXT,
            tool_call_id TEXT,
            tool_calls TEXT,
            tool_name TEXT,
            timestamp REAL,
            token_count INTEGER,
            finish_reason TEXT\(messagesExtra)
        );
        INSERT INTO messages (id, session_id, role, content, tool_call_id, tool_calls, tool_name, timestamp, token_count, finish_reason)
        VALUES (1, 's1', 'user', 'hi', NULL, NULL, NULL, 1700000001.0, NULL, NULL);
        """
        var errMsg: UnsafeMutablePointer<CChar>?
        let rc = sqlite3_exec(db, schema, nil, nil, &errMsg)
        if rc != SQLITE_OK {
            let msg = errMsg.flatMap { String(cString: $0) } ?? "unknown"
            sqlite3_free(errMsg)
            throw TransportError.other(message: "sqlite3_exec failed: \(msg)")
        }
        return url
    }

    /// Tear down everything `makeFixtureStateDB` created: the fixture DB,
    /// the sibling `state.db` symlink, and — unlike the earlier inline
    /// cleanup, which leaked empty temp directories — the per-test
    /// parent directory itself.
    private func removeFixture(_ dbURL: URL) {
        try? FileManager.default.removeItem(at: dbURL.deletingLastPathComponent())
    }

    /// Construct a remote-shaped context whose `paths.stateDB` points at
    /// the fixture file. We embed the absolute path under a fake
    /// `remoteHome` whose final `/.hermes/state.db` resolves to our
    /// real DB on disk.
    private func makeFixtureContext(dbURL: URL) -> ServerContext {
        // The DB the backend opens is `<paths.home>/state.db`. We point
        // `remoteHome` at the parent dir of the fixture file and then
        // symlink `state.db` to the fixture so the backend's resolved
        // path lands on it.
        let parent = dbURL.deletingLastPathComponent()
        let stateLink = parent.appendingPathComponent("state.db")
        // Replace any prior symlink/file at the canonical "state.db" path.
        try? FileManager.default.removeItem(at: stateLink)
        try? FileManager.default.createSymbolicLink(at: stateLink, withDestinationURL: dbURL)
        return ServerContext(
            id: UUID(),
            displayName: "fixture",
            kind: .ssh(SSHConfig(host: "fake.invalid", remoteHome: parent.path))
        )
    }

    /// Construct a remote-shaped context that uses the default
    /// `~/.hermes` remote home — exercises the tilde-expansion path
    /// in `RemoteSQLiteBackend.quoteForRemoteShell`. The fixture DB
    /// is symlinked at `$HOME/.hermes/state.db` so the shell-expanded
    /// path resolves correctly. Cleanup restores anything we move.
    /// Returns the original-symlink (or absent state) so the caller
    /// can restore on teardown.
    private struct DefaultHomeFixture {
        let dbURL: URL
        let stateLink: URL
        let backupURL: URL?       // nil when ~/.hermes/state.db didn't exist
        let context: ServerContext
    }
    private func makeDefaultHomeFixtureContext(dbURL: URL) throws -> DefaultHomeFixture {
        let homeURL = URL(fileURLWithPath: NSHomeDirectory())
        let hermesDir = homeURL.appendingPathComponent(".hermes", isDirectory: true)
        try FileManager.default.createDirectory(at: hermesDir, withIntermediateDirectories: true)
        let stateLink = hermesDir.appendingPathComponent("state.db")
        // If something is already at ~/.hermes/state.db (the user's
        // real Hermes install on dev machines), move it aside so we
        // can put our fixture in its place. Restore on teardown.
        var backupURL: URL?
        if FileManager.default.fileExists(atPath: stateLink.path) {
            let bak = hermesDir.appendingPathComponent("state.db.scarf-test-bak-\(UUID().uuidString)")
            try FileManager.default.moveItem(at: stateLink, to: bak)
            backupURL = bak
        }
        try FileManager.default.createSymbolicLink(at: stateLink, withDestinationURL: dbURL)
        let ctx = ServerContext(
            id: UUID(),
            displayName: "fixture",
            kind: .ssh(SSHConfig(host: "fake.invalid"))
            // No remoteHome override → defaults to "~/.hermes".
        )
        return DefaultHomeFixture(dbURL: dbURL, stateLink: stateLink, backupURL: backupURL, context: ctx)
    }
    private func cleanupDefaultHomeFixture(_ fixture: DefaultHomeFixture) {
        try? FileManager.default.removeItem(at: fixture.stateLink)
        if let bak = fixture.backupURL {
            try? FileManager.default.moveItem(at: bak, to: fixture.stateLink)
        }
    }

    /// Skip the test if /usr/bin/sqlite3 isn't available. Mirrors how
    /// other Apple-only tests gate on system tooling.
    private func requireSqlite3() throws {
        let path = "/usr/bin/sqlite3"
        let exists = FileManager.default.isExecutableFile(atPath: path)
        try #require(exists, "Test requires /usr/bin/sqlite3")
    }

    // MARK: - open() / schema detection

    /// Regression: a default-config remote with `paths.stateDB ==
    /// "~/.hermes/state.db"` previously hit `unable to open database
    /// "~/.hermes/state.db"` because the backend single-quoted the
    /// path and sqlite3 doesn't expand `~` itself. Verify the
    /// $HOME-rewrite path works against a real shell.
    @Test func openWithDefaultTildeHomeExpands() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        let fixture = try makeDefaultHomeFixtureContext(dbURL: dbURL)
        defer {
            cleanupDefaultHomeFixture(fixture)
            removeFixture(dbURL)
        }
        let backend = RemoteSQLiteBackend(context: fixture.context, transport: LocalSQLite3Transport())

        let opened = await backend.open()
        #expect(opened)
        let err = await backend.lastOpenError
        #expect(err == nil)

        // And actually run a query through the same expansion path.
        let rows = try await backend.query("SELECT id FROM sessions", params: [])
        #expect(rows.count == 1)
    }

    @Test func openProbesSchemaSuccessfully() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())

        let opened = await backend.open()
        #expect(opened)
        // Baseline v0.6 fixture: neither schema upgrade is present.
        let v07 = await backend.hasV07Schema
        let v011 = await backend.hasV011Schema
        #expect(v07 == false)
        #expect(v011 == false)
        let err = await backend.lastOpenError
        #expect(err == nil)
    }

    @Test func openOnV07SchemaDB() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB(addV07Columns: true)
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())

        let opened = await backend.open()
        #expect(opened)
        let v07 = await backend.hasV07Schema
        let v011 = await backend.hasV011Schema
        #expect(v07 == true)
        #expect(v011 == false)
    }

    @Test func openOnV011SchemaDB() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB(
            addV07Columns: true,
            addV011SessionsColumn: true,
            addV011MessagesColumn: true
        )
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())

        let opened = await backend.open()
        #expect(opened)
        let v011 = await backend.hasV011Schema
        #expect(v011 == true)
    }

    @Test func partialMigrationStaysOnV07() async throws {
        try requireSqlite3()
        // sessions has api_call_count but messages lacks reasoning_content
        // — the belt-and-braces guard should keep hasV011Schema false.
        let dbURL = try makeFixtureStateDB(
            addV07Columns: true,
            addV011SessionsColumn: true,
            addV011MessagesColumn: false
        )
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())

        let opened = await backend.open()
        #expect(opened)
        let v011 = await backend.hasV011Schema
        #expect(v011 == false)
        let v07 = await backend.hasV07Schema
        #expect(v07 == true)
    }

    // MARK: - query()

    @Test func queryReturnsRows() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        let rows = try await backend.query("SELECT id FROM sessions", params: [])
        #expect(rows.count == 1)
        if case .text(let id) = rows[0][0] {
            #expect(id == "s1")
        } else {
            Issue.record("Expected .text id, got \(rows[0][0])")
        }
    }

    @Test func queryWithIntParam() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        // Fixture session has message_count == 5, so >= 5 matches it.
        let rows = try await backend.query(
            "SELECT id FROM sessions WHERE message_count >= ?",
            params: [.integer(5)]
        )
        #expect(rows.count == 1)
    }

    @Test func queryWithTextParamEscapesQuotes() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        // Injection-shaped value — should be escaped to a harmless literal,
        // matching nothing in the fixture.
        let rows = try await backend.query(
            "SELECT id FROM sessions WHERE id = ?",
            params: [.text("s' OR 1=1 --")]
        )
        #expect(rows.isEmpty)
    }

    @Test func queryEmptyResultSet() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        let rows = try await backend.query(
            "SELECT id FROM sessions WHERE id = ?",
            params: [.text("does-not-exist")]
        )
        #expect(rows.isEmpty)
    }

    @Test func queryNullValuesPreserved() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        let rows = try await backend.query(
            "SELECT id, ended_at, end_reason FROM sessions WHERE id = ?",
            params: [.text("s1")]
        )
        #expect(rows.count == 1)
        // ended_at and end_reason are NULL in the fixture row.
        #expect(rows[0].isNull(at: 1))
        #expect(rows[0].isNull(at: 2))
    }

    // MARK: - queryBatch()

    @Test func queryBatchSplitsResultsCorrectly() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        let results = try await backend.queryBatch([
            (sql: "SELECT id FROM sessions", params: []),
            (sql: "SELECT id FROM messages WHERE session_id = ?", params: [.text("s1")]),
            (sql: "SELECT COUNT(*) FROM sessions", params: [])
        ])
        #expect(results.count == 3)
        // Slot 0: one session row.
        #expect(results[0].count == 1)
        if case .text(let sid) = results[0][0][0] {
            #expect(sid == "s1")
        } else {
            Issue.record("Expected .text in slot 0")
        }
        // Slot 1: one message row.
        #expect(results[1].count == 1)
        // Slot 2: one count row with integer 1.
        #expect(results[2].count == 1)
        if case .integer(let n) = results[2][0][0] {
            #expect(n == 1)
        } else {
            Issue.record("Expected .integer in slot 2")
        }
    }

    @Test func queryBatchHandlesEmptyResultSets() async throws {
        try requireSqlite3()
        let dbURL = try makeFixtureStateDB()
        defer { removeFixture(dbURL) }
        let ctx = makeFixtureContext(dbURL: dbURL)
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())
        _ = await backend.open()

        // Middle statement returns 0 rows; outer slots should still be
        // populated correctly.
        let results = try await backend.queryBatch([
            (sql: "SELECT id FROM sessions", params: []),
            (sql: "SELECT id FROM messages WHERE session_id = ?", params: [.text("does-not-exist")]),
            (sql: "SELECT COUNT(*) FROM messages", params: [])
        ])
        #expect(results.count == 3)
        #expect(results[0].count == 1)
        #expect(results[1].isEmpty)
        #expect(results[2].count == 1)
    }

    // MARK: - Failure paths

    @Test func nonZeroExitThrowsSqliteError() async throws {
        try requireSqlite3()
        // Point at a parent dir with no state.db symlink — sqlite3 will
        // open a brand-new empty DB, so the schema PRAGMAs return empty
        // tables. That actually succeeds. Instead, point remoteHome at
        // a path under a non-existent directory so sqlite3 can't open
        // the file at all.
        let nonExistentParent = "/var/empty/scarf-test-no-such-dir-\(UUID().uuidString)"
        let ctx = ServerContext(
            id: UUID(),
            displayName: "broken",
            kind: .ssh(SSHConfig(host: "fake.invalid", remoteHome: nonExistentParent))
        )
        let backend = RemoteSQLiteBackend(context: ctx, transport: LocalSQLite3Transport())

        let opened = await backend.open()
        #expect(opened == false)
        let err = await backend.lastOpenError
        #expect(err != nil)
        #expect(!(err ?? "").isEmpty)
    }
}
|
||||
|
||||
#endif // canImport(SQLite3)
|
||||
@@ -0,0 +1,147 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Pure unit tests on `SQLValueInliner.inline(_:params:)` and
|
||||
/// `SQLValueInliner.encode(_:)`. No backend, no transport, no actor —
|
||||
/// these are the lexical-substitution rules that drive the remote
|
||||
/// SQLite backend's `?` → literal pipeline.
|
||||
@Suite struct SQLValueInlinerTests {
|
||||
|
||||
// MARK: - encode(_:) per SQLValue case
|
||||
|
||||
@Test func encodeNullProducesNULL() {
    // SQL NULL has exactly one spelling — the bare keyword.
    let encoded = SQLValueInliner.encode(.null)
    #expect(encoded == "NULL")
}
|
||||
|
||||
@Test func encodeIntegerProducesUnquotedDigits() {
    // Integers render as bare digits — no quoting — across positive,
    // negative, zero, and the Int64 extreme.
    let cases: [(Int64, String)] = [
        (42, "42"),
        (-7, "-7"),
        (0, "0"),
        (Int64.max, "9223372036854775807"),
    ]
    for (value, expected) in cases {
        #expect(SQLValueInliner.encode(.integer(value)) == expected)
    }
}
|
||||
|
||||
@Test func encodeRealUsesPercent17gFormat() {
    // %.17g renders a Double with enough decimal digits to round-trip
    // exactly. Pin both the format and the round-trip property.
    let source: Double = 3.14
    let rendered = SQLValueInliner.encode(.real(source))
    #expect(rendered == String(format: "%.17g", source))
    #expect(Double(rendered) == source)

    // 0.1 + 0.2 is the canonical binary-imprecision case — the encoded
    // text must still parse back to the identical Double.
    let awkward = 0.1 + 0.2
    let renderedAwkward = SQLValueInliner.encode(.real(awkward))
    #expect(Double(renderedAwkward) == awkward)
}
|
||||
|
||||
@Test func encodeTextWrapsInSingleQuotes() {
    // Plain text becomes a single-quoted SQL literal; the empty string
    // is just the two quotes.
    #expect(SQLValueInliner.encode(.text("")) == "''")
    #expect(SQLValueInliner.encode(.text("hi")) == "'hi'")
}
|
||||
|
||||
@Test func encodeTextDoublesEmbeddedSingleQuotes() {
    // Each embedded apostrophe is doubled per the SQL literal escape
    // rule; injection-shaped input thereby collapses to a harmless
    // literal.
    let cases: [(String, String)] = [
        ("it's", "'it''s'"),
        ("a'b'c", "'a''b''c'"),
        ("' OR 1=1 --", "''' OR 1=1 --'"),
    ]
    for (raw, literal) in cases {
        #expect(SQLValueInliner.encode(.text(raw)) == literal)
    }
}
|
||||
|
||||
@Test func encodeBlobProducesHexLiteral() {
    // Blob literals use SQLite's X'…' hex form: lowercase digits, two
    // per byte, leading zeros preserved, empty blob allowed.
    let cases: [([UInt8], String)] = [
        ([0xde, 0xad], "X'dead'"),
        ([], "X''"),
        ([0x00, 0x0f, 0xff], "X'000fff'"),
    ]
    for (bytes, literal) in cases {
        #expect(SQLValueInliner.encode(.blob(Data(bytes))) == literal)
    }
}
|
||||
|
||||
// MARK: - inline(_:params:) substitution rules
|
||||
|
||||
@Test func inlineSubstitutesPlaceholdersInOrder() {
    let out = SQLValueInliner.inline(
        "INSERT INTO t VALUES (?, ?, ?)",
        params: [.integer(1), .text("two"), .real(3.0)]
    )
    // Pin the fully substituted statement. The previous assertions
    // (`contains("1")`, `contains("'two'")`) were nearly vacuous — they
    // pass even if the placeholders are bound out of order — so assert
    // exact equality instead. The real is rendered via the same %.17g
    // rule `encode(_:)` uses, so build the expectation from it rather
    // than hard-coding its decimal spelling.
    let real3 = String(format: "%.17g", 3.0)
    #expect(out == "INSERT INTO t VALUES (1, 'two', \(real3))")
}
|
||||
|
||||
/// A `?` inside a single-quoted SQL string literal is plain text, not
/// a placeholder — only the trailing `?` outside the quotes consumes
/// a parameter.
@Test func inlineSkipsPlaceholderInsideStringLiteral() {
    let result = SQLValueInliner.inline(
        "WHERE name = '?' AND id = ?",
        params: [.integer(7)]
    )
    #expect(result == "WHERE name = '?' AND id = 7")
}
|
||||
|
||||
/// Double-quoted identifiers (column/table names with special chars)
/// are a quoted region too — a `?` inside them stays literal.
@Test func inlineSkipsPlaceholderInsideDoubleQuotedIdentifier() {
    let result = SQLValueInliner.inline(
        "SELECT \"col?\" FROM t WHERE x = ?",
        params: [.integer(1)]
    )
    #expect(result == "SELECT \"col?\" FROM t WHERE x = 1")
}
|
||||
|
||||
/// `'it''s ?'` is one SQL string literal containing `it's ?`. The
/// doubled single-quote is the SQL escape for an embedded apostrophe,
/// so the scanner must stay in string state across it — the trailing
/// `?` is inside the string and no params are consumed.
@Test func inlineHandlesDoubledSingleQuoteEscapeInString() {
    let sql = "WHERE x = 'it''s ?'"
    let result = SQLValueInliner.inline(sql, params: [])
    #expect(result == sql)
}
|
||||
|
||||
/// Sanity check: the SELECT shape `HermesDataService.fetchSessions`
/// generates inlines cleanly with its typical `[.integer(100)]` limit
/// parameter.
@Test func inlineSelectShapeMatchesDataServicePattern() {
    let query = "SELECT id, source FROM sessions WHERE parent_session_id IS NULL ORDER BY started_at DESC LIMIT ?"
    let inlined = SQLValueInliner.inline(query, params: [.integer(100)])
    #expect(inlined == "SELECT id, source FROM sessions WHERE parent_session_id IS NULL ORDER BY started_at DESC LIMIT 100")
}
|
||||
|
||||
/// With no placeholders present, inlining is the identity transform.
@Test func inlineWithNoPlaceholdersReturnsInputUnchanged() {
    let statement = "SELECT COUNT(*) FROM messages"
    let result = SQLValueInliner.inline(statement, params: [])
    #expect(result == statement)
}
|
||||
|
||||
/// Substitution must not mangle surrounding whitespace, newlines,
/// semicolons, or parens — only the `?` itself is replaced.
@Test func inlinePreservesAllOtherCharacters() {
    let input = " SELECT *\n FROM t WHERE id = ? ; "
    let output = SQLValueInliner.inline(input, params: [.integer(5)])
    #expect(output == " SELECT *\n FROM t WHERE id = 5 ; ")
}
|
||||
|
||||
/// `.null` params render as the bare keyword NULL.
@Test func inlineSubstitutesNullPlaceholder() {
    let result = SQLValueInliner.inline(
        "UPDATE t SET col = ? WHERE id = ?",
        params: [.null, .integer(1)]
    )
    #expect(result == "UPDATE t SET col = NULL WHERE id = 1")
}
|
||||
|
||||
/// `.blob` params render inline as SQLite hex literals.
@Test func inlineSubstitutesBlobPlaceholder() {
    let payload = Data([0x01, 0x02, 0x03])
    let result = SQLValueInliner.inline(
        "INSERT INTO t (data) VALUES (?)",
        params: [.blob(payload)]
    )
    #expect(result == "INSERT INTO t (data) VALUES (X'010203')")
}
|
||||
}
|
||||
@@ -0,0 +1,85 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Regression tests for `SSHScriptRunner`. macOS-only because the
/// implementation relies on `Foundation.Process`, which doesn't exist
/// on Swift Linux. Every test drives the `runLocally` path, so CI
/// needs no live SSH endpoint.
#if os(macOS)
@Suite struct SSHScriptRunnerTests {

    /// Issue #77 regression. Before the fix the runner collected
    /// stdout via `readToEnd()` *after* the subprocess exited; once a
    /// script's output crossed the kernel's pipe buffer (16–64 KB on
    /// macOS) the child blocked on write with nothing draining the
    /// read end. The only visible symptom was a 30-second timeout and
    /// an empty result.
    ///
    /// The script below writes ~256 KB — comfortably past every
    /// pipe-buffer threshold. With the readabilityHandler drain in
    /// place the run finishes in well under a second and hands back
    /// the entire payload.
    @Test func drainsLargeStdoutWithoutTimeout() async throws {
        // 256 lines × 1024 bytes/line = 256 KB.
        let script = """
        for i in $(seq 1 256); do
        printf '%04d:' "$i"
        printf '%.0sx' $(seq 1 1018)
        printf '\\n'
        done
        """
        let outcome = await SSHScriptRunner.run(
            script: script,
            context: .local,
            timeout: 10
        )
        guard case .completed(let stdout, _, let exitCode) = outcome else {
            if case .connectFailure(let reason) = outcome {
                Issue.record("Expected completion, got connectFailure: \(reason)")
            }
            return
        }
        #expect(exitCode == 0)
        // 256 payload lines, plus the empty subsequence after the
        // final newline when empties are kept.
        let lineCount = stdout.split(separator: "\n", omittingEmptySubsequences: false).count
        #expect(lineCount >= 256)
        #expect(stdout.utf8.count >= 256 * 1024)
    }

    /// Sanity check that small scripts still round-trip exactly as
    /// they did before the drain refactor — guards against an
    /// off-by-one in the readability handler swallowing trailing
    /// bytes.
    @Test func smallScriptPayloadRoundTrips() async throws {
        let outcome = await SSHScriptRunner.run(
            script: "printf 'hello\\n' && printf 'world\\n' >&2 && exit 0",
            context: .local,
            timeout: 5
        )
        guard case .completed(let stdout, let stderr, let exitCode) = outcome else {
            if case .connectFailure(let reason) = outcome {
                Issue.record("Expected completion, got connectFailure: \(reason)")
            }
            return
        }
        #expect(exitCode == 0)
        #expect(stdout == "hello\n")
        #expect(stderr == "world\n")
    }

    /// A non-zero exit code is still reported as `.completed` with
    /// the captured stdout/stderr — the contract is unchanged.
    @Test func nonZeroExitIsReportedAsCompleted() async throws {
        let outcome = await SSHScriptRunner.run(
            script: "echo nope >&2 && exit 7",
            context: .local,
            timeout: 5
        )
        guard case .completed(_, let stderr, let exitCode) = outcome else {
            if case .connectFailure(let reason) = outcome {
                Issue.record("Expected completion, got connectFailure: \(reason)")
            }
            return
        }
        #expect(exitCode == 7)
        #expect(stderr.contains("nope"))
    }
}
#endif
|
||||
@@ -0,0 +1,202 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// `.serialized` because every test that exercises the wrappers
/// (`measure`, `measureAsync`, `event`) installs and uninstalls the
/// process-wide backend set, and parallel tests would race on that
/// shared state. Tests of the ring buffer in isolation don't need
/// serialization, but the suite-level annotation is the simplest way
/// to keep the global-state ones honest.
@Suite(.serialized) struct ScarfMonTests {

    /// Ring-buffer ordering — fewer records than capacity, no wrap:
    /// `samples()` returns entries in insertion order.
    @Test func ringBufferKeepsOrderBeforeWrap() {
        let ring = ScarfMonRingBuffer(capacity: 8)
        ring.record(.fixture(name: "a"))
        ring.record(.fixture(name: "b"))
        ring.record(.fixture(name: "c"))
        let names = ring.samples().map { $0.name.description }
        #expect(names == ["a", "b", "c"])
    }

    /// Ring-buffer wrap-around — the oldest entries are dropped, the
    /// newest entries appear at the end.
    @Test func ringBufferWrapsCorrectly() {
        let ring = ScarfMonRingBuffer(capacity: 4)
        // Six records into a capacity-4 buffer: "a" and "b" fall out.
        ring.record(.fixture(name: "a"))
        ring.record(.fixture(name: "b"))
        ring.record(.fixture(name: "c"))
        ring.record(.fixture(name: "d"))
        ring.record(.fixture(name: "e"))
        ring.record(.fixture(name: "f"))
        let names = ring.samples().map { $0.name.description }
        #expect(names == ["c", "d", "e", "f"])
    }

    /// Reset clears the buffer and resets wrap state — subsequent reads
    /// see only post-reset entries.
    @Test func ringBufferResetClearsState() {
        let ring = ScarfMonRingBuffer(capacity: 4)
        // Overfill first (5 records, capacity 4) so reset is exercised
        // from the wrapped state, not just the simple one.
        ring.record(.fixture(name: "a"))
        ring.record(.fixture(name: "b"))
        ring.record(.fixture(name: "c"))
        ring.record(.fixture(name: "d"))
        ring.record(.fixture(name: "e"))
        ring.reset()
        ring.record(.fixture(name: "x"))
        let names = ring.samples().map { $0.name.description }
        #expect(names == ["x"])
    }

    /// Summary aggregates per (category, name) and computes percentiles.
    @Test func summaryAggregatesByCategoryAndName() {
        let ring = ScarfMonRingBuffer(capacity: 16)
        // Three "fast" intervals + two "slow" intervals on the same key.
        for nanos: UInt64 in [1_000_000, 2_000_000, 3_000_000, 50_000_000, 100_000_000] {
            ring.record(.fixture(name: "render", durationNanos: nanos))
        }
        let stats = ring.summary()
        // All five samples share (category, name), so exactly one row.
        #expect(stats.count == 1)
        let s = stats[0]
        #expect(s.count == 5)
        // 1 + 2 + 3 + 50 + 100 million nanoseconds.
        #expect(s.totalNanos == 156_000_000)
        // Nearest-rank p95 with 5 samples picks the 5th sorted value
        // (rank = ceil(5 * 0.95) = 5).
        #expect(s.p95Nanos == 100_000_000)
        // p50 with 5 samples picks the 3rd sorted value.
        #expect(s.p50Nanos == 3_000_000)
    }

    /// Events accumulate count + bytes without contributing to interval
    /// percentiles.
    @Test func eventsAccumulateBytesNotDuration() {
        let ring = ScarfMonRingBuffer(capacity: 16)
        ring.record(ScarfMon.Sample(
            category: .chatStream, name: "token", kind: .event,
            timestamp: Date(), durationNanos: 0, count: 1, bytes: 256
        ))
        ring.record(ScarfMon.Sample(
            category: .chatStream, name: "token", kind: .event,
            timestamp: Date(), durationNanos: 0, count: 1, bytes: 128
        ))
        let stats = ring.summary()
        #expect(stats.count == 1)
        #expect(stats[0].count == 2)
        // 256 + 128 bytes across the two events.
        #expect(stats[0].totalBytes == 384)
        // Zero-duration events must not skew the percentile field.
        #expect(stats[0].p95Nanos == 0)
    }

    /// `isActive` flips off when the backend set is empty so the
    /// hot-path short-circuit kicks in.
    @Test func installEmptyBackendsDeactivates() {
        ScarfMon.install([])
        #expect(ScarfMon.isActive == false)
        ScarfMon.install([ScarfMonRingBuffer(capacity: 4)])
        #expect(ScarfMon.isActive == true)
        // Leave the process-wide backend set clean for the next test.
        ScarfMon.install([])
    }

    /// `measure` records a duration into every installed backend.
    @Test func measureFlowsThroughInstalledBackends() throws {
        let ring = ScarfMonRingBuffer(capacity: 8)
        ScarfMon.install([ring])
        defer { ScarfMon.install([]) }

        let result: Int = ScarfMon.measure(.render, "unit") {
            return 42
        }
        // The wrapper is transparent: the body's value passes through.
        #expect(result == 42)
        let samples = ring.samples()
        #expect(samples.count == 1)
        #expect(samples[0].kind == .interval)
        #expect(samples[0].name.description == "unit")
    }

    /// `measureAsync` records duration even when the body throws — the
    /// `defer` in the wrapper must fire on rethrow.
    @Test func measureAsyncRecordsDurationEvenOnThrow() async {
        struct Boom: Error {}
        let ring = ScarfMonRingBuffer(capacity: 8)
        ScarfMon.install([ring])
        defer { ScarfMon.install([]) }

        await #expect(throws: Boom.self) {
            try await ScarfMon.measureAsync(.chatStream, "throws") {
                throw Boom()
            }
        }
        // The sample still lands despite the rethrow.
        let samples = ring.samples()
        #expect(samples.count == 1)
        #expect(samples[0].name.description == "throws")
    }

    /// `event(...)` records a count entry without taking a clock reading.
    @Test func eventRecordsCountSample() {
        let ring = ScarfMonRingBuffer(capacity: 8)
        ScarfMon.install([ring])
        defer { ScarfMon.install([]) }

        ScarfMon.event(.chatStream, "token", count: 1, bytes: 32)
        let samples = ring.samples()
        #expect(samples.count == 1)
        #expect(samples[0].kind == .event)
        #expect(samples[0].count == 1)
        #expect(samples[0].bytes == 32)
        #expect(samples[0].durationNanos == 0)
    }

    /// Boot configure flips the active backend set without leaking
    /// across tests.
    @Test func bootConfigureModesInstallExpectedBackends() {
        defer { ScarfMon.install([]) }

        ScarfMonBoot.configure(mode: .off)
        #expect(ScarfMon.currentBackends.isEmpty)
        #expect(ScarfMonBoot.sharedRingBuffer == nil)

        ScarfMonBoot.configure(mode: .signpostOnly)
        #expect(ScarfMon.currentBackends.count == 1)
        #expect(ScarfMonBoot.sharedRingBuffer == nil)

        // `.full` returns the ring buffer it installed; the same
        // instance must be exposed via `sharedRingBuffer`.
        let ring = ScarfMonBoot.configure(mode: .full)
        #expect(ring != nil)
        #expect(ScarfMon.currentBackends.count == 3)
        #expect(ScarfMonBoot.sharedRingBuffer === ring)
    }

    /// JSON export round-trips through `JSONSerialization` — proves the
    /// per-line format is valid JSON the user can paste into a feedback
    /// tool.
    @Test func exportJSONIsParseable() throws {
        let ring = ScarfMonRingBuffer(capacity: 8)
        ring.record(.fixture(name: "a", durationNanos: 1_500_000))
        ring.record(ScarfMon.Sample(
            category: .chatStream, name: "token", kind: .event,
            timestamp: Date(), durationNanos: 0, count: 1, bytes: 64
        ))
        let json = ring.exportJSON()
        // Force-unwrap is acceptable in a test: a String always
        // encodes to UTF-8 data.
        let data = json.data(using: .utf8)!
        let parsed = try JSONSerialization.jsonObject(with: data, options: [])
        // Expect one dictionary per recorded sample.
        let arr = parsed as? [[String: Any]]
        #expect(arr?.count == 2)
    }
}
|
||||
|
||||
/// Test-only factory: builds an interval sample with defaults chosen
/// so ring-buffer tests only spell out the fields they actually vary.
private extension ScarfMon.Sample {
    static func fixture(
        category: ScarfMon.Category = .render,
        name: StaticString,
        durationNanos: UInt64 = 1_000_000
    ) -> ScarfMon.Sample {
        let sample = ScarfMon.Sample(
            category: category,
            name: name,
            kind: .interval,
            timestamp: Date(),
            durationNanos: durationNanos,
            count: 1,
            bytes: nil
        )
        return sample
    }
}
|
||||
@@ -0,0 +1,312 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Pure-logic tests for the marker-block splice helpers in
/// `SecretsEnvBlock`. No Keychain access, no filesystem I/O — just
/// strings in, strings out. The Mac-side `KeychainEnvMirror` wraps
/// these with Keychain resolution + transport-aware writes; that
/// integration is covered separately in `KeychainEnvMirrorTests`.
@Suite("SecretsEnvBlock")
struct SecretsEnvBlockTests {

    // MARK: - envKeyName

    /// slug + field key fold into one SCARF_-prefixed SCREAMING_SNAKE name.
    @Test func envKeyNameStandardCase() {
        #expect(
            SecretsEnvBlock.envKeyName(slug: "local-news", fieldKey: "api_token")
                == "SCARF_LOCAL_NEWS_API_TOKEN"
        )
    }

    @Test func envKeyNameNonAlphanumericChars() {
        // Dashes, underscores, dots, spaces all fold to single underscores.
        #expect(
            SecretsEnvBlock.envKeyName(slug: "foo.bar baz", fieldKey: "x-y-z")
                == "SCARF_FOO_BAR_BAZ_X_Y_Z"
        )
    }

    @Test func envKeyNameRunsCollapse() {
        // Three consecutive special chars produce a single underscore,
        // not three.
        #expect(
            SecretsEnvBlock.envKeyName(slug: "foo---bar", fieldKey: "a__b")
                == "SCARF_FOO_BAR_A_B"
        )
    }

    @Test func envKeyNameLeadingTrailingTrim() {
        // Leading/trailing dashes on the slug shouldn't produce
        // SCARF__... or trailing _ in the result.
        let key = SecretsEnvBlock.envKeyName(slug: "-foo-", fieldKey: "-bar-")
        #expect(key == "SCARF_FOO_BAR")
        #expect(!key.hasSuffix("_"))
        #expect(!key.contains("__"))
    }

    @Test func envKeyNameAllSymbolsFallsBackToUnnamed() {
        // Pathological input — slug is all special chars. Sanitizer
        // emits `UNNAMED` rather than the empty string, so the env
        // var name is still parseable.
        #expect(
            SecretsEnvBlock.envKeyName(slug: "!!!", fieldKey: "...")
                == "SCARF_UNNAMED_UNNAMED"
        )
    }

    // MARK: - renderBlock

    @Test func renderBlockEmptyEntriesReturnsEmpty() {
        // Empty entries is the documented "use removeBlock instead"
        // sentinel — renderBlock should not produce a block with
        // dangling markers.
        let result = SecretsEnvBlock.renderBlock(slug: "foo", entries: [])
        #expect(result.isEmpty)
    }

    @Test func renderBlockSortsEntries() {
        // Output is deterministic regardless of input order so two
        // runs with the same logical content produce byte-identical
        // bytes — load-bearing for the no-op-when-unchanged check
        // in the mirror's writeIfChanged.
        let aFirst = SecretsEnvBlock.renderBlock(
            slug: "foo",
            entries: [("ALPHA", "1"), ("BRAVO", "2")]
        )
        let bFirst = SecretsEnvBlock.renderBlock(
            slug: "foo",
            entries: [("BRAVO", "2"), ("ALPHA", "1")]
        )
        #expect(aFirst == bFirst)
        // Sanity: ALPHA precedes BRAVO in the output regardless of
        // insertion order.
        let alphaIdx = aFirst.range(of: "ALPHA")
        let bravoIdx = aFirst.range(of: "BRAVO")
        #expect(alphaIdx != nil && bravoIdx != nil)
        // Force-unwrap is safe here: presence asserted one line above.
        #expect(alphaIdx!.lowerBound < bravoIdx!.lowerBound)
    }

    @Test func renderBlockEmitsMarkersAroundEntries() {
        // The begin/end marker lines carry the slug so later splices
        // can find this exact block.
        let result = SecretsEnvBlock.renderBlock(
            slug: "site-status-checker",
            entries: [("SCARF_SITE_STATUS_CHECKER_TOKEN", "abc")]
        )
        #expect(result.hasPrefix("# scarf-secrets:begin site-status-checker"))
        #expect(result.hasSuffix("# scarf-secrets:end site-status-checker"))
        #expect(result.contains("SCARF_SITE_STATUS_CHECKER_TOKEN=abc"))
    }

    @Test func renderBlockQuotesValuesWithWhitespace() {
        let result = SecretsEnvBlock.renderBlock(
            slug: "x",
            entries: [("KEY", "hello world")]
        )
        // Whitespace forces single-quoting (dotenv canonical) so the
        // value survives shell expansion and dotenv parsing.
        #expect(result.contains("KEY='hello world'"))
    }

    @Test func renderBlockQuotesValuesWithSpecialChars() {
        let cases: [(input: String, mustContain: String)] = [
            ("a#b", "KEY='a#b'"),    // # is dotenv comment marker
            ("a$b", "KEY='a$b'"),    // $ is shell expansion
            ("a\"b", "KEY='a\"b'"),  // " conflicts with double-quote literal
            ("a\\b", "KEY='a\\b'"),  // backslash must not be interpreted
        ]
        for (input, mustContain) in cases {
            let result = SecretsEnvBlock.renderBlock(
                slug: "x",
                entries: [("KEY", input)]
            )
            #expect(
                result.contains(mustContain),
                "value '\(input)' produced wrong escaping: \(result)"
            )
        }
    }

    @Test func renderBlockEscapesSingleQuotesViaCloseReopen() {
        // A literal single quote inside a single-quoted string is
        // dotenv-encoded as `'\''` (close, escape, reopen) — the
        // canonical sh/dotenv pattern.
        let result = SecretsEnvBlock.renderBlock(
            slug: "x",
            entries: [("KEY", "it's fine")]
        )
        #expect(result.contains("KEY='it'\\''s fine'"))
    }

    @Test func renderBlockLeavesPlainValuesUnquoted() {
        // No-special-chars values stay unquoted — readability + matches
        // the convention Hermes's existing ANTHROPIC_API_KEY entries
        // follow.
        let result = SecretsEnvBlock.renderBlock(
            slug: "x",
            entries: [("KEY", "abc-123_def")]
        )
        #expect(result.contains("\nKEY=abc-123_def\n"))
        #expect(!result.contains("KEY='abc-123_def'"))
    }

    // MARK: - applyBlock

    @Test func applyBlockToEmptyFile() {
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let result = SecretsEnvBlock.applyBlock(block, forSlug: "foo", to: "")
        // An empty file becomes exactly the block plus a trailing newline.
        #expect(result == block + "\n")
    }

    @Test func applyBlockToWhitespaceOnlyFile() {
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let result = SecretsEnvBlock.applyBlock(block, forSlug: "foo", to: " \n \n")
        // Whitespace-only treated like empty — block + newline, no
        // attempt to preserve the leading whitespace.
        #expect(result == block + "\n")
    }

    @Test func applyBlockAppendsToFileWithUserContent() {
        let existing = "ANTHROPIC_API_KEY=sk-test\nOPENAI_API_KEY=sk-other\n"
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let result = SecretsEnvBlock.applyBlock(block, forSlug: "foo", to: existing)
        // User content is preserved at the top.
        #expect(result.hasPrefix("ANTHROPIC_API_KEY=sk-test"))
        #expect(result.contains("OPENAI_API_KEY=sk-other"))
        // Block appended after a blank-line separator.
        #expect(result.contains("OPENAI_API_KEY=sk-other\n\n# scarf-secrets:begin foo"))
        // And ends with a trailing newline.
        #expect(result.hasSuffix("\n"))
    }

    @Test func applyBlockReplacesExistingBlockForSameSlug() {
        let oldBlock = sampleBlock(slug: "foo", entries: [("KEY", "old")])
        let newBlock = sampleBlock(slug: "foo", entries: [("KEY", "new")])
        let existing = "USER_VAR=something\n\n" + oldBlock + "\n"
        let result = SecretsEnvBlock.applyBlock(newBlock, forSlug: "foo", to: existing)
        #expect(result.contains("KEY=new"))
        #expect(!result.contains("KEY=old"))
        // User content above the block is preserved.
        #expect(result.contains("USER_VAR=something"))
    }

    @Test func applyBlockPreservesOtherSlugBlocks() {
        // The most important invariant — multiple project blocks
        // coexist in one file and editing one mustn't disturb the
        // other.
        let blockA = sampleBlock(slug: "alpha", entries: [("A_KEY", "1")])
        let blockB = sampleBlock(slug: "bravo", entries: [("B_KEY", "2")])
        let existing = blockA + "\n\n" + blockB + "\n"
        let updatedA = sampleBlock(slug: "alpha", entries: [("A_KEY", "1-updated")])
        let result = SecretsEnvBlock.applyBlock(updatedA, forSlug: "alpha", to: existing)
        // A was updated.
        #expect(result.contains("A_KEY=1-updated"))
        #expect(!result.contains("A_KEY=1\n"))
        // B is byte-identical.
        #expect(result.contains(blockB))
    }

    @Test func applyBlockIdempotent() {
        // Applying the output of one call back through applyBlock
        // with the same inputs produces the same string. Critical
        // for the launch reconciler — a no-op pass shouldn't keep
        // mutating the file.
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let existing = "USER_VAR=x\n"
        let once = SecretsEnvBlock.applyBlock(block, forSlug: "foo", to: existing)
        let twice = SecretsEnvBlock.applyBlock(block, forSlug: "foo", to: once)
        #expect(once == twice)
    }

    @Test func applyBlockEmptyBlockBehavesLikeRemove() {
        // Documented behaviour: passing an empty block is the same as
        // calling removeBlock — the splice path uses this when a
        // project's secrets are all cleared.
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let withBlock = "USER=x\n\n" + block + "\n"
        let viaApply = SecretsEnvBlock.applyBlock("", forSlug: "foo", to: withBlock)
        let viaRemove = SecretsEnvBlock.removeBlock(forSlug: "foo", from: withBlock)
        #expect(viaApply == viaRemove)
    }

    // MARK: - removeBlock

    @Test func removeBlockNoOpWhenAbsent() {
        // Removing a slug that has no block returns the input untouched.
        let existing = "USER_VAR=hello\nOTHER=world\n"
        let result = SecretsEnvBlock.removeBlock(forSlug: "foo", from: existing)
        #expect(result == existing)
    }

    @Test func removeBlockStripsBlockOnly() {
        // Only the marker-delimited block is removed; user lines on
        // both sides survive.
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let existing = "USER_VAR=x\n\n" + block + "\n\nMORE_USER=y\n"
        let result = SecretsEnvBlock.removeBlock(forSlug: "foo", from: existing)
        #expect(!result.contains("scarf-secrets"))
        #expect(result.contains("USER_VAR=x"))
        #expect(result.contains("MORE_USER=y"))
    }

    @Test func removeBlockCollapsesAppendedBlankLineSeparator() {
        // Round-trip: append a block, then remove it. The blank line
        // we inserted at append time should be absorbed so repeated
        // install/uninstall cycles don't accumulate blank lines.
        let block = sampleBlock(slug: "foo", entries: [("KEY", "value")])
        let original = "USER_VAR=x\n"
        let appended = SecretsEnvBlock.applyBlock(block, forSlug: "foo", to: original)
        let removed = SecretsEnvBlock.removeBlock(forSlug: "foo", from: appended)
        // Removed content should be very close to the original — at
        // most one trailing newline difference. No accumulation of
        // blank lines across the cycle.
        #expect(removed.trimmingCharacters(in: .whitespacesAndNewlines)
            == original.trimmingCharacters(in: .whitespacesAndNewlines))
    }

    // MARK: - Slug-prefix collision

    @Test func slugPrefixCollisionIsolated() {
        // A file with both `foo` and `foo-bar` blocks; editing `foo`
        // must not match the `foo-bar` markers as a prefix-substring
        // of the begin-line.
        let blockShort = sampleBlock(slug: "foo", entries: [("SHORT", "1")])
        let blockLong = sampleBlock(slug: "foo-bar", entries: [("LONG", "2")])
        let existing = blockShort + "\n\n" + blockLong + "\n"
        let updatedShort = sampleBlock(slug: "foo", entries: [("SHORT", "1-updated")])
        let result = SecretsEnvBlock.applyBlock(updatedShort, forSlug: "foo", to: existing)
        // Short was updated.
        #expect(result.contains("SHORT=1-updated"))
        #expect(!result.contains("SHORT=1\n"))
        // Long block is byte-identical.
        #expect(result.contains(blockLong))
        // Both markers still present, exactly once each.
        #expect(occurrences(of: "# scarf-secrets:begin foo\n", in: result) == 1)
        #expect(occurrences(of: "# scarf-secrets:begin foo-bar\n", in: result) == 1)
    }

    @Test func removeBlockRespectsSlugPrefixIsolation() {
        let blockShort = sampleBlock(slug: "foo", entries: [("SHORT", "1")])
        let blockLong = sampleBlock(slug: "foo-bar", entries: [("LONG", "2")])
        let existing = blockShort + "\n\n" + blockLong + "\n"
        let result = SecretsEnvBlock.removeBlock(forSlug: "foo", from: existing)
        // foo gone, foo-bar preserved byte-identically.
        #expect(!result.contains("SHORT=1"))
        #expect(result.contains(blockLong))
    }

    // MARK: - Helpers

    /// Thin alias over `renderBlock` so test bodies read as intent.
    private func sampleBlock(
        slug: String,
        entries: [(key: String, value: String)]
    ) -> String {
        SecretsEnvBlock.renderBlock(slug: slug, entries: entries)
    }

    /// Counts non-overlapping occurrences of `needle` in `haystack`.
    private func occurrences(of needle: String, in haystack: String) -> Int {
        var count = 0
        var search = haystack.startIndex
        while let range = haystack.range(of: needle, range: search..<haystack.endIndex) {
            count += 1
            search = range.upperBound
        }
        return count
    }
}
|
||||
@@ -0,0 +1,98 @@
|
||||
import Testing
|
||||
import Foundation
|
||||
@testable import ScarfCore
|
||||
|
||||
/// Issue #79 regression. `searchHub()` with `hubSource == "all"` must
/// filter the cached browse list client-side (instead of shelling out
/// to `hermes skills search`, which routes through Hermes's
/// centralized index and can miss skills that browse aggregates from
/// non-indexed registries — `honcho` was the user-reported example).
///
/// Source-specific searches keep the CLI path; that's not exercised
/// here because it requires a live `hermes` binary — the existing
/// HermesSkillsHubParser tests cover the parser side.
@Suite("SkillsViewModel hub filter")
@MainActor
struct SkillsViewModelHubFilterTests {

    /// Three skills spanning two sources — enough to prove the filter
    /// matches on name and on description, case-insensitively.
    private let fixtureSkills: [HermesHubSkill] = [
        HermesHubSkill(
            identifier: "honcho",
            name: "honcho",
            description: "Memory provider for chat-scoped facts.",
            source: "github"
        ),
        HermesHubSkill(
            identifier: "1password",
            name: "1password",
            description: "Set up and use 1Password integration.",
            source: "official"
        ),
        HermesHubSkill(
            identifier: "spotify",
            name: "spotify",
            description: "Spotify skill — playback control via OAuth.",
            source: "official"
        ),
    ]

    /// Seeds a fresh view model with the stub browse cache, applies
    /// `query` against the "all" source, and runs the search.
    private func searchedModel(query: String) -> SkillsViewModel {
        let model = SkillsViewModel(context: .local)
        model.lastBrowseResults = fixtureSkills
        model.hubSource = "all"
        model.hubQuery = query
        model.searchHub()
        return model
    }

    @Test func allSourcesFilterMatchesByName() {
        let model = searchedModel(query: "honcho")
        #expect(model.hubResults.count == 1)
        #expect(model.hubResults.first?.identifier == "honcho")
        #expect(model.isHubLoading == false)
        #expect(model.hubMessage == nil)
    }

    @Test func allSourcesFilterMatchesByDescription() {
        let model = searchedModel(query: "OAuth")
        #expect(model.hubResults.count == 1)
        #expect(model.hubResults.first?.identifier == "spotify")
    }

    @Test func allSourcesFilterIsCaseInsensitive() {
        let model = searchedModel(query: "HONCHO")
        #expect(model.hubResults.count == 1)
        #expect(model.hubResults.first?.identifier == "honcho")
    }

    @Test func allSourcesFilterEmptyMatchSetsMessage() {
        let model = searchedModel(query: "ringtone")
        #expect(model.hubResults.isEmpty)
        #expect(model.hubMessage == "No matches")
    }

    /// Empty query should fall through to `browseHub()`, which on
    /// `.local` with no Hermes installed will set isHubLoading=true
    /// and not block the test. We just assert the early-return guard
    /// kicked in by checking the cache was untouched.
    @Test func emptyQueryFallsThroughToBrowse() {
        let model = SkillsViewModel(context: .local)
        model.lastBrowseResults = fixtureSkills
        model.hubSource = "all"
        model.hubQuery = ""
        let cachedBefore = model.lastBrowseResults
        model.searchHub()
        #expect(model.lastBrowseResults == cachedBefore)
    }
}
|
||||
@@ -58,6 +58,9 @@ public final class CitadelServerTransport: ServerTransport, @unchecked Sendable
|
||||
|
||||
/// Shared directory under which cached SQLite snapshots land. On
|
||||
/// iOS this maps to `<Caches>/scarf/snapshots/<server-id>/`.
|
||||
/// Stable per-server cache directory. Was used by the snapshot
|
||||
/// pipeline pre-v2.7; kept for the cache-cleanup migration that
|
||||
/// purges old snapshot files at first launch on the new build.
|
||||
private let snapshotBaseDir: URL
|
||||
|
||||
/// Actor-serialized access to the one shared `SSHClient`. Opens
|
||||
@@ -159,19 +162,108 @@ public final class CitadelServerTransport: ServerTransport, @unchecked Sendable
|
||||
AsyncThrowingStream { $0.finish() }
|
||||
}
|
||||
|
||||
// MARK: - ServerTransport: SQLite snapshot
|
||||
// MARK: - ServerTransport: script streaming
|
||||
|
||||
public func snapshotSQLite(remotePath: String) throws -> URL {
|
||||
try runSync { try await self.asyncSnapshotSQLite(remotePath: remotePath) }
|
||||
/// Pipe `script` to `/bin/sh -s` over Citadel's exec channel.
|
||||
///
|
||||
/// **Why base64.** Citadel's `executeCommandStream` doesn't expose
|
||||
/// stdin in the version we're on, so we can't just open `sh -s` and
|
||||
/// write the script. Instead we encode the script as base64, decode
|
||||
/// it on the remote inline, and pipe the result into `sh`:
|
||||
///
|
||||
/// printf '%s' '<b64>' | base64 -d | /bin/sh
|
||||
///
|
||||
/// `base64 -d` is universally available on Linux/macOS. The base64
|
||||
/// blob travels as a single shell-safe argv token, so multi-line
|
||||
/// scripts with `"$VAR"` references and nested quotes survive
|
||||
/// untouched — same correctness guarantee as `SSHScriptRunner`'s
|
||||
/// stdin-pipe approach.
|
||||
public func streamScript(_ script: String, timeout: TimeInterval) async throws -> ProcessResult {
|
||||
try await ScarfMon.measureAsync(.transport, "ssh.streamScript") {
|
||||
try await _streamScriptImpl(script, timeout: timeout)
|
||||
}
|
||||
}
|
||||
|
||||
/// Path where the most recent successful snapshot was written —
|
||||
/// returned even when the SSH connection is currently down. The
|
||||
/// data service falls back to this when `snapshotSQLite` throws so
|
||||
/// Dashboard / Sessions / Chat-history stay viewable while the
|
||||
/// phone is offline.
|
||||
public var cachedSnapshotPath: URL? {
|
||||
snapshotBaseDir.appendingPathComponent("state.db")
|
||||
private func _streamScriptImpl(_ script: String, timeout: TimeInterval) async throws -> ProcessResult {
|
||||
let scriptBytes = Data(script.utf8)
|
||||
let b64 = scriptBytes.base64EncodedString()
|
||||
// Prepend the same PATH guard that `asyncRunProcess` uses so
|
||||
// base64 + sh resolve on hosts where they live in non-default
|
||||
// prefixes. Most distros have base64 in /usr/bin but
|
||||
// homebrew-installed coreutils in /opt/homebrew/bin would
|
||||
// otherwise be invisible from a stripped-PATH exec channel.
|
||||
let cmd = "PATH=\"$HOME/.local/bin:/opt/homebrew/bin:/usr/local/bin:$PATH\" "
|
||||
+ "printf '%s' '\(b64)' | base64 -d | /bin/sh"
|
||||
return try await runScript(cmd, timeout: timeout)
|
||||
}
|
||||
|
||||
private func runScript(_ cmd: String, timeout: TimeInterval) async throws -> ProcessResult {
|
||||
let client = try await connectionHolder.ssh()
|
||||
let stream: AsyncThrowingStream<ExecCommandOutput, Error>
|
||||
do {
|
||||
stream = try await client.executeCommandStream(cmd)
|
||||
} catch {
|
||||
throw TransportError.other(message: "Failed to start exec stream: \(error.localizedDescription)")
|
||||
}
|
||||
// Drain in a child task and race against a sleep so a wedged remote
|
||||
// sqlite3 (or a mid-stream Citadel transport failure) can't hang the
|
||||
// caller indefinitely. Mirrors the busy-wait deadline that
|
||||
// SSHScriptRunner enforces on Mac.
|
||||
return try await withThrowingTaskGroup(of: ProcessResult?.self) { group in
|
||||
group.addTask {
|
||||
var stdout = Data()
|
||||
var stderr = Data()
|
||||
var exitCode: Int32 = 0
|
||||
do {
|
||||
for try await chunk in stream {
|
||||
try Task.checkCancellation()
|
||||
switch chunk {
|
||||
case .stdout(var buf):
|
||||
if let s = buf.readString(length: buf.readableBytes) {
|
||||
stdout.append(Data(s.utf8))
|
||||
}
|
||||
case .stderr(var buf):
|
||||
if let s = buf.readString(length: buf.readableBytes) {
|
||||
stderr.append(Data(s.utf8))
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch let failed as SSHClient.CommandFailed {
|
||||
// Genuine remote non-zero exit — surface as
|
||||
// ProcessResult so the caller's existing exit-code
|
||||
// handling fires (mapped to BackendError.sqlite by
|
||||
// RemoteSQLiteBackend).
|
||||
exitCode = Int32(failed.exitCode)
|
||||
} catch is CancellationError {
|
||||
throw TransportError.timeout(seconds: timeout, partialStdout: stdout)
|
||||
} catch {
|
||||
// Transport-level failure (host unreachable, channel
|
||||
// dropped, ControlMaster died, NIO read error). Throw
|
||||
// as a typed TransportError so RemoteSQLiteBackend
|
||||
// routes it to BackendError.transport rather than
|
||||
// misclassifying as a sqlite crash via a fake -1 exit.
|
||||
throw TransportError.other(
|
||||
message: "SSH stream failed: \(error.localizedDescription)"
|
||||
)
|
||||
}
|
||||
return ProcessResult(exitCode: exitCode, stdout: stdout, stderr: stderr)
|
||||
}
|
||||
group.addTask {
|
||||
try await Task.sleep(nanoseconds: UInt64(timeout * 1_000_000_000))
|
||||
return nil
|
||||
}
|
||||
guard let first = try await group.next() else {
|
||||
group.cancelAll()
|
||||
throw TransportError.other(message: "SSH stream produced no result")
|
||||
}
|
||||
group.cancelAll()
|
||||
if let result = first {
|
||||
return result
|
||||
}
|
||||
// Timeout fired first — drain task gets cancelled by the
|
||||
// group cancel above; surface as a typed timeout.
|
||||
throw TransportError.timeout(seconds: timeout, partialStdout: Data())
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - ServerTransport: watching
|
||||
@@ -180,14 +272,32 @@ public final class CitadelServerTransport: ServerTransport, @unchecked Sendable
|
||||
// Polling-based, identical in shape to `SSHTransport`'s remote-
|
||||
// watch fallback: stat each path, yield `.anyChanged` when any
|
||||
// mtime shifts. 3s tick keeps bandwidth low.
|
||||
//
|
||||
// ScarfMon — A1 instrumentation:
|
||||
// - `ios.fileWatcher.tick` (interval) — full poll cycle latency,
|
||||
// includes the SSH stat round-trips. Pre-fix this is what an
|
||||
// "out of sync" user is feeling: anything > 1500 ms means
|
||||
// the channel is congested or the host is slow.
|
||||
// - `ios.fileWatcher.delta` (event) — fires only when the
|
||||
// signature actually changed. Low ratio (delta count / tick
|
||||
// count) means we're polling more aggressively than the
|
||||
// change rate warrants — opens the door to dropping the 3s
|
||||
// cadence on LAN.
|
||||
// - `ios.fileWatcher.paths` (event with bytes=count) — number
|
||||
// of paths watched per cycle, helps explain a slow tick when
|
||||
// the project list grows.
|
||||
AsyncStream { continuation in
|
||||
let task = Task.detached { [weak self] in
|
||||
var lastSignature = ""
|
||||
while !Task.isCancelled {
|
||||
guard let self else { break }
|
||||
let current = await self.buildWatchSignature(for: paths)
|
||||
ScarfMon.event(.transport, "ios.fileWatcher.paths", count: 1, bytes: paths.count)
|
||||
let current = await ScarfMon.measureAsync(.transport, "ios.fileWatcher.tick") {
|
||||
await self.buildWatchSignature(for: paths)
|
||||
}
|
||||
if !current.isEmpty, current != lastSignature {
|
||||
if !lastSignature.isEmpty {
|
||||
ScarfMon.event(.transport, "ios.fileWatcher.delta", count: 1)
|
||||
continuation.yield(.anyChanged)
|
||||
}
|
||||
lastSignature = current
|
||||
@@ -397,101 +507,6 @@ public final class CitadelServerTransport: ServerTransport, @unchecked Sendable
|
||||
return ProcessResult(exitCode: exitCode, stdout: stdout, stderr: stderr)
|
||||
}
|
||||
|
||||
private func asyncSnapshotSQLite(remotePath: String) async throws -> URL {
|
||||
// Same flow as SSHTransport: run `sqlite3 .backup` on the remote
|
||||
// (WAL-safe), flip out of WAL mode on the snapshot, then SFTP
|
||||
// the backup file down to the local cache.
|
||||
try? FileManager.default.createDirectory(at: snapshotBaseDir, withIntermediateDirectories: true)
|
||||
let localURL = snapshotBaseDir.appendingPathComponent("state.db")
|
||||
let client = try await connectionHolder.ssh()
|
||||
let remoteTmp = "/tmp/scarf-snapshot-\(UUID().uuidString).db"
|
||||
// Double-quote paths; $HOME expansion happens inside double quotes.
|
||||
let rewritten = Self.rewriteHomeRelative(remotePath)
|
||||
|
||||
// Prepend the same PATH prefix `asyncRunProcess` uses so `sqlite3`
|
||||
// resolves on hosts where it lives in /usr/local/bin or
|
||||
// /opt/homebrew/bin (issue #56). Citadel's bare exec channel
|
||||
// inherits a stripped PATH (typically `/usr/bin:/bin` on Linux);
|
||||
// without this, statically-linked or custom-prefix sqlite3
|
||||
// installs fail "command not found" at exit 127.
|
||||
let backupScript =
|
||||
#"PATH="$HOME/.local/bin:/opt/homebrew/bin:/usr/local/bin:$PATH" "#
|
||||
+ #"sqlite3 "\#(rewritten)" ".backup '\#(remoteTmp)'" && sqlite3 '\#(remoteTmp)' "PRAGMA journal_mode=DELETE;" > /dev/null"#
|
||||
|
||||
// Drive `executeCommandStream` instead of `executeCommand` so we
|
||||
// capture stderr regardless of exit code (issue #56). Pre-fix
|
||||
// a non-zero exit threw `CommandFailed` and discarded the buffer
|
||||
// — surfaced as the unhelpful "Citadel.SSHClient.CommandFailed
|
||||
// error 1" banner. Now we propagate the real stderr so
|
||||
// `HermesDataService.humanize` can translate "sqlite3: command
|
||||
// not found" / "no such file" / "permission denied" into the
|
||||
// dashboard banner with actionable copy.
|
||||
let stream: AsyncThrowingStream<ExecCommandOutput, Error>
|
||||
do {
|
||||
stream = try await client.executeCommandStream(backupScript)
|
||||
} catch {
|
||||
throw NSError(
|
||||
domain: "CitadelServerTransport",
|
||||
code: -1,
|
||||
userInfo: [NSLocalizedDescriptionKey: "Failed to start snapshot stream: \(error.localizedDescription)"]
|
||||
)
|
||||
}
|
||||
var stdout = Data()
|
||||
var stderr = Data()
|
||||
var exitCode: Int32 = 0
|
||||
do {
|
||||
for try await chunk in stream {
|
||||
switch chunk {
|
||||
case .stdout(var buf):
|
||||
if let s = buf.readString(length: buf.readableBytes) {
|
||||
stdout.append(Data(s.utf8))
|
||||
}
|
||||
case .stderr(var buf):
|
||||
if let s = buf.readString(length: buf.readableBytes) {
|
||||
stderr.append(Data(s.utf8))
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch let failed as SSHClient.CommandFailed {
|
||||
exitCode = Int32(failed.exitCode)
|
||||
} catch {
|
||||
stderr.append(Data(error.localizedDescription.utf8))
|
||||
exitCode = -1
|
||||
}
|
||||
if exitCode != 0 {
|
||||
// Combine stdout + stderr into the error message — sqlite3
|
||||
// sometimes prints "Error: ..." on stdout depending on the
|
||||
// remote shell. HermesDataService.humanize keys off
|
||||
// substrings like "sqlite3: command not found",
|
||||
// "permission denied", "no such file", so as long as one of
|
||||
// them ends up in the message we get a useful banner.
|
||||
let messageBytes = stderr.isEmpty ? stdout : stderr
|
||||
let message = String(data: messageBytes, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
|
||||
throw NSError(
|
||||
domain: "CitadelServerTransport",
|
||||
code: Int(exitCode),
|
||||
userInfo: [
|
||||
NSLocalizedDescriptionKey: message.isEmpty
|
||||
? "Snapshot exited \(exitCode) with no output (likely sqlite3 missing on remote)"
|
||||
: message
|
||||
]
|
||||
)
|
||||
}
|
||||
|
||||
// SFTP-download the remote tmp into our local snapshot cache.
|
||||
let sftp = try await connectionHolder.sftp()
|
||||
let data: Data = try await sftp.withFile(filePath: remoteTmp, flags: [.read]) { file in
|
||||
let buf = try await file.readAll()
|
||||
return Data(buffer: buf)
|
||||
}
|
||||
try data.write(to: localURL, options: .atomic)
|
||||
|
||||
// Best-effort cleanup of the remote tmp.
|
||||
_ = try? await client.executeCommand("rm -f '\(remoteTmp)'")
|
||||
|
||||
return localURL
|
||||
}
|
||||
|
||||
// MARK: - Shell helpers
|
||||
|
||||
/// Minimal shell-argument joiner. Handles spaces + quotes; sufficient
|
||||
|
||||
@@ -70,10 +70,13 @@ public final class IOSDashboardViewModel {
|
||||
return
|
||||
}
|
||||
|
||||
stats = await dataService.fetchStats()
|
||||
recentSessions = await dataService.fetchSessions(limit: 5)
|
||||
allSessions = await dataService.fetchSessions(limit: 25)
|
||||
sessionPreviews = await dataService.fetchSessionPreviews(limit: 25)
|
||||
await ScarfMon.measureAsync(.sessionLoad, "ios.loadDashboard") {
|
||||
stats = await dataService.fetchStats()
|
||||
recentSessions = await dataService.fetchSessions(limit: 5)
|
||||
allSessions = await dataService.fetchSessions(limit: 25)
|
||||
sessionPreviews = await dataService.fetchSessionPreviews(limit: 25)
|
||||
}
|
||||
ScarfMon.event(.sessionLoad, "ios.allSessions.count", count: allSessions.count)
|
||||
|
||||
// Attribution lookup (pass-2 UX): load the session→project
|
||||
// sidecar + project registry once so Dashboard rows can show
|
||||
@@ -126,6 +129,7 @@ public final class IOSDashboardViewModel {
|
||||
|
||||
/// Called from the pull-to-refresh gesture.
|
||||
public func refresh() async {
|
||||
ScarfMon.event(.sessionLoad, "ios.dashboardRefresh.trigger", count: 1)
|
||||
await load()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,14 @@ struct ScarfIOSApp: App {
|
||||
)
|
||||
|
||||
init() {
|
||||
// ScarfMon — open-source perf instrumentation. Reads the
|
||||
// user-toggled mode from UserDefaults and installs the
|
||||
// matching backend set. Default is `.signpostOnly` so
|
||||
// Instruments-attached profiling works without users having
|
||||
// to opt in. The Diagnostics → Performance row in Settings
|
||||
// flips this between off / signpost-only / full.
|
||||
ScarfMonBoot.configure(mode: ScarfMonBoot.currentMode())
|
||||
|
||||
// Wire ScarfCore's transport factory to produce Citadel-backed
|
||||
// `ServerTransport`s for every `.ssh` context. Without this,
|
||||
// `ServerContext.makeTransport()` would fall back to the
|
||||
|
||||
@@ -66,7 +66,12 @@ struct ChatView: View {
|
||||
)!
|
||||
|
||||
var body: some View {
|
||||
VStack(spacing: 0) {
|
||||
// ScarfMon body-evaluation counter. Re-render churn during
|
||||
// streaming is one of the load-bearing perf signals; rendering
|
||||
// here costs ~one signpost emit + ring-buffer append (off the
|
||||
// hot path otherwise).
|
||||
let _: Void = ScarfMon.event(.chatRender, "ios.ChatView.body")
|
||||
return VStack(spacing: 0) {
|
||||
connectionBanner
|
||||
errorBanner
|
||||
projectContextBar
|
||||
@@ -395,7 +400,31 @@ struct ChatView: View {
|
||||
showSpinner: false
|
||||
)
|
||||
default:
|
||||
EmptyView()
|
||||
// v2.7: surface "Thinking…" while the agent's thought
|
||||
// stream is in flight without any visible message bytes.
|
||||
// Hermes reasoning models commonly take 3–8 s here and
|
||||
// the streaming bubble has nothing to render — the user
|
||||
// would otherwise see a stalled transcript. Disappears
|
||||
// the moment the first message chunk arrives.
|
||||
if controller.vm.isStreamingThoughtsOnly {
|
||||
connectionBannerStrip(
|
||||
text: "Thinking…",
|
||||
tint: ScarfColor.info,
|
||||
showSpinner: true
|
||||
)
|
||||
} else if controller.vm.isHydratingTools {
|
||||
// v2.7 — Phase 2 tool-call hydration is in flight.
|
||||
// Bare conversation skeleton is already on screen;
|
||||
// this banner tells the user the tool cards are
|
||||
// about to fill in.
|
||||
connectionBannerStrip(
|
||||
text: "Loading tool details…",
|
||||
tint: ScarfColor.info,
|
||||
showSpinner: true
|
||||
)
|
||||
} else {
|
||||
EmptyView()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -448,14 +477,15 @@ struct ChatView: View {
|
||||
}
|
||||
|
||||
private var composer: some View {
|
||||
VStack(alignment: .leading, spacing: 4) {
|
||||
VStack(alignment: .leading, spacing: ScarfSpace.s2) {
|
||||
if !controller.attachments.isEmpty || isEncodingAttachment || attachmentError != nil {
|
||||
attachmentStrip
|
||||
}
|
||||
composerRow
|
||||
}
|
||||
.padding(.horizontal, 12)
|
||||
.padding(.vertical, 8)
|
||||
.padding(.horizontal, ScarfSpace.s3)
|
||||
.padding(.top, ScarfSpace.s2)
|
||||
.padding(.bottom, ScarfSpace.s2)
|
||||
.background(.regularMaterial)
|
||||
#if canImport(PhotosUI)
|
||||
.photosPicker(
|
||||
@@ -536,18 +566,23 @@ struct ChatView: View {
|
||||
}
|
||||
|
||||
private var composerRow: some View {
|
||||
HStack(alignment: .bottom, spacing: 8) {
|
||||
HStack(alignment: .bottom, spacing: ScarfSpace.s2) {
|
||||
if supportsImagePrompts {
|
||||
Button {
|
||||
showPhotoPicker = true
|
||||
} label: {
|
||||
Image(systemName: "paperclip")
|
||||
.font(.system(size: 22))
|
||||
.foregroundStyle(.secondary)
|
||||
.padding(.bottom, 4)
|
||||
.font(.system(size: 20, weight: .regular))
|
||||
.foregroundStyle(
|
||||
attachDisabled
|
||||
? ScarfColor.foregroundFaint
|
||||
: ScarfColor.foregroundMuted
|
||||
)
|
||||
.frame(width: 44, height: 44)
|
||||
.contentShape(Rectangle())
|
||||
}
|
||||
.buttonStyle(.plain)
|
||||
.disabled(controller.state != .ready || controller.attachments.count >= Self.maxAttachments)
|
||||
.disabled(attachDisabled)
|
||||
.accessibilityLabel("Attach image")
|
||||
}
|
||||
TextField(
|
||||
@@ -555,8 +590,19 @@ struct ChatView: View {
|
||||
text: $controller.draft,
|
||||
axis: .vertical
|
||||
)
|
||||
.textFieldStyle(.roundedBorder)
|
||||
.textFieldStyle(.plain)
|
||||
.lineLimit(1...5)
|
||||
.padding(.horizontal, ScarfSpace.s3)
|
||||
.padding(.vertical, ScarfSpace.s2)
|
||||
.frame(minHeight: 44)
|
||||
.background(
|
||||
RoundedRectangle(cornerRadius: ScarfRadius.xl, style: .continuous)
|
||||
.fill(ScarfColor.backgroundSecondary)
|
||||
)
|
||||
.overlay(
|
||||
RoundedRectangle(cornerRadius: ScarfRadius.xl, style: .continuous)
|
||||
.strokeBorder(ScarfColor.borderStrong, lineWidth: 1)
|
||||
)
|
||||
.disabled(controller.state != .ready)
|
||||
.submitLabel(.send)
|
||||
.focused($composerFocused)
|
||||
@@ -592,13 +638,32 @@ struct ChatView: View {
|
||||
}
|
||||
}
|
||||
|
||||
// Big circular send button. Filled with the brand accent when
|
||||
// ready, swapped to a flat gray when disabled — opacity dims
|
||||
// alone read as "not quite tappable" (issue #69), the explicit
|
||||
// color swap makes the state unambiguous in both light and
|
||||
// dark mode.
|
||||
Button {
|
||||
Task { await controller.send() }
|
||||
} label: {
|
||||
Image(systemName: "arrow.up.circle.fill")
|
||||
.font(.system(size: 28))
|
||||
ZStack {
|
||||
Circle()
|
||||
.fill(canSendComposer
|
||||
? ScarfColor.accent
|
||||
: ScarfColor.backgroundTertiary)
|
||||
Image(systemName: "arrow.up")
|
||||
.font(.system(size: 18, weight: .semibold))
|
||||
.foregroundStyle(canSendComposer
|
||||
? ScarfColor.onAccent
|
||||
: ScarfColor.foregroundFaint)
|
||||
}
|
||||
.frame(width: 44, height: 44)
|
||||
.contentShape(Circle())
|
||||
.animation(ScarfAnimation.fast, value: canSendComposer)
|
||||
}
|
||||
.buttonStyle(.plain)
|
||||
.disabled(!canSendComposer)
|
||||
.accessibilityLabel("Send message")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -610,6 +675,12 @@ struct ChatView: View {
|
||||
return !controller.draft.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
|
||||
}
|
||||
|
||||
/// Mirror of the `.disabled(...)` predicate on the paperclip button.
|
||||
/// Pulled out so the button's foreground branch reads cleanly.
|
||||
private var attachDisabled: Bool {
|
||||
controller.state != .ready || controller.attachments.count >= Self.maxAttachments
|
||||
}
|
||||
|
||||
/// Pull JPEG/PNG bytes out of each PhotosPickerItem and feed them
|
||||
/// through ImageEncoder. Detached so the heavyweight resize +
|
||||
/// JPEG-encode work doesn't block MainActor; the resulting
|
||||
@@ -1041,10 +1112,21 @@ final class ChatController {
|
||||
/// the start intent so the preflight sheet can replay it after the
|
||||
/// user picks a model. Reads via `context.readText` (transport-
|
||||
/// aware) and parses with the ScarfCore YAML parser — same path
|
||||
/// `IOSSettingsViewModel.load` uses, just synchronous because the
|
||||
/// preflight runs before any `state = .connecting` UI transition.
|
||||
private func passModelPreflight(intent: PendingStart) -> Bool {
|
||||
let raw = context.readText(context.paths.configYAML) ?? ""
|
||||
/// `IOSSettingsViewModel.load` uses.
|
||||
///
|
||||
/// **Off MainActor.** `context.readText` synchronously calls
|
||||
/// `transport.fileExists` + `transport.readFile`; on a remote
|
||||
/// ScarfGo context that's a blocking SSH round-trip that, before
|
||||
/// this fix, ran on the controller's `@MainActor` and stalled the
|
||||
/// UI for seconds during connect — long enough for iOS's
|
||||
/// non-responsive-app watchdog to kill the process if the user
|
||||
/// kept tapping (the typing TestFlight crash report). Reading
|
||||
/// detached pushes the I/O off MainActor; the result and the
|
||||
/// `pendingStartIntent` / `modelPreflightReason` writes hop back.
|
||||
private func passModelPreflight(intent: PendingStart) async -> Bool {
|
||||
let path = context.paths.configYAML
|
||||
let ctx = context
|
||||
let raw = await Task.detached { ctx.readText(path) ?? "" }.value
|
||||
let config = HermesConfig(yaml: raw)
|
||||
let result = ModelPreflight.check(config)
|
||||
if result.isConfigured { return true }
|
||||
@@ -1138,7 +1220,7 @@ final class ChatController {
|
||||
/// can type and hit send immediately.
|
||||
func start() async {
|
||||
if state == .connecting || state == .ready { return }
|
||||
guard passModelPreflight(intent: .fresh) else { return }
|
||||
guard await passModelPreflight(intent: .fresh) else { return }
|
||||
state = .connecting
|
||||
vm.reset()
|
||||
let client = ACPClient.forIOSApp(
|
||||
@@ -1201,6 +1283,12 @@ final class ChatController {
|
||||
/// assistant reply streams back as ACP notifications handled by
|
||||
/// the event task.
|
||||
func send() async {
|
||||
await ScarfMon.measureAsync(.chatStream, "ios.send") {
|
||||
await _sendImpl()
|
||||
}
|
||||
}
|
||||
|
||||
private func _sendImpl() async {
|
||||
guard state == .ready, let client else { return }
|
||||
let text = draft.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
// v0.12+ allows image-only sends — vision models accept "describe
|
||||
@@ -1305,7 +1393,10 @@ final class ChatController {
|
||||
let stream = await client.events
|
||||
for await event in stream {
|
||||
guard !Task.isCancelled else { break }
|
||||
self?.vm.handleACPEvent(event)
|
||||
ScarfMon.event(.chatStream, "ios.acpEvent", count: 1)
|
||||
ScarfMon.measure(.chatStream, "ios.handleACPEvent") {
|
||||
self?.vm.handleACPEvent(event)
|
||||
}
|
||||
}
|
||||
// Stream ended — if we weren't explicitly cancelled the
|
||||
// channel died (EOF on stdin/out, write to dead pipe,
|
||||
@@ -1651,7 +1742,7 @@ final class ChatController {
|
||||
} else {
|
||||
intent = .fresh
|
||||
}
|
||||
guard passModelPreflight(intent: intent) else { return }
|
||||
guard await passModelPreflight(intent: intent) else { return }
|
||||
state = .connecting
|
||||
let client = ACPClient.forIOSApp(
|
||||
context: context,
|
||||
@@ -1735,7 +1826,13 @@ final class ChatController {
|
||||
/// to `session/load` if the remote doesn't support `session/resume`
|
||||
/// (Hermes < 0.9.x).
|
||||
func startResuming(sessionID: String) async {
|
||||
guard passModelPreflight(intent: .resume(sessionID: sessionID)) else { return }
|
||||
await ScarfMon.measureAsync(.sessionLoad, "ios.startResuming") {
|
||||
await _startResumingImpl(sessionID: sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
private func _startResumingImpl(sessionID: String) async {
|
||||
guard await passModelPreflight(intent: .resume(sessionID: sessionID)) else { return }
|
||||
await stop()
|
||||
vm.reset()
|
||||
// Clear eagerly so a lingering project name from a prior
|
||||
@@ -1899,6 +1996,11 @@ private struct MessageBubble: View, Equatable {
|
||||
}
|
||||
|
||||
var body: some View {
|
||||
// Per-bubble render counter. The streaming bubble
|
||||
// (`message.id == 0`) re-renders on every chunk; tracking the
|
||||
// count here is what tells us if a slow chat is bottlenecked
|
||||
// on body re-eval vs. event-loop delivery.
|
||||
let _: Void = ScarfMon.event(.chatRender, "ios.MessageBubble.body")
|
||||
if message.isToolResult {
|
||||
ToolResultRow(message: message)
|
||||
} else {
|
||||
|
||||
@@ -13,11 +13,24 @@ import ScarfDesign
|
||||
/// `HermesCapabilities.hasCurator` is true.
|
||||
struct CuratorView: View {
|
||||
@State private var viewModel: CuratorViewModel
|
||||
@Environment(\.hermesCapabilities) private var capabilitiesStore
|
||||
|
||||
// TODO(WS-9): add a read-only "Archived" section mirroring the Mac
|
||||
// surface (no per-row Restore/Prune mutations on iOS in this
|
||||
// release). Gate on `capabilitiesStore?.capabilities.hasCuratorArchive`.
|
||||
|
||||
init(context: ServerContext) {
|
||||
_viewModel = State(initialValue: CuratorViewModel(context: context))
|
||||
}
|
||||
|
||||
/// Whether the connected host runs curator synchronously. Threaded
|
||||
/// into `runNow` so v0.13+ hosts block-with-spinner; pre-v0.13 fire
|
||||
/// and forget. WS-9 will surface a richer iOS progress affordance
|
||||
/// alongside the read-only Archived section.
|
||||
private var archiveAvailable: Bool {
|
||||
capabilitiesStore?.capabilities.hasCuratorArchive ?? false
|
||||
}
|
||||
|
||||
var body: some View {
|
||||
List {
|
||||
Section {
|
||||
@@ -115,7 +128,7 @@ struct CuratorView: View {
|
||||
private var actionFooter: some View {
|
||||
HStack(spacing: 8) {
|
||||
Button {
|
||||
Task { await viewModel.runNow() }
|
||||
Task { await viewModel.runNow(synchronous: archiveAvailable, timeout: 600) }
|
||||
} label: {
|
||||
Label("Run now", systemImage: "play.fill")
|
||||
}
|
||||
|
||||
@@ -0,0 +1,243 @@
|
||||
import SwiftUI
|
||||
import ScarfCore
|
||||
import ScarfDesign
|
||||
|
||||
/// Read-only Kanban task detail sheet for iOS. Mirrors the Mac
|
||||
/// inspector's 3-tab layout (Comments | Events | Runs) but routes
|
||||
/// through a `NavigationStack` for iOS-native chrome and dismisses
|
||||
/// to the parent kanban view, not to the board.
|
||||
///
|
||||
/// No mutations in v2.7.5 — write actions land on iOS in a later
|
||||
/// release via a bottom action bar with explicit verb buttons (no
|
||||
/// drag-drop).
|
||||
struct ScarfGoKanbanDetailSheet: View {
|
||||
let taskId: String
|
||||
let context: ServerContext
|
||||
|
||||
@Environment(\.dismiss) private var dismiss
|
||||
|
||||
@State private var detail: HermesKanbanTaskDetail?
|
||||
@State private var runs: [HermesKanbanRun] = []
|
||||
@State private var isLoading = true
|
||||
@State private var error: String?
|
||||
@State private var selectedTab: DetailTab = .comments
|
||||
|
||||
enum DetailTab: String, CaseIterable, Identifiable {
|
||||
case comments = "Comments"
|
||||
case events = "Events"
|
||||
case runs = "Runs"
|
||||
var id: String { rawValue }
|
||||
}
|
||||
|
||||
var body: some View {
|
||||
NavigationStack {
|
||||
content
|
||||
.navigationTitle(detail?.task.title ?? "Task")
|
||||
.navigationBarTitleDisplayMode(.inline)
|
||||
.toolbar {
|
||||
ToolbarItem(placement: .topBarTrailing) {
|
||||
Button("Done") { dismiss() }
|
||||
}
|
||||
}
|
||||
}
|
||||
.task(id: taskId) { await load() }
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var content: some View {
|
||||
if isLoading && detail == nil {
|
||||
ProgressView("Loading…")
|
||||
.frame(maxWidth: .infinity, maxHeight: .infinity)
|
||||
} else if let error {
|
||||
ContentUnavailableView {
|
||||
Label("Couldn't load task", systemImage: "exclamationmark.triangle")
|
||||
} description: {
|
||||
Text(error)
|
||||
} actions: {
|
||||
Button("Try Again") {
|
||||
Task { await load() }
|
||||
}
|
||||
}
|
||||
} else if let detail {
|
||||
ScrollView {
|
||||
VStack(alignment: .leading, spacing: 16) {
|
||||
headerCard(detail.task)
|
||||
if let body = detail.task.body, !body.isEmpty {
|
||||
if let attributed = try? AttributedString(markdown: body) {
|
||||
Text(attributed)
|
||||
.font(.body)
|
||||
} else {
|
||||
Text(body)
|
||||
.font(.body)
|
||||
}
|
||||
}
|
||||
Picker("Section", selection: $selectedTab) {
|
||||
ForEach(DetailTab.allCases) { tab in
|
||||
Text(tab.rawValue).tag(tab)
|
||||
}
|
||||
}
|
||||
.pickerStyle(.segmented)
|
||||
switch selectedTab {
|
||||
case .comments: commentsSection(detail.comments)
|
||||
case .events: eventsSection(detail.events)
|
||||
case .runs: runsSection
|
||||
}
|
||||
}
|
||||
.padding()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func headerCard(_ task: HermesKanbanTask) -> some View {
|
||||
VStack(alignment: .leading, spacing: 8) {
|
||||
HStack(spacing: 6) {
|
||||
ScarfBadge(task.status.lowercased(), kind: badgeKind(for: task.status))
|
||||
if let assignee = task.assignee, !assignee.isEmpty {
|
||||
ScarfBadge(assignee, kind: .neutral)
|
||||
}
|
||||
if let workspace = task.workspaceKind {
|
||||
ScarfBadge(workspace, kind: .neutral)
|
||||
}
|
||||
if let tenant = task.tenant, !tenant.isEmpty {
|
||||
ScarfBadge(tenant, kind: .brand)
|
||||
}
|
||||
}
|
||||
if let priority = task.priority {
|
||||
Text("Priority \(priority)")
|
||||
.font(.caption)
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func commentsSection(_ comments: [HermesKanbanComment]) -> some View {
|
||||
VStack(alignment: .leading, spacing: 8) {
|
||||
if comments.isEmpty {
|
||||
Text("No comments yet.")
|
||||
.font(.callout)
|
||||
.foregroundStyle(.tertiary)
|
||||
} else {
|
||||
ForEach(comments) { comment in
|
||||
VStack(alignment: .leading, spacing: 2) {
|
||||
HStack {
|
||||
Text(comment.author)
|
||||
.font(.subheadline)
|
||||
.bold()
|
||||
Text(comment.createdAt)
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.tertiary)
|
||||
}
|
||||
Text(comment.body)
|
||||
.font(.body)
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
.padding(8)
|
||||
.background(ScarfColor.backgroundSecondary.opacity(0.5))
|
||||
.clipShape(RoundedRectangle(cornerRadius: ScarfRadius.md, style: .continuous))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func eventsSection(_ events: [HermesKanbanEvent]) -> some View {
|
||||
VStack(alignment: .leading, spacing: 6) {
|
||||
if events.isEmpty {
|
||||
Text("No events yet.")
|
||||
.font(.callout)
|
||||
.foregroundStyle(.tertiary)
|
||||
} else {
|
||||
ForEach(events) { event in
|
||||
HStack(alignment: .top) {
|
||||
VStack(alignment: .leading, spacing: 2) {
|
||||
Text(event.kind)
|
||||
.font(.subheadline)
|
||||
.bold()
|
||||
Text(event.createdAt)
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.tertiary)
|
||||
}
|
||||
Spacer()
|
||||
}
|
||||
.padding(.vertical, 4)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private var runsSection: some View {
|
||||
VStack(alignment: .leading, spacing: 8) {
|
||||
if runs.isEmpty {
|
||||
Text("No runs yet.")
|
||||
.font(.callout)
|
||||
.foregroundStyle(.tertiary)
|
||||
} else {
|
||||
ForEach(runs) { run in
|
||||
VStack(alignment: .leading, spacing: 2) {
|
||||
HStack {
|
||||
ScarfBadge(run.outcome ?? run.status, kind: outcomeKind(run.outcome ?? run.status))
|
||||
if let profile = run.profile {
|
||||
Text(profile)
|
||||
.font(.subheadline)
|
||||
}
|
||||
Spacer()
|
||||
Text(run.startedAt)
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.tertiary)
|
||||
}
|
||||
if let summary = run.summary, !summary.isEmpty {
|
||||
Text(summary)
|
||||
.font(.caption)
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
if let err = run.error, !err.isEmpty {
|
||||
Text(err)
|
||||
.font(.caption)
|
||||
.foregroundStyle(.red)
|
||||
}
|
||||
}
|
||||
.padding(8)
|
||||
.background(ScarfColor.backgroundSecondary.opacity(0.4))
|
||||
.clipShape(RoundedRectangle(cornerRadius: ScarfRadius.md, style: .continuous))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func badgeKind(for status: String) -> ScarfBadgeKind {
|
||||
switch KanbanStatus.from(status) {
|
||||
case .running, .ready: return .info
|
||||
case .done: return .success
|
||||
case .blocked: return .warning
|
||||
default: return .neutral
|
||||
}
|
||||
}
|
||||
|
||||
private func outcomeKind(_ outcome: String) -> ScarfBadgeKind {
|
||||
switch outcome.lowercased() {
|
||||
case "completed", "done": return .success
|
||||
case "blocked": return .warning
|
||||
case "crashed", "timed_out", "spawn_failed", "failed": return .danger
|
||||
case "running": return .info
|
||||
default: return .neutral
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Loading
|
||||
|
||||
private func load() async {
|
||||
isLoading = true
|
||||
defer { isLoading = false }
|
||||
let svc = KanbanService(context: context)
|
||||
do {
|
||||
async let detailLoaded = svc.show(taskId: taskId)
|
||||
async let runsLoaded = svc.runs(taskId: taskId)
|
||||
self.detail = try await detailLoaded
|
||||
self.runs = (try? await runsLoaded) ?? []
|
||||
self.error = nil
|
||||
} catch let err as KanbanError {
|
||||
self.error = err.errorDescription
|
||||
} catch {
|
||||
self.error = error.localizedDescription
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,236 @@
|
||||
import SwiftUI
|
||||
import ScarfCore
|
||||
import ScarfDesign
|
||||
|
||||
/// Read-only Kanban surface for iOS / iPadOS, scoped to one project's
|
||||
/// tenant. Renders the 5 standard board columns as a horizontally-
|
||||
/// paged `TabView` of single-column lists — HIG-friendly on iPhone
|
||||
/// where a 5-column grid would force unreadable card widths.
|
||||
///
|
||||
/// Mutations + drag-drop are deferred to a later release per
|
||||
/// CLAUDE.md's iOS catch-up policy. Tap a card to open a read-only
|
||||
/// detail sheet that surfaces the same comments / events / runs the
|
||||
/// Mac inspector shows. iPad gets the same view (no drag-drop yet) —
|
||||
/// same UI for both form factors keeps the future mutation path
|
||||
/// straightforward.
|
||||
struct ScarfGoKanbanView: View {
|
||||
let project: ProjectEntry
|
||||
let context: ServerContext
|
||||
|
||||
@State private var tasks: [HermesKanbanTask] = []
|
||||
@State private var stats: HermesKanbanStats = .empty
|
||||
@State private var isLoading = true
|
||||
@State private var error: String?
|
||||
@State private var selectedColumn: KanbanBoardColumn = .upNext
|
||||
@State private var inspectorTaskId: String?
|
||||
@State private var pollTask: Task<Void, Never>?
|
||||
|
||||
private var resolvedTenant: String? {
|
||||
KanbanTenantReader(context: context).tenant(forProjectPath: project.path)
|
||||
}
|
||||
|
||||
var body: some View {
|
||||
VStack(spacing: 0) {
|
||||
if !stats.glanceString.isEmpty {
|
||||
Text(stats.glanceString)
|
||||
.font(.caption)
|
||||
.foregroundStyle(.secondary)
|
||||
.padding(.vertical, 4)
|
||||
}
|
||||
columnPicker
|
||||
.padding(.horizontal)
|
||||
.padding(.bottom, 4)
|
||||
Divider()
|
||||
content
|
||||
}
|
||||
.background(ScarfColor.backgroundPrimary)
|
||||
.task(id: project.id) {
|
||||
await refresh()
|
||||
startPolling()
|
||||
}
|
||||
.onDisappear { pollTask?.cancel() }
|
||||
.sheet(item: Binding(
|
||||
get: { inspectorTaskId.map { TaskIDBox(id: $0) } },
|
||||
set: { inspectorTaskId = $0?.id }
|
||||
)) { box in
|
||||
ScarfGoKanbanDetailSheet(
|
||||
taskId: box.id,
|
||||
context: context
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
private var columnPicker: some View {
|
||||
Picker("Column", selection: $selectedColumn) {
|
||||
ForEach(visibleColumns, id: \.self) { column in
|
||||
Text("\(column.displayName) (\(taskCount(in: column)))").tag(column)
|
||||
}
|
||||
}
|
||||
.pickerStyle(.segmented)
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var content: some View {
|
||||
if let error {
|
||||
errorView(error)
|
||||
} else if isLoading && tasks.isEmpty {
|
||||
ProgressView()
|
||||
.frame(maxWidth: .infinity, maxHeight: .infinity)
|
||||
} else {
|
||||
taskList
|
||||
}
|
||||
}
|
||||
|
||||
private var taskList: some View {
|
||||
let rows = tasks(in: selectedColumn)
|
||||
return Group {
|
||||
if rows.isEmpty {
|
||||
ContentUnavailableView(
|
||||
emptyTitle(for: selectedColumn),
|
||||
systemImage: "rectangle.split.3x1",
|
||||
description: Text(emptyCopy(for: selectedColumn))
|
||||
)
|
||||
} else {
|
||||
List(rows) { task in
|
||||
Button {
|
||||
inspectorTaskId = task.id
|
||||
} label: {
|
||||
cardRow(task)
|
||||
}
|
||||
.buttonStyle(.plain)
|
||||
}
|
||||
.listStyle(.plain)
|
||||
.refreshable {
|
||||
await refresh()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func cardRow(_ task: HermesKanbanTask) -> some View {
|
||||
VStack(alignment: .leading, spacing: 4) {
|
||||
Text(task.title)
|
||||
.font(.headline)
|
||||
.foregroundStyle(.primary)
|
||||
.lineLimit(2)
|
||||
HStack(spacing: 8) {
|
||||
if let assignee = task.assignee, !assignee.isEmpty {
|
||||
Label(assignee, systemImage: "person.fill")
|
||||
.labelStyle(.titleAndIcon)
|
||||
.font(.caption)
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
if let workspace = task.workspaceKind {
|
||||
ScarfBadge(workspace, kind: .neutral)
|
||||
}
|
||||
if let priority = task.priority, priority >= 70 {
|
||||
ScarfBadge("p\(priority)", kind: priority >= 90 ? .danger : .warning)
|
||||
}
|
||||
Spacer()
|
||||
}
|
||||
if !task.skills.isEmpty {
|
||||
Text(task.skills.prefix(2).joined(separator: ", ") + (task.skills.count > 2 ? " +\(task.skills.count - 2)" : ""))
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.tertiary)
|
||||
.lineLimit(1)
|
||||
}
|
||||
}
|
||||
.padding(.vertical, 4)
|
||||
}
|
||||
|
||||
private func errorView(_ message: String) -> some View {
|
||||
ContentUnavailableView {
|
||||
Label("Couldn't load tasks", systemImage: "exclamationmark.triangle")
|
||||
} description: {
|
||||
Text(message)
|
||||
} actions: {
|
||||
Button("Try Again") {
|
||||
Task { await refresh() }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Loading
|
||||
|
||||
private func startPolling() {
|
||||
pollTask?.cancel()
|
||||
pollTask = Task {
|
||||
while !Task.isCancelled {
|
||||
try? await Task.sleep(nanoseconds: 5_000_000_000)
|
||||
if Task.isCancelled { break }
|
||||
await refresh()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func refresh() async {
|
||||
isLoading = true
|
||||
defer { isLoading = false }
|
||||
guard let tenant = resolvedTenant, !tenant.isEmpty else {
|
||||
tasks = []
|
||||
error = "No Kanban tenant has been minted for this project yet. Open the Kanban tab on the Mac app to mint one."
|
||||
return
|
||||
}
|
||||
let svc = KanbanService(context: context)
|
||||
let filter = KanbanListFilter(tenant: tenant)
|
||||
do {
|
||||
let polled = try await svc.list(filter)
|
||||
tasks = polled
|
||||
stats = (try? await svc.stats()) ?? .empty
|
||||
error = nil
|
||||
} catch let err as KanbanError {
|
||||
error = err.errorDescription
|
||||
} catch {
|
||||
self.error = error.localizedDescription
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Column projection
|
||||
|
||||
private var visibleColumns: [KanbanBoardColumn] {
|
||||
var cols: [KanbanBoardColumn] = []
|
||||
if !tasks(in: .triage).isEmpty { cols.append(.triage) }
|
||||
cols.append(contentsOf: [.upNext, .running, .blocked, .done])
|
||||
return cols
|
||||
}
|
||||
|
||||
private func taskCount(in column: KanbanBoardColumn) -> Int {
|
||||
tasks(in: column).count
|
||||
}
|
||||
|
||||
private func tasks(in column: KanbanBoardColumn) -> [HermesKanbanTask] {
|
||||
tasks.filter { KanbanStatus.from($0.status).boardColumn == column }
|
||||
.sorted { lhs, rhs in
|
||||
let lp = lhs.priority ?? 0
|
||||
let rp = rhs.priority ?? 0
|
||||
if lp != rp { return lp > rp }
|
||||
return (lhs.createdAt ?? "") > (rhs.createdAt ?? "")
|
||||
}
|
||||
}
|
||||
|
||||
private func emptyTitle(for column: KanbanBoardColumn) -> String {
|
||||
switch column {
|
||||
case .triage: return "Triage empty"
|
||||
case .upNext: return "Queue empty"
|
||||
case .running: return "No live workers"
|
||||
case .blocked: return "Nothing blocked"
|
||||
case .done: return "No completions yet"
|
||||
case .archived: return "No archived tasks"
|
||||
}
|
||||
}
|
||||
|
||||
private func emptyCopy(for column: KanbanBoardColumn) -> String {
|
||||
switch column {
|
||||
case .triage: return "No tasks waiting on a specifier."
|
||||
case .upNext: return "Drop a task on the Mac board, or create one with `hermes kanban create`."
|
||||
case .running: return "No workers are running tasks for this project right now."
|
||||
case .blocked: return "Nothing is blocked. When a worker hits a block, it'll show up here."
|
||||
case .done: return "Recent completions will land here."
|
||||
case .archived: return "Archived tasks are hidden by default."
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private struct TaskIDBox: Identifiable {
|
||||
let id: String
|
||||
}
|
||||
@@ -19,6 +19,7 @@ struct ProjectDetailView: View {
|
||||
let config: IOSServerConfig
|
||||
|
||||
@Environment(\.scarfGoCoordinator) private var coordinator
|
||||
@Environment(\.hermesCapabilities) private var capabilitiesStore
|
||||
|
||||
private static let sharedContextID: ServerID = ServerID(
|
||||
uuidString: "00000000-0000-0000-0000-0000000000A2"
|
||||
@@ -35,7 +36,7 @@ struct ProjectDetailView: View {
|
||||
@State private var lastDashboardMtime: Date?
|
||||
|
||||
enum DetailTab: Hashable {
|
||||
case dashboard, site, sessions
|
||||
case dashboard, site, sessions, kanban
|
||||
}
|
||||
|
||||
private var serverContext: ServerContext {
|
||||
@@ -55,6 +56,9 @@ struct ProjectDetailView: View {
|
||||
var tabs: [DetailTab] = [.dashboard]
|
||||
if siteWidget != nil { tabs.append(.site) }
|
||||
tabs.append(.sessions)
|
||||
if capabilitiesStore?.capabilities.hasKanban ?? false {
|
||||
tabs.append(.kanban)
|
||||
}
|
||||
return tabs
|
||||
}
|
||||
|
||||
@@ -111,6 +115,7 @@ struct ProjectDetailView: View {
|
||||
case .dashboard: return "Dashboard"
|
||||
case .site: return "Site"
|
||||
case .sessions: return "Sessions"
|
||||
case .kanban: return "Kanban"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,6 +134,8 @@ struct ProjectDetailView: View {
|
||||
}
|
||||
case .sessions:
|
||||
ProjectSessionsView_iOS(project: project)
|
||||
case .kanban:
|
||||
ScarfGoKanbanView(project: project, context: serverContext)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -102,17 +102,31 @@ struct WidgetView: View {
|
||||
}
|
||||
|
||||
private var unsupportedView: some View {
|
||||
VStack(alignment: .leading, spacing: 4) {
|
||||
Label(widget.title, systemImage: "questionmark.app.dashed")
|
||||
.font(.caption)
|
||||
.foregroundStyle(ScarfColor.foregroundMuted)
|
||||
Text("Widget type \"\(widget.type)\" isn't supported in this version of Scarf yet.")
|
||||
VStack(alignment: .leading, spacing: 6) {
|
||||
HStack(spacing: 6) {
|
||||
Image(systemName: "exclamationmark.triangle.fill")
|
||||
.font(.caption)
|
||||
.foregroundStyle(ScarfColor.warning)
|
||||
Text(widget.title.isEmpty ? "Widget error" : widget.title)
|
||||
.font(.caption)
|
||||
.foregroundStyle(ScarfColor.foregroundMuted)
|
||||
}
|
||||
Text("Unknown widget type: \"\(widget.type)\"")
|
||||
.font(.callout)
|
||||
.foregroundStyle(.primary)
|
||||
.fixedSize(horizontal: false, vertical: true)
|
||||
Text("This Scarf build doesn't render this widget type. Update Scarf or change the widget type in dashboard.json.")
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.tertiary)
|
||||
.fixedSize(horizontal: false, vertical: true)
|
||||
}
|
||||
.frame(maxWidth: .infinity, alignment: .leading)
|
||||
.padding(12)
|
||||
.background(.quaternary.opacity(0.5))
|
||||
.background(ScarfColor.warning.opacity(0.08))
|
||||
.overlay(
|
||||
RoundedRectangle(cornerRadius: 8)
|
||||
.strokeBorder(ScarfColor.warning.opacity(0.3), lineWidth: 1)
|
||||
)
|
||||
.clipShape(RoundedRectangle(cornerRadius: 8))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,15 +19,7 @@ struct ListWidgetView: View {
|
||||
}
|
||||
if let items = widget.items {
|
||||
ForEach(items) { item in
|
||||
HStack(spacing: 6) {
|
||||
Image(systemName: statusIcon(item.status))
|
||||
.font(.caption2)
|
||||
.foregroundStyle(statusColor(item.status))
|
||||
Text(item.text)
|
||||
.font(.callout)
|
||||
.strikethrough(item.status == "done")
|
||||
.foregroundStyle(item.status == "done" ? .secondary : .primary)
|
||||
}
|
||||
ListItemRow(item: item)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -36,21 +28,52 @@ struct ListWidgetView: View {
|
||||
.background(.quaternary.opacity(0.5))
|
||||
.clipShape(RoundedRectangle(cornerRadius: 8))
|
||||
}
|
||||
}
|
||||
|
||||
private func statusIcon(_ status: String?) -> String {
|
||||
switch status {
|
||||
case "done": return "checkmark.circle.fill"
|
||||
case "active": return "circle.inset.filled"
|
||||
case "pending": return "circle"
|
||||
default: return "circle"
|
||||
private struct ListItemRow: View {
|
||||
let item: ListItem
|
||||
|
||||
private var typedStatus: ListItemStatus? { ListItemStatus(raw: item.status) }
|
||||
|
||||
var body: some View {
|
||||
HStack(spacing: 6) {
|
||||
Image(systemName: iconName)
|
||||
.font(.caption2)
|
||||
.foregroundStyle(tint)
|
||||
Text(item.text)
|
||||
.font(.callout)
|
||||
.strikethrough(typedStatus == .done)
|
||||
.foregroundStyle(typedStatus == .done ? .secondary : .primary)
|
||||
if typedStatus == nil, let raw = item.status, !raw.isEmpty {
|
||||
Text(raw)
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.secondary)
|
||||
.padding(.horizontal, 6)
|
||||
.padding(.vertical, 2)
|
||||
.background(.quaternary.opacity(0.5))
|
||||
.clipShape(Capsule())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func statusColor(_ status: String?) -> Color {
|
||||
switch status {
|
||||
case "done": return .green
|
||||
case "active": return .blue
|
||||
default: return .secondary
|
||||
private var iconName: String {
|
||||
switch typedStatus {
|
||||
case .success, .done: return "checkmark.circle.fill"
|
||||
case .warning: return "exclamationmark.triangle.fill"
|
||||
case .danger: return "xmark.octagon.fill"
|
||||
case .info: return "info.circle.fill"
|
||||
case .pending: return "circle.dashed"
|
||||
case .neutral, nil: return "circle"
|
||||
}
|
||||
}
|
||||
|
||||
private var tint: Color {
|
||||
switch typedStatus {
|
||||
case .success, .done: return ScarfColor.success
|
||||
case .warning: return ScarfColor.warning
|
||||
case .danger: return ScarfColor.danger
|
||||
case .info: return ScarfColor.info
|
||||
case .pending, .neutral, nil: return .secondary
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,176 @@
|
||||
import SwiftUI
|
||||
import ScarfCore
|
||||
import ScarfDesign
|
||||
import UIKit
|
||||
|
||||
/// In-app Diagnostics → Performance panel. Lets users flip the
|
||||
/// ScarfMon backend mode, watch live aggregated stats from the ring
|
||||
/// buffer, and copy a JSON dump to paste into a feedback thread.
|
||||
///
|
||||
/// Data never leaves the device unless the user taps "Copy as JSON" —
|
||||
/// no remote upload, no analytics. Same source-of-truth as the Mac
|
||||
/// panel; both sides read `ScarfMonBoot.sharedRingBuffer`.
|
||||
struct ScarfMonDiagnosticsView: View {
|
||||
@State private var mode: ScarfMonBoot.Mode = ScarfMonBoot.currentMode()
|
||||
@State private var stats: [ScarfMonStat] = []
|
||||
@State private var copiedToast: Bool = false
|
||||
|
||||
/// Ring buffer is process-wide; we read from it on a 1s timer
|
||||
/// while the panel is foregrounded. No live tail; this view only
|
||||
/// re-aggregates the in-memory snapshot.
|
||||
private let refreshInterval: TimeInterval = 1.0
|
||||
|
||||
var body: some View {
|
||||
List {
|
||||
modeSection
|
||||
if mode == .full {
|
||||
summarySection
|
||||
actionsSection
|
||||
} else {
|
||||
Section {
|
||||
Text("Switch to **Full** above to see live stats and copy a JSON dump. Off and Signpost-only modes don't keep an in-memory ring buffer.")
|
||||
.font(.callout)
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
}
|
||||
}
|
||||
.navigationTitle("Performance")
|
||||
.navigationBarTitleDisplayMode(.inline)
|
||||
.task(id: mode) {
|
||||
// Re-aggregate while the view is visible. SwiftUI cancels
|
||||
// this task on disappear, so the timer stops eating cycles
|
||||
// when the user backs out.
|
||||
guard mode == .full else { return }
|
||||
while !Task.isCancelled {
|
||||
refresh()
|
||||
try? await Task.sleep(nanoseconds: UInt64(refreshInterval * 1_000_000_000))
|
||||
}
|
||||
}
|
||||
.overlay(alignment: .top) {
|
||||
if copiedToast {
|
||||
Text("Copied to clipboard")
|
||||
.font(.caption)
|
||||
.padding(.horizontal, 12)
|
||||
.padding(.vertical, 6)
|
||||
.background(.regularMaterial)
|
||||
.clipShape(Capsule())
|
||||
.padding(.top, 8)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var modeSection: some View {
|
||||
Section {
|
||||
Picker("Mode", selection: $mode) {
|
||||
Text("Off").tag(ScarfMonBoot.Mode.off)
|
||||
Text("Signpost only").tag(ScarfMonBoot.Mode.signpostOnly)
|
||||
Text("Full").tag(ScarfMonBoot.Mode.full)
|
||||
}
|
||||
.pickerStyle(.segmented)
|
||||
.onChange(of: mode) { _, newValue in
|
||||
ScarfMonBoot.setMode(newValue)
|
||||
}
|
||||
} header: {
|
||||
Text("Recording mode")
|
||||
} footer: {
|
||||
Text("**Signpost only** is the default — Instruments can attach and read the Points of Interest track without any other overhead. **Full** also keeps a 4096-entry in-memory ring you can browse below and copy as JSON.")
|
||||
.font(.caption)
|
||||
}
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var summarySection: some View {
|
||||
Section {
|
||||
if stats.isEmpty {
|
||||
Text("No samples yet. Use the app for a few seconds and the table will populate.")
|
||||
.font(.caption)
|
||||
.foregroundStyle(.secondary)
|
||||
} else {
|
||||
ForEach(stats.prefix(20), id: \.self) { stat in
|
||||
StatRow(stat: stat)
|
||||
}
|
||||
}
|
||||
} header: {
|
||||
Text("Top 20 by p95")
|
||||
} footer: {
|
||||
Text("Sorted by 95th-percentile duration. Counts include events; intervals are everything wrapped in `ScarfMon.measure`.")
|
||||
.font(.caption)
|
||||
}
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var actionsSection: some View {
|
||||
Section {
|
||||
Button {
|
||||
copyJSON()
|
||||
} label: {
|
||||
Label("Copy ring buffer as JSON", systemImage: "doc.on.clipboard")
|
||||
}
|
||||
Button(role: .destructive) {
|
||||
ScarfMonBoot.sharedRingBuffer?.reset()
|
||||
refresh()
|
||||
} label: {
|
||||
Label("Reset ring buffer", systemImage: "trash")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func refresh() {
|
||||
stats = ScarfMonBoot.sharedRingBuffer?.summary() ?? []
|
||||
}
|
||||
|
||||
private func copyJSON() {
|
||||
guard let json = ScarfMonBoot.sharedRingBuffer?.exportJSON() else { return }
|
||||
UIPasteboard.general.string = json
|
||||
copiedToast = true
|
||||
Task { @MainActor in
|
||||
try? await Task.sleep(nanoseconds: 1_500_000_000)
|
||||
copiedToast = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private struct StatRow: View {
|
||||
let stat: ScarfMonStat
|
||||
|
||||
var body: some View {
|
||||
VStack(alignment: .leading, spacing: 2) {
|
||||
HStack {
|
||||
Text(stat.name)
|
||||
.font(.system(.body, design: .monospaced))
|
||||
Spacer()
|
||||
Text("p95 \(formatMs(stat.p95Ms))")
|
||||
.font(.caption.monospaced())
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
HStack(spacing: 12) {
|
||||
Text(stat.category.rawValue)
|
||||
.font(.caption2)
|
||||
.foregroundStyle(.tertiary)
|
||||
Text("count \(stat.count)")
|
||||
.font(.caption2.monospaced())
|
||||
.foregroundStyle(.tertiary)
|
||||
if stat.kind == .interval {
|
||||
Text("p50 \(formatMs(stat.p50Ms))")
|
||||
.font(.caption2.monospaced())
|
||||
.foregroundStyle(.tertiary)
|
||||
Text("max \(formatMs(stat.maxMs))")
|
||||
.font(.caption2.monospaced())
|
||||
.foregroundStyle(.tertiary)
|
||||
}
|
||||
if stat.totalBytes > 0 {
|
||||
Text("bytes \(stat.totalBytes)")
|
||||
.font(.caption2.monospaced())
|
||||
.foregroundStyle(.tertiary)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private func formatMs(_ ms: Double) -> String {
|
||||
if ms >= 100 { return String(format: "%.0fms", ms) }
|
||||
if ms >= 1 { return String(format: "%.1fms", ms) }
|
||||
return String(format: "%.2fms", ms)
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,13 @@ struct SettingsView: View {
|
||||
@State private var vm: IOSSettingsViewModel
|
||||
@State private var showRawYAML = false
|
||||
@State private var editingSpec: SettingSpec?
|
||||
/// v2.7 — Scarf-local opt-in to bulk-fetch tool result CONTENT
|
||||
/// when resuming past chats. Default off; the shared
|
||||
/// `RichChatViewModel` reads this same UserDefaults key on
|
||||
/// every chat resume so iOS gets the same skeleton-then-hydrate
|
||||
/// behavior as Mac.
|
||||
@AppStorage(RichChatViewModel.loadHistoricalToolResultsKey)
|
||||
private var loadHistoricalToolResults: Bool = false
|
||||
|
||||
private static let sharedContextID: ServerID = ServerID(
|
||||
uuidString: "00000000-0000-0000-0000-0000000000A1"
|
||||
@@ -45,6 +52,7 @@ struct SettingsView: View {
|
||||
compressionSection
|
||||
loggingSection
|
||||
platformsSection
|
||||
diagnosticsSection
|
||||
rawYAMLToggleSection
|
||||
}
|
||||
}
|
||||
@@ -163,6 +171,28 @@ struct SettingsView: View {
|
||||
yesNoRow("Inline diffs", vm.config.display.inlineDiffs)
|
||||
LabeledContent("Personality", value: vm.config.personality)
|
||||
}
|
||||
chatScarfSection
|
||||
}
|
||||
|
||||
/// v2.7 — Scarf-local chat preferences. Mirrors the Mac Settings
|
||||
/// → Display → "Load tool results in past chats" toggle. Lives in
|
||||
/// its own section so it's clear these are app-side settings, not
|
||||
/// Hermes config values.
|
||||
@ViewBuilder
|
||||
private var chatScarfSection: some View {
|
||||
Section {
|
||||
Toggle(isOn: $loadHistoricalToolResults) {
|
||||
VStack(alignment: .leading, spacing: 2) {
|
||||
Text("Load tool results in past chats")
|
||||
.font(.body)
|
||||
Text("Off (default) keeps past chat resumes fast on slow remotes — tool call cards still render, but the inspector lazy-loads each result when you open it.")
|
||||
.font(.caption)
|
||||
.foregroundStyle(.secondary)
|
||||
}
|
||||
}
|
||||
} header: {
|
||||
Text("Chat (Scarf)")
|
||||
}
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
@@ -257,6 +287,27 @@ struct SettingsView: View {
|
||||
}
|
||||
}
|
||||
|
||||
/// Diagnostics → Performance entry point. Hidden from the
|
||||
/// `quickEditsSection` flow because it doesn't touch config.yaml
|
||||
/// — it controls the in-process ScarfMon backend set instead. Off
|
||||
/// by default users still get Instruments-visible signposts; flip
|
||||
/// to Full when investigating a specific perf complaint.
|
||||
@ViewBuilder
|
||||
private var diagnosticsSection: some View {
|
||||
Section {
|
||||
NavigationLink {
|
||||
ScarfMonDiagnosticsView()
|
||||
} label: {
|
||||
Label("Performance", systemImage: "speedometer")
|
||||
}
|
||||
} header: {
|
||||
Text("Diagnostics")
|
||||
} footer: {
|
||||
Text("Performance instrumentation. Default mode emits Instruments signposts only; Full mode also keeps a 4096-entry in-memory ring you can copy as JSON.")
|
||||
.font(.caption)
|
||||
}
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var rawYAMLToggleSection: some View {
|
||||
Section {
|
||||
|
||||
@@ -48,7 +48,12 @@ struct SkillsView: View {
|
||||
// picker when the per-server snapshot diff has changes.
|
||||
// First-load with no prior snapshot silently primes (no
|
||||
// pill, the snapshot just records what's there).
|
||||
if let diff = snapshotDiff,
|
||||
//
|
||||
// Issue #78: scope the pill to the Installed tab. It
|
||||
// describes local file deltas; rendering it on Updates
|
||||
// contradicts the upstream-version-check pane below.
|
||||
if currentTab == .installed,
|
||||
let diff = snapshotDiff,
|
||||
diff.hasChanges,
|
||||
!diff.previousSnapshotEmpty {
|
||||
whatsNewPill(diff: diff)
|
||||
|
||||
@@ -529,7 +529,7 @@
|
||||
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
|
||||
CODE_SIGN_ENTITLEMENTS = "Scarf iOS/Scarf_iOS.entitlements";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
ENABLE_PREVIEWS = YES;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
@@ -540,13 +540,13 @@
|
||||
INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES;
|
||||
INFOPLIST_KEY_UILaunchScreen_Generation = YES;
|
||||
INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight";
|
||||
INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight";
|
||||
INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = UIInterfaceOrientationPortrait;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/Frameworks",
|
||||
);
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarfgo.app;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SDKROOT = iphoneos;
|
||||
@@ -571,7 +571,7 @@
|
||||
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
|
||||
CODE_SIGN_ENTITLEMENTS = "Scarf iOS/Scarf_iOS.entitlements";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
ENABLE_PREVIEWS = YES;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
@@ -582,13 +582,13 @@
|
||||
INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents = YES;
|
||||
INFOPLIST_KEY_UILaunchScreen_Generation = YES;
|
||||
INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight";
|
||||
INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight";
|
||||
INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = UIInterfaceOrientationPortrait;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/Frameworks",
|
||||
);
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarfgo.app;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SDKROOT = iphoneos;
|
||||
@@ -612,7 +612,7 @@
|
||||
buildSettings = {
|
||||
BUNDLE_LOADER = "$(TEST_HOST)";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 26.2;
|
||||
@@ -635,7 +635,7 @@
|
||||
buildSettings = {
|
||||
BUNDLE_LOADER = "$(TEST_HOST)";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 26.2;
|
||||
@@ -658,7 +658,7 @@
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 26.2;
|
||||
@@ -680,7 +680,7 @@
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 26.2;
|
||||
@@ -834,7 +834,7 @@
|
||||
CODE_SIGN_ENTITLEMENTS = scarf/scarf.entitlements;
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
COMBINE_HIDPI_IMAGES = YES;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEAD_CODE_STRIPPING = YES;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
ENABLE_APP_SANDBOX = NO;
|
||||
@@ -848,7 +848,7 @@
|
||||
"@executable_path/../Frameworks",
|
||||
);
|
||||
MACOSX_DEPLOYMENT_TARGET = 14.6;
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarf.app;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
REGISTER_APP_GROUPS = YES;
|
||||
@@ -870,7 +870,7 @@
|
||||
CODE_SIGN_ENTITLEMENTS = scarf/scarf.entitlements;
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
COMBINE_HIDPI_IMAGES = YES;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEAD_CODE_STRIPPING = YES;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
ENABLE_APP_SANDBOX = NO;
|
||||
@@ -884,7 +884,7 @@
|
||||
"@executable_path/../Frameworks",
|
||||
);
|
||||
MACOSX_DEPLOYMENT_TARGET = 14.6;
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarf.app;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
REGISTER_APP_GROUPS = YES;
|
||||
@@ -902,12 +902,12 @@
|
||||
buildSettings = {
|
||||
BUNDLE_LOADER = "$(TEST_HOST)";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEAD_CODE_STRIPPING = YES;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MACOSX_DEPLOYMENT_TARGET = 26.2;
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarfTests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
STRING_CATALOG_GENERATE_SYMBOLS = NO;
|
||||
@@ -924,12 +924,12 @@
|
||||
buildSettings = {
|
||||
BUNDLE_LOADER = "$(TEST_HOST)";
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEAD_CODE_STRIPPING = YES;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MACOSX_DEPLOYMENT_TARGET = 26.2;
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarfTests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
STRING_CATALOG_GENERATE_SYMBOLS = NO;
|
||||
@@ -945,11 +945,11 @@
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEAD_CODE_STRIPPING = YES;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarfUITests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
STRING_CATALOG_GENERATE_SYMBOLS = NO;
|
||||
@@ -965,11 +965,11 @@
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 28;
|
||||
CURRENT_PROJECT_VERSION = 34;
|
||||
DEAD_CODE_STRIPPING = YES;
|
||||
DEVELOPMENT_TEAM = 3Q6X2L86C4;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
MARKETING_VERSION = 2.5.2;
|
||||
MARKETING_VERSION = 2.7.5;
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.scarfUITests;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
STRING_CATALOG_GENERATE_SYMBOLS = NO;
|
||||
|
||||
@@ -0,0 +1,159 @@
|
||||
import Foundation
|
||||
import ScarfCore
|
||||
import os
|
||||
|
||||
/// One template entry as exposed by `awizemann.github.io/scarf/templates/catalog.json`.
/// Mirrors the per-template shape `tools/build-catalog.py` emits — the
/// validator is the source of truth on the schema, this struct is the
/// Swift consumer. **Do not add fields here that aren't in `catalog.json`
/// today.** Keeping the surface 1:1 means we can't accidentally render
/// something the catalog doesn't actually carry.
///
/// Most fields are required-from-the-validator's-perspective but
/// expressed as optionals here so a single-template typo on the
/// website doesn't bring down the whole list — we drop the malformed
/// entry and keep going (handled by `Catalog`'s element-tolerant
/// decoder, which `CatalogService.fetchCatalog()` invokes).
struct CatalogEntry: Codable, Sendable, Identifiable, Hashable {

    // Hashable + Equatable conformance is identity-based on `id` —
    // `TemplateConfigSchema` only conforms to Equatable, so we can't
    // synthesize Hashable, and a content-based equality wouldn't be
    // useful anyway (the same template re-fetched from cache vs. fresh
    // is "the same entry" even if a description was edited upstream).
    static func == (lhs: CatalogEntry, rhs: CatalogEntry) -> Bool {
        lhs.id == rhs.id
    }

    func hash(into hasher: inout Hasher) {
        hasher.combine(id)
    }

    /// Stable identifier — `<author>/<template-name>`, e.g.
    /// `awizemann/hackernews-digest`. Matches the value in
    /// `template.json`'s `id` field.
    let id: String

    /// Human-readable name shown in the catalog list.
    let name: String

    /// Semver. Compared against the installed version from
    /// `InstalledTemplatesIndex` to detect "Update available".
    let version: String

    /// Short blurb for the list row / detail page; nil when the
    /// template didn't declare one.
    let description: String?

    /// Free-form category string (e.g. "monitoring"); nil when undeclared.
    let category: String?

    /// Search/filter tags. Non-optional: an entry missing `tags`
    /// fails to decode and is dropped by the tolerant decoder.
    let tags: [String]

    /// Attribution shown on the detail page. Non-optional — entries
    /// without an author are dropped.
    let author: Author

    /// Minimum host versions the template claims to need; nil means
    /// "no claim made".
    let minScarfVersion: String?
    let minHermesVersion: String?

    /// HTTPS URL the install flow consumes.
    /// `TemplateInstallerViewModel.openRemoteURL(_:)` accepts this
    /// directly. The catalog itself only ships HTTPS URLs (validator
    /// enforced).
    let installUrl: String

    /// Bundle metadata for size warnings and integrity checks. Optional
    /// because pre-v2 catalogs didn't carry these.
    let bundleSize: Int?
    let bundleSha256: String?

    /// Slug used by the static-site generator for detail-page URLs.
    /// Reused as a stable accessibility-ID suffix so XCUITest can find
    /// rows even if the human-readable id contains slashes.
    let detailSlug: String?

    /// What's inside the bundle, mirrored from `template.json`'s
    /// `contents` claim. Drives the "what will be installed" preview
    /// on the detail page.
    let contents: Contents?

    /// Config schema + model recommendation if the template declares
    /// one. Using the existing `TemplateConfigSchema` decoder keeps
    /// parsing aligned with the install sheet's config form.
    let config: TemplateConfigSchema?

    /// Attribution block from `template.json` — display name plus an
    /// optional homepage/profile link.
    struct Author: Codable, Sendable, Equatable {
        let name: String
        let url: String?
    }

    /// `template.json`'s `contents` object. All fields are optional —
    /// `nil` means "not declared," which the catalog renders as
    /// absent/zero.
    struct Contents: Codable, Sendable, Equatable {
        let dashboard: Bool?
        let agentsMd: Bool?
        let cron: Int?
        let config: Int?
        let memory: Bool?
        let skills: [String]?
    }
}
|
||||
|
||||
/// Top-level shape of `catalog.json`. Only carries what the Swift
/// catalog browser actually uses — `templates` is the list itself,
/// `schemaVersion` lets us reject incompatible future formats.
///
/// **The validator's `generated` field is intentionally NOT decoded.**
/// It ships as a boolean (`true`) per `tools/build-catalog.py`'s
/// "human reminder; a timestamp would churn the diff every run"
/// comment. The catalog UI uses the cache file's `fetchedAt` for the
/// "last refreshed" string, not anything from `catalog.json`.
///
/// **Per-element fault tolerance.** `templates` is decoded entry by
/// entry through an unkeyed container — a single malformed entry
/// (missing `tags`, `author`, etc.) is dropped with a logged warning
/// rather than failing the whole catalog decode. Honors the contract
/// the per-entry doc-comment promises.
struct Catalog: Codable, Sendable {
    let schemaVersion: Int?
    let templates: [CatalogEntry]

    /// Memberwise init, kept explicit because the custom `init(from:)`
    /// below suppresses the synthesized one.
    init(schemaVersion: Int?, templates: [CatalogEntry]) {
        self.schemaVersion = schemaVersion
        self.templates = templates
    }

    /// Custom decoder that drops every key other than `schemaVersion`
    /// and `templates`. Without this, `generated: true` would surface
    /// as a typeMismatch on `String?`.
    enum CodingKeys: String, CodingKey {
        case schemaVersion
        case templates
    }

    private static let decodeLogger = Logger(subsystem: "com.scarf", category: "CatalogDecoder")

    init(from decoder: Decoder) throws {
        let container = try decoder.container(keyedBy: CodingKeys.self)
        self.schemaVersion = try container.decodeIfPresent(Int.self, forKey: .schemaVersion)

        var entries: [CatalogEntry] = []
        if container.contains(.templates) {
            var unkeyed = try container.nestedUnkeyedContainer(forKey: .templates)
            entries.reserveCapacity(unkeyed.count ?? 0)
            while !unkeyed.isAtEnd {
                // A failed element decode does NOT advance `currentIndex`
                // (JSONDecoder only advances on success), so capture the
                // index up front: it is the index of the element we are
                // about to decode — and, on failure, of the bad element.
                let elementIndex = unkeyed.currentIndex
                do {
                    entries.append(try unkeyed.decode(CatalogEntry.self))
                } catch {
                    Self.decodeLogger.warning("dropping malformed catalog entry at index \(elementIndex): \(error.localizedDescription, privacy: .public)")
                    // Advance past the bad element so the loop terminates.
                    // Decoding into a permissive `JSONValue` placeholder
                    // would also work, but Foundation's Decoder API has
                    // no built-in skip — `_Skip` consumes one element.
                    _ = try? unkeyed.decode(_Skip.self)
                    // Defensive: if even the skip failed to advance the
                    // cursor, bail out instead of spinning forever on the
                    // same element.
                    if unkeyed.currentIndex == elementIndex { break }
                }
            }
        }
        self.templates = entries
    }

    /// Placeholder type used to consume a malformed array element after
    /// the real decode threw. Decodes anything by ignoring it.
    private struct _Skip: Decodable {
        init(from decoder: Decoder) throws {
            _ = try decoder.singleValueContainer()
        }
    }
}
|
||||
@@ -31,6 +31,16 @@ struct ProjectTemplateManifest: Codable, Sendable, Equatable {
|
||||
/// optional-field decoding keeps them working unchanged.
|
||||
let config: TemplateConfigSchema?
|
||||
|
||||
/// Per-project Kanban tenant slug (manifest schemaVersion 3+, v2.7.5).
|
||||
/// Minted by `KanbanTenantResolver` on first kanban interaction
|
||||
/// inside this project. Templates never set this — it's
|
||||
/// user-machine-scoped state — but Codable's optional decoding
|
||||
/// means template manifests stay valid alongside user-minted ones.
|
||||
/// Once minted, immutable across renames so existing tasks stay
|
||||
/// attributable to the project. Read by `ProjectAgentContextService`
|
||||
/// to surface the tenant to the agent in the AGENTS.md block.
|
||||
var kanbanTenant: String? = nil
|
||||
|
||||
/// Filesystem-safe slug derived from `id` (`"owner/name"` → `"owner-name"`).
|
||||
/// Used for the install directory name, skills namespace, and cron-job tag.
|
||||
nonisolated var slug: String {
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
import AppKit
|
||||
import Foundation
|
||||
import os
|
||||
|
||||
/// Quits the running app and brings up a fresh instance of the same
/// bundle. Used by the Profile-switching flow (issue #70) so the new
/// active profile lands in a process that has never observed the old
/// one — sidesteps any in-process cache or service-state bug that
/// might still be reading from the previous profile's home directory.
///
/// The pairing is intentional:
/// 1. Caller invokes `try AppRelauncher.relaunch()`. That spawns a
///    fresh `open -n <bundleURL>`, captures stderr/exitCode, returns
///    success once the launcher has acknowledged the dispatch.
/// 2. Caller schedules `NSApp.terminate(nil)` 250ms later. The
///    250ms gives macOS time to begin launching the second PID so
///    the dock-icon hand-off looks smooth (no flash of missing
///    icon). Without the gap, macOS can briefly show zero Scarf
///    icons in the dock.
///
/// Refuses to relaunch when the running bundle is under
/// `DerivedData/` or `Build/Products/Debug` — that's an Xcode
/// debug session, and `terminate(nil)` would kill the run mid-debug
/// without giving the new instance any way to attach. The caller
/// surfaces a "restart manually" toast in that case.
@MainActor
enum AppRelauncher {
    static let logger = Logger(subsystem: "com.scarf.app", category: "AppRelauncher")

    /// Failure modes for `relaunch()`.
    enum RelaunchError: Error, LocalizedError {
        /// Running from an Xcode/DerivedData build — relaunching would
        /// kill the debug session.
        case debugBuild
        /// `/usr/bin/open` either failed to spawn (exitCode -1) or
        /// exited nonzero; `stderr` carries whatever it printed.
        case openFailed(exitCode: Int32, stderr: String)

        var errorDescription: String? {
            switch self {
            case .debugBuild:
                return "Refusing to relaunch from an Xcode debug build."
            case .openFailed(let code, let stderr):
                return "open(1) exited \(code): \(stderr)"
            }
        }
    }

    /// Spawns a fresh instance of the running app via `/usr/bin/open -n
    /// <bundleURL>` and returns once the launcher process has dispatched
    /// the new instance. The caller is responsible for the subsequent
    /// `NSApp.terminate(nil)` (deferred ~250ms for a smooth dock hand-off).
    /// Throws `.debugBuild` when launched from Xcode/DerivedData;
    /// `.openFailed` when `open` itself errored.
    static func relaunch() throws {
        let path = Bundle.main.bundleURL.path

        // Path fragments that identify an Xcode-managed build product.
        // `Debug-` also matches platform-suffixed dirs (Debug-iphoneos…).
        let debugMarkers = ["/DerivedData/", "/Build/Products/Debug", "/Build/Products/Debug-"]
        guard !debugMarkers.contains(where: path.contains) else {
            logger.warning("Refusing relaunch — running from Xcode build (\(path, privacy: .public))")
            throw RelaunchError.debugBuild
        }

        let process = Process()
        process.executableURL = URL(fileURLWithPath: "/usr/bin/open")
        // -n: force a NEW instance (without it, `open` activates the
        //     running app and we'd never get a fresh process).
        // Pass the bundle URL directly (not -a <bundleId>) so signed
        // dev clones in `~/Applications` still resolve correctly.
        // No -W: we want `open` to return immediately after dispatch,
        // not block until the spawned app exits.
        process.arguments = ["-n", path]

        let errPipe = Pipe()
        let outPipe = Pipe()
        process.standardError = errPipe
        process.standardOutput = outPipe

        do {
            try process.run()
        } catch {
            // `open` never even started — surface the spawn error with a
            // sentinel exit code.
            throw RelaunchError.openFailed(exitCode: -1, stderr: error.localizedDescription)
        }

        process.waitUntilExit()

        // Drain both streams BEFORE inspecting exit code so we don't leak fds.
        let stderrData = (try? errPipe.fileHandleForReading.readToEnd()) ?? Data()
        _ = try? outPipe.fileHandleForReading.readToEnd()
        try? errPipe.fileHandleForReading.close()
        try? outPipe.fileHandleForReading.close()

        guard process.terminationStatus == 0 else {
            let stderr = String(data: stderrData, encoding: .utf8)?
                .trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
            logger.warning("open(1) failed (\(process.terminationStatus)): \(stderr, privacy: .public)")
            throw RelaunchError.openFailed(exitCode: process.terminationStatus, stderr: stderr)
        }

        logger.info("Relaunch dispatched for \(path, privacy: .public)")
    }
}
|
||||
@@ -0,0 +1,228 @@
|
||||
import Foundation
|
||||
import ScarfCore
|
||||
import os
|
||||
|
||||
/// On-disk cache shape. Versioned so a future schema change can lift
/// stale caches gracefully — bump `version` and the loader rejects
/// anything older without trying to migrate. Stored next to the
/// projects registry so a Hermes wipe takes it with the rest of the
/// Scarf-owned state.
struct CatalogCache: Codable, Sendable {
    // Bump when the on-disk shape changes; `CatalogService.readCache()`
    // discards any cache whose `version` doesn't match.
    static let currentVersion = 1
    // Schema version this cache file was written with.
    let version: Int
    // When the catalog was last fetched; drives the TTL check and the
    // UI's "last refreshed" string.
    let fetchedAt: Date
    // The parsed catalog payload itself.
    let catalog: Catalog

    /// `version` defaults to the current schema so writers never have
    /// to spell it out; only the reader cares about older values.
    init(version: Int = CatalogCache.currentVersion, fetchedAt: Date, catalog: Catalog) {
        self.version = version
        self.fetchedAt = fetchedAt
        self.catalog = catalog
    }
}
|
||||
|
||||
/// Result of a `loadCatalog` call. Distinguishes "fetched fresh" from
/// "cache served, network failed" so the catalog UI can surface a
/// "could not refresh" hint next to a stale-but-useful list.
enum CatalogLoadResult: Sendable {
    /// Network fetch succeeded; `fetchedAt` is the moment of that fetch.
    case fresh(catalog: Catalog, fetchedAt: Date)
    /// Served from the on-disk cache. `refreshError` is nil when the
    /// cache was simply still within TTL, non-nil when a refresh was
    /// attempted and failed (its message, for the UI hint).
    case cache(catalog: Catalog, fetchedAt: Date, refreshError: String?)
    /// No cache and no network — the hard-coded fallback list, with the
    /// failure message that forced it.
    case fallback(catalog: Catalog, reason: String)
}
|
||||
|
||||
/// Failure taxonomy for `CatalogService.fetchCatalog()` — kept coarse
/// on purpose: the loader only needs a human-readable message to pass
/// through `CatalogLoadResult`.
enum CatalogServiceError: LocalizedError, Sendable {
    /// URLSession-level failure or a non-HTTP response.
    case transport(String)
    /// HTTP response outside 200..<300.
    case http(status: Int)
    /// Body arrived but didn't decode as a `Catalog`.
    case decode(String)

    var errorDescription: String? {
        switch self {
        case .transport(let m): return "Catalog transport: \(m)"
        case .http(let status): return "Catalog HTTP \(status)"
        case .decode(let m): return "Catalog decode: \(m)"
        }
    }
}
|
||||
|
||||
/// Fetches + caches the public template catalog from
/// awizemann.github.io. Mirrors `NousModelCatalogService` 1:1 in
/// shape: cache-first, 24h TTL, fallback when both cache and fetch
/// fail. The catalog is unauthenticated (a public static file on
/// GitHub Pages), so no bearer-token plumbing.
struct CatalogService: Sendable {

    /// Where the catalog lives in production. The static-site builder
    /// publishes here on `./scripts/catalog.sh publish`. **Versioned
    /// constant**: if we ever move this URL, every old Scarf install
    /// pegs at its bundled fallback until the user updates Scarf — so
    /// keep it stable. Settings-configurable in v2.9 only if anyone
    /// asks.
    static let baseURL = URL(string: "https://awizemann.github.io/scarf/templates/catalog.json")!
    static let cacheTTL: TimeInterval = 24 * 60 * 60 // 24h
    static let requestTimeout: TimeInterval = 10 // seconds

    /// Hard-coded fallback for offline-with-no-cache. Keeps the picker
    /// non-empty on a fresh install so the user sees *something* even
    /// before the first network call. **Update on every release that
    /// adds a template** — the validator's `tools/check-catalog-fallback-sync.py`
    /// (TODO) catches drift between this list and `templates/`.
    static let fallbackCatalog: Catalog = Catalog(
        schemaVersion: 1,
        templates: [
            CatalogEntry(
                id: "awizemann/site-status-checker",
                name: "Site Status Checker",
                version: "1.1.0",
                description: "Daily uptime check for a list of URLs you configure on install.",
                category: "monitoring",
                tags: ["monitoring", "uptime", "cron", "starter"],
                author: .init(name: "Alan Wizemann", url: "https://github.com/awizemann"),
                minScarfVersion: "2.3.0",
                minHermesVersion: "0.9.0",
                installUrl: "https://raw.githubusercontent.com/awizemann/scarf/main/templates/awizemann/site-status-checker/site-status-checker.scarftemplate",
                bundleSize: nil,
                bundleSha256: nil,
                detailSlug: "awizemann-site-status-checker",
                contents: .init(dashboard: true, agentsMd: true, cron: 1, config: 2, memory: nil, skills: nil),
                config: nil
            ),
            CatalogEntry(
                id: "awizemann/hackernews-digest",
                name: "HackerNews Daily Digest",
                version: "1.0.0",
                description: "A daily digest of HackerNews top stories. No API keys required.",
                category: "news",
                tags: ["news", "digest", "hackernews", "cron", "starter"],
                author: .init(name: "Alan Wizemann", url: "https://github.com/awizemann"),
                minScarfVersion: "2.3.0",
                minHermesVersion: "0.9.0",
                installUrl: "https://raw.githubusercontent.com/awizemann/scarf/main/templates/awizemann/hackernews-digest/hackernews-digest.scarftemplate",
                bundleSize: nil,
                bundleSha256: nil,
                detailSlug: "awizemann-hackernews-digest",
                contents: .init(dashboard: true, agentsMd: true, cron: 1, config: 3, memory: nil, skills: nil),
                config: nil
            )
        ]
    )

    private static let logger = Logger(subsystem: "com.scarf", category: "CatalogService")

    // Active server context; decides which transport (local vs. remote
    // droplet) the cache I/O goes through.
    let context: ServerContext
    // Injected for testability; defaults to the shared session.
    private let session: URLSession
    // Resolved once at init from the context's path table.
    private let cachePath: String

    /// `context` selects where the cache lives; `session` is injectable
    /// so tests can stub the network.
    init(context: ServerContext = .local, session: URLSession = .shared) {
        self.context = context
        self.session = session
        self.cachePath = context.paths.catalogCache
    }

    // MARK: - Cache I/O

    /// Read the cache via the active transport so a remote droplet's
    /// cache lands on the droplet, not the user's Mac. Missing or
    /// malformed cache → nil; the loader treats that as "no cache" and
    /// kicks off a fresh fetch.
    func readCache() -> CatalogCache? {
        let transport = context.makeTransport()
        guard transport.fileExists(cachePath) else { return nil }
        do {
            let data = try transport.readFile(cachePath)
            let decoder = JSONDecoder()
            // Must match the .iso8601 encoding strategy in writeCache.
            decoder.dateDecodingStrategy = .iso8601
            let cache = try decoder.decode(CatalogCache.self, from: data)
            // Schema gate: any version mismatch is treated as "no cache"
            // rather than attempting a migration.
            guard cache.version == CatalogCache.currentVersion else {
                Self.logger.info("catalog cache schema mismatch (got v\(cache.version), expected v\(CatalogCache.currentVersion)); ignoring")
                return nil
            }
            return cache
        } catch {
            Self.logger.warning("couldn't decode catalog cache: \(error.localizedDescription, privacy: .public)")
            return nil
        }
    }

    /// Best-effort write-through: failures are logged, never thrown —
    /// a missing cache only costs a re-fetch next launch.
    private func writeCache(_ cache: CatalogCache) {
        let transport = context.makeTransport()
        do {
            let encoder = JSONEncoder()
            encoder.dateEncodingStrategy = .iso8601
            // Pretty + sorted keeps the on-disk file diff-able/debuggable.
            encoder.outputFormatting = [.prettyPrinted, .sortedKeys]
            let data = try encoder.encode(cache)
            // Make sure the parent dir exists — fresh remote installs
            // may not yet have `~/.hermes/scarf/`. mkdir -p is cheap
            // and idempotent on both transports.
            let parent = (cachePath as NSString).deletingLastPathComponent
            if !parent.isEmpty {
                try? transport.createDirectory(parent)
            }
            try transport.writeFile(cachePath, data: data)
        } catch {
            Self.logger.warning("couldn't write catalog cache: \(error.localizedDescription, privacy: .public)")
        }
    }

    /// True when the cache is older than `cacheTTL` (24h).
    func isCacheStale(_ cache: CatalogCache) -> Bool {
        Date().timeIntervalSince(cache.fetchedAt) > Self.cacheTTL
    }

    // MARK: - Network fetch

    /// Make the catalog GET. Times out after `requestTimeout` so a
    /// hung network doesn't block the picker indefinitely. Returns the
    /// parsed catalog on success, throws on any HTTP / decode error.
    func fetchCatalog() async throws -> Catalog {
        var request = URLRequest(url: Self.baseURL)
        request.httpMethod = "GET"
        request.timeoutInterval = Self.requestTimeout
        request.setValue("application/json", forHTTPHeaderField: "Accept")
        // Bypass URLSession's own cache — we do our own TTL-based
        // caching above it.
        request.cachePolicy = .reloadIgnoringLocalCacheData

        let (data, response) = try await session.data(for: request)
        guard let http = response as? HTTPURLResponse else {
            throw CatalogServiceError.transport("non-HTTP response")
        }
        guard (200..<300).contains(http.statusCode) else {
            throw CatalogServiceError.http(status: http.statusCode)
        }
        do {
            // `Catalog`'s custom init(from:) drops malformed entries
            // individually; only a top-level shape problem throws here.
            return try JSONDecoder().decode(Catalog.self, from: data)
        } catch {
            throw CatalogServiceError.decode(error.localizedDescription)
        }
    }

    // MARK: - Public entry

    /// Top-level "give me the catalog" entry point. Cache-first: serve
    /// from cache if fresh, fetch + write through if stale or empty,
    /// fall back to the hard-coded list when both fail. The caller
    /// renders based on the case so it can show a "could not refresh"
    /// hint next to a stale-but-still-useful list.
    /// - Parameter forceRefresh: skip the TTL check and always hit the
    ///   network (the cache still backstops a failed fetch).
    func loadCatalog(forceRefresh: Bool = false) async -> CatalogLoadResult {
        let cached = readCache()

        // Fast path: in-TTL cache and no forced refresh — no network at all.
        if let cached, !forceRefresh, !isCacheStale(cached) {
            return .cache(catalog: cached.catalog, fetchedAt: cached.fetchedAt, refreshError: nil)
        }

        do {
            let catalog = try await fetchCatalog()
            let now = Date()
            writeCache(CatalogCache(fetchedAt: now, catalog: catalog))
            return .fresh(catalog: catalog, fetchedAt: now)
        } catch let error as CatalogServiceError {
            // Stale cache still beats the hard-coded fallback.
            if let cached {
                Self.logger.warning("catalog refresh failed (\(error.localizedDescription, privacy: .public)); serving stale cache")
                return .cache(catalog: cached.catalog, fetchedAt: cached.fetchedAt, refreshError: error.localizedDescription)
            }
            Self.logger.warning("catalog refresh failed and no cache; serving fallback (\(error.localizedDescription, privacy: .public))")
            return .fallback(catalog: Self.fallbackCatalog, reason: error.localizedDescription)
        } catch {
            // Non-CatalogServiceError (e.g. URLError / cancellation):
            // same degradation ladder, just without the typed message.
            if let cached {
                return .cache(catalog: cached.catalog, fetchedAt: cached.fetchedAt, refreshError: error.localizedDescription)
            }
            return .fallback(catalog: Self.fallbackCatalog, reason: error.localizedDescription)
        }
    }
}
|
||||
@@ -0,0 +1,105 @@
|
||||
import Foundation
|
||||
import UserNotifications
|
||||
import os
|
||||
#if canImport(AppKit)
|
||||
import AppKit
|
||||
#endif
|
||||
|
||||
/// Posts a "Hermes finished responding" local notification when an
/// agent prompt completes while Scarf is not in the foreground
/// (issue #64). Users can switch to other work and learn when their
/// prompt has landed without polling the chat pane.
///
/// Authorization is requested lazily on first use. The user's global
/// toggle (`scarf.chat.notifyOnComplete`, default on) gates posting,
/// and notifications are suppressed when `NSApp.isActive` so users
/// who happen to be looking at the chat aren't pinged for nothing.
@MainActor
final class ChatNotificationService {
    // Singleton: notification center state (authorization flags below)
    // must be process-wide, and callers post fire-and-forget.
    static let shared = ChatNotificationService()

    private let logger = Logger(subsystem: "com.scarf", category: "ChatNotifications")
    private let center = UNUserNotificationCenter.current()
    // True once we've asked the system at least once this run — after
    // that we only *read* the current settings, never re-prompt.
    private var hasRequestedAuthorization = false
    // Cached grant; short-circuits ensureAuthorized() on later posts.
    private var isAuthorized = false

    /// AppStorage-shared key for the "notify on completion" toggle.
    /// Default true; the toggle lives under Settings → Display.
    static let toggleKey = "scarf.chat.notifyOnComplete"

    private init() {}

    /// Post a local notification announcing prompt completion. Quietly
    /// no-ops when:
    ///   - The user has disabled the toggle.
    ///   - Scarf is the foreground app (the in-chat status indicator
    ///     is sufficient).
    ///   - The system has not yet granted (or has denied) notification
    ///     authorization.
    /// `preview` is the first line of the assistant's reply, truncated
    /// to a sensible length for the lock-screen / notification center.
    func postPromptCompleted(sessionTitle: String?, preview: String) {
        // `object(forKey:)` (not `bool(forKey:)`) so an *unset* key
        // defaults to true rather than false.
        let enabled = UserDefaults.standard.object(forKey: Self.toggleKey) as? Bool ?? true
        guard enabled else { return }

        #if canImport(AppKit)
        // Suppress while frontmost — the chat pane already shows completion.
        if NSApp?.isActive == true { return }
        #endif

        // Hop to an unstructured Task: authorization + posting are async,
        // but callers treat this method as fire-and-forget.
        Task { [weak self] in
            guard let self else { return }
            let granted = await self.ensureAuthorized()
            guard granted else { return }

            let content = UNMutableNotificationContent()
            // Include the session title when present and non-empty.
            content.title = sessionTitle?.isEmpty == false
                ? "Hermes finished — \(sessionTitle ?? "")"
                : "Hermes finished responding"
            content.body = Self.trimmedPreview(preview)
            content.sound = .default

            // trigger: nil → deliver immediately.
            let request = UNNotificationRequest(
                identifier: UUID().uuidString,
                content: content,
                trigger: nil
            )
            do {
                try await self.center.add(request)
            } catch {
                self.logger.warning("Notification post failed: \(error.localizedDescription, privacy: .public)")
            }
        }
    }

    /// Resolve notification authorization, prompting the user at most
    /// once per app run. Returns true when posting is allowed.
    private func ensureAuthorized() async -> Bool {
        if isAuthorized { return true }
        if hasRequestedAuthorization {
            // Already asked once this run; respect the current settings
            // (the user may have flipped them in System Settings since).
            let settings = await center.notificationSettings()
            isAuthorized = settings.authorizationStatus == .authorized
            return isAuthorized
        }
        hasRequestedAuthorization = true
        do {
            let granted = try await center.requestAuthorization(options: [.alert, .sound])
            isAuthorized = granted
            return granted
        } catch {
            logger.warning("Notification authorization failed: \(error.localizedDescription, privacy: .public)")
            return false
        }
    }

    /// First non-empty line, capped at ~140 chars so the notification
    /// surface stays readable on every macOS notification style.
    /// Pure helper — `static` so it is unit-testable without the singleton.
    static func trimmedPreview(_ raw: String) -> String {
        // First line; falls back to the whole string when `raw` has no
        // newline-separated content at all (e.g. only newlines).
        let firstLine = raw
            .split(whereSeparator: \.isNewline)
            .first
            .map(String.init) ?? raw
        let trimmed = firstLine.trimmingCharacters(in: .whitespacesAndNewlines)
        if trimmed.count <= 140 { return trimmed }
        // Re-trim after the cut so the ellipsis never follows a space.
        let prefix = trimmed.prefix(140).trimmingCharacters(in: .whitespacesAndNewlines)
        return prefix + "…"
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user