From 717f7b93a5dfbde8516e5ad92ca172a3d53d22a0 Mon Sep 17 00:00:00 2001 From: Dave Hudson Date: Thu, 7 May 2026 13:54:31 +0100 Subject: [PATCH 1/2] =?UTF-8?q?feat(missions):=20slice=201=20=E2=80=94=20e?= =?UTF-8?q?nd-to-end=20spine=20with=20stub=20worker=20(#248)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the label-gated `.sandcastle/main.ts` issue runner with a Droid-style Missions orchestrator backed by a local Convex deployment. This is the first of three vertical slices. The worker role in `run-agent.ts` is a stub that appends a marker to README.md and pushes a commit; reviewer/fixer/validator/replanner are explicitly not implemented yet (slices 2 and 3). The point of this slice is to land the spine — schema, scheduler, CLI, planner, status table, orchestrator state transitions, PR creation — without the additional uncertainty of real agent behaviour. What's in: - `apps/missions/`: new turborepo package hosting the Convex schema (missions, milestones, features, runs, events) plus typed mutations and queries. Runs `convex --local` for v1; commit `_generated/` so fresh clones typecheck without booting the dev server. `convex-test` covers the create/pause/resume/replan paths. - `.sandcastle/missions/`: 6 modules. `orchestrator.ts` (state machine + Convex client) and `scheduler.ts` (dep + path-conflict resolution) carry the behaviour; the rest is glue. `bun:test` coverage on schema, scheduler, status-view. - `.claude/skills/mission-plan/`: portable planner skill, invokable interactively (`/mission-plan`) or headlessly via the CLI shim. - `.sandcastle/main.ts`: replaced with a CLI dispatcher (`plan`/`run`/`status`/`pause`/`resume`). - `package.json`: `mission` script (was `sandcastle`); `convex:dev` script; root `convex` + `picomatch` deps; root `typescript` devDependency for the new sandcastle typecheck step. 
- Root CI now also typechecks `.sandcastle/` — previously only workspace packages were typechecked, so orchestrator regressions could ship. What's out (deleted): `eligibility.ts`, `issue.ts`, `github.ts`, `implement-prompt.md`, `implement-docs-prompt.md`, `review-prompt.md`. The Ready-column flow is gone. Verification: `bun run ci` clean (typecheck across 7 packages + tests + sandcastle typecheck + biome + no-suppressions). Smoke-tested plan/status/pause/resume against local Convex; `mission run` deferred to the manual smoke since it creates a real PR. --- .claude/skills/mission-plan/SKILL.md | 50 ++ .../skills/mission-plan/schema-reference.md | 80 +++ .sandcastle/.env.example | 16 +- .sandcastle/eligibility.test.ts | 113 ---- .sandcastle/eligibility.ts | 72 --- .sandcastle/github.test.ts | 170 ------ .sandcastle/github.ts | 174 ------- .sandcastle/implement-docs-prompt.md | 34 -- .sandcastle/implement-prompt.md | 85 --- .sandcastle/issue.test.ts | 40 -- .sandcastle/issue.ts | 33 -- .sandcastle/main.ts | 488 +++++++----------- .sandcastle/missions/orchestrator.ts | 261 ++++++++++ .sandcastle/missions/planner.ts | 131 +++++ .sandcastle/missions/run-agent.ts | 112 ++++ .sandcastle/missions/scheduler.test.ts | 122 +++++ .sandcastle/missions/scheduler.ts | 99 ++++ .sandcastle/missions/schema.test.ts | 98 ++++ .sandcastle/missions/schema.ts | 64 +++ .sandcastle/missions/status-view.test.ts | 99 ++++ .sandcastle/missions/status-view.ts | 160 ++++++ .sandcastle/review-prompt.md | 82 --- apps/missions/.gitignore | 2 + apps/missions/README.md | 39 ++ apps/missions/convex/_generated/api.d.ts | 57 ++ apps/missions/convex/_generated/api.js | 23 + .../missions/convex/_generated/dataModel.d.ts | 60 +++ apps/missions/convex/_generated/server.d.ts | 143 +++++ apps/missions/convex/_generated/server.js | 93 ++++ apps/missions/convex/events.ts | 31 ++ apps/missions/convex/features.test.ts | 94 ++++ apps/missions/convex/features.ts | 130 +++++ apps/missions/convex/milestones.ts | 32 
++ apps/missions/convex/missions.test.ts | 117 +++++ apps/missions/convex/missions.ts | 224 ++++++++ apps/missions/convex/runs.ts | 50 ++ apps/missions/convex/schema.ts | 100 ++++ apps/missions/convex/test-types.d.ts | 3 + apps/missions/package.json | 27 + apps/missions/src/index.ts | 2 + apps/missions/tsconfig.json | 16 + apps/missions/vitest.config.ts | 8 + biome.json | 4 +- bun.lock | 207 +++++++- package.json | 17 +- scripts/check-no-suppressions.ts | 1 + 46 files changed, 2909 insertions(+), 1154 deletions(-) create mode 100644 .claude/skills/mission-plan/SKILL.md create mode 100644 .claude/skills/mission-plan/schema-reference.md delete mode 100644 .sandcastle/eligibility.test.ts delete mode 100644 .sandcastle/eligibility.ts delete mode 100644 .sandcastle/github.test.ts delete mode 100644 .sandcastle/github.ts delete mode 100644 .sandcastle/implement-docs-prompt.md delete mode 100644 .sandcastle/implement-prompt.md delete mode 100644 .sandcastle/issue.test.ts delete mode 100644 .sandcastle/issue.ts create mode 100644 .sandcastle/missions/orchestrator.ts create mode 100644 .sandcastle/missions/planner.ts create mode 100644 .sandcastle/missions/run-agent.ts create mode 100644 .sandcastle/missions/scheduler.test.ts create mode 100644 .sandcastle/missions/scheduler.ts create mode 100644 .sandcastle/missions/schema.test.ts create mode 100644 .sandcastle/missions/schema.ts create mode 100644 .sandcastle/missions/status-view.test.ts create mode 100644 .sandcastle/missions/status-view.ts delete mode 100644 .sandcastle/review-prompt.md create mode 100644 apps/missions/.gitignore create mode 100644 apps/missions/README.md create mode 100644 apps/missions/convex/_generated/api.d.ts create mode 100644 apps/missions/convex/_generated/api.js create mode 100644 apps/missions/convex/_generated/dataModel.d.ts create mode 100644 apps/missions/convex/_generated/server.d.ts create mode 100644 apps/missions/convex/_generated/server.js create mode 100644 
apps/missions/convex/events.ts create mode 100644 apps/missions/convex/features.test.ts create mode 100644 apps/missions/convex/features.ts create mode 100644 apps/missions/convex/milestones.ts create mode 100644 apps/missions/convex/missions.test.ts create mode 100644 apps/missions/convex/missions.ts create mode 100644 apps/missions/convex/runs.ts create mode 100644 apps/missions/convex/schema.ts create mode 100644 apps/missions/convex/test-types.d.ts create mode 100644 apps/missions/package.json create mode 100644 apps/missions/src/index.ts create mode 100644 apps/missions/tsconfig.json create mode 100644 apps/missions/vitest.config.ts diff --git a/.claude/skills/mission-plan/SKILL.md b/.claude/skills/mission-plan/SKILL.md new file mode 100644 index 0000000..ae77d5a --- /dev/null +++ b/.claude/skills/mission-plan/SKILL.md @@ -0,0 +1,50 @@ +--- +name: mission-plan +description: Turn an open-ended objective into a structured Missions plan (milestones + features + dependencies + path ownership) for the Contexture orchestrator. Use when the user wants to plan a multi-feature mission and have agents execute it. +--- + +You are turning a free-form objective into a structured `MissionPlan` that the orchestrator can execute. You will: + +1. **Grill the user** until the objective is unambiguous and properly scoped. +2. **Emit a JSON plan** matching the schema in [`schema-reference.md`](schema-reference.md). + +Do not skip step 1. A bad plan compounds — every feature in a mission is acted on by an autonomous agent. Five minutes of clarifying questions saves hours of wasted runs. + +## Step 1: Grill until unambiguous + +Ask, in order, whatever you don't already know from context: + +- **What outcome does this mission deliver?** Push for a single sentence. +- **What's done at the end?** Concrete acceptance criteria (e.g. "Convex schema emit works for the existing IR test fixtures and `bun run ci` is clean"). +- **Where does the work live?** Specific paths or globs (e.g. 
`packages/core/src/emit-convex/**`). This drives `pathsOwned` and lets the scheduler parallelise safely. +- **What's the natural ordering?** Identify dependencies between sub-tasks. If two features can be done in parallel, say so explicitly. +- **What's the right milestone boundary?** A milestone is a checkpoint where the integrated state must pass `bun run ci` and a validator agent's success criteria. Don't put unrelated work in the same milestone. +- **Which agent should do what?** Default `claude` for most coding work. Use `codex` if the user has a preference for a specific feature. +- **Skill references** — does any feature need particular knowledge (e.g. `frontend`, `backend`, `db`, `tests`)? Available skills are in `.sandcastle/missions/skills/`. + +Stop asking when you can write a plan that: + +- Has at least one milestone. +- Each milestone has at least one feature. +- Every feature has a non-trivial prompt that an agent could act on autonomously, owned paths, and explicit dependencies. +- Success criteria are testable (`bun run ci` clean, specific commands pass, explicit assertions). + +If the user pushes back on a question, take their answer and move on. Don't loop. + +## Step 2: Emit the plan + +Output the plan as a single JSON code block fenced with ` ```json `. Match the schema in [`schema-reference.md`](schema-reference.md) exactly — the orchestrator validates it with Zod and rejects anything malformed. + +After the code block, add a one-paragraph summary of what you've planned, why you grouped features into milestones the way you did, and any assumptions you made. + +If the user invoked you headlessly via `bun run mission plan ""`, the orchestrator will capture the JSON code block, validate it, and insert the mission into Convex. Do not write any files yourself. + +If the user invoked you interactively (`/mission-plan`), the user will save the JSON to a file and then run `bun run mission plan --apply ` to ingest it. 
+ +## Conventions + +- `slug` fields: lowercase letters, digits, hyphens. Used in branch names (`mission//`). +- `pathsOwned`: glob patterns. Used for path-conflict detection — features whose owned paths overlap will be serialised, not run in parallel. Be precise: `packages/core/**` is too broad if only `packages/core/src/emit-convex/**` is touched. +- `dependencies`: feature slugs (within the same mission) that must finish before this one starts. +- `successCriteria`: concrete, testable statements. The validator agent at the milestone gate evaluates these. +- `validationPrompt`: instructions to the validator agent — what to run, what to look at, what counts as pass/fail. diff --git a/.claude/skills/mission-plan/schema-reference.md b/.claude/skills/mission-plan/schema-reference.md new file mode 100644 index 0000000..a4ece15 --- /dev/null +++ b/.claude/skills/mission-plan/schema-reference.md @@ -0,0 +1,80 @@ +# MissionPlan JSON schema + +The orchestrator validates emitted plans against [`.sandcastle/missions/schema.ts`](../../../.sandcastle/missions/schema.ts) (Zod). This document is the human-readable mirror — keep them in sync. + +## Top-level shape + +```json +{ + "slug": "convex-ir-2026-05", + "title": "Bring up Convex emit for the IR", + "objective": "Generate Convex schemas, mutations, and CRUD seeds from the existing IR fixtures.", + "milestones": [ /* at least one */ ] +} +``` + +| Field | Type | Notes | +|---|---|---| +| `slug` | string | lowercase letters, digits, hyphens; unique across all missions | +| `title` | string | human-readable | +| `objective` | string | one or two sentences | +| `milestones` | array | at least one milestone | + +## Milestone + +```json +{ + "slug": "schema-emit", + "title": "Schema emit", + "successCriteria": [ + "Convex schema.ts is emitted for every IR table", + "bun run typecheck passes in apps/missions" + ], + "validationPrompt": "Run bun run ci. 
Inspect packages/schema/convex/schema.ts and confirm every table from the IR is present.", + "features": [ /* at least one */ ] +} +``` + +| Field | Type | Notes | +|---|---|---| +| `slug` | string | unique within the mission | +| `title` | string | human-readable | +| `successCriteria` | array of strings | at least one; concrete and testable | +| `validationPrompt` | string | what the validator agent runs at the milestone gate | +| `features` | array | at least one | + +## Feature + +```json +{ + "slug": "emit-schema-ts", + "title": "Emit schema.ts from IR", + "prompt": "Add an emitter function in packages/core/src/emit-convex/schema.ts that walks the IR and produces convex/schema.ts. Use existing IR types from packages/core/src/ir.ts.", + "dependencies": [], + "pathsOwned": ["packages/core/src/emit-convex/**"], + "preferredAgent": "claude", + "skillRefs": ["backend"] +} +``` + +| Field | Type | Notes | +|---|---|---| +| `slug` | string | unique across all features in the mission | +| `title` | string | human-readable | +| `prompt` | string | the instruction the worker agent receives. Be specific. | +| `dependencies` | array of feature slugs | features that must finish before this starts. Defaults to `[]`. | +| `pathsOwned` | array of globs | files/dirs this feature is allowed to modify. Used for conflict detection. Defaults to `[]`. | +| `preferredAgent` | "claude" \| "codex" | defaults to "claude" | +| `skillRefs` | array of strings | references to `.sandcastle/missions/skills/.md`. Defaults to `[]`. | + +## Validation + +The orchestrator rejects plans where: + +- A `slug` doesn't match `/^[a-z0-9][a-z0-9-]*$/`. +- A milestone has zero features, or a mission has zero milestones. +- A feature's `dependencies` reference an unknown feature slug. +- Two features share the same slug. +- Required string fields are empty. + +When validation fails, the orchestrator re-prompts you with the validation error. Read it carefully and emit a corrected plan. 
Do not start over from scratch. diff --git a/.sandcastle/.env.example b/.sandcastle/.env.example index d0749cd..d41c869 100644 --- a/.sandcastle/.env.example +++ b/.sandcastle/.env.example @@ -1,5 +1,15 @@ -# Anthropic API key -# If you want to use your Claude subscription instead of an API key, see https://github.com/mattpocock/sandcastle/issues/191 +# Anthropic API key (only needed if not running the planner via Claude Code). +# The planner uses your Claude subscription via the `claude` CLI; workers use it +# inside Sandcastle containers (slice 2+). ANTHROPIC_API_KEY= -# GitHub personal access token + +# GitHub personal access token used for `gh pr create` from the orchestrator. GH_TOKEN= + +# Convex deployment URL. For local dev, copy from apps/missions/.env.local +# (CONVEX_URL line) after running `bun run convex:dev`. +CONVEX_URL= + +# Optional: Convex deploy/admin key. Required when talking to a cloud +# deployment; not needed for local dev. +CONVEX_DEPLOY_KEY= diff --git a/.sandcastle/eligibility.test.ts b/.sandcastle/eligibility.test.ts deleted file mode 100644 index 29ecdda..0000000 --- a/.sandcastle/eligibility.test.ts +++ /dev/null @@ -1,113 +0,0 @@ -import { describe, expect, test } from "bun:test"; -import { evaluate, pickEligible } from "./eligibility"; -import type { IssueSnapshot, OpenPRClosing } from "./github"; - -const cfg = { label: "Sandcastle" }; - -const snapshot = (overrides: Partial = {}): IssueSnapshot => ({ - number: 42, - title: "Fix auth bug", - state: "open", - labels: ["Sandcastle"], - ...overrides, -}); - -describe("evaluate", () => { - test("eligible for an open, labelled, unclaimed snapshot", () => { - expect(evaluate(snapshot(), [], cfg)).toEqual({ eligible: true }); - }); - - test("rejects a closed issue", () => { - expect(evaluate(snapshot({ state: "closed" }), [], cfg)).toEqual({ - eligible: false, - reason: { kind: "issueClosed" }, - }); - }); - - test("rejects when the tracker label is missing", () => { - 
expect(evaluate(snapshot({ labels: ["bug"] }), [], cfg)).toEqual({ - eligible: false, - reason: { kind: "missingLabel" }, - }); - }); - - test("rejects when an open PR claims the issue", () => { - const openPRs: OpenPRClosing[] = [{ pr: 200, closes: [42] }]; - expect(evaluate(snapshot(), openPRs, cfg)).toEqual({ - eligible: false, - reason: { kind: "claimedByPR", pr: 200 }, - }); - }); - - test("a PR closing a different issue does not exclude #42", () => { - const openPRs: OpenPRClosing[] = [{ pr: 200, closes: [99] }]; - expect(evaluate(snapshot(), openPRs, cfg)).toEqual({ eligible: true }); - }); - - test("closed-state precedes label-missing when both are true (deterministic order)", () => { - expect(evaluate(snapshot({ state: "closed", labels: [] }), [], cfg)).toEqual({ - eligible: false, - reason: { kind: "issueClosed" }, - }); - }); - - test("label-missing precedes claimed-by-PR when both are true", () => { - const openPRs: OpenPRClosing[] = [{ pr: 200, closes: [42] }]; - expect(evaluate(snapshot({ labels: [] }), openPRs, cfg)).toEqual({ - eligible: false, - reason: { kind: "missingLabel" }, - }); - }); -}); - -describe("pickEligible", () => { - test("returns a single eligible issue", () => { - const result = pickEligible([snapshot()], [], cfg); - expect(result.eligible).toHaveLength(1); - expect(result.eligible[0]?.branch).toBe("sandcastle/issue-42-fix-auth-bug"); - expect(result.excluded).toEqual([]); - }); - - test("preserves input snapshot order so callers control selection ordering", () => { - const snapshots = [ - snapshot({ number: 5, title: "fifth" }), - snapshot({ number: 1, title: "first" }), - snapshot({ number: 3, title: "third" }), - ]; - const result = pickEligible(snapshots, [], cfg); - expect(result.eligible.map((i) => i.number)).toEqual([5, 1, 3]); - }); - - test("excludes issues missing the tracker label", () => { - const result = pickEligible([snapshot({ number: 99, labels: ["bug"] })], [], cfg); - expect(result.eligible).toEqual([]); - 
expect(result.excluded).toEqual([{ number: 99, reason: { kind: "missingLabel" } }]); - }); - - test("excludes issues claimed by an open PR via Closes #N", () => { - const openPRs: OpenPRClosing[] = [{ pr: 200, closes: [42] }]; - const result = pickEligible([snapshot()], openPRs, cfg); - expect(result.eligible).toEqual([]); - expect(result.excluded).toEqual([ - { number: 42, reason: { kind: "claimedByPR", pr: 200 } }, - ]); - }); - - test("mix of eligible and excluded", () => { - const snapshots = [ - snapshot({ number: 1, title: "kept" }), - snapshot({ number: 2, title: "no-label", labels: ["bug"] }), - snapshot({ number: 3, title: "claimed" }), - ]; - const openPRs: OpenPRClosing[] = [{ pr: 50, closes: [3] }]; - const result = pickEligible(snapshots, openPRs, cfg); - expect(result.eligible.map((i) => i.number)).toEqual([1]); - expect(result.excluded).toHaveLength(2); - }); - - test("returned issues are validated against the issue.ts Issue schema (branch matches regex)", () => { - const result = pickEligible([snapshot({ title: "Test it" })], [], cfg); - const branch = result.eligible[0]?.branch ?? ""; - expect(branch).toMatch(/^sandcastle\/issue-\d+-[a-z0-9._-]+$/); - }); -}); diff --git a/.sandcastle/eligibility.ts b/.sandcastle/eligibility.ts deleted file mode 100644 index 87a79ab..0000000 --- a/.sandcastle/eligibility.ts +++ /dev/null @@ -1,72 +0,0 @@ -import type { IssueSnapshot, OpenPRClosing } from "./github"; -import { Issue, makeBranch } from "./issue"; -import type { Issue as IssueT } from "./issue"; - -export type EligibilityConfig = { label: string }; - -export type ExclusionReason = - | { kind: "issueClosed" } - | { kind: "missingLabel" } - | { kind: "claimedByPR"; pr: number }; - -export type Verdict = - | { eligible: true } - | { eligible: false; reason: ExclusionReason }; - -// Single eligibility predicate, used at iteration-start (over each candidate -// snapshot) and at per-issue dispatch (over the live snapshot). 
Checks run in -// a fixed order so the reason returned is stable across call sites. -export function evaluate( - snapshot: IssueSnapshot, - openPRs: OpenPRClosing[], - config: EligibilityConfig, -): Verdict { - if (snapshot.state === "closed") { - return { eligible: false, reason: { kind: "issueClosed" } }; - } - if (!snapshot.labels.includes(config.label)) { - return { eligible: false, reason: { kind: "missingLabel" } }; - } - const claimingPR = openPRs.find((p) => p.closes.includes(snapshot.number))?.pr; - if (claimingPR !== undefined) { - return { eligible: false, reason: { kind: "claimedByPR", pr: claimingPR } }; - } - return { eligible: true }; -} - -export type EligibilityResult = { - eligible: IssueT[]; - excluded: Array<{ number: number; reason: ExclusionReason }>; -}; - -// Iteration-start partition: applies `evaluate` to every snapshot, mints -// branch names for survivors, validates them through issue.ts's Issue -// schema. Eligible issues are returned in the same order as the input -// snapshots — callers control ordering by passing snapshots in the order -// they want them processed (e.g. project board drag-order). 
-export function pickEligible( - snapshots: IssueSnapshot[], - openPRs: OpenPRClosing[], - config: EligibilityConfig, -): EligibilityResult { - const excluded: EligibilityResult["excluded"] = []; - const eligible: IssueT[] = []; - - for (const snap of snapshots) { - const verdict = evaluate(snap, openPRs, config); - if (!verdict.eligible) { - excluded.push({ number: snap.number, reason: verdict.reason }); - continue; - } - eligible.push( - Issue.parse({ - number: snap.number, - title: snap.title, - branch: makeBranch(snap.number, snap.title), - labels: snap.labels, - }), - ); - } - - return { eligible, excluded }; -} diff --git a/.sandcastle/github.test.ts b/.sandcastle/github.test.ts deleted file mode 100644 index 5c490e9..0000000 --- a/.sandcastle/github.test.ts +++ /dev/null @@ -1,170 +0,0 @@ -import { describe, expect, test } from "bun:test"; -import { - fetchIssueLiveState, - fetchOpenLabelledIssues, - fetchOpenPRsClosingIssues, - fetchProjectReadyIssues, - type RunGh, -} from "./github"; - -const fakeRunGh = (output: string): RunGh => { - return async () => output; -}; - -const capturingRunGh = (output: string): { runGh: RunGh; calls: string[][] } => { - const calls: string[][] = []; - const runGh: RunGh = async (args) => { - calls.push(args); - return output; - }; - return { runGh, calls }; -}; - -describe("fetchOpenLabelledIssues", () => { - test("normalises uppercase state and flattens labels", async () => { - const raw = JSON.stringify([ - { number: 1, title: "First", state: "OPEN", labels: [{ name: "Sandcastle" }] }, - { number: 2, title: "Second", state: "open", labels: [] }, - ]); - const result = await fetchOpenLabelledIssues("Sandcastle", fakeRunGh(raw)); - expect(result).toEqual([ - { number: 1, title: "First", state: "open", labels: ["Sandcastle"] }, - { number: 2, title: "Second", state: "open", labels: [] }, - ]); - }); - - test("passes label through to gh args", async () => { - const { runGh, calls } = capturingRunGh("[]"); - await 
fetchOpenLabelledIssues("Sandcastle", runGh); - expect(calls).toEqual([ - ["issue", "list", "--state", "open", "--label", "Sandcastle", "--json", "number,title,state,labels"], - ]); - }); - - test("rejects malformed gh output", async () => { - const raw = JSON.stringify([{ number: -1, title: "x", state: "open", labels: [] }]); - await expect(fetchOpenLabelledIssues("Sandcastle", fakeRunGh(raw))).rejects.toThrow(); - }); -}); - -describe("fetchOpenPRsClosingIssues", () => { - test("extracts Closes / Fixes / Resolves with optional past tense, case-insensitive", async () => { - const raw = JSON.stringify([ - { number: 100, body: "This PR Closes #1, fixes #2, resolved #3, and FIXED #4. Also closes #5." }, - ]); - const [entry] = await fetchOpenPRsClosingIssues(fakeRunGh(raw)); - expect(entry?.pr).toBe(100); - expect(entry?.closes.sort((a, b) => a - b)).toEqual([1, 2, 3, 4, 5]); - }); - - test("returns an empty closes array for a null body", async () => { - const raw = JSON.stringify([{ number: 101, body: null }]); - expect(await fetchOpenPRsClosingIssues(fakeRunGh(raw))).toEqual([{ pr: 101, closes: [] }]); - }); - - test("dedupes repeated mentions", async () => { - const raw = JSON.stringify([{ number: 102, body: "Closes #7. Fixes #7. Resolves #7." }]); - expect(await fetchOpenPRsClosingIssues(fakeRunGh(raw))).toEqual([{ pr: 102, closes: [7] }]); - }); - - test("ignores numbers not preceded by a closing keyword", async () => { - const raw = JSON.stringify([{ number: 103, body: "See #42 for context. Closes #43." 
}]); - expect(await fetchOpenPRsClosingIssues(fakeRunGh(raw))).toEqual([{ pr: 103, closes: [43] }]); - }); - - test("rejects non-string non-null body", async () => { - const raw = JSON.stringify([{ number: 104, body: 42 }]); - await expect(fetchOpenPRsClosingIssues(fakeRunGh(raw))).rejects.toThrow(); - }); -}); - -describe("fetchProjectReadyIssues", () => { - const buildItem = (overrides: { - status?: string; - labels?: string[]; - repo?: string; - type?: string; - number?: number; - title?: string; - }) => ({ - status: overrides.status ?? "Ready", - labels: overrides.labels ?? ["Sandcastle"], - content: { - type: overrides.type ?? "Issue", - number: overrides.number ?? 1, - title: overrides.title ?? "Title", - repository: overrides.repo ?? "applification/contexture", - }, - }); - - test("filters by status, repo, label, and type=Issue and preserves board order", async () => { - const raw = JSON.stringify({ - items: [ - buildItem({ number: 237, title: "Top of Ready" }), - buildItem({ number: 99, title: "Wrong column", status: "Backlog" }), - buildItem({ number: 50, title: "Wrong repo", repo: "applification/other" }), - buildItem({ number: 60, title: "Missing label", labels: ["enhancement"] }), - buildItem({ number: 70, title: "Draft item", type: "DraftIssue" }), - buildItem({ number: 233, title: "Second of Ready" }), - ], - }); - const result = await fetchProjectReadyIssues( - "applification", - 1, - "applification/contexture", - "Sandcastle", - fakeRunGh(raw), - ); - expect(result.map((i) => i.number)).toEqual([237, 233]); - expect(result[0]).toEqual({ - number: 237, - title: "Top of Ready", - state: "open", - labels: ["Sandcastle"], - }); - }); - - test("passes owner/number/limit through to gh args", async () => { - const { runGh, calls } = capturingRunGh(JSON.stringify({ items: [] })); - await fetchProjectReadyIssues("applification", 1, "applification/contexture", "Sandcastle", runGh); - expect(calls).toEqual([ - ["project", "item-list", "1", "--owner", 
"applification", "--format", "json", "--limit", "200"], - ]); - }); - - test("rejects malformed gh output", async () => { - const raw = JSON.stringify({ items: [{ status: "Ready", labels: [], content: { type: "Issue" } }] }); - await expect( - fetchProjectReadyIssues("applification", 1, "applification/contexture", "Sandcastle", fakeRunGh(raw)), - ).rejects.toThrow(); - }); -}); - -describe("fetchIssueLiveState", () => { - test("normalises uppercase CLOSED to lowercase", async () => { - const raw = JSON.stringify({ - number: 7, - title: "x", - state: "CLOSED", - labels: [{ name: "Sandcastle" }], - }); - expect(await fetchIssueLiveState(7, fakeRunGh(raw))).toEqual({ - number: 7, - title: "x", - state: "closed", - labels: ["Sandcastle"], - }); - }); - - test("rejects unknown state values", async () => { - const raw = JSON.stringify({ number: 7, title: "x", state: "merged", labels: [] }); - await expect(fetchIssueLiveState(7, fakeRunGh(raw))).rejects.toThrow(); - }); - - test("passes issue number through to gh args", async () => { - const raw = JSON.stringify({ number: 7, title: "x", state: "open", labels: [] }); - const { runGh, calls } = capturingRunGh(raw); - await fetchIssueLiveState(7, runGh); - expect(calls).toEqual([["issue", "view", "7", "--json", "number,title,state,labels"]]); - }); -}); diff --git a/.sandcastle/github.ts b/.sandcastle/github.ts deleted file mode 100644 index 88799a0..0000000 --- a/.sandcastle/github.ts +++ /dev/null @@ -1,174 +0,0 @@ -import { z } from "zod"; - -// Adapter to the `gh` CLI. Spawns the binary, validates its JSON output, and -// surfaces typed snapshots to the orchestrator. Schemas and the closing-keyword -// regex are file-private; callers see only fetchers and inferred types. -// -// Each fetcher takes a `runGh` seam as its last argument, defaulting to a real -// `gh` subprocess. Tests pass a fake `runGh` that returns canned JSON strings — -// the same surface a caller exercises in production. 
- -export type RunGh = (args: string[]) => Promise; - -const defaultRunGh: RunGh = async (args) => { - const proc = Bun.spawn(["gh", ...args], { stdout: "pipe", stderr: "pipe" }); - const [stdout, stderr] = await Promise.all([ - new Response(proc.stdout).text(), - new Response(proc.stderr).text(), - ]); - await proc.exited; - if (proc.exitCode !== 0) { - throw new Error(`gh ${args.join(" ")} exited ${proc.exitCode}: ${stderr.trim()}`); - } - return stdout; -}; - -const IssueSnapshotSchema = z.object({ - number: z.number().int().positive(), - title: z.string(), - state: z - .enum(["OPEN", "CLOSED", "open", "closed"]) - .transform((s) => s.toLowerCase() as "open" | "closed"), - labels: z.array(z.object({ name: z.string() })).transform((ls) => ls.map((l) => l.name)), -}); -const IssueListSchema = z.array(IssueSnapshotSchema); - -// Project items expose the underlying issue under `content` and lift the -// kanban Status column to a top-level `status` string. `repository` is the -// owner/name slug — we filter on it because the project can hold items from -// multiple repos, and Sandcastle only ever wants this repo's issues. `number` -// and `repository` are absent for `DraftIssue` items (project-only cards -// not yet promoted to a real issue), so the schema tolerates that and we -// filter draft items out by `content.type === "Issue"`. 
-const ProjectItemSchema = z.object({ - status: z.string().optional(), - labels: z.array(z.string()).optional().default([]), - content: z.object({ - type: z.string(), - number: z.number().int().positive().optional(), - title: z.string(), - repository: z.string().optional(), - }), -}); -const ProjectItemListSchema = z.object({ items: z.array(ProjectItemSchema) }); - -const PRBodyEntry = z.object({ - number: z.number().int().positive(), - body: z.string().nullable(), -}); -const PRListSchema = z.array(PRBodyEntry); - -// PRs declare which issues they close via `Closes #N` / `Fixes #N` / -// `Resolves #N` (case-insensitive, optional past tense). -const CLOSES_PATTERN = /(?:close[sd]?|fix(?:e[sd])?|resolve[sd]?)\s+#(\d+)/gi; - -export type IssueSnapshot = z.infer; -export type OpenPRClosing = { pr: number; closes: number[] }; - -function extractClosingNumbers(body: string | null): number[] { - if (body === null) return []; - const numbers = new Set(); - for (const match of body.matchAll(CLOSES_PATTERN)) { - const n = Number.parseInt(match[1] ?? "", 10); - if (Number.isInteger(n) && n > 0) numbers.add(n); - } - return [...numbers]; -} - -async function ghJson(runGh: RunGh, args: string[]): Promise { - return JSON.parse(await runGh(args)); -} - -// Fetch open issues carrying the harness's tracker label. -export async function fetchOpenLabelledIssues( - label: string, - runGh: RunGh = defaultRunGh, -): Promise { - const raw = await ghJson(runGh, [ - "issue", - "list", - "--state", - "open", - "--label", - label, - "--json", - "number,title,state,labels", - ]); - return IssueListSchema.parse(raw); -} - -// Fetch issues sitting in the project's `Ready` column for the given repo, -// preserving the board's drag-order. The orchestrator uses this in place of -// `fetchOpenLabelledIssues` so the user's kanban order drives selection. We -// synthesise `state: "open"` because Ready items are by definition open — -// closing an issue moves it out of Ready automatically. 
-export async function fetchProjectReadyIssues( - owner: string, - projectNumber: number, - repo: string, - label: string, - runGh: RunGh = defaultRunGh, -): Promise { - const raw = await ghJson(runGh, [ - "project", - "item-list", - String(projectNumber), - "--owner", - owner, - "--format", - "json", - "--limit", - "200", - ]); - const { items } = ProjectItemListSchema.parse(raw); - const out: IssueSnapshot[] = []; - for (const item of items) { - // Filter to Ready issues for this repo with the tracker label. DraftIssue - // items lack `content.number` and `content.repository`, so the type guard - // also narrows the optionals to defined values for the push below. - if ( - item.status !== "Ready" || - item.content.type !== "Issue" || - item.content.repository !== repo || - item.content.number === undefined || - !item.labels.includes(label) - ) { - continue; - } - out.push({ - number: item.content.number, - title: item.content.title, - state: "open", - labels: item.labels, - }); - } - return out; -} - -// Fetch every open PR and extract the issue numbers each one closes via its -// body. We don't filter PRs by tracker label — a PR opened against any -// Sandcastle issue still claims that issue. -export async function fetchOpenPRsClosingIssues( - runGh: RunGh = defaultRunGh, -): Promise { - const raw = await ghJson(runGh, ["pr", "list", "--state", "open", "--json", "number,body"]); - return PRListSchema.parse(raw).map((pr) => ({ - pr: pr.number, - closes: extractClosingNumbers(pr.body), - })); -} - -// Single-issue live state probe used by reconciliation. 
-export async function fetchIssueLiveState( - issueNumber: number, - runGh: RunGh = defaultRunGh, -): Promise { - const raw = await ghJson(runGh, [ - "issue", - "view", - String(issueNumber), - "--json", - "number,title,state,labels", - ]); - return IssueSnapshotSchema.parse(raw); -} diff --git a/.sandcastle/implement-docs-prompt.md b/.sandcastle/implement-docs-prompt.md deleted file mode 100644 index 9bba095..0000000 --- a/.sandcastle/implement-docs-prompt.md +++ /dev/null @@ -1,34 +0,0 @@ -# TASK - -Resolve documentation issue #{{ISSUE_NUMBER}}: {{ISSUE_TITLE}}. - -Pull in the issue using `gh issue view {{ISSUE_NUMBER}} --json number,title,body,labels,comments,state,author`. If it has a parent PRD, pull that in the same way. - -Only work on the issue specified. - -Work on branch {{BRANCH}}. This is a documentation-only change — markdown files only. Do not touch source code. - -# EXECUTION - -1. Read the relevant docs and the issue. -2. Make the documentation edits. -3. Re-read what you wrote — does it actually answer the issue? Is anything inaccurate compared to current code? -4. No tests, no `bun run ci` — pure docs. - -# COMMIT - -Make a git commit. The commit message must: - -1. Reference the issue (e.g. `Docs: Update README sandcastle section (issue #{{ISSUE_NUMBER}})`) - -Keep it concise. - -# THE ISSUE - -If the task is not complete, leave a comment on the GitHub issue with what was done. Do not close the issue — the merged PR will close it automatically. - -Once complete, output COMPLETE. - -# FINAL RULES - -ONLY WORK ON A SINGLE TASK. ONLY EDIT DOCUMENTATION (`.md` files). 
diff --git a/.sandcastle/implement-prompt.md b/.sandcastle/implement-prompt.md deleted file mode 100644 index acd4d6a..0000000 --- a/.sandcastle/implement-prompt.md +++ /dev/null @@ -1,85 +0,0 @@ -# TOOLS - -**Never use Bash for filesystem operations that have a dedicated tool.** Each Bash call costs ~30–40s of overhead in this sandbox; the dedicated tools are an order of magnitude faster and return structured results. This rule is not a suggestion — measured runs spend most of their wasted time here. - -| If you want to… | Use… | NOT Bash with… | -| --------------------------- | ---------------- | -------------------------------- | -| Read a file (whole or part) | `Read` | `cat` / `head` / `tail` / `less` | -| Find files by name or glob | `Glob` | `find` / `ls` / `fd` | -| Search file contents | `Grep` | `grep` / `rg` / `ag` | -| Edit a file | `Edit` / `Write` | `sed` / `awk` / `tee` / heredocs | - -Bash is for: running tests, git, gh, package managers, build commands. Nothing else. - -**Orientation.** To learn the tree layout, read `README.md` once — it lists every app and package. For anything it doesn't cover, run a single `Glob "**/*"` (or your harness's equivalent). One call gives you the whole tree instantly. - -# TASK - -Fix issue #{{ISSUE_NUMBER}}: {{ISSUE_TITLE}} - -Pull in the issue using `gh issue view {{ISSUE_NUMBER}} --json number,title,body,labels,comments,state,author`. If it has a parent PRD, pull that in the same way. (The `--json` form avoids a GitHub `projectItems` permission error on tokens without project scope.) - -Only work on the issue specified. - -Work on branch {{BRANCH}}. Make commits and run tests. Do NOT push the branch, open a PR, or close the issue — a separate agent handles PR creation, and the merged PR closes the issue automatically. - -# EXPLORATION - -Read only what you need to make this specific change. Do not crawl the whole repo. - -1. Start from the issue body — it usually names the files or modules involved. 
- - **If the change is isolated to 1–3 named files**, read them directly with `Read`. - - **If the issue spans multiple modules / layers, or you don't yet know which files to touch**, dispatch a single `Agent` call with `subagent_type: "Explore"` to map the relevant code in one round trip. Do not do 10+ narrow searches to discover the same thing — that wastes context. -2. Use `Grep` to find direct callers and tests of the symbols you'll change. Read the closest tests. -3. Stop exploring once you can describe the change you're about to make. If you find yourself reading unrelated code "for context", that's a signal to stop. - -Avoid: opening every file in a directory, reading framework/config files unrelated to the change, fetching docs you can already see in the source. - -# EXECUTION - -If applicable, use RGR to complete the task. - -1. RED: write one test -2. GREEN: write the implementation to pass that test -3. REPEAT until done -4. REFACTOR the code - -# FEEDBACK LOOPS - -Run `bun run ci` to ensure typecheck, tests, and lint all pass. This is mandatory — do not commit if it fails. If `bun run ci` fails before you have made any changes, the sandbox itself is broken (e.g. corrupt `node_modules`); stop and report the failure rather than committing. - -When a lint/typecheck/test command fails, read the **full error output** before re-running. Do not pipe to `head` / `tail` / narrow `grep` on the first failure — you will miss the actual error and re-run unnecessarily. Once you have the error, fix it and verify with one re-run. - -**Never run the same command twice with identical args.** If a command appears to have failed, look harder at the output you already have — the answer is in the previous result, not in re-running. Re-running a slow command (`bun run ci` takes ~40s; `gh pr create` is irreversible) wastes time at best and double-submits at worst. - -# COMMIT - -Make a git commit. The commit message must: - -1. Start with `IMPLEMENT:` prefix -2. 
Include task completed + PRD reference -3. Key decisions made -4. Files changed -5. Blockers or notes for next iteration - -Keep it concise. - -# THE ISSUE - -If the task is not complete, leave a comment on the GitHub issue with what was done. - -Do not close the issue — the merged PR will close it automatically via `Closes #N`. - -# FINAL OUTPUT - -When the commit is made and `bun run ci` passed, your final message must be exactly: - - COMPLETE - -No summary, no recap, no file list, no "here's what I did" — the commit message already contains all of that. Any text outside the sentinel risks tripping the harness's failure-tone detector and forcing a re-run. - -If you cannot complete the task, instead output a single line beginning with `BLOCKED:` followed by one sentence, then stop. - -# FINAL RULES - -ONLY WORK ON A SINGLE TASK. diff --git a/.sandcastle/issue.test.ts b/.sandcastle/issue.test.ts deleted file mode 100644 index 3d50dec..0000000 --- a/.sandcastle/issue.test.ts +++ /dev/null @@ -1,40 +0,0 @@ -import { describe, expect, test } from "bun:test"; -import { Issue, makeBranch } from "./issue"; - -describe("makeBranch", () => { - test("derives a kebab slug from a normal title", () => { - expect(makeBranch(42, "Fix auth bug")).toBe("sandcastle/issue-42-fix-auth-bug"); - }); - - test("is idempotent — re-slugifying an already-slug title produces the same branch", () => { - const slug = "fix-auth-bug"; - expect(makeBranch(7, slug)).toBe(`sandcastle/issue-7-${slug}`); - expect(makeBranch(7, slug)).toBe(makeBranch(7, slug)); - }); - - test("strips punctuation and collapses repeated separators", () => { - expect(makeBranch(1, "Hello, world!! 
again")).toBe("sandcastle/issue-1-hello-world-again"); - }); - - test("falls back to 'untitled' when slugify returns empty", () => { - expect(makeBranch(9, "🚀🎉")).toBe("sandcastle/issue-9-untitled"); - }); - - test("truncates very long titles so the branch stays under the 200 cap", () => { - const long = "word ".repeat(200); - const branch = makeBranch(123, long); - expect(branch.length).toBeLessThanOrEqual(200); - expect(branch.startsWith("sandcastle/issue-123-")).toBe(true); - }); - - test("handles unicode by transliterating to ascii where slugify supports it", () => { - const branch = makeBranch(5, "Café résumé"); - expect(branch).toBe("sandcastle/issue-5-cafe-resume"); - }); - - test("emits branches that match the schema's BRANCH_REGEX", () => { - const branch = makeBranch(1, "Whatever it is"); - const ok = Issue.parse({ number: 1, title: "Whatever it is", branch, labels: [] }); - expect(ok.branch).toBe(branch); - }); -}); diff --git a/.sandcastle/issue.ts b/.sandcastle/issue.ts deleted file mode 100644 index 31f01fe..0000000 --- a/.sandcastle/issue.ts +++ /dev/null @@ -1,33 +0,0 @@ -import slugify from "@sindresorhus/slugify"; -import { z } from "zod"; - -// Branch contract: `sandcastle/issue-{number}-{slug}` where slug is lowercase -// alphanumeric with `.`, `_`, `-` separators, and the whole branch is at most -// 200 chars. The regex and the minter live next to each other so the format -// has one source of truth. -const BRANCH_REGEX = /^sandcastle\/issue-\d+-[a-z0-9._-]+$/; - -// Slug cap accounts for the fixed prefix `sandcastle/issue-{N}-` (~26 chars -// for issue numbers under 10^7), leaving comfortable headroom under the -// 200-char branch cap. -const MAX_SLUG_LENGTH = 160; - -// Deterministic, idempotent issue → branch derivation. Uses -// @sindresorhus/slugify (handles Unicode + repeated separators) then -// truncates the slug. 
-export function makeBranch(issueNumber: number, title: string): string { - const rawSlug = slugify(title, { separator: "-", lowercase: true }); - // Slugify can return an empty string for titles with no slug-able content - // (e.g. only emoji). Fall back to a stable placeholder so the branch is - // still valid against the regex (`[a-z0-9._-]+` requires at least one char). - const slug = (rawSlug.length === 0 ? "untitled" : rawSlug).slice(0, MAX_SLUG_LENGTH); - return `sandcastle/issue-${issueNumber}-${slug}`; -} - -export const Issue = z.object({ - number: z.number().int().positive(), - title: z.string().min(1), - branch: z.string().regex(BRANCH_REGEX).max(200), - labels: z.array(z.string()), -}); -export type Issue = z.infer; diff --git a/.sandcastle/main.ts b/.sandcastle/main.ts index ce22490..2e2e124 100644 --- a/.sandcastle/main.ts +++ b/.sandcastle/main.ts @@ -1,341 +1,201 @@ -import * as sandcastle from "@ai-hero/sandcastle"; -import { docker } from "@ai-hero/sandcastle/sandboxes/docker"; -import { evaluate, pickEligible } from "./eligibility"; -import type { ExclusionReason } from "./eligibility"; -import { fetchIssueLiveState, fetchOpenPRsClosingIssues, fetchProjectReadyIssues } from "./github"; -import { enforcementFor } from "./enforcement"; -import { agent, emitPhaseOutcome, emitRunStart, emitUsageFromRun, streamLogger } from "./harness"; -import type { AgentSpec } from "./harness"; -import type { Issue } from "./issue"; - -// ---------- Tracker conventions ---------- - -// GitHub label used to opt issues into the Sandcastle workflow. -const LABEL = "Sandcastle"; - -// Source of truth for issue selection: the Contexture project board's `Ready` -// column, scoped to this repo. The board's drag-order drives what Sandcastle -// picks first — see `fetchProjectReadyIssues`. Requires the gh token to carry -// the `read:project` scope (`gh auth refresh -s read:project`). 
-const PROJECT_OWNER = "applification"; -const PROJECT_NUMBER = 1; -const PROJECT_REPO = "applification/contexture"; - -// ---------- Orchestrator limits ---------- - -// Each iteration drains a parallel batch of up to MAX_PARALLEL issues. We -// take the top N from the project board's Ready column (no LLM planner) — -// merge conflicts on overlapping PRs are a normal git outcome, not worth an -// LLM round-trip to predict. MAX_ITERATIONS is a safety cap; most runs exit -// early when Ready is drained. -const MAX_ITERATIONS = 5; -const MAX_PARALLEL = 2; - -// ---------- Sandbox setup ---------- - -// Skip host->worktree copy: this monorepo's node_modules is ~3.5GB and blows -// past sandcastle's hard-coded 60s copy timeout. The implementer sandbox runs -// `bun install` inside the container instead. Env files are gitignored, so -// copy them in explicitly. -const COPY_TO_WORKTREE: readonly string[] = ["apps/desktop/.env", "apps/web/.env.local"]; - -// Verify the install actually produced a usable workspace. `bun install` exits -// 0 even when individual extractions are mangled, so we follow with `turbo -// typecheck`, which resolves and loads imports across every workspace and -// fails loudly if a package's main entry is missing. -const INSTALL_AND_VERIFY = "bun install && bun run typecheck"; - -// ---------- Agent specs ---------- - -// Each agent is keyed by purpose. Effort levels reflect Sandcastle's intended -// workload: simple bug fixes and minor tweaks. Anything complex is handled -// HITL in Claude Code, not here — so we don't pay for high-effort thinking -// on routine work. Reviewer is the exception: edge-case stress-testing -// genuinely benefits from extended thinking, and it's the last gate before a -// human review. -// -// PR creation is not an agent — it's a host-side `git push` + `gh pr create -// --fill` after the sandbox sync. 
Inside the container the agent has no -// credentials for the remote, and an LLM round-trip to write a PR body -// duplicates work the conventional commits already do. -const AGENTS = { - implementer: { - provider: "claudeCode", - model: "claude-sonnet-4-6", - effort: "medium", - promptPath: "./.sandcastle/implement-prompt.md", - }, - implementerDocs: { - provider: "claudeCode", - model: "claude-haiku-4-5-20251001", - effort: "low", - promptPath: "./.sandcastle/implement-docs-prompt.md", - }, - reviewer: { - provider: "claudeCode", - model: "claude-sonnet-4-6", - effort: "high", - promptPath: "./.sandcastle/review-prompt.md", - }, -} as const satisfies Record; - -// ---------- Sandbox provider ---------- - -// Each sandbox gets its own bun cache. Sharing ~/.bun/install/cache across -// parallel sandboxes races on tarball extraction and silently produces broken -// installs (e.g. a package with package.json pointing at a build/ output that -// was never written). Cold installs are slower; broken node_modules are worse. -const sandboxProvider = docker({}); - -function describeExclusion(reason: ExclusionReason): string { - switch (reason.kind) { - case "issueClosed": - return "issue closed"; - case "missingLabel": - return `${LABEL} label missing`; - case "claimedByPR": - return `claimed by PR #${reason.pr}`; +import { api } from "@contexture/missions"; +import { Orchestrator } from "./missions/orchestrator"; +import { planMission, readPlanFromFile } from "./missions/planner"; +import { renderStatus } from "./missions/status-view"; + +const HELP = `Usage: bun run mission [args] + +Commands: + plan "" Run the mission-plan skill, validate, insert into Convex. + plan --apply Read a previously-saved plan JSON and insert into Convex. + run [--mission ] Execute runnable features for the given (or single active) mission. + status [--mission ] Render a coloured status table from Convex. + pause Mark a mission as paused. + resume Mark a paused mission as running. 
+ replan Run the replanner agent (slice 3, not yet implemented). + +Environment: + CONVEX_URL URL of the Convex deployment (e.g. http://127.0.0.1:3210) + CONVEX_DEPLOY_KEY Optional. Admin auth for the deployment. + +Run 'bun run convex:dev' in another terminal to start the local Convex deployment.`; + +function readEnv(name: string, fallback?: string): string { + const v = process.env[name] ?? fallback; + if (!v) { + console.error(`Missing required environment variable: ${name}`); + console.error( + " Set it in .sandcastle/.env or your shell. For local dev, copy CONVEX_URL from apps/missions/.env.local.", + ); + process.exit(1); } + return v; } -const isDocsOnly = (issue: Issue) => - issue.labels.includes("documentation") && !issue.labels.some((l) => l !== "documentation" && l !== LABEL); +function buildOrchestrator(): Orchestrator { + const url = readEnv("CONVEX_URL"); + const deployKey = process.env.CONVEX_DEPLOY_KEY; + return new Orchestrator({ url, api, deployKey }); +} -async function gitOutput(args: string[], cwd: string): Promise { - const proc = Bun.spawn(["git", ...args], { cwd, stdout: "pipe", stderr: "pipe" }); - const out = await new Response(proc.stdout).text(); - await proc.exited; - return out; +function parseFlag(args: string[], name: string): string | undefined { + const idx = args.indexOf(name); + if (idx === -1 || idx === args.length - 1) return undefined; + return args[idx + 1]; } -// Run a command and surface stderr if it fails. Used for the host-side -// `git push` and `gh pr create` after the sandbox finishes — we want a -// clear failure mode, not a silent skip. 
-async function runOrFail(cmd: string[], cwd: string): Promise { - const proc = Bun.spawn(cmd, { cwd, stdout: "pipe", stderr: "pipe" }); - const [stdout, stderr] = await Promise.all([ - new Response(proc.stdout).text(), - new Response(proc.stderr).text(), - ]); - const code = await proc.exited; - if (code !== 0) { - throw new Error(`${cmd.join(" ")} failed (exit ${code}): ${stderr.trim() || stdout.trim()}`); +async function singleActiveMissionSlug(orch: Orchestrator): Promise { + const flag = parseFlag(process.argv, "--mission"); + if (flag) return flag; + const list = await orch.listMissions(); + const active = list.filter((m) => m.status !== "done" && m.status !== "failed"); + if (active.length === 0) { + console.error("No active missions. Run 'bun run mission plan \"\"' first."); + process.exit(1); + } + if (active.length > 1) { + console.error( + `Multiple active missions (${active.map((m) => m.slug).join(", ")}). Use --mission .`, + ); + process.exit(1); } - return stdout; + return active[0].slug; } -// Push the branch and open a PR from the host. Sandcastle has already -// synced the sandbox commits to the host's branch by the time we get here, -// so we use the host's existing git/gh credentials — no in-container -// credential plumbing needed. -// -// Title comes from the first commit (via `--fill-first`); body lists all -// commit subjects on the branch plus `Closes #N` so merging auto-closes -// the issue. We construct the body explicitly because passing `--body` -// alongside `--fill` would replace the commit-derived body, and `--fill` -// alone has no way to append the Closes line. -async function openPullRequest(branch: string, issueNumber: number, cwd: string): Promise { - await runOrFail(["git", "push", "-u", "origin", branch], cwd); - const subjects = (await gitOutput(["log", `main..${branch}`, "--reverse", "--format=%s"], cwd)) - .split("\n") - .filter((s) => s.length > 0); - const summary = subjects.length > 0 ? 
subjects.map((s) => `- ${s}`).join("\n") : "- (no commits)"; - const body = `## Summary\n\n${summary}\n\nCloses #${issueNumber}`; - const url = await runOrFail( - ["gh", "pr", "create", "--head", branch, "--fill-first", "--body", body], - cwd, - ); - return url.trim(); -} +async function cmdPlan(args: string[]): Promise { + const applyIdx = args.indexOf("--apply"); + if (applyIdx !== -1) { + const path = args[applyIdx + 1]; + if (!path) { + console.error("--apply requires a file path"); + process.exit(1); + } + const result = await readPlanFromFile(path); + if (!result.ok) { + console.error(`Failed to read plan: ${result.error}`); + process.exit(1); + } + const orch = buildOrchestrator(); + const id = await orch.createMission(result.plan); + console.log(`Created mission ${result.plan.slug} (id: ${id})`); + return; + } -// Files changed by `commits` relative to their first parent. We pass the SHA -// range `^..` so we capture exactly the work the agent -// introduced this run, regardless of where main currently sits or which -// branch the orchestrator was launched from. -async function pathsTouchedByCommits(worktreePath: string, commits: { sha: string }[]): Promise { - if (commits.length === 0) return []; - const first = commits[0]?.sha ?? ""; - const last = commits[commits.length - 1]?.sha ?? 
""; - const range = `${first}^..${last}`; - const out = await gitOutput(["diff", "--name-only", range], worktreePath); - return out.split("\n").filter((p) => p.length > 0); + const objective = args.filter((a) => !a.startsWith("--")).join(" ").trim(); + if (!objective) { + console.error("plan requires an objective: bun run mission plan \"\""); + process.exit(1); + } + console.log(`Planning mission: ${objective}`); + const result = await planMission(objective); + if (!result.ok) { + console.error(`Planner failed: ${result.error}`); + if (result.rawResponse) console.error(`\n--- raw response (truncated) ---\n${result.rawResponse}`); + process.exit(1); + } + const orch = buildOrchestrator(); + const id = await orch.createMission(result.plan); + console.log(`Created mission ${result.plan.slug} (id: ${id})`); } -// Pre-flight: confirm the gh token has `read:project` scope before we start -// burning iterations. The project query is the only call that needs the -// scope; if it fails here, every iteration would fail the same way. Surface -// the fix-up command so a clean machine can recover without spelunking. -try { - await fetchProjectReadyIssues(PROJECT_OWNER, PROJECT_NUMBER, PROJECT_REPO, LABEL); -} catch (err) { - const msg = err instanceof Error ? err.message : String(err); - console.error( - `Failed to query project ${PROJECT_OWNER}/${PROJECT_NUMBER}: ${msg}\n` + - `If this is a missing-scope error, run: gh auth refresh -s read:project`, +async function cmdRun(): Promise { + const orch = buildOrchestrator(); + const slug = await singleActiveMissionSlug(orch); + console.log(`Running mission: ${slug}`); + const summary = await orch.run(slug); + console.log( + `Done. Features executed: ${summary.ranFeatures}. Milestones completed: ${summary.ranMilestones}.`, ); - process.exit(1); } -emitRunStart(); - -for (let iteration = 1; iteration <= MAX_ITERATIONS; iteration++) { - console.log(`\n=== Iteration ${iteration}/${MAX_ITERATIONS} ===\n`); - - // Phase 1: Eligibility. 
Snapshots come from the project board's Ready - // column in user-controlled drag-order; pickEligible() applies the - // existing label / claim-by-PR filters and preserves that order so the - // batch is the user's top N picks. - const [snapshots, openPRs] = await Promise.all([ - fetchProjectReadyIssues(PROJECT_OWNER, PROJECT_NUMBER, PROJECT_REPO, LABEL), - fetchOpenPRsClosingIssues(), - ]); - - const { eligible, excluded } = pickEligible(snapshots, openPRs, { label: LABEL }); - - for (const e of excluded) { - console.log(` - #${e.number} excluded: ${describeExclusion(e.reason)}`); - } - - const batch = eligible.slice(0, MAX_PARALLEL); - if (batch.length === 0) { - console.log("No eligible issues this iteration. Exiting."); - break; +async function cmdStatus(): Promise { + const orch = buildOrchestrator(); + const slug = await singleActiveMissionSlug(orch); + const state = await orch.getMission(slug); + if (!state) { + console.error(`Mission not found: ${slug}`); + process.exit(1); } + const useColor = process.stdout.isTTY ?? false; + const out = renderStatus({ + mission: state.mission, + milestones: state.milestones.map((m) => ({ + id: m._id, + order: m.order, + title: m.title, + status: m.status, + })), + features: state.features.map((f) => ({ + id: f._id, + milestoneId: f.milestoneId, + slug: f.slug, + title: f.title, + status: f.status, + branch: f.branch, + fixerAttempts: f.fixerAttempts, + })), + now: Date.now(), + useColor, + }); + console.log(out); +} - console.log(`Batch of ${batch.length}:`); - for (const issue of batch) { - console.log(` #${issue.number}: ${issue.title} → ${issue.branch} [${issue.labels.join(", ")}]`); - } - - // Phase 2: per-issue pipeline (implement → maybe review → open PR), run in - // parallel across the batch. Each task is fully independent — its own - // sandbox/worktree, its own agent runs, its own host-side PR creation. 
- // Outcomes: "commits" if the implementer produced at least one commit (PR - // opened), "no-commits" if it did not, "error" on any thrown failure. We - // exit the outer loop when no task in the batch produced commits — that's - // the kill-switch against burning iterations on a systemically broken setup. - type Outcome = { kind: "commits" | "no-commits" | "error"; issue: Issue }; - - const runIssue = async (issue: Issue): Promise => { - const tag = `[#${issue.number}]`; - try { - // Reconciliation-lite: re-check the issue's live state before creating - // a sandbox. The iteration-start snapshot can age while earlier batch - // items run, and we don't want to burn a multi-minute sandbox start on - // stale state. The openPRs cache stays iteration-start — we - // deliberately don't re-fetch PRs per issue. - const live = await fetchIssueLiveState(issue.number); - const verdict = evaluate(live, openPRs, { label: LABEL }); - if (!verdict.eligible) { - console.log(`${tag} ⤺ skipped (${describeExclusion(verdict.reason)})`); - return { kind: "no-commits", issue }; - } - - await using sandbox = await sandcastle.createSandbox({ - sandbox: sandboxProvider, - branch: issue.branch, - hooks: { sandbox: { onSandboxReady: [{ command: INSTALL_AND_VERIFY }] } }, - copyToWorktree: [...COPY_TO_WORKTREE], - }); - - const docsOnly = isDocsOnly(issue); - const implementerSpec = docsOnly ? 
AGENTS.implementerDocs : AGENTS.implementer; +async function cmdPause(slug: string): Promise { + const orch = buildOrchestrator(); + await orch.pause(slug); + console.log(`Paused mission: ${slug}`); +} - await enforcementFor(implementerSpec)?.install(sandbox.worktreePath); +async function cmdResume(slug: string): Promise { + const orch = buildOrchestrator(); + await orch.resume(slug); + console.log(`Resumed mission: ${slug}`); +} - const issuePromptArgs = { - ISSUE_NUMBER: String(issue.number), - ISSUE_TITLE: issue.title, - BRANCH: issue.branch, - }; +async function main(): Promise { + const [command, ...rest] = process.argv.slice(2); - const implementerLogName = `iter${iteration}-implementer-${issue.number}`; - const implementResult = await sandbox.run({ - name: "Implementer #" + issue.number, - agent: agent(implementerSpec), - promptFile: implementerSpec.promptPath, - promptArgs: issuePromptArgs, - logging: streamLogger(implementerLogName), - }); - emitUsageFromRun(implementerLogName, iteration, implementResult.iterations); - emitPhaseOutcome("implementer", iteration, issue.number, implementResult.commits.length); + if (!command || command === "--help" || command === "-h") { + console.log(HELP); + return; + } - // Reviewer and PR creation both gate on whether *this* implementer run - // made commits. Comparing `main..HEAD` would be incorrect when the - // orchestrator is launched from a feature branch — every commit the - // launching branch had already added on top of main would falsely - // trigger both phases. 
- if (implementResult.commits.length === 0) { - console.log(`${tag} no progress`); - return { kind: "no-commits", issue }; + switch (command) { + case "plan": + await cmdPlan(rest); + return; + case "run": + await cmdRun(); + return; + case "status": + await cmdStatus(); + return; + case "pause": { + const slug = rest[0]; + if (!slug) { + console.error("pause requires a mission slug: bun run mission pause "); + process.exit(1); } - - const thisRunPaths = await pathsTouchedByCommits(sandbox.worktreePath, implementResult.commits); - const allMarkdown = thisRunPaths.length > 0 && thisRunPaths.every((p) => p.endsWith(".md")); - const skipReview = docsOnly || allMarkdown; - - if (!skipReview) { - const reviewerLogName = `iter${iteration}-reviewer-${issue.number}`; - const reviewerResult = await sandbox.run({ - name: "Reviewer #" + issue.number, - agent: agent(AGENTS.reviewer), - promptFile: AGENTS.reviewer.promptPath, - promptArgs: issuePromptArgs, - logging: streamLogger(reviewerLogName), - }); - emitUsageFromRun(reviewerLogName, iteration, reviewerResult.iterations); - emitPhaseOutcome("reviewer", iteration, issue.number, reviewerResult.commits.length); - console.log( - reviewerResult.commits.length > 0 - ? `${tag} ✎ Reviewer made ${reviewerResult.commits.length} commit(s)` - : `${tag} ∅ Reviewer made no commits`, - ); + await cmdPause(slug); + return; + } + case "resume": { + const slug = rest[0]; + if (!slug) { + console.error("resume requires a mission slug: bun run mission resume "); + process.exit(1); } - - // PR creation runs on the host: sandcastle has already applied the - // sandbox's commits to the host's branch via syncOut, so we push and - // open the PR using the host's git/gh credentials. Pushes target - // distinct branches across the batch, so concurrent push+create from - // sibling tasks don't conflict. 
- const prUrl = await openPullRequest(issue.branch, issue.number, process.cwd()); - console.log(`${tag} ✔ PR opened: ${prUrl}`); - return { kind: "commits", issue }; - } catch (reason) { - const errorTag = - typeof reason === "object" && reason !== null && "_tag" in reason - ? String((reason as { _tag: unknown })._tag) - : reason instanceof Error - ? reason.constructor.name - : "UnknownError"; - const message = reason instanceof Error ? reason.message : String(reason); - console.error(`${tag} ✗ (${issue.branch}) failed [${errorTag}]: ${message}`); - return { kind: "error", issue }; + await cmdResume(slug); + return; } - }; - - const results = await Promise.all(batch.map(runIssue)); - const commitCount = results.filter((r) => r.kind === "commits").length; - - console.log( - `\nIteration ${iteration} complete. ${commitCount}/${batch.length} produced commits.`, - ); - - if (commitCount === 0) { - console.log("No progress this iteration. Exiting."); - break; + case "replan": + console.error("replan is not yet implemented (slice 3)."); + process.exit(1); + default: + console.error(`Unknown command: ${command}\n`); + console.log(HELP); + process.exit(1); } } -console.log("\nAll done."); - -// Auto-run the analyzer at end-of-orchestration so every run produces a -// fresh `.sandcastle/logs/analysis.md`. Skipped with --no-analyze; failures -// are non-fatal (we don't want a bad analyzer to break sandcastle). -if (!process.argv.includes("--no-analyze")) { - try { - await import("./analyze"); - } catch (err) { - const msg = err instanceof Error ? 
err.message : String(err); - console.error(`Analyzer failed: ${msg}`); - } -} +await main(); diff --git a/.sandcastle/missions/orchestrator.ts b/.sandcastle/missions/orchestrator.ts new file mode 100644 index 0000000..0609ffb --- /dev/null +++ b/.sandcastle/missions/orchestrator.ts @@ -0,0 +1,261 @@ +import type { api as MissionsApi, Id } from "@contexture/missions"; +import { ConvexHttpClient } from "convex/browser"; +import { runAgent, type RunAgentResult } from "./run-agent"; +import { selectRunnable } from "./scheduler"; +import type { MissionPlan } from "./schema"; + +const MAX_PARALLEL = 2; + +type ApiType = typeof MissionsApi; + +type FeatureStatus = "todo" | "planned" | "running" | "review" | "blocked" | "done"; + +type Mission = { + _id: Id<"missions">; + slug: string; + title: string; + status: "planning" | "running" | "paused" | "done" | "failed"; + updatedAt: number; +}; + +type Milestone = { + _id: Id<"milestones">; + missionId: Id<"missions">; + order: number; + title: string; + status: "todo" | "running" | "validating" | "done" | "blocked"; +}; + +type Feature = { + _id: Id<"features">; + milestoneId: Id<"milestones">; + missionId: Id<"missions">; + slug: string; + title: string; + status: FeatureStatus; + dependencies: Id<"features">[]; + pathsOwned: string[]; + preferredAgent: "claude" | "codex"; + branch?: string; + fixerAttempts: number; +}; + +export class Orchestrator { + private readonly client: ConvexHttpClient; + private readonly api: ApiType; + + constructor(opts: { url: string; api: ApiType; deployKey?: string }) { + this.client = new ConvexHttpClient(opts.url); + if (opts.deployKey) this.client.setAuth(opts.deployKey); + this.api = opts.api; + } + + async createMission(plan: MissionPlan): Promise> { + return await this.client.mutation(this.api.missions.create, plan); + } + + async listMissions(): Promise { + return await this.client.query(this.api.missions.list, {}); + } + + async getMission(slug: string): Promise<{ + mission: Mission; + 
milestones: Milestone[]; + features: Feature[]; + } | null> { + return await this.client.query(this.api.missions.getWithChildren, { slug }); + } + + async pause(slug: string): Promise { + await this.client.mutation(this.api.missions.pause, { slug }); + } + + async resume(slug: string): Promise { + await this.client.mutation(this.api.missions.resume, { slug }); + } + + async run(slug: string): Promise<{ ranFeatures: number; ranMilestones: number }> { + let ranFeatures = 0; + let ranMilestones = 0; + + while (true) { + const state = await this.getMission(slug); + if (!state) throw new Error(`Mission not found: ${slug}`); + if (state.mission.status === "paused") { + console.log(`Mission ${slug} is paused. Exiting cleanly.`); + return { ranFeatures, ranMilestones }; + } + + if (state.mission.status === "planning") { + await this.client.mutation(this.api.missions.setStatus, { + slug, + status: "running", + }); + } + + const currentMilestone = state.milestones.find((m) => m.status !== "done"); + if (!currentMilestone) { + await this.client.mutation(this.api.missions.setStatus, { + slug, + status: "done", + }); + return { ranFeatures, ranMilestones }; + } + + const milestoneFeatures = state.features.filter( + (f) => f.milestoneId === currentMilestone._id, + ); + const sched = selectRunnable({ + features: milestoneFeatures.map((f) => ({ + id: f._id, + slug: f.slug, + status: f.status, + dependencies: f.dependencies, + pathsOwned: f.pathsOwned, + })), + maxParallel: MAX_PARALLEL, + }); + + if (sched.runnable.length === 0) { + const allDone = milestoneFeatures.every((f) => f.status === "done"); + if (allDone) { + // Slice 1: no validator. Mark milestone done immediately. + await this.client.mutation(this.api.milestones.setStatus, { + milestoneId: currentMilestone._id, + status: "done", + }); + ranMilestones += 1; + continue; + } + // Nothing runnable but features still in flight (review/blocked) — exit. + console.log( + `Milestone ${currentMilestone.title}: nothing runnable. 
Deferred: ${sched.deferred + .map((d) => `${d.feature.slug}(${d.reason.kind})`) + .join(", ")}`, + ); + return { ranFeatures, ranMilestones }; + } + + // Run features in parallel. + const work = sched.runnable.map((sf) => { + const feature = milestoneFeatures.find((f) => f._id === sf.id); + if (!feature) throw new Error(`Feature missing: ${sf.id}`); + return this.executeFeature(state.mission, currentMilestone, feature); + }); + await Promise.all(work); + ranFeatures += work.length; + } + } + + private async executeFeature( + mission: Mission, + milestone: Milestone, + feature: Feature, + ): Promise { + const branch = feature.branch ?? `mission/${mission.slug}/${feature.slug}`; + + await this.client.mutation(this.api.features.setStatus, { + featureId: feature._id, + status: "running", + branch, + }); + + const runId = await this.client.mutation(this.api.runs.recordStart, { + missionId: feature.missionId, + featureId: feature._id, + milestoneId: feature.milestoneId, + role: "worker", + agent: `stub:${feature.preferredAgent}`, + branch, + }); + + let result: RunAgentResult; + try { + result = await runAgent({ + role: "worker", + missionId: mission._id, + missionSlug: mission.slug, + milestoneId: milestone._id, + featureId: feature._id, + featureSlug: feature.slug, + branch, + promptArgs: { + MISSION_SLUG: mission.slug, + FEATURE_SLUG: feature.slug, + }, + preferredAgent: feature.preferredAgent, + }); + } catch (err) { + await this.client.mutation(this.api.runs.recordEnd, { + runId, + outcome: "failure", + }); + await this.client.mutation(this.api.features.setStatus, { + featureId: feature._id, + status: "blocked", + }); + throw err; + } + + await this.client.mutation(this.api.runs.recordEnd, { + runId, + outcome: result.outcome, + logUri: result.logUri, + }); + + if (result.commits === 0) { + await this.client.mutation(this.api.features.setStatus, { + featureId: feature._id, + status: "blocked", + }); + return; + } + + // Slice 1: no reviewer. 
Treat review as a pass-through to done. + await this.client.mutation(this.api.features.setStatus, { + featureId: feature._id, + status: "review", + }); + + const prUrl = await this.openPullRequest(mission, feature, branch); + await this.client.mutation(this.api.features.setStatus, { + featureId: feature._id, + status: "done", + pullRequestUrl: prUrl ?? undefined, + }); + } + + private async openPullRequest( + mission: Mission, + feature: Feature, + branch: string, + ): Promise { + const body = `Mission: ${mission.slug}\nFeature: ${feature._id}\nBranch: ${branch}\n\nGenerated by the missions orchestrator (slice 1 stub).`; + const proc = Bun.spawn( + [ + "gh", + "pr", + "create", + "--title", + `mission: ${mission.slug}/${feature.slug}`, + "--body", + body, + "--head", + branch, + "--base", + "main", + ], + { stdin: "ignore", stdout: "pipe", stderr: "pipe" }, + ); + const [stdout, stderr] = await Promise.all([ + new Response(proc.stdout).text(), + new Response(proc.stderr).text(), + ]); + const code = await proc.exited; + if (code !== 0) { + console.error(`gh pr create failed (exit ${code}): ${stderr.trim()}`); + return null; + } + return stdout.trim() || null; + } +} diff --git a/.sandcastle/missions/planner.ts b/.sandcastle/missions/planner.ts new file mode 100644 index 0000000..8bd46d8 --- /dev/null +++ b/.sandcastle/missions/planner.ts @@ -0,0 +1,131 @@ +import { type MissionPlan, MissionPlanSchema } from "./schema"; + +const MAX_PLANNER_RETRIES = 1; + +export type PlannerResult = + | { ok: true; plan: MissionPlan; rawResponse?: string } + | { ok: false; error: string; rawResponse?: string }; + +type ClaudePrintResponse = { + type: "result"; + result?: string; + is_error?: boolean; +}; + +function extractJsonBlock(text: string): string | null { + const fence = /```json\s*\n([\s\S]*?)\n```/m.exec(text); + if (fence) return fence[1]; + const trimmed = text.trim(); + if (trimmed.startsWith("{") && trimmed.endsWith("}")) return trimmed; + return null; +} + +async 
function runClaude( + prompt: string, +): Promise<{ stdout: string; stderr: string; code: number }> { + const proc = Bun.spawn(["claude", "-p", "--output-format", "json", prompt], { + stdin: "ignore", + stdout: "pipe", + stderr: "pipe", + }); + const [stdout, stderr] = await Promise.all([ + new Response(proc.stdout).text(), + new Response(proc.stderr).text(), + ]); + const code = await proc.exited; + return { stdout, stderr, code }; +} + +function buildPrompt(objective: string, retryFeedback?: string): string { + let prompt = `/mission-plan ${objective}`; + if (retryFeedback) { + prompt += `\n\nThe previous plan failed validation:\n${retryFeedback}\n\nEmit a corrected plan.`; + } + return prompt; +} + +export async function planMission(objective: string): Promise { + let retryFeedback: string | undefined; + + for (let attempt = 0; attempt <= MAX_PLANNER_RETRIES; attempt++) { + const prompt = buildPrompt(objective, retryFeedback); + const { stdout, stderr, code } = await runClaude(prompt); + + if (code !== 0) { + return { + ok: false, + error: `claude CLI exited with code ${code}: ${stderr.trim() || stdout.slice(0, 500)}`, + }; + } + + let parsed: ClaudePrintResponse; + try { + parsed = JSON.parse(stdout) as ClaudePrintResponse; + } catch (err) { + return { + ok: false, + error: `claude CLI did not return valid JSON: ${(err as Error).message}`, + rawResponse: stdout.slice(0, 500), + }; + } + + if (parsed.is_error || !parsed.result) { + return { ok: false, error: parsed.result ?? 
"claude CLI returned an error" }; + } + + const block = extractJsonBlock(parsed.result); + if (!block) { + retryFeedback = "Your response did not contain a ```json fenced code block with the plan."; + continue; + } + + let planJson: unknown; + try { + planJson = JSON.parse(block); + } catch (err) { + retryFeedback = `The JSON code block is not valid JSON: ${(err as Error).message}`; + continue; + } + + const result = MissionPlanSchema.safeParse(planJson); + if (!result.success) { + retryFeedback = `The plan failed schema validation:\n${result.error.issues + .map((i) => `- ${i.path.join(".")}: ${i.message}`) + .join("\n")}`; + continue; + } + + return { ok: true, plan: result.data, rawResponse: parsed.result }; + } + + return { + ok: false, + error: `Planner failed after ${MAX_PLANNER_RETRIES + 1} attempts. Last feedback: ${retryFeedback}`, + }; +} + +export async function readPlanFromFile(path: string): Promise { + const file = Bun.file(path); + if (!(await file.exists())) { + return { ok: false, error: `File not found: ${path}` }; + } + const text = await file.text(); + const block = extractJsonBlock(text) ?? 
text; + + let planJson: unknown; + try { + planJson = JSON.parse(block); + } catch (err) { + return { ok: false, error: `File does not contain valid JSON: ${(err as Error).message}` }; + } + const result = MissionPlanSchema.safeParse(planJson); + if (!result.success) { + return { + ok: false, + error: `Plan failed schema validation:\n${result.error.issues + .map((i) => `- ${i.path.join(".")}: ${i.message}`) + .join("\n")}`, + }; + } + return { ok: true, plan: result.data }; +} diff --git a/.sandcastle/missions/run-agent.ts b/.sandcastle/missions/run-agent.ts new file mode 100644 index 0000000..0bd9e14 --- /dev/null +++ b/.sandcastle/missions/run-agent.ts @@ -0,0 +1,112 @@ +import { mkdir } from "node:fs/promises"; +import { join } from "node:path"; + +export type AgentRole = "worker" | "reviewer" | "fixer" | "validator" | "replanner"; + +export type RunAgentArgs = { + role: AgentRole; + missionId: string; + missionSlug: string; + milestoneId?: string; + featureId?: string; + featureSlug?: string; + branch: string; + baseBranch?: string; + promptArgs: Record; + preferredAgent?: "claude" | "codex"; +}; + +export type RunAgentResult = { + outcome: "success" | "failure" | "aborted"; + commits: number; + completionSignal?: string; + logUri?: string; +}; + +async function spawnGit(args: string[], cwd: string): Promise<{ stdout: string; code: number }> { + const proc = Bun.spawn(["git", ...args], { + cwd, + stdin: "ignore", + stdout: "pipe", + stderr: "pipe", + }); + const stdout = await new Response(proc.stdout).text(); + const code = await proc.exited; + return { stdout, code }; +} + +async function gitOrThrow(args: string[], cwd: string): Promise { + const proc = Bun.spawn(["git", ...args], { + cwd, + stdin: "ignore", + stdout: "pipe", + stderr: "pipe", + }); + const [stdout, stderr] = await Promise.all([ + new Response(proc.stdout).text(), + new Response(proc.stderr).text(), + ]); + const code = await proc.exited; + if (code !== 0) { + throw new Error(`git 
${args.join(" ")} failed (exit ${code}): ${stderr.trim()}`); + } + return stdout; +} + +async function runStubWorker(args: RunAgentArgs, repoRoot: string): Promise { + if (!args.featureSlug || !args.featureId) { + throw new Error("worker role requires featureSlug and featureId"); + } + + const worktreesDir = join(repoRoot, ".sandcastle", "worktrees"); + await mkdir(worktreesDir, { recursive: true }); + const worktreePath = join(worktreesDir, `${args.missionSlug}-${args.featureSlug}`); + + const baseBranch = args.baseBranch ?? "main"; + const existing = await spawnGit(["rev-parse", "--verify", args.branch], repoRoot); + if (existing.code === 0) { + await gitOrThrow(["worktree", "add", worktreePath, args.branch], repoRoot); + } else { + await gitOrThrow( + ["worktree", "add", "-b", args.branch, worktreePath, baseBranch], + repoRoot, + ); + } + + try { + const readmePath = join(worktreePath, "README.md"); + const file = Bun.file(readmePath); + const existingContent = (await file.exists()) ? 
await file.text() : ""; + const stamp = `\n\n`; + await Bun.write(readmePath, existingContent + stamp); + + await gitOrThrow(["add", "README.md"], worktreePath); + await gitOrThrow( + ["commit", "-m", `stub: ${args.missionSlug}/${args.featureSlug}`], + worktreePath, + ); + await gitOrThrow(["push", "-u", "origin", args.branch], worktreePath); + + return { + outcome: "success", + commits: 1, + completionSignal: "COMPLETE", + }; + } finally { + await spawnGit(["worktree", "remove", worktreePath, "--force"], repoRoot); + } +} + +export async function runAgent(args: RunAgentArgs): Promise { + const repoRoot = process.cwd(); + + switch (args.role) { + case "worker": + return await runStubWorker(args, repoRoot); + case "reviewer": + case "fixer": + case "validator": + case "replanner": + throw new Error(`runAgent role "${args.role}" is not implemented in slice 1`); + } +} diff --git a/.sandcastle/missions/scheduler.test.ts b/.sandcastle/missions/scheduler.test.ts new file mode 100644 index 0000000..b982268 --- /dev/null +++ b/.sandcastle/missions/scheduler.test.ts @@ -0,0 +1,122 @@ +import { describe, expect, test } from "bun:test"; +import { type SchedulerFeature, selectRunnable } from "./scheduler"; + +function feature(overrides: Partial & { id: string }): SchedulerFeature { + return { + slug: overrides.id, + status: "todo", + dependencies: [], + pathsOwned: [], + ...overrides, + }; +} + +describe("selectRunnable", () => { + test("returns features whose status is todo and deps are done, capped at maxParallel", () => { + const result = selectRunnable({ + features: [ + feature({ id: "a" }), + feature({ id: "b" }), + feature({ id: "c" }), + ], + maxParallel: 2, + }); + expect(result.runnable.map((f) => f.id)).toEqual(["a", "b"]); + expect(result.deferred[0].feature.id).toBe("c"); + expect(result.deferred[0].reason.kind).toBe("concurrencyCap"); + }); + + test("blocks a feature whose dependency is not done", () => { + const result = selectRunnable({ + features: [ + 
import { describe, expect, test } from "bun:test";
import { type SchedulerFeature, selectRunnable } from "./scheduler";

// Test helper: builds a SchedulerFeature with safe defaults. `id` doubles as
// the slug so assertions can refer to features by a single name.
function feature(overrides: Partial<SchedulerFeature> & { id: string }): SchedulerFeature {
  return {
    slug: overrides.id,
    status: "todo",
    dependencies: [],
    pathsOwned: [],
    ...overrides,
  };
}

describe("selectRunnable", () => {
  // Plain todo features with no deps or path claims schedule in input order,
  // and the overflow beyond maxParallel is deferred with concurrencyCap.
  test("returns features whose status is todo and deps are done, capped at maxParallel", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a" }),
        feature({ id: "b" }),
        feature({ id: "c" }),
      ],
      maxParallel: 2,
    });
    expect(result.runnable.map((f) => f.id)).toEqual(["a", "b"]);
    expect(result.deferred[0].feature.id).toBe("c");
    expect(result.deferred[0].reason.kind).toBe("concurrencyCap");
  });

  // A dependency that is merely running (not done) blocks its dependent.
  test("blocks a feature whose dependency is not done", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a", status: "running" }),
        feature({ id: "b", dependencies: ["a"] }),
      ],
      maxParallel: 2,
    });
    expect(result.runnable).toEqual([]);
    const def = result.deferred.find((d) => d.feature.id === "b");
    expect(def?.reason.kind).toBe("depsNotDone");
    if (def?.reason.kind === "depsNotDone") {
      // pendingDeps reports the blocking dependency by slug.
      expect(def.reason.pendingDeps).toContain("a");
    }
  });

  test("schedules a feature after its dependency is done", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a", status: "done" }),
        feature({ id: "b", dependencies: ["a"] }),
      ],
      maxParallel: 2,
    });
    expect(result.runnable.map((f) => f.id)).toEqual(["b"]);
  });

  // Glob overlap ("src/foo/**" vs "src/foo/bar.ts") serializes b behind a;
  // the disjoint path "src/other/**" is unaffected.
  test("serializes features with overlapping pathsOwned", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a", pathsOwned: ["src/foo/**"] }),
        feature({ id: "b", pathsOwned: ["src/foo/bar.ts"] }),
        feature({ id: "c", pathsOwned: ["src/other/**"] }),
      ],
      maxParallel: 3,
    });
    expect(result.runnable.map((f) => f.id)).toEqual(["a", "c"]);
    const def = result.deferred.find((d) => d.feature.id === "b");
    expect(def?.reason.kind).toBe("pathConflict");
    if (def?.reason.kind === "pathConflict") {
      expect(def.reason.conflictsWith).toBe("a");
    }
  });

  // Already-running features seed the path claims, so a todo feature can
  // conflict with an in-flight one.
  test("respects already-running features when serializing on path", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a", status: "running", pathsOwned: ["src/foo/**"] }),
        feature({ id: "b", pathsOwned: ["src/foo/bar.ts"] }),
      ],
      maxParallel: 2,
    });
    expect(result.runnable).toEqual([]);
    expect(result.deferred[0].reason.kind).toBe("pathConflict");
  });

  // Running features consume concurrency slots even though they are not
  // re-selected.
  test("counts running features toward the concurrency cap", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a", status: "running" }),
        feature({ id: "b" }),
        feature({ id: "c" }),
      ],
      maxParallel: 2,
    });
    expect(result.runnable.map((f) => f.id)).toEqual(["b"]);
    const def = result.deferred.find((d) => d.feature.id === "c");
    expect(def?.reason.kind).toBe("concurrencyCap");
  });

  // review/blocked appear in deferred (wrongStatus); done and running are
  // silently skipped.
  test("ignores features in non-actionable statuses (review, done, blocked)", () => {
    const result = selectRunnable({
      features: [
        feature({ id: "a", status: "review" }),
        feature({ id: "b", status: "done" }),
        feature({ id: "c", status: "blocked" }),
        feature({ id: "d" }),
      ],
      maxParallel: 2,
    });
    expect(result.runnable.map((f) => f.id)).toEqual(["d"]);
    expect(result.deferred.some((d) => d.feature.id === "a")).toBe(true);
    expect(result.deferred.some((d) => d.feature.id === "c")).toBe(true);
    expect(result.deferred.some((d) => d.feature.id === "b")).toBe(false);
  });

  // Empty pathsOwned means "claims nothing", never "claims everything".
  test("features with no pathsOwned do not conflict with each other", () => {
    const result = selectRunnable({
      features: [feature({ id: "a" }), feature({ id: "b" })],
      maxParallel: 3,
    });
    expect(result.runnable).toHaveLength(2);
  });
});
=== 0) return { test: () => false }; + const matchers = patterns.map((p) => picomatch(p)); + return { test: (path: string) => matchers.some((m) => m(path)) }; +} + +function hasPathOverlap(a: string[], b: string[]): boolean { + if (a.length === 0 || b.length === 0) return false; + + const aMatch = ownerships(a); + const bMatch = ownerships(b); + if (a.some((p) => bMatch.test(p))) return true; + if (b.some((p) => aMatch.test(p))) return true; + for (const sample of SAMPLE_PATHS) { + if (aMatch.test(sample) && bMatch.test(sample)) return true; + } + return false; +} + +export function selectRunnable({ features, maxParallel }: SelectInput): SelectResult { + const byId = new Map(features.map((f) => [f.id, f])); + const running = features.filter((f) => f.status === "running"); + const remainingSlots = Math.max(0, maxParallel - running.length); + + const runnable: SchedulerFeature[] = []; + const deferred: SelectResult["deferred"] = []; + const claimedPaths: { feature: SchedulerFeature }[] = running.map((f) => ({ feature: f })); + + for (const f of features) { + if (f.status !== "todo") { + if (f.status !== "running" && f.status !== "done") { + deferred.push({ feature: f, reason: { kind: "wrongStatus" } }); + } + continue; + } + + const pendingDeps = f.dependencies + .map((depId) => byId.get(depId)) + .filter((dep): dep is SchedulerFeature => dep !== undefined && dep.status !== "done") + .map((dep) => dep.slug); + if (pendingDeps.length > 0) { + deferred.push({ feature: f, reason: { kind: "depsNotDone", pendingDeps } }); + continue; + } + + const conflict = claimedPaths.find((c) => hasPathOverlap(f.pathsOwned, c.feature.pathsOwned)); + if (conflict) { + deferred.push({ + feature: f, + reason: { kind: "pathConflict", conflictsWith: conflict.feature.slug }, + }); + continue; + } + + if (runnable.length >= remainingSlots) { + deferred.push({ feature: f, reason: { kind: "concurrencyCap" } }); + continue; + } + + runnable.push(f); + claimedPaths.push({ feature: f }); + } + + 
return { runnable, deferred }; +} diff --git a/.sandcastle/missions/schema.test.ts b/.sandcastle/missions/schema.test.ts new file mode 100644 index 0000000..177860a --- /dev/null +++ b/.sandcastle/missions/schema.test.ts @@ -0,0 +1,98 @@ +import { describe, expect, test } from "bun:test"; +import { MissionPlanSchema } from "./schema"; + +const validPlan = { + slug: "convex-ir", + title: "Bring up Convex emit IR", + objective: "Generate Convex schemas from IR", + milestones: [ + { + slug: "m1", + title: "Schema emit", + successCriteria: ["Tables emitted", "Indexes match IR"], + validationPrompt: "Run bun run typecheck and confirm no errors.", + features: [ + { + slug: "f1", + title: "Emit schema.ts", + prompt: "Add an emitter for schema.ts", + dependencies: [], + pathsOwned: ["packages/core/src/emit-convex/**"], + preferredAgent: "claude", + skillRefs: ["backend"], + }, + { + slug: "f2", + title: "Emit per-table CRUD", + prompt: "Generate CRUD functions", + dependencies: ["f1"], + pathsOwned: ["packages/core/src/emit-convex/crud/**"], + preferredAgent: "claude", + skillRefs: [], + }, + ], + }, + ], +}; + +describe("MissionPlanSchema", () => { + test("accepts a valid plan and applies defaults", () => { + const minimal = { + slug: "min", + title: "Min", + objective: "Minimal", + milestones: [ + { + slug: "m1", + title: "M1", + successCriteria: ["ok"], + validationPrompt: "check", + features: [ + { slug: "f1", title: "F1", prompt: "do thing" }, + ], + }, + ], + }; + const parsed = MissionPlanSchema.parse(minimal); + expect(parsed.milestones[0].features[0].dependencies).toEqual([]); + expect(parsed.milestones[0].features[0].preferredAgent).toBe("claude"); + }); + + test("accepts a fully-specified plan", () => { + const parsed = MissionPlanSchema.parse(validPlan); + expect(parsed.milestones).toHaveLength(1); + expect(parsed.milestones[0].features[1].dependencies).toEqual(["f1"]); + }); + + test("rejects duplicate feature slugs", () => { + const broken = 
structuredClone(validPlan); + broken.milestones[0].features[1].slug = "f1"; + const result = MissionPlanSchema.safeParse(broken); + expect(result.success).toBe(false); + if (result.success) return; + expect(result.error.issues.some((i) => i.message.includes("duplicate feature slug"))).toBe( + true, + ); + }); + + test("rejects unknown dependency slug", () => { + const broken = structuredClone(validPlan); + broken.milestones[0].features[1].dependencies = ["ghost"]; + const result = MissionPlanSchema.safeParse(broken); + expect(result.success).toBe(false); + if (result.success) return; + expect( + result.error.issues.some((i) => i.message.includes("unknown feature: ghost")), + ).toBe(true); + }); + + test("rejects empty milestones array", () => { + const broken = { ...validPlan, milestones: [] }; + expect(MissionPlanSchema.safeParse(broken).success).toBe(false); + }); + + test("rejects invalid slug characters", () => { + const broken = { ...validPlan, slug: "Has Spaces" }; + expect(MissionPlanSchema.safeParse(broken).success).toBe(false); + }); +}); diff --git a/.sandcastle/missions/schema.ts b/.sandcastle/missions/schema.ts new file mode 100644 index 0000000..dca3cb3 --- /dev/null +++ b/.sandcastle/missions/schema.ts @@ -0,0 +1,64 @@ +import { z } from "zod"; + +const slug = z + .string() + .min(1) + .regex(/^[a-z0-9][a-z0-9-]*$/, "slug must be lowercase letters, digits, or hyphens"); + +export const FeaturePlanSchema = z.object({ + slug, + title: z.string().min(1), + prompt: z.string().min(1), + dependencies: z.array(slug).default([]), + pathsOwned: z.array(z.string()).default([]), + preferredAgent: z.enum(["claude", "codex"]).default("claude"), + skillRefs: z.array(z.string()).default([]), +}); + +export const MilestonePlanSchema = z.object({ + slug, + title: z.string().min(1), + successCriteria: z.array(z.string().min(1)).min(1), + validationPrompt: z.string().min(1), + features: z.array(FeaturePlanSchema).min(1), +}); + +export const MissionPlanSchema = z + 
.object({ + slug, + title: z.string().min(1), + objective: z.string().min(1), + milestones: z.array(MilestonePlanSchema).min(1), + }) + .superRefine((plan, ctx) => { + const allFeatureSlugs = new Set(); + for (const m of plan.milestones) { + for (const f of m.features) { + if (allFeatureSlugs.has(f.slug)) { + ctx.addIssue({ + code: "custom", + message: `duplicate feature slug: ${f.slug}`, + path: ["milestones", m.slug, "features", f.slug], + }); + } + allFeatureSlugs.add(f.slug); + } + } + for (const m of plan.milestones) { + for (const f of m.features) { + for (const dep of f.dependencies) { + if (!allFeatureSlugs.has(dep)) { + ctx.addIssue({ + code: "custom", + message: `feature ${f.slug} depends on unknown feature: ${dep}`, + path: ["milestones", m.slug, "features", f.slug, "dependencies"], + }); + } + } + } + } + }); + +export type FeaturePlan = z.infer; +export type MilestonePlan = z.infer; +export type MissionPlan = z.infer; diff --git a/.sandcastle/missions/status-view.test.ts b/.sandcastle/missions/status-view.test.ts new file mode 100644 index 0000000..65a7f55 --- /dev/null +++ b/.sandcastle/missions/status-view.test.ts @@ -0,0 +1,99 @@ +import { describe, expect, test } from "bun:test"; +import { renderStatus } from "./status-view"; + +const sample = { + mission: { + slug: "demo", + title: "Demo mission", + status: "running" as const, + updatedAt: 1_000_000, + }, + milestones: [ + { id: "m1", order: 0, title: "Milestone 1", status: "running" as const }, + { id: "m2", order: 1, title: "Milestone 2", status: "todo" as const }, + ], + features: [ + { + id: "f1", + milestoneId: "m1", + slug: "foo", + title: "Add foo", + status: "running" as const, + branch: "mission/demo/foo", + fixerAttempts: 0, + lastRunAt: 999_000, + }, + { + id: "f2", + milestoneId: "m1", + slug: "bar", + title: "Add bar", + status: "review" as const, + branch: "mission/demo/bar", + fixerAttempts: 1, + lastRunAt: 990_000, + }, + { + id: "f3", + milestoneId: "m2", + slug: "baz", + title: 
import { describe, expect, test } from "bun:test";
import { renderStatus } from "./status-view";

// Fixed fixture: a running milestone with two features (running + review) and
// a future milestone with one untouched feature. `now` is pinned and color is
// off so the rendered output is deterministic.
const sample = {
  mission: {
    slug: "demo",
    title: "Demo mission",
    status: "running" as const,
    updatedAt: 1_000_000,
  },
  milestones: [
    { id: "m1", order: 0, title: "Milestone 1", status: "running" as const },
    { id: "m2", order: 1, title: "Milestone 2", status: "todo" as const },
  ],
  features: [
    {
      id: "f1",
      milestoneId: "m1",
      slug: "foo",
      title: "Add foo",
      status: "running" as const,
      branch: "mission/demo/foo",
      fixerAttempts: 0,
      lastRunAt: 999_000, // 1s before `now`
    },
    {
      id: "f2",
      milestoneId: "m1",
      slug: "bar",
      title: "Add bar",
      status: "review" as const,
      branch: "mission/demo/bar",
      fixerAttempts: 1,
      lastRunAt: 990_000, // 10s before `now`
    },
    {
      id: "f3",
      milestoneId: "m2",
      slug: "baz",
      title: "Add baz",
      status: "todo" as const,
      fixerAttempts: 0,
      // no branch / lastRunAt: rendered as em-dashes
    },
  ],
  now: 1_000_000,
  useColor: false,
};

describe("renderStatus", () => {
  test("renders mission header with slug and status", () => {
    const out = renderStatus(sample);
    expect(out).toContain("Demo mission");
    expect(out).toContain("(demo)");
    expect(out).toContain("running");
  });

  test("renders both milestones with their statuses", () => {
    const out = renderStatus(sample);
    expect(out).toContain("Milestone 1");
    expect(out).toContain("Milestone 2");
    expect(out).toContain("[running]");
    expect(out).toContain("[todo]");
  });

  // Ordering check: each feature row must appear after its own milestone
  // header and before the next milestone's header.
  test("renders feature rows under their milestone", () => {
    const out = renderStatus(sample);
    const lines = out.split("\n");
    const m1Index = lines.findIndex((l) => l.includes("Milestone 1"));
    const m2Index = lines.findIndex((l) => l.includes("Milestone 2"));
    const fooIndex = lines.findIndex((l) => l.includes("foo"));
    const bazIndex = lines.findIndex((l) => l.includes("baz"));
    expect(m1Index).toBeLessThan(fooIndex);
    expect(fooIndex).toBeLessThan(m2Index);
    expect(m2Index).toBeLessThan(bazIndex);
  });

  test("renders relative last-run time", () => {
    const out = renderStatus(sample);
    expect(out).toContain("1s ago");
    expect(out).toContain("10s ago");
  });

  test("emits ANSI codes when useColor is true", () => {
    const out = renderStatus({ ...sample, useColor: true });
    expect(out).toContain("\x1b[");
  });

  // f3 has neither branch nor lastRunAt; both columns fall back to an em-dash.
  test("renders an em-dash for missing branch and last run", () => {
    const out = renderStatus(sample);
    const bazLine = out.split("\n").find((l) => l.startsWith(" baz")) ?? "";
    expect(bazLine).toContain("—");
  });

  test("handles a milestone with no features", () => {
    const noFeatures = { ...sample, features: [] };
    const out = renderStatus(noFeatures);
    expect(out).toContain("(no features)");
  });
});
s : s + " ".repeat(width - len); +} + +const FEATURE_STATUS_COLOR: Record = { + todo: "gray", + planned: "gray", + running: "yellow", + review: "blue", + blocked: "red", + done: "green", +}; + +const MILESTONE_STATUS_COLOR: Record = { + todo: "gray", + running: "yellow", + validating: "blue", + done: "green", + blocked: "red", +}; + +const MISSION_STATUS_COLOR: Record = { + planning: "cyan", + running: "yellow", + paused: "gray", + done: "green", + failed: "red", +}; + +function formatRelative(ms: number): string { + if (ms < 60_000) return `${Math.round(ms / 1000)}s ago`; + if (ms < 3_600_000) return `${Math.round(ms / 60_000)}m ago`; + if (ms < 86_400_000) return `${Math.round(ms / 3_600_000)}h ago`; + return `${Math.round(ms / 86_400_000)}d ago`; +} + +export function renderStatus(input: StatusViewInput): string { + const { mission, milestones, features, now, useColor } = input; + const lines: string[] = []; + + const title = color(mission.title, "bold", useColor); + const status = color(mission.status, MISSION_STATUS_COLOR[mission.status], useColor); + lines.push(`${title} (${mission.slug}) — ${status}`); + lines.push(""); + + const sortedMilestones = [...milestones].sort((a, b) => a.order - b.order); + + for (const m of sortedMilestones) { + const ms = color(`[${m.status}]`, MILESTONE_STATUS_COLOR[m.status], useColor); + lines.push(`${color(m.title, "bold", useColor)} ${ms}`); + + const milestoneFeatures = features.filter((f) => f.milestoneId === m.id); + if (milestoneFeatures.length === 0) { + lines.push(color(" (no features)", "gray", useColor)); + lines.push(""); + continue; + } + + const rows = milestoneFeatures.map((f) => ({ + slug: f.slug, + title: f.title, + status: color(f.status, FEATURE_STATUS_COLOR[f.status], useColor), + branch: f.branch ?? color("—", "gray", useColor), + fixers: String(f.fixerAttempts), + lastRun: f.lastRunAt ? 
formatRelative(now - f.lastRunAt) : color("—", "gray", useColor), + })); + + const widths = { + slug: Math.max(4, ...rows.map((r) => visibleLength(r.slug))), + title: Math.max(5, ...rows.map((r) => visibleLength(r.title))), + status: Math.max(6, ...rows.map((r) => visibleLength(r.status))), + branch: Math.max(6, ...rows.map((r) => visibleLength(r.branch))), + fixers: Math.max(3, ...rows.map((r) => visibleLength(r.fixers))), + lastRun: Math.max(8, ...rows.map((r) => visibleLength(r.lastRun))), + }; + + const header = [ + pad(color("slug", "bold", useColor), widths.slug), + pad(color("title", "bold", useColor), widths.title), + pad(color("status", "bold", useColor), widths.status), + pad(color("branch", "bold", useColor), widths.branch), + pad(color("fix", "bold", useColor), widths.fixers), + pad(color("last run", "bold", useColor), widths.lastRun), + ].join(" "); + lines.push(` ${header}`); + + for (const r of rows) { + lines.push( + ` ${[ + pad(r.slug, widths.slug), + pad(r.title, widths.title), + pad(r.status, widths.status), + pad(r.branch, widths.branch), + pad(r.fixers, widths.fixers), + pad(r.lastRun, widths.lastRun), + ].join(" ")}`, + ); + } + lines.push(""); + } + + return lines.join("\n").trimEnd(); +} diff --git a/.sandcastle/review-prompt.md b/.sandcastle/review-prompt.md deleted file mode 100644 index 2e2c75c..0000000 --- a/.sandcastle/review-prompt.md +++ /dev/null @@ -1,82 +0,0 @@ -# TASK - -Review the code changes on branch {{BRANCH}} for issue #{{ISSUE_NUMBER}}: {{ISSUE_TITLE}} - -You are an expert code reviewer focused on enhancing code clarity, consistency, and maintainability while preserving exact functionality. - -# CONTEXT - - - -!`gh issue view {{ISSUE_NUMBER}} --json number,title,body,labels --jq '.'` - - - - - -!`git diff main..HEAD --stat` - - - -Read the full diff with `git diff main..HEAD` (or per-file `git diff main..HEAD -- `) when you start. 
Pull issue comments with `gh issue view {{ISSUE_NUMBER}} --json comments` only if you need them. - -# REVIEW PROCESS - -## 1. Read the diff and look for anything dodgy - -Read the diff carefully. For anything that looks suspicious — fragile logic, unchecked assumptions, tricky conditions, implicit type coercions, missing guards — write a test that exercises it. Try to actually break it. If you can break it, fix it. - -## 2. Stress-test edge cases - -Go beyond the happy path. For every changed code path, think about what inputs or states could cause problems: - -- Empty arrays, empty strings, zero, negative numbers -- Missing optional fields, null values, undefined properties -- Rapid repeated calls, race conditions, state that changes mid-operation -- Off-by-one errors in loops or slice/substring operations -- Regressions in adjacent functionality - -Write tests for anything that isn't already covered. - -## 3. Analyze for code quality improvements - -Look for opportunities to: - -- Reduce unnecessary complexity and nesting -- Eliminate redundant code and abstractions -- Improve readability through clear variable and function names -- Consolidate related logic -- Remove unnecessary comments that describe obvious code -- Avoid nested ternary operators - prefer switch statements or if/else chains -- Choose clarity over brevity - explicit code is often better than overly compact code - -## 4. Maintain balance - -Avoid over-simplification that could: - -- Reduce code clarity or maintainability -- Create overly clever solutions that are hard to understand -- Combine too many concerns into single functions or components -- Remove helpful abstractions that improve code organization -- Make the code harder to debug or extend - -## 5. Apply project standards - -Follow the established coding standards in the project at @.sandcastle/CODING_STANDARDS.md. - -## 6. Preserve functionality - -Never change what the code does - only how it does it. 
All original features, outputs, and behaviors must remain intact. - -# EXECUTION - -1. Run `bun run ci` first to confirm the current state passes. When any check fails, read the **full error output** before re-running. Do not pipe to `head` / `tail` / narrow `grep` on the first failure — you will miss the actual error and re-run unnecessarily. Once you have the error, fix it and verify with one re-run. **Never run the same command twice with identical args** — if it appears to have failed, the answer is in the output you already have. `cd /home/agent/workspace && bun run ci` is the same command as `bun run ci` when you are already in `/home/agent/workspace`; don't re-run with a redundant `cd` prefix. -2. Attempt to reproduce the original bug with new test cases — if you can, fix it -3. Write edge case tests that stress the implementation -4. Make any code quality improvements directly on this branch -5. Run `bun run ci` again to ensure nothing is broken -6. Commit with a message starting with `REVIEW:` describing the refinements - -If the code is already clean, well-tested, and handles edge cases properly, do nothing. - -Once complete, output COMPLETE. diff --git a/apps/missions/.gitignore b/apps/missions/.gitignore new file mode 100644 index 0000000..8c5fbb9 --- /dev/null +++ b/apps/missions/.gitignore @@ -0,0 +1,2 @@ + +.env.local diff --git a/apps/missions/README.md b/apps/missions/README.md new file mode 100644 index 0000000..2345b14 --- /dev/null +++ b/apps/missions/README.md @@ -0,0 +1,39 @@ +# @contexture/missions + +Convex deployment for the Missions orchestrator. v1 runs `convex --local` (no cloud account needed); the schema is identical when promoted to cloud later. 
+ +## Local dev + +```bash +bun run dev +``` + +This starts `convex dev --local`, which: + +- Downloads a Convex backend binary the first time +- Configures a local deployment at `http://127.0.0.1:3210` +- Writes `.env.local` (gitignored) with `CONVEX_DEPLOYMENT`, `CONVEX_URL`, `CONVEX_SITE_URL` +- Watches `convex/` and reapplies schema + functions on save +- Generates types in `convex/_generated/` + +The orchestrator (`.sandcastle/missions/orchestrator.ts`) reads `CONVEX_URL` from `.sandcastle/.env`. For local dev, copy the value from `apps/missions/.env.local`. + +## Tests + +```bash +bun run test +``` + +Uses [`convex-test`](https://www.npmjs.com/package/convex-test) — runs an in-memory mock of the Convex runtime; no live deployment needed. + +## Schema + +| Table | Purpose | +|---|---| +| `missions` | One row per mission (slug, title, objective, status) | +| `milestones` | Ordered list per mission, with success criteria + validation prompt | +| `features` | The unit of agent work. Status, deps, owned paths, branch, PR url | +| `runs` | Per-agent-invocation record (worker, reviewer, fixer, validator, replanner) | +| `events` | Append-only history of state transitions | + +Every status-changing mutation also writes an `events` row so the audit trail is canonical. diff --git a/apps/missions/convex/_generated/api.d.ts b/apps/missions/convex/_generated/api.d.ts new file mode 100644 index 0000000..9fd8021 --- /dev/null +++ b/apps/missions/convex/_generated/api.d.ts @@ -0,0 +1,57 @@ +/* eslint-disable */ +/** + * Generated `api` utility. + * + * THIS CODE IS AUTOMATICALLY GENERATED. + * + * To regenerate, run `npx convex dev`. 
+ * @module
+ */
+
+import type * as events from "../events.js";
+import type * as features from "../features.js";
+import type * as milestones from "../milestones.js";
+import type * as missions from "../missions.js";
+import type * as runs from "../runs.js";
+
+import type {
+  ApiFromModules,
+  FilterApi,
+  FunctionReference,
+} from "convex/server";
+
+declare const fullApi: ApiFromModules<{
+  events: typeof events;
+  features: typeof features;
+  milestones: typeof milestones;
+  missions: typeof missions;
+  runs: typeof runs;
+}>;
+
+/**
+ * A utility for referencing Convex functions in your app's public API.
+ *
+ * Usage:
+ * ```js
+ * const myFunctionReference = api.myModule.myFunction;
+ * ```
+ */
+export declare const api: FilterApi<
+  typeof fullApi,
+  FunctionReference<any, "public">
+>;
+
+/**
+ * A utility for referencing Convex functions in your app's internal API.
+ *
+ * Usage:
+ * ```js
+ * const myFunctionReference = internal.myModule.myFunction;
+ * ```
+ */
+export declare const internal: FilterApi<
+  typeof fullApi,
+  FunctionReference<any, "internal">
+>;
+
+export declare const components: {};
diff --git a/apps/missions/convex/_generated/api.js b/apps/missions/convex/_generated/api.js
new file mode 100644
index 0000000..44bf985
--- /dev/null
+++ b/apps/missions/convex/_generated/api.js
@@ -0,0 +1,23 @@
+/* eslint-disable */
+/**
+ * Generated `api` utility.
+ *
+ * THIS CODE IS AUTOMATICALLY GENERATED.
+ *
+ * To regenerate, run `npx convex dev`.
+ * @module
+ */
+
+import { anyApi, componentsGeneric } from "convex/server";
+
+/**
+ * A utility for referencing Convex functions in your app's API.
+ *
+ * Usage:
+ * ```js
+ * const myFunctionReference = api.myModule.myFunction;
+ * ```
+ */
+export const api = anyApi;
+export const internal = anyApi;
+export const components = componentsGeneric();
diff --git a/apps/missions/convex/_generated/dataModel.d.ts b/apps/missions/convex/_generated/dataModel.d.ts
new file mode 100644
index 0000000..f97fd19
--- /dev/null
+++ b/apps/missions/convex/_generated/dataModel.d.ts
@@ -0,0 +1,60 @@
+/* eslint-disable */
+/**
+ * Generated data model types.
+ *
+ * THIS CODE IS AUTOMATICALLY GENERATED.
+ *
+ * To regenerate, run `npx convex dev`.
+ * @module
+ */
+
+import type {
+  DataModelFromSchemaDefinition,
+  DocumentByName,
+  TableNamesInDataModel,
+  SystemTableNames,
+} from "convex/server";
+import type { GenericId } from "convex/values";
+import schema from "../schema.js";
+
+/**
+ * The names of all of your Convex tables.
+ */
+export type TableNames = TableNamesInDataModel<DataModel>;
+
+/**
+ * The type of a document stored in Convex.
+ *
+ * @typeParam TableName - A string literal type of the table name (like "users").
+ */
+export type Doc<TableName extends TableNames> = DocumentByName<
+  DataModel,
+  TableName
+>;
+
+/**
+ * An identifier for a document in Convex.
+ *
+ * Convex documents are uniquely identified by their `Id`, which is accessible
+ * on the `_id` field. To learn more, see [Document IDs](https://docs.convex.dev/using/document-ids).
+ *
+ * Documents can be loaded using `db.get(tableName, id)` in query and mutation functions.
+ *
+ * IDs are just strings at runtime, but this type can be used to distinguish them from other
+ * strings when type checking.
+ *
+ * @typeParam TableName - A string literal type of the table name (like "users").
+ */
+export type Id<TableName extends TableNames | SystemTableNames> =
+  GenericId<TableName>;
+
+/**
+ * A type describing your Convex data model.
+ *
+ * This type includes information about what tables you have, the type of
+ * documents stored in those tables, and the indexes defined on them.
+ *
+ * This type is used to parameterize methods like `queryGeneric` and
+ * `mutationGeneric` to make them type-safe.
+ */
+export type DataModel = DataModelFromSchemaDefinition<typeof schema>;
diff --git a/apps/missions/convex/_generated/server.d.ts b/apps/missions/convex/_generated/server.d.ts
new file mode 100644
index 0000000..bec05e6
--- /dev/null
+++ b/apps/missions/convex/_generated/server.d.ts
@@ -0,0 +1,143 @@
+/* eslint-disable */
+/**
+ * Generated utilities for implementing server-side Convex query and mutation functions.
+ *
+ * THIS CODE IS AUTOMATICALLY GENERATED.
+ *
+ * To regenerate, run `npx convex dev`.
+ * @module
+ */
+
+import {
+  ActionBuilder,
+  HttpActionBuilder,
+  MutationBuilder,
+  QueryBuilder,
+  GenericActionCtx,
+  GenericMutationCtx,
+  GenericQueryCtx,
+  GenericDatabaseReader,
+  GenericDatabaseWriter,
+} from "convex/server";
+import type { DataModel } from "./dataModel.js";
+
+/**
+ * Define a query in this Convex app's public API.
+ *
+ * This function will be allowed to read your Convex database and will be accessible from the client.
+ *
+ * @param func - The query function. It receives a {@link QueryCtx} as its first argument.
+ * @returns The wrapped query. Include this as an `export` to name it and make it accessible.
+ */
+export declare const query: QueryBuilder<DataModel, "public">;
+
+/**
+ * Define a query that is only accessible from other Convex functions (but not from the client).
+ *
+ * This function will be allowed to read from your Convex database. It will not be accessible from the client.
+ *
+ * @param func - The query function. It receives a {@link QueryCtx} as its first argument.
+ * @returns The wrapped query. Include this as an `export` to name it and make it accessible.
+ */
+export declare const internalQuery: QueryBuilder<DataModel, "internal">;
+
+/**
+ * Define a mutation in this Convex app's public API.
+ *
+ * This function will be allowed to modify your Convex database and will be accessible from the client.
+ *
+ * @param func - The mutation function.
It receives a {@link MutationCtx} as its first argument.
+ * @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
+ */
+export declare const mutation: MutationBuilder<DataModel, "public">;
+
+/**
+ * Define a mutation that is only accessible from other Convex functions (but not from the client).
+ *
+ * This function will be allowed to modify your Convex database. It will not be accessible from the client.
+ *
+ * @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
+ * @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
+ */
+export declare const internalMutation: MutationBuilder<DataModel, "internal">;
+
+/**
+ * Define an action in this Convex app's public API.
+ *
+ * An action is a function which can execute any JavaScript code, including non-deterministic
+ * code and code with side-effects, like calling third-party services.
+ * They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive.
+ * They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}.
+ *
+ * @param func - The action. It receives an {@link ActionCtx} as its first argument.
+ * @returns The wrapped action. Include this as an `export` to name it and make it accessible.
+ */
+export declare const action: ActionBuilder<DataModel, "public">;
+
+/**
+ * Define an action that is only accessible from other Convex functions (but not from the client).
+ *
+ * @param func - The function. It receives an {@link ActionCtx} as its first argument.
+ * @returns The wrapped function. Include this as an `export` to name it and make it accessible.
+ */
+export declare const internalAction: ActionBuilder<DataModel, "internal">;
+
+/**
+ * Define an HTTP action.
+ *
+ * The wrapped function will be used to respond to HTTP requests received
+ * by a Convex deployment if the requests matches the path and method where
+ * this action is routed. Be sure to route your httpAction in `convex/http.js`.
+ *
+ * @param func - The function. It receives an {@link ActionCtx} as its first argument
+ * and a Fetch API `Request` object as its second.
+ * @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up.
+ */
+export declare const httpAction: HttpActionBuilder;
+
+/**
+ * A set of services for use within Convex query functions.
+ *
+ * The query context is passed as the first argument to any Convex query
+ * function run on the server.
+ *
+ * This differs from the {@link MutationCtx} because all of the services are
+ * read-only.
+ */
+export type QueryCtx = GenericQueryCtx<DataModel>;
+
+/**
+ * A set of services for use within Convex mutation functions.
+ *
+ * The mutation context is passed as the first argument to any Convex mutation
+ * function run on the server.
+ */
+export type MutationCtx = GenericMutationCtx<DataModel>;
+
+/**
+ * A set of services for use within Convex action functions.
+ *
+ * The action context is passed as the first argument to any Convex action
+ * function run on the server.
+ */
+export type ActionCtx = GenericActionCtx<DataModel>;
+
+/**
+ * An interface to read from the database within Convex query functions.
+ *
+ * The two entry points are {@link DatabaseReader.get}, which fetches a single
+ * document by its {@link Id}, or {@link DatabaseReader.query}, which starts
+ * building a query.
+ */
+export type DatabaseReader = GenericDatabaseReader<DataModel>;
+
+/**
+ * An interface to read from and write to the database within Convex mutation
+ * functions.
+ *
+ * Convex guarantees that all writes within a single mutation are
+ * executed atomically, so you never have to worry about partial writes leaving
+ * your data in an inconsistent state. See [the Convex Guide](https://docs.convex.dev/understanding/convex-fundamentals/functions#atomicity-and-optimistic-concurrency-control)
+ * for the guarantees Convex provides your functions.
+ */
+export type DatabaseWriter = GenericDatabaseWriter<DataModel>;
diff --git a/apps/missions/convex/_generated/server.js b/apps/missions/convex/_generated/server.js
new file mode 100644
index 0000000..bf3d25a
--- /dev/null
+++ b/apps/missions/convex/_generated/server.js
@@ -0,0 +1,93 @@
+/* eslint-disable */
+/**
+ * Generated utilities for implementing server-side Convex query and mutation functions.
+ *
+ * THIS CODE IS AUTOMATICALLY GENERATED.
+ *
+ * To regenerate, run `npx convex dev`.
+ * @module
+ */
+
+import {
+  actionGeneric,
+  httpActionGeneric,
+  queryGeneric,
+  mutationGeneric,
+  internalActionGeneric,
+  internalMutationGeneric,
+  internalQueryGeneric,
+} from "convex/server";
+
+/**
+ * Define a query in this Convex app's public API.
+ *
+ * This function will be allowed to read your Convex database and will be accessible from the client.
+ *
+ * @param func - The query function. It receives a {@link QueryCtx} as its first argument.
+ * @returns The wrapped query. Include this as an `export` to name it and make it accessible.
+ */
+export const query = queryGeneric;
+
+/**
+ * Define a query that is only accessible from other Convex functions (but not from the client).
+ *
+ * This function will be allowed to read from your Convex database. It will not be accessible from the client.
+ *
+ * @param func - The query function. It receives a {@link QueryCtx} as its first argument.
+ * @returns The wrapped query. Include this as an `export` to name it and make it accessible.
+ */
+export const internalQuery = internalQueryGeneric;
+
+/**
+ * Define a mutation in this Convex app's public API.
+ *
+ * This function will be allowed to modify your Convex database and will be accessible from the client.
+ *
+ * @param func - The mutation function. It receives a {@link MutationCtx} as its first argument.
+ * @returns The wrapped mutation. Include this as an `export` to name it and make it accessible.
+ */ +export const mutation = mutationGeneric; + +/** + * Define a mutation that is only accessible from other Convex functions (but not from the client). + * + * This function will be allowed to modify your Convex database. It will not be accessible from the client. + * + * @param func - The mutation function. It receives a {@link MutationCtx} as its first argument. + * @returns The wrapped mutation. Include this as an `export` to name it and make it accessible. + */ +export const internalMutation = internalMutationGeneric; + +/** + * Define an action in this Convex app's public API. + * + * An action is a function which can execute any JavaScript code, including non-deterministic + * code and code with side-effects, like calling third-party services. + * They can be run in Convex's JavaScript environment or in Node.js using the "use node" directive. + * They can interact with the database indirectly by calling queries and mutations using the {@link ActionCtx}. + * + * @param func - The action. It receives an {@link ActionCtx} as its first argument. + * @returns The wrapped action. Include this as an `export` to name it and make it accessible. + */ +export const action = actionGeneric; + +/** + * Define an action that is only accessible from other Convex functions (but not from the client). + * + * @param func - The function. It receives an {@link ActionCtx} as its first argument. + * @returns The wrapped function. Include this as an `export` to name it and make it accessible. + */ +export const internalAction = internalActionGeneric; + +/** + * Define an HTTP action. + * + * The wrapped function will be used to respond to HTTP requests received + * by a Convex deployment if the requests matches the path and method where + * this action is routed. Be sure to route your httpAction in `convex/http.js`. + * + * @param func - The function. It receives an {@link ActionCtx} as its first argument + * and a Fetch API `Request` object as its second. 
+ * @returns The wrapped function. Import this function from `convex/http.js` and route it to hook it up. + */ +export const httpAction = httpActionGeneric; diff --git a/apps/missions/convex/events.ts b/apps/missions/convex/events.ts new file mode 100644 index 0000000..7adb732 --- /dev/null +++ b/apps/missions/convex/events.ts @@ -0,0 +1,31 @@ +import { v } from 'convex/values'; +import { mutation, query } from './_generated/server'; + +export const listByMission = query({ + args: { + missionId: v.id('missions'), + limit: v.optional(v.number()), + }, + handler: async (ctx, { missionId, limit }) => { + return await ctx.db + .query('events') + .withIndex('by_mission', (q) => q.eq('missionId', missionId)) + .order('desc') + .take(limit ?? 100); + }, +}); + +export const append = mutation({ + args: { + missionId: v.id('missions'), + actor: v.union(v.literal('orchestrator'), v.literal('dashboard'), v.literal('user')), + kind: v.string(), + detail: v.any(), + }, + handler: async (ctx, args) => { + return await ctx.db.insert('events', { + ...args, + at: Date.now(), + }); + }, +}); diff --git a/apps/missions/convex/features.test.ts b/apps/missions/convex/features.test.ts new file mode 100644 index 0000000..c367e1e --- /dev/null +++ b/apps/missions/convex/features.test.ts @@ -0,0 +1,94 @@ +import { convexTest } from 'convex-test'; +import { describe, expect, it } from 'vitest'; +import { api } from './_generated/api'; +import schema from './schema'; + +const modules = import.meta.glob('./**/*.*s'); + +const seedPlan = { + slug: 'feat-test', + title: 'Feat Test', + objective: 'Test feature mutations', + milestones: [ + { + slug: 'm1', + title: 'M1', + successCriteria: ['ok'], + validationPrompt: 'check', + features: [ + { + slug: 'f1', + title: 'F1', + prompt: '...', + dependencies: [] as string[], + pathsOwned: [] as string[], + preferredAgent: 'claude' as const, + skillRefs: [] as string[], + }, + ], + }, + ], +}; + +describe('features.setStatus', () => { + it('updates 
status, branch, and PR url; appends an event', async () => { + const t = convexTest(schema, modules); + await t.mutation(api.missions.create, seedPlan); + const result = await t.query(api.missions.getWithChildren, { + slug: 'feat-test', + }); + if (!result) throw new Error('mission not found'); + const f1 = result.features[0]; + + await t.mutation(api.features.setStatus, { + featureId: f1._id, + status: 'running', + branch: 'mission/feat-test/f1', + }); + + const refreshed = await t.query(api.features.listByMission, { + missionId: result.mission._id, + }); + expect(refreshed[0].status).toBe('running'); + expect(refreshed[0].branch).toBe('mission/feat-test/f1'); + + const events = await t.query(api.events.listByMission, { + missionId: result.mission._id, + }); + expect(events.some((e) => e.kind === 'feature.statusChanged')).toBe(true); + }); +}); + +describe('features.replanMilestone', () => { + it('inserts new features into the milestone with status=todo', async () => { + const t = convexTest(schema, modules); + await t.mutation(api.missions.create, seedPlan); + const result = await t.query(api.missions.getWithChildren, { + slug: 'feat-test', + }); + if (!result) throw new Error('mission not found'); + + const milestoneId = result.milestones[0]._id; + const newIds = await t.mutation(api.features.replanMilestone, { + milestoneId, + newFeatures: [ + { + slug: 'fix-1', + title: 'Fix 1', + prompt: 'fix the thing', + dependencies: [], + pathsOwned: [], + preferredAgent: 'claude', + skillRefs: [], + }, + ], + }); + expect(newIds).toHaveLength(1); + + const features = await t.query(api.features.listByMilestone, { milestoneId }); + expect(features).toHaveLength(2); + const fix = features.find((f) => f.slug === 'fix-1'); + expect(fix?.status).toBe('todo'); + expect(fix?.fixerAttempts).toBe(0); + }); +}); diff --git a/apps/missions/convex/features.ts b/apps/missions/convex/features.ts new file mode 100644 index 0000000..d03692e --- /dev/null +++ 
b/apps/missions/convex/features.ts @@ -0,0 +1,130 @@ +import { v } from 'convex/values'; +import { mutation, query } from './_generated/server'; +import { featureStatus } from './schema'; + +export const listByMission = query({ + args: { missionId: v.id('missions') }, + handler: async (ctx, { missionId }) => { + return await ctx.db + .query('features') + .withIndex('by_mission', (q) => q.eq('missionId', missionId)) + .collect(); + }, +}); + +export const listByMilestone = query({ + args: { milestoneId: v.id('milestones') }, + handler: async (ctx, { milestoneId }) => { + return await ctx.db + .query('features') + .withIndex('by_milestone', (q) => q.eq('milestoneId', milestoneId)) + .collect(); + }, +}); + +export const setStatus = mutation({ + args: { + featureId: v.id('features'), + status: featureStatus, + branch: v.optional(v.string()), + pullRequestUrl: v.optional(v.string()), + }, + handler: async (ctx, { featureId, status, branch, pullRequestUrl }) => { + const feature = await ctx.db.get(featureId); + if (!feature) throw new Error(`Feature not found: ${featureId}`); + + const patch: { + status: typeof status; + branch?: string; + pullRequestUrl?: string; + } = { status }; + if (branch !== undefined) patch.branch = branch; + if (pullRequestUrl !== undefined) patch.pullRequestUrl = pullRequestUrl; + await ctx.db.patch(featureId, patch); + + await ctx.db.insert('events', { + missionId: feature.missionId, + at: Date.now(), + actor: 'orchestrator', + kind: 'feature.statusChanged', + detail: { featureId, from: feature.status, to: status }, + }); + }, +}); + +export const setReviewVerdict = mutation({ + args: { + featureId: v.id('features'), + verdict: v.union(v.literal('approved'), v.literal('changes_requested')), + }, + handler: async (ctx, { featureId, verdict }) => { + const feature = await ctx.db.get(featureId); + if (!feature) throw new Error(`Feature not found: ${featureId}`); + await ctx.db.patch(featureId, { reviewVerdict: verdict }); + await 
ctx.db.insert('events', { + missionId: feature.missionId, + at: Date.now(), + actor: 'orchestrator', + kind: 'feature.reviewed', + detail: { featureId, verdict }, + }); + }, +}); + +export const incrementFixerAttempts = mutation({ + args: { featureId: v.id('features') }, + handler: async (ctx, { featureId }) => { + const feature = await ctx.db.get(featureId); + if (!feature) throw new Error(`Feature not found: ${featureId}`); + await ctx.db.patch(featureId, { fixerAttempts: feature.fixerAttempts + 1 }); + }, +}); + +export const replanMilestone = mutation({ + args: { + milestoneId: v.id('milestones'), + newFeatures: v.array( + v.object({ + slug: v.string(), + title: v.string(), + prompt: v.string(), + dependencies: v.array(v.id('features')), + pathsOwned: v.array(v.string()), + preferredAgent: v.union(v.literal('claude'), v.literal('codex')), + skillRefs: v.array(v.string()), + }), + ), + }, + handler: async (ctx, { milestoneId, newFeatures }) => { + const milestone = await ctx.db.get(milestoneId); + if (!milestone) throw new Error(`Milestone not found: ${milestoneId}`); + + const inserted: string[] = []; + for (const f of newFeatures) { + const id = await ctx.db.insert('features', { + missionId: milestone.missionId, + milestoneId, + slug: f.slug, + title: f.title, + prompt: f.prompt, + dependencies: f.dependencies, + pathsOwned: f.pathsOwned, + preferredAgent: f.preferredAgent, + skillRefs: f.skillRefs, + status: 'todo', + fixerAttempts: 0, + }); + inserted.push(id); + } + + await ctx.db.insert('events', { + missionId: milestone.missionId, + at: Date.now(), + actor: 'orchestrator', + kind: 'milestone.replanned', + detail: { milestoneId, addedFeatureIds: inserted }, + }); + + return inserted; + }, +}); diff --git a/apps/missions/convex/milestones.ts b/apps/missions/convex/milestones.ts new file mode 100644 index 0000000..16a648e --- /dev/null +++ b/apps/missions/convex/milestones.ts @@ -0,0 +1,32 @@ +import { v } from 'convex/values'; +import { mutation, query } 
from './_generated/server'; +import { milestoneStatus } from './schema'; + +export const listByMission = query({ + args: { missionId: v.id('missions') }, + handler: async (ctx, { missionId }) => { + const milestones = await ctx.db + .query('milestones') + .withIndex('by_mission', (q) => q.eq('missionId', missionId)) + .collect(); + return milestones.sort((a, b) => a.order - b.order); + }, +}); + +export const setStatus = mutation({ + args: { milestoneId: v.id('milestones'), status: milestoneStatus }, + handler: async (ctx, { milestoneId, status }) => { + const milestone = await ctx.db.get(milestoneId); + if (!milestone) throw new Error(`Milestone not found: ${milestoneId}`); + + const now = Date.now(); + await ctx.db.patch(milestoneId, { status }); + await ctx.db.insert('events', { + missionId: milestone.missionId, + at: now, + actor: 'orchestrator', + kind: 'milestone.statusChanged', + detail: { milestoneId, from: milestone.status, to: status }, + }); + }, +}); diff --git a/apps/missions/convex/missions.test.ts b/apps/missions/convex/missions.test.ts new file mode 100644 index 0000000..30f96d6 --- /dev/null +++ b/apps/missions/convex/missions.test.ts @@ -0,0 +1,117 @@ +import { convexTest } from 'convex-test'; +import { describe, expect, it } from 'vitest'; +import { api } from './_generated/api'; +import schema from './schema'; + +const modules = import.meta.glob('./**/*.*s'); + +const samplePlan = { + slug: 'sample-mission', + title: 'Sample', + objective: 'Demonstrate things', + milestones: [ + { + slug: 'm1', + title: 'Milestone 1', + successCriteria: ['Criterion A'], + validationPrompt: 'Check things', + features: [ + { + slug: 'f1', + title: 'Feature 1', + prompt: 'Do thing 1', + dependencies: [] as string[], + pathsOwned: ['src/foo/**'], + preferredAgent: 'claude' as const, + skillRefs: [], + }, + { + slug: 'f2', + title: 'Feature 2', + prompt: 'Do thing 2', + dependencies: ['f1'], + pathsOwned: ['src/bar/**'], + preferredAgent: 'claude' as const, + 
skillRefs: [], + }, + ], + }, + ], +}; + +describe('missions.create', () => { + it('inserts mission with milestones and features, resolving slug deps to ids', async () => { + const t = convexTest(schema, modules); + await t.mutation(api.missions.create, samplePlan); + + const result = await t.query(api.missions.getWithChildren, { + slug: 'sample-mission', + }); + expect(result).not.toBeNull(); + if (!result) throw new Error('mission not found'); + + expect(result.mission.status).toBe('planning'); + expect(result.milestones).toHaveLength(1); + expect(result.features).toHaveLength(2); + + const f1 = result.features.find((f) => f.slug === 'f1'); + const f2 = result.features.find((f) => f.slug === 'f2'); + if (!f1 || !f2) throw new Error('features missing'); + + expect(f1.dependencies).toEqual([]); + expect(f2.dependencies).toEqual([f1._id]); + }); + + it('rejects duplicate mission slug', async () => { + const t = convexTest(schema, modules); + await t.mutation(api.missions.create, samplePlan); + await expect(t.mutation(api.missions.create, samplePlan)).rejects.toThrow(/already exists/); + }); + + it('rejects unknown dependency slug', async () => { + const t = convexTest(schema, modules); + const broken = { + ...samplePlan, + slug: 'broken', + milestones: [ + { + ...samplePlan.milestones[0], + features: [ + { + ...samplePlan.milestones[0].features[0], + dependencies: ['ghost'], + }, + ], + }, + ], + }; + await expect(t.mutation(api.missions.create, broken)).rejects.toThrow(/unknown feature: ghost/); + }); +}); + +describe('missions.pause / resume', () => { + it('pauses a running mission and resumes it', async () => { + const t = convexTest(schema, modules); + await t.mutation(api.missions.create, samplePlan); + await t.mutation(api.missions.setStatus, { + slug: 'sample-mission', + status: 'running', + }); + + await t.mutation(api.missions.pause, { slug: 'sample-mission' }); + let m = await t.query(api.missions.getBySlug, { slug: 'sample-mission' }); + 
expect(m?.status).toBe('paused'); + + await t.mutation(api.missions.resume, { slug: 'sample-mission' }); + m = await t.query(api.missions.getBySlug, { slug: 'sample-mission' }); + expect(m?.status).toBe('running'); + }); + + it('rejects resume on non-paused mission', async () => { + const t = convexTest(schema, modules); + await t.mutation(api.missions.create, samplePlan); + await expect(t.mutation(api.missions.resume, { slug: 'sample-mission' })).rejects.toThrow( + /not paused/, + ); + }); +}); diff --git a/apps/missions/convex/missions.ts b/apps/missions/convex/missions.ts new file mode 100644 index 0000000..a195d39 --- /dev/null +++ b/apps/missions/convex/missions.ts @@ -0,0 +1,224 @@ +import { v } from 'convex/values'; +import type { Id } from './_generated/dataModel'; +import { mutation, query } from './_generated/server'; +import { missionStatus } from './schema'; + +export const create = mutation({ + args: { + slug: v.string(), + title: v.string(), + objective: v.string(), + milestones: v.array( + v.object({ + slug: v.string(), + title: v.string(), + successCriteria: v.array(v.string()), + validationPrompt: v.string(), + features: v.array( + v.object({ + slug: v.string(), + title: v.string(), + prompt: v.string(), + dependencies: v.array(v.string()), + pathsOwned: v.array(v.string()), + preferredAgent: v.union(v.literal('claude'), v.literal('codex')), + skillRefs: v.array(v.string()), + }), + ), + }), + ), + }, + handler: async (ctx, plan) => { + const existing = await ctx.db + .query('missions') + .withIndex('by_slug', (q) => q.eq('slug', plan.slug)) + .unique(); + if (existing) throw new Error(`Mission already exists: ${plan.slug}`); + + const now = Date.now(); + const missionId = await ctx.db.insert('missions', { + slug: plan.slug, + title: plan.title, + objective: plan.objective, + status: 'planning', + createdAt: now, + updatedAt: now, + }); + + type FeatureSpec = (typeof plan.milestones)[number]['features'][number]; + const featureSlugToId = new 
Map>(); + const pendingFeatures: { tempId: Id<'features'>; spec: FeatureSpec }[] = []; + + for (let i = 0; i < plan.milestones.length; i++) { + const m = plan.milestones[i]; + const milestoneId = await ctx.db.insert('milestones', { + missionId, + order: i, + title: m.title, + successCriteria: m.successCriteria, + validationPrompt: m.validationPrompt, + status: 'todo', + }); + + for (const f of m.features) { + if (featureSlugToId.has(f.slug)) { + throw new Error(`Duplicate feature slug: ${f.slug}`); + } + const featureId = await ctx.db.insert('features', { + missionId, + milestoneId, + slug: f.slug, + title: f.title, + prompt: f.prompt, + dependencies: [], + pathsOwned: f.pathsOwned, + preferredAgent: f.preferredAgent, + skillRefs: f.skillRefs, + status: 'todo', + fixerAttempts: 0, + }); + featureSlugToId.set(f.slug, featureId); + pendingFeatures.push({ tempId: featureId, spec: f }); + } + } + + for (const { tempId, spec } of pendingFeatures) { + const depIds = spec.dependencies.map((depSlug) => { + const id = featureSlugToId.get(depSlug); + if (!id) { + throw new Error(`Feature ${spec.slug} depends on unknown feature: ${depSlug}`); + } + return id; + }); + if (depIds.length > 0) { + await ctx.db.patch(tempId, { dependencies: depIds }); + } + } + + await ctx.db.insert('events', { + missionId, + at: now, + actor: 'user', + kind: 'mission.created', + detail: { + milestoneCount: plan.milestones.length, + featureCount: pendingFeatures.length, + }, + }); + + return missionId; + }, +}); + +export const list = query({ + args: {}, + handler: async (ctx) => { + return await ctx.db.query('missions').collect(); + }, +}); + +export const getBySlug = query({ + args: { slug: v.string() }, + handler: async (ctx, { slug }) => { + return await ctx.db + .query('missions') + .withIndex('by_slug', (q) => q.eq('slug', slug)) + .unique(); + }, +}); + +export const getWithChildren = query({ + args: { slug: v.string() }, + handler: async (ctx, { slug }) => { + const mission = await ctx.db 
+ .query('missions') + .withIndex('by_slug', (q) => q.eq('slug', slug)) + .unique(); + if (!mission) return null; + + const milestones = await ctx.db + .query('milestones') + .withIndex('by_mission', (q) => q.eq('missionId', mission._id)) + .collect(); + + const features = await ctx.db + .query('features') + .withIndex('by_mission', (q) => q.eq('missionId', mission._id)) + .collect(); + + return { + mission, + milestones: milestones.sort((a, b) => a.order - b.order), + features, + }; + }, +}); + +export const setStatus = mutation({ + args: { slug: v.string(), status: missionStatus }, + handler: async (ctx, { slug, status }) => { + const mission = await ctx.db + .query('missions') + .withIndex('by_slug', (q) => q.eq('slug', slug)) + .unique(); + if (!mission) throw new Error(`Mission not found: ${slug}`); + + const now = Date.now(); + await ctx.db.patch(mission._id, { status, updatedAt: now }); + await ctx.db.insert('events', { + missionId: mission._id, + at: now, + actor: 'orchestrator', + kind: 'mission.statusChanged', + detail: { from: mission.status, to: status }, + }); + return mission._id; + }, +}); + +export const pause = mutation({ + args: { slug: v.string() }, + handler: async (ctx, { slug }) => { + const mission = await ctx.db + .query('missions') + .withIndex('by_slug', (q) => q.eq('slug', slug)) + .unique(); + if (!mission) throw new Error(`Mission not found: ${slug}`); + + const now = Date.now(); + await ctx.db.patch(mission._id, { status: 'paused', updatedAt: now }); + await ctx.db.insert('events', { + missionId: mission._id, + at: now, + actor: 'user', + kind: 'mission.paused', + detail: {}, + }); + return mission._id; + }, +}); + +export const resume = mutation({ + args: { slug: v.string() }, + handler: async (ctx, { slug }) => { + const mission = await ctx.db + .query('missions') + .withIndex('by_slug', (q) => q.eq('slug', slug)) + .unique(); + if (!mission) throw new Error(`Mission not found: ${slug}`); + if (mission.status !== 'paused') { + throw 
new Error(`Mission ${slug} is not paused (status: ${mission.status})`); + } + + const now = Date.now(); + await ctx.db.patch(mission._id, { status: 'running', updatedAt: now }); + await ctx.db.insert('events', { + missionId: mission._id, + at: now, + actor: 'user', + kind: 'mission.resumed', + detail: {}, + }); + return mission._id; + }, +}); diff --git a/apps/missions/convex/runs.ts b/apps/missions/convex/runs.ts new file mode 100644 index 0000000..8f74e1c --- /dev/null +++ b/apps/missions/convex/runs.ts @@ -0,0 +1,50 @@ +import { v } from 'convex/values'; +import { mutation, query } from './_generated/server'; +import { agentRole, runOutcome } from './schema'; + +export const listByFeature = query({ + args: { featureId: v.id('features') }, + handler: async (ctx, { featureId }) => { + return await ctx.db + .query('runs') + .withIndex('by_feature', (q) => q.eq('featureId', featureId)) + .collect(); + }, +}); + +export const recordStart = mutation({ + args: { + missionId: v.id('missions'), + featureId: v.optional(v.id('features')), + milestoneId: v.optional(v.id('milestones')), + role: agentRole, + agent: v.string(), + branch: v.optional(v.string()), + }, + handler: async (ctx, args) => { + return await ctx.db.insert('runs', { + ...args, + startedAt: Date.now(), + }); + }, +}); + +export const recordEnd = mutation({ + args: { + runId: v.id('runs'), + outcome: runOutcome, + logUri: v.optional(v.string()), + }, + handler: async (ctx, { runId, outcome, logUri }) => { + const run = await ctx.db.get(runId); + if (!run) throw new Error(`Run not found: ${runId}`); + + const patch: { + finishedAt: number; + outcome: typeof outcome; + logUri?: string; + } = { finishedAt: Date.now(), outcome }; + if (logUri !== undefined) patch.logUri = logUri; + await ctx.db.patch(runId, patch); + }, +}); diff --git a/apps/missions/convex/schema.ts b/apps/missions/convex/schema.ts new file mode 100644 index 0000000..505e275 --- /dev/null +++ b/apps/missions/convex/schema.ts @@ -0,0 +1,100 @@ 
+import { defineSchema, defineTable } from 'convex/server'; +import { v } from 'convex/values'; + +export const missionStatus = v.union( + v.literal('planning'), + v.literal('running'), + v.literal('paused'), + v.literal('done'), + v.literal('failed'), +); + +export const milestoneStatus = v.union( + v.literal('todo'), + v.literal('running'), + v.literal('validating'), + v.literal('done'), + v.literal('blocked'), +); + +export const featureStatus = v.union( + v.literal('todo'), + v.literal('planned'), + v.literal('running'), + v.literal('review'), + v.literal('blocked'), + v.literal('done'), +); + +export const agentRole = v.union( + v.literal('worker'), + v.literal('reviewer'), + v.literal('fixer'), + v.literal('validator'), + v.literal('replanner'), +); + +export const runOutcome = v.union(v.literal('success'), v.literal('failure'), v.literal('aborted')); + +export default defineSchema({ + missions: defineTable({ + slug: v.string(), + title: v.string(), + objective: v.string(), + status: missionStatus, + createdAt: v.number(), + updatedAt: v.number(), + }).index('by_slug', ['slug']), + + milestones: defineTable({ + missionId: v.id('missions'), + order: v.number(), + title: v.string(), + successCriteria: v.array(v.string()), + validationPrompt: v.string(), + status: milestoneStatus, + }).index('by_mission', ['missionId', 'order']), + + features: defineTable({ + missionId: v.id('missions'), + milestoneId: v.id('milestones'), + slug: v.string(), + title: v.string(), + prompt: v.string(), + dependencies: v.array(v.id('features')), + pathsOwned: v.array(v.string()), + preferredAgent: v.union(v.literal('claude'), v.literal('codex')), + skillRefs: v.array(v.string()), + status: featureStatus, + branch: v.optional(v.string()), + pullRequestUrl: v.optional(v.string()), + reviewVerdict: v.optional(v.union(v.literal('approved'), v.literal('changes_requested'))), + fixerAttempts: v.number(), + }) + .index('by_milestone', ['milestoneId']) + .index('by_mission', ['missionId']) 
+ .index('by_status', ['status']), + + runs: defineTable({ + missionId: v.id('missions'), + featureId: v.optional(v.id('features')), + milestoneId: v.optional(v.id('milestones')), + role: agentRole, + agent: v.string(), + startedAt: v.number(), + finishedAt: v.optional(v.number()), + outcome: v.optional(runOutcome), + branch: v.optional(v.string()), + logUri: v.optional(v.string()), + }) + .index('by_feature', ['featureId']) + .index('by_mission', ['missionId']), + + events: defineTable({ + missionId: v.id('missions'), + at: v.number(), + actor: v.union(v.literal('orchestrator'), v.literal('dashboard'), v.literal('user')), + kind: v.string(), + detail: v.any(), + }).index('by_mission', ['missionId', 'at']), +}); diff --git a/apps/missions/convex/test-types.d.ts b/apps/missions/convex/test-types.d.ts new file mode 100644 index 0000000..9ad4d32 --- /dev/null +++ b/apps/missions/convex/test-types.d.ts @@ -0,0 +1,3 @@ +interface ImportMeta { + glob(pattern: string): Record Promise>; +} diff --git a/apps/missions/package.json b/apps/missions/package.json new file mode 100644 index 0000000..82e907f --- /dev/null +++ b/apps/missions/package.json @@ -0,0 +1,27 @@ +{ + "name": "@contexture/missions", + "version": "0.1.0", + "private": true, + "type": "module", + "exports": { + ".": "./src/index.ts", + "./convex/_generated/api": "./convex/_generated/api.js", + "./convex/_generated/dataModel": "./convex/_generated/dataModel.d.ts" + }, + "scripts": { + "dev": "convex dev --local", + "deploy": "convex deploy", + "typecheck": "tsc --noEmit", + "test": "vitest run" + }, + "dependencies": { + "convex": "^1.37.0" + }, + "devDependencies": { + "@edge-runtime/vm": "^5.0.0", + "@types/node": "^25.5.0", + "convex-test": "^0.0.51", + "typescript": "^5", + "vitest": "^3.2.1" + } +} diff --git a/apps/missions/src/index.ts b/apps/missions/src/index.ts new file mode 100644 index 0000000..a2cedbf --- /dev/null +++ b/apps/missions/src/index.ts @@ -0,0 +1,2 @@ +export { api } from 
'../convex/_generated/api'; +export type { Doc, Id } from '../convex/_generated/dataModel'; diff --git a/apps/missions/tsconfig.json b/apps/missions/tsconfig.json new file mode 100644 index 0000000..aefe9c1 --- /dev/null +++ b/apps/missions/tsconfig.json @@ -0,0 +1,16 @@ +{ + "compilerOptions": { + "target": "esnext", + "module": "esnext", + "moduleResolution": "bundler", + "esModuleInterop": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "jsx": "preserve", + "lib": ["esnext", "dom"], + "types": ["node"] + }, + "include": ["convex/**/*", "src/**/*", "vitest.config.ts"], + "exclude": ["convex/_generated"] +} diff --git a/apps/missions/vitest.config.ts b/apps/missions/vitest.config.ts new file mode 100644 index 0000000..962a191 --- /dev/null +++ b/apps/missions/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + environment: 'edge-runtime', + server: { deps: { inline: ['convex-test'] } }, + }, +}); diff --git a/biome.json b/biome.json index 4bc9b4e..f1795ac 100644 --- a/biome.json +++ b/biome.json @@ -15,7 +15,9 @@ "**/*.json", "**/*.jsonc", "!.sandcastle", - "!.claude/settings.local.json" + "!.claude/settings.local.json", + "!apps/missions/convex/_generated", + "!apps/missions/.env.local" ] }, "formatter": { diff --git a/bun.lock b/bun.lock index 41a4048..60f61dc 100644 --- a/bun.lock +++ b/bun.lock @@ -7,15 +7,20 @@ "dependencies": { "@ai-hero/sandcastle": "^0.5.5", "@anthropic-ai/sdk": "^0.91.1", + "@contexture/missions": "workspace:*", "@sindresorhus/slugify": "^3.0.0", + "convex": "^1.37.0", + "picomatch": "^4.0.4", "zod": "^4.3.6", }, "devDependencies": { "@biomejs/biome": "^2.4.9", "@types/bun": "^1.3.13", + "@types/picomatch": "^4.0.2", "husky": "^9.1.7", "lint-staged": "^16.4.0", "turbo": "^2.9.7", + "typescript": "^5.9.3", }, }, "apps/desktop": { @@ -90,6 +95,20 @@ "vitest": "^3.2.1", }, }, + "apps/missions": { + "name": "@contexture/missions", + "version": 
"0.1.0", + "dependencies": { + "convex": "^1.37.0", + }, + "devDependencies": { + "@edge-runtime/vm": "^5.0.0", + "@types/node": "^25.5.0", + "convex-test": "^0.0.51", + "typescript": "^5", + "vitest": "^3.2.1", + }, + }, "apps/web": { "name": "@contexture/web", "version": "0.1.0", @@ -321,6 +340,8 @@ "@contexture/desktop": ["@contexture/desktop@workspace:apps/desktop"], + "@contexture/missions": ["@contexture/missions@workspace:apps/missions"], + "@contexture/runtime": ["@contexture/runtime@workspace:packages/runtime"], "@contexture/stdlib": ["@contexture/stdlib@workspace:packages/stdlib"], @@ -347,6 +368,10 @@ "@ecies/ciphers": ["@ecies/ciphers@0.2.5", "", { "peerDependencies": { "@noble/ciphers": "^1.0.0" } }, "sha512-GalEZH4JgOMHYYcYmVqnFirFsjZHeoGMDt9IxEnM9F7GRUUyUksJ7Ou53L83WHJq3RWKD3AcBpo0iQh0oMpf8A=="], + "@edge-runtime/primitives": ["@edge-runtime/primitives@6.0.0", "", {}, "sha512-FqoxaBT+prPBHBwE1WXS1ocnu/VLTQyZ6NMUBAdbP7N2hsFTTxMC/jMu2D/8GAlMQfxeuppcPuCUk/HO3fpIvA=="], + + "@edge-runtime/vm": ["@edge-runtime/vm@5.0.0", "", { "dependencies": { "@edge-runtime/primitives": "6.0.0" } }, "sha512-NKBGBSIKUG584qrS1tyxVpX/AKJKQw5HgjYEnPLC0QsTw79JrGn+qUr8CXFb955Iy7GUdiiUv1rJ6JBGvaKb6w=="], + "@effect/cli": ["@effect/cli@0.74.0", "", { "dependencies": { "ini": "^4.1.3", "toml": "^3.0.0", "yaml": "^2.5.0" }, "peerDependencies": { "@effect/platform": "^0.95.0", "@effect/printer": "^0.48.0", "@effect/printer-ansi": "^0.48.0", "effect": "^3.20.0" } }, "sha512-vjMJWJWQ2zMRVcZJj2ZGr7vFgVoX6lsCuqAsNiN2ndWZAidkEJ6g1Euuib2V2nTXeWvRyd3FY2Fw2UvX48Uenw=="], "@effect/cluster": ["@effect/cluster@0.57.0", "", { "dependencies": { "kubernetes-types": "^1.30.0" }, "peerDependencies": { "@effect/platform": "^0.95.0", "@effect/rpc": "^0.74.0", "@effect/sql": "^0.50.0", "@effect/workflow": "^0.17.0", "effect": "^3.20.0" } }, "sha512-VjZoZ4hmgDb0GtGjktypTk/nArA3ntsXU2O9vOBzDjJLRKVBt7IS0/cllHrHwK5Jxkfz86B2k+Prw4/+nrLFlw=="], @@ -395,57 +420,57 @@ "@emnapi/runtime": 
["@emnapi/runtime@1.9.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-VYi5+ZVLhpgK4hQ0TAjiQiZ6ol0oe4mBx7mVv7IflsiEp0OWoVsp/+f9Vc1hOhE0TtkORVrI1GvzyreqpgWtkA=="], - "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.12", "", { "os": "aix", "cpu": "ppc64" }, "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA=="], + "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.0", "", { "os": "aix", "cpu": "ppc64" }, "sha512-KuZrd2hRjz01y5JK9mEBSD3Vj3mbCvemhT466rSuJYeE/hjuBrHfjjcjMdTm/sz7au+++sdbJZJmuBwQLuw68A=="], - "@esbuild/android-arm": ["@esbuild/android-arm@0.25.12", "", { "os": "android", "cpu": "arm" }, "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg=="], + "@esbuild/android-arm": ["@esbuild/android-arm@0.27.0", "", { "os": "android", "cpu": "arm" }, "sha512-j67aezrPNYWJEOHUNLPj9maeJte7uSMM6gMoxfPC9hOg8N02JuQi/T7ewumf4tNvJadFkvLZMlAq73b9uwdMyQ=="], - "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.12", "", { "os": "android", "cpu": "arm64" }, "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg=="], + "@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.0", "", { "os": "android", "cpu": "arm64" }, "sha512-CC3vt4+1xZrs97/PKDkl0yN7w8edvU2vZvAFGD16n9F0Cvniy5qvzRXjfO1l94efczkkQE6g1x0i73Qf5uthOQ=="], - "@esbuild/android-x64": ["@esbuild/android-x64@0.25.12", "", { "os": "android", "cpu": "x64" }, "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg=="], + "@esbuild/android-x64": ["@esbuild/android-x64@0.27.0", "", { "os": "android", "cpu": "x64" }, "sha512-wurMkF1nmQajBO1+0CJmcN17U4BP6GqNSROP8t0X/Jiw2ltYGLHpEksp9MpoBqkrFR3kv2/te6Sha26k3+yZ9Q=="], - "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.12", "", { "os": "darwin", "cpu": "arm64" }, "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg=="], + "@esbuild/darwin-arm64": 
["@esbuild/darwin-arm64@0.27.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-uJOQKYCcHhg07DL7i8MzjvS2LaP7W7Pn/7uA0B5S1EnqAirJtbyw4yC5jQ5qcFjHK9l6o/MX9QisBg12kNkdHg=="], - "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.12", "", { "os": "darwin", "cpu": "x64" }, "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA=="], + "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-8mG6arH3yB/4ZXiEnXof5MK72dE6zM9cDvUcPtxhUZsDjESl9JipZYW60C3JGreKCEP+p8P/72r69m4AZGJd5g=="], - "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.12", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg=="], + "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.0", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-9FHtyO988CwNMMOE3YIeci+UV+x5Zy8fI2qHNpsEtSF83YPBmE8UWmfYAQg6Ux7Gsmd4FejZqnEUZCMGaNQHQw=="], - "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.12", "", { "os": "freebsd", "cpu": "x64" }, "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ=="], + "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.0", "", { "os": "freebsd", "cpu": "x64" }, "sha512-zCMeMXI4HS/tXvJz8vWGexpZj2YVtRAihHLk1imZj4efx1BQzN76YFeKqlDr3bUWI26wHwLWPd3rwh6pe4EV7g=="], - "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.12", "", { "os": "linux", "cpu": "arm" }, "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw=="], + "@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.0", "", { "os": "linux", "cpu": "arm" }, "sha512-t76XLQDpxgmq2cNXKTVEB7O7YMb42atj2Re2Haf45HkaUpjM2J0UuJZDuaGbPbamzZ7bawyGFUkodL+zcE+jvQ=="], - "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ=="], + "@esbuild/linux-arm64": 
["@esbuild/linux-arm64@0.27.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-AS18v0V+vZiLJyi/4LphvBE+OIX682Pu7ZYNsdUHyUKSoRwdnOsMf6FDekwoAFKej14WAkOef3zAORJgAtXnlQ=="], - "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.12", "", { "os": "linux", "cpu": "ia32" }, "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA=="], + "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.0", "", { "os": "linux", "cpu": "ia32" }, "sha512-Mz1jxqm/kfgKkc/KLHC5qIujMvnnarD9ra1cEcrs7qshTUSksPihGrWHVG5+osAIQ68577Zpww7SGapmzSt4Nw=="], - "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng=="], + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.0", "", { "os": "linux", "cpu": "none" }, "sha512-QbEREjdJeIreIAbdG2hLU1yXm1uu+LTdzoq1KCo4G4pFOLlvIspBm36QrQOar9LFduavoWX2msNFAAAY9j4BDg=="], - "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw=="], + "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.0", "", { "os": "linux", "cpu": "none" }, "sha512-sJz3zRNe4tO2wxvDpH/HYJilb6+2YJxo/ZNbVdtFiKDufzWq4JmKAiHy9iGoLjAV7r/W32VgaHGkk35cUXlNOg=="], - "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.12", "", { "os": "linux", "cpu": "ppc64" }, "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA=="], + "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-z9N10FBD0DCS2dmSABDBb5TLAyF1/ydVb+N4pi88T45efQ/w4ohr/F/QYCkxDPnkhkp6AIpIcQKQ8F0ANoA2JA=="], - "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w=="], + "@esbuild/linux-riscv64": 
["@esbuild/linux-riscv64@0.27.0", "", { "os": "linux", "cpu": "none" }, "sha512-pQdyAIZ0BWIC5GyvVFn5awDiO14TkT/19FTmFcPdDec94KJ1uZcmFs21Fo8auMXzD4Tt+diXu1LW1gHus9fhFQ=="], - "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.12", "", { "os": "linux", "cpu": "s390x" }, "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg=="], + "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.0", "", { "os": "linux", "cpu": "s390x" }, "sha512-hPlRWR4eIDDEci953RI1BLZitgi5uqcsjKMxwYfmi4LcwyWo2IcRP+lThVnKjNtk90pLS8nKdroXYOqW+QQH+w=="], - "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.12", "", { "os": "linux", "cpu": "x64" }, "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw=="], + "@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.0", "", { "os": "linux", "cpu": "x64" }, "sha512-1hBWx4OUJE2cab++aVZ7pObD6s+DK4mPGpemtnAORBvb5l/g5xFGk0vc0PjSkrDs0XaXj9yyob3d14XqvnQ4gw=="], - "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg=="], + "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.0", "", { "os": "none", "cpu": "arm64" }, "sha512-6m0sfQfxfQfy1qRuecMkJlf1cIzTOgyaeXaiVaaki8/v+WB+U4hc6ik15ZW6TAllRlg/WuQXxWj1jx6C+dfy3w=="], - "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.12", "", { "os": "none", "cpu": "x64" }, "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ=="], + "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.0", "", { "os": "none", "cpu": "x64" }, "sha512-xbbOdfn06FtcJ9d0ShxxvSn2iUsGd/lgPIO2V3VZIPDbEaIj1/3nBBe1AwuEZKXVXkMmpr6LUAgMkLD/4D2PPA=="], - "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.12", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A=="], + "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.0", 
"", { "os": "openbsd", "cpu": "arm64" }, "sha512-fWgqR8uNbCQ/GGv0yhzttj6sU/9Z5/Sv/VGU3F5OuXK6J6SlriONKrQ7tNlwBrJZXRYk5jUhuWvF7GYzGguBZQ=="], - "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.12", "", { "os": "openbsd", "cpu": "x64" }, "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw=="], + "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.0", "", { "os": "openbsd", "cpu": "x64" }, "sha512-aCwlRdSNMNxkGGqQajMUza6uXzR/U0dIl1QmLjPtRbLOx3Gy3otfFu/VjATy4yQzo9yFDGTxYDo1FfAD9oRD2A=="], - "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg=="], + "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.0", "", { "os": "none", "cpu": "arm64" }, "sha512-nyvsBccxNAsNYz2jVFYwEGuRRomqZ149A39SHWk4hV0jWxKM0hjBPm3AmdxcbHiFLbBSwG6SbpIcUbXjgyECfA=="], - "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.12", "", { "os": "sunos", "cpu": "x64" }, "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w=="], + "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.0", "", { "os": "sunos", "cpu": "x64" }, "sha512-Q1KY1iJafM+UX6CFEL+F4HRTgygmEW568YMqDA5UV97AuZSm21b7SXIrRJDwXWPzr8MGr75fUZPV67FdtMHlHA=="], - "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.12", "", { "os": "win32", "cpu": "arm64" }, "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg=="], + "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-W1eyGNi6d+8kOmZIwi/EDjrL9nxQIQ0MiGqe/AWc6+IaHloxHSGoeRgDRKHFISThLmsewZ5nHFvGFWdBYlgKPg=="], - "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.12", "", { "os": "win32", "cpu": "ia32" }, "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ=="], + "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.0", "", { "os": 
"win32", "cpu": "ia32" }, "sha512-30z1aKL9h22kQhilnYkORFYt+3wp7yZsHWus+wSKAJR8JtdfI76LJ4SBdMsCopTR3z/ORqVu5L1vtnHZWVj4cQ=="], - "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.12", "", { "os": "win32", "cpu": "x64" }, "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA=="], + "@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.0", "", { "os": "win32", "cpu": "x64" }, "sha512-aIitBcjQeyOhMTImhLZmtxfdOcuNRpwlPNmlFKPcHQYPhEssw75Cl1TSXJXpMkzaua9FUetx/4OQKq7eJul5Cg=="], "@exodus/bytes": ["@exodus/bytes@1.15.0", "", { "peerDependencies": { "@noble/hashes": "^1.8.0 || ^2.0.0" }, "optionalPeers": ["@noble/hashes"] }, "sha512-UY0nlA+feH81UGSHv92sLEPLCeZFjXOuHhrIo0HQydScuQc8s0A7kL/UdgwgDq8g8ilksmuoF35YVTNphV2aBQ=="], @@ -1193,6 +1218,8 @@ "@types/pg-pool": ["@types/pg-pool@2.0.7", "", { "dependencies": { "@types/pg": "*" } }, "sha512-U4CwmGVQcbEuqpyju8/ptOKg6gEC+Tqsvj2xS9o1g71bUh8twxnC6ZL5rZKCsGN0iyH0CwgUyc9VR5owNQF9Ng=="], + "@types/picomatch": ["@types/picomatch@4.0.3", "", {}, "sha512-iG0T6+nYJ9FAPmx9SsUlnwcq1ZVRuCXcVEvWnntoPlrOpwtSTKNDC9uVAxTsC3PUvJ+99n4RpAcNgBbHX3JSnQ=="], + "@types/plist": ["@types/plist@3.0.5", "", { "dependencies": { "@types/node": "*", "xmlbuilder": ">=11.0.1" } }, "sha512-E6OCaRmAe4WDmWNsL/9RMqdkkzDCY1etutkflWk4c+AcjDU07Pcz1fQwTX0TQz+Pxqn9i4L1TU3UFpjnrcDgxA=="], "@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="], @@ -1485,6 +1512,10 @@ "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], + "convex": ["convex@1.37.0", "", { "dependencies": { "esbuild": "0.27.0", "prettier": "^3.0.0", "ws": "8.18.0" }, "peerDependencies": { "@auth0/auth0-react": "^2.0.1", "@clerk/clerk-react": "^4.12.8 || ^5.0.0", "@clerk/react": "^6.4.3", "react": "^18.0.0 || ^19.0.0-0 || ^19.0.0" }, 
"optionalPeers": ["@auth0/auth0-react", "@clerk/clerk-react", "@clerk/react", "react"], "bin": { "convex": "bin/main.js" } }, "sha512-xGSx5edIsXCEex3OU2U2N0oyB/cOa9qGwKiImF9yOWqjqZgOkx39idtpdlwNBTBSt4S30oAvs4yeXY5xxPIX3A=="], + + "convex-test": ["convex-test@0.0.51", "", { "peerDependencies": { "convex": "^1.32.0" } }, "sha512-J+4YRpKGXJDfnQqiWUUT+ylNmNO36MpkuwqG3JG4ld+7QtroZGF8HqO4qzMmfv5ltm71rPbkBvi//MoMHjnVvQ=="], + "cookie": ["cookie@1.1.1", "", {}, "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ=="], "cookie-signature": ["cookie-signature@1.2.2", "", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="], @@ -1719,7 +1750,7 @@ "es6-error": ["es6-error@4.1.1", "", {}, "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg=="], - "esbuild": ["esbuild@0.25.12", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.12", "@esbuild/android-arm": "0.25.12", "@esbuild/android-arm64": "0.25.12", "@esbuild/android-x64": "0.25.12", "@esbuild/darwin-arm64": "0.25.12", "@esbuild/darwin-x64": "0.25.12", "@esbuild/freebsd-arm64": "0.25.12", "@esbuild/freebsd-x64": "0.25.12", "@esbuild/linux-arm": "0.25.12", "@esbuild/linux-arm64": "0.25.12", "@esbuild/linux-ia32": "0.25.12", "@esbuild/linux-loong64": "0.25.12", "@esbuild/linux-mips64el": "0.25.12", "@esbuild/linux-ppc64": "0.25.12", "@esbuild/linux-riscv64": "0.25.12", "@esbuild/linux-s390x": "0.25.12", "@esbuild/linux-x64": "0.25.12", "@esbuild/netbsd-arm64": "0.25.12", "@esbuild/netbsd-x64": "0.25.12", "@esbuild/openbsd-arm64": "0.25.12", "@esbuild/openbsd-x64": "0.25.12", "@esbuild/openharmony-arm64": "0.25.12", "@esbuild/sunos-x64": "0.25.12", "@esbuild/win32-arm64": "0.25.12", "@esbuild/win32-ia32": "0.25.12", "@esbuild/win32-x64": "0.25.12" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg=="], 
+ "esbuild": ["esbuild@0.27.0", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.0", "@esbuild/android-arm": "0.27.0", "@esbuild/android-arm64": "0.27.0", "@esbuild/android-x64": "0.27.0", "@esbuild/darwin-arm64": "0.27.0", "@esbuild/darwin-x64": "0.27.0", "@esbuild/freebsd-arm64": "0.27.0", "@esbuild/freebsd-x64": "0.27.0", "@esbuild/linux-arm": "0.27.0", "@esbuild/linux-arm64": "0.27.0", "@esbuild/linux-ia32": "0.27.0", "@esbuild/linux-loong64": "0.27.0", "@esbuild/linux-mips64el": "0.27.0", "@esbuild/linux-ppc64": "0.27.0", "@esbuild/linux-riscv64": "0.27.0", "@esbuild/linux-s390x": "0.27.0", "@esbuild/linux-x64": "0.27.0", "@esbuild/netbsd-arm64": "0.27.0", "@esbuild/netbsd-x64": "0.27.0", "@esbuild/openbsd-arm64": "0.27.0", "@esbuild/openbsd-x64": "0.27.0", "@esbuild/openharmony-arm64": "0.27.0", "@esbuild/sunos-x64": "0.27.0", "@esbuild/win32-arm64": "0.27.0", "@esbuild/win32-ia32": "0.27.0", "@esbuild/win32-x64": "0.27.0" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-jd0f4NHbD6cALCyGElNpGAOtWxSq46l9X/sWB0Nzd5er4Kz2YTm+Vl0qKFT9KUJvD8+fiO8AvoHhFvEatfVixA=="], "esbuild-register": ["esbuild-register@3.6.0", "", { "dependencies": { "debug": "^4.3.4" }, "peerDependencies": { "esbuild": ">=0.12 <1" } }, "sha512-H2/S7Pm8a9CL1uhp9OvjwrBh5Pvx0H8qVOxNu8Wed9Y7qv56MPtq+GGM8RJpq6glYJn9Wspr8uw7l55uyinNeg=="], @@ -2483,6 +2514,8 @@ "preact": ["preact@10.29.0", "", {}, "sha512-wSAGyk2bYR1c7t3SZ3jHcM6xy0lcBcDel6lODcs9ME6Th++Dx2KU+6D3HD8wMMKGA8Wpw7OMd3/4RGzYRpzwRg=="], + "prettier": ["prettier@3.8.3", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-7igPTM53cGHMW8xWuVTydi2KO233VFiTNyF5hLJqpilHfmn8C8gPf+PS7dUT64YcXFbiMGZxS9pCSxL/Dxm/Jw=="], + "pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="], "pretty-ms": ["pretty-ms@9.3.0", "", { "dependencies": { "parse-ms": 
"^4.0.0" } }, "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ=="], @@ -2963,7 +2996,7 @@ "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], - "ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], + "ws": ["ws@8.18.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw=="], "wsl-utils": ["wsl-utils@0.3.1", "", { "dependencies": { "is-wsl": "^3.1.0", "powershell-utils": "^0.1.0" } }, "sha512-g/eziiSUNBSsdDJtCLB8bdYEUMj4jR7AGeUo96p/3dTafgjHhpF4RiCFPiRILwjQoDXx5MqkBr4fwWtR3Ky4Wg=="], @@ -3021,6 +3054,10 @@ "@effect/experimental/uuid": ["uuid@11.1.0", "", { "bin": { "uuid": "dist/esm/bin/uuid" } }, "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A=="], + "@effect/platform-node/ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], + + "@effect/platform-node-shared/ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], + "@effect/sql/uuid": ["uuid@11.1.0", "", { "bin": { "uuid": "dist/esm/bin/uuid" } }, "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A=="], "@electron/asar/commander": 
["commander@5.1.0", "", {}, "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg=="], @@ -3149,6 +3186,10 @@ "@storybook/builder-vite/@storybook/csf-plugin": ["@storybook/csf-plugin@8.6.18", "", { "dependencies": { "unplugin": "^1.3.1" }, "peerDependencies": { "storybook": "^8.6.18" } }, "sha512-x1ioz/L0CwaelCkHci3P31YtvwayN3FBftvwQOPbvRh9qeb4Cpz5IdVDmyvSxxYwXN66uAORNoqgjTi7B4/y5Q=="], + "@storybook/core/esbuild": ["esbuild@0.25.12", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.12", "@esbuild/android-arm": "0.25.12", "@esbuild/android-arm64": "0.25.12", "@esbuild/android-x64": "0.25.12", "@esbuild/darwin-arm64": "0.25.12", "@esbuild/darwin-x64": "0.25.12", "@esbuild/freebsd-arm64": "0.25.12", "@esbuild/freebsd-x64": "0.25.12", "@esbuild/linux-arm": "0.25.12", "@esbuild/linux-arm64": "0.25.12", "@esbuild/linux-ia32": "0.25.12", "@esbuild/linux-loong64": "0.25.12", "@esbuild/linux-mips64el": "0.25.12", "@esbuild/linux-ppc64": "0.25.12", "@esbuild/linux-riscv64": "0.25.12", "@esbuild/linux-s390x": "0.25.12", "@esbuild/linux-x64": "0.25.12", "@esbuild/netbsd-arm64": "0.25.12", "@esbuild/netbsd-x64": "0.25.12", "@esbuild/openbsd-arm64": "0.25.12", "@esbuild/openbsd-x64": "0.25.12", "@esbuild/openharmony-arm64": "0.25.12", "@esbuild/sunos-x64": "0.25.12", "@esbuild/win32-arm64": "0.25.12", "@esbuild/win32-ia32": "0.25.12", "@esbuild/win32-x64": "0.25.12" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg=="], + + "@storybook/core/ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], + "@storybook/instrumenter/@vitest/utils": ["@vitest/utils@2.1.9", "", { "dependencies": { "@vitest/pretty-format": "2.1.9", "loupe": "^3.1.2", 
"tinyrainbow": "^1.2.0" } }, "sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ=="], "@storybook/test/@testing-library/dom": ["@testing-library/dom@10.4.0", "", { "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", "@types/aria-query": "^5.0.1", "aria-query": "5.3.0", "chalk": "^4.1.0", "dom-accessibility-api": "^0.5.9", "lz-string": "^1.5.0", "pretty-format": "^27.0.2" } }, "sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ=="], @@ -3197,6 +3238,8 @@ "@types/yauzl/@types/node": ["@types/node@22.19.15", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg=="], + "@vitest/browser/ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], + "@xyflow/react/zustand": ["zustand@4.5.7", "", { "dependencies": { "use-sync-external-store": "^1.2.2" }, "peerDependencies": { "@types/react": ">=16.8", "immer": ">=9.0.6", "react": ">=16.8" }, "optionalPeers": ["@types/react", "immer", "react"] }, "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw=="], "accepts/mime-types": ["mime-types@3.0.2", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="], @@ -3245,6 +3288,8 @@ "electron-publish/mime": ["mime@2.6.0", "", { "bin": { "mime": "cli.js" } }, "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg=="], + "electron-vite/esbuild": ["esbuild@0.25.12", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.12", "@esbuild/android-arm": "0.25.12", "@esbuild/android-arm64": "0.25.12", 
"@esbuild/android-x64": "0.25.12", "@esbuild/darwin-arm64": "0.25.12", "@esbuild/darwin-x64": "0.25.12", "@esbuild/freebsd-arm64": "0.25.12", "@esbuild/freebsd-x64": "0.25.12", "@esbuild/linux-arm": "0.25.12", "@esbuild/linux-arm64": "0.25.12", "@esbuild/linux-ia32": "0.25.12", "@esbuild/linux-loong64": "0.25.12", "@esbuild/linux-mips64el": "0.25.12", "@esbuild/linux-ppc64": "0.25.12", "@esbuild/linux-riscv64": "0.25.12", "@esbuild/linux-s390x": "0.25.12", "@esbuild/linux-x64": "0.25.12", "@esbuild/netbsd-arm64": "0.25.12", "@esbuild/netbsd-x64": "0.25.12", "@esbuild/openbsd-arm64": "0.25.12", "@esbuild/openbsd-x64": "0.25.12", "@esbuild/openharmony-arm64": "0.25.12", "@esbuild/sunos-x64": "0.25.12", "@esbuild/win32-arm64": "0.25.12", "@esbuild/win32-ia32": "0.25.12", "@esbuild/win32-x64": "0.25.12" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg=="], + "electron-winstaller/fs-extra": ["fs-extra@7.0.1", "", { "dependencies": { "graceful-fs": "^4.1.2", "jsonfile": "^4.0.0", "universalify": "^0.1.0" } }, "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw=="], "esrecurse/estraverse": ["estraverse@5.3.0", "", {}, "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="], @@ -3261,6 +3306,8 @@ "happy-dom/whatwg-mimetype": ["whatwg-mimetype@3.0.0", "", {}, "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q=="], + "happy-dom/ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], + "hast-util-raw/parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, 
"sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="], "hosted-git-info/lru-cache": ["lru-cache@6.0.0", "", { "dependencies": { "yallist": "^4.0.0" } }, "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA=="], @@ -3509,6 +3556,58 @@ "@sentry/react/@sentry/browser/@sentry-internal/replay-canvas": ["@sentry-internal/replay-canvas@10.46.0", "", { "dependencies": { "@sentry-internal/replay": "10.46.0", "@sentry/core": "10.46.0" } }, "sha512-ub314MWUsekVCuoH0/HJbbimlI24SkV745UW2pj9xRbxOAEf1wjkmIzxKrMDbTgJGuEunug02XZVdJFJUzOcDw=="], + "@storybook/core/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.12", "", { "os": "aix", "cpu": "ppc64" }, "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA=="], + + "@storybook/core/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.25.12", "", { "os": "android", "cpu": "arm" }, "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg=="], + + "@storybook/core/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.12", "", { "os": "android", "cpu": "arm64" }, "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg=="], + + "@storybook/core/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.25.12", "", { "os": "android", "cpu": "x64" }, "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg=="], + + "@storybook/core/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.12", "", { "os": "darwin", "cpu": "arm64" }, "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg=="], + + "@storybook/core/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.12", "", { "os": "darwin", "cpu": "x64" }, "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA=="], + + 
"@storybook/core/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.12", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg=="], + + "@storybook/core/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.12", "", { "os": "freebsd", "cpu": "x64" }, "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ=="], + + "@storybook/core/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.12", "", { "os": "linux", "cpu": "arm" }, "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw=="], + + "@storybook/core/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ=="], + + "@storybook/core/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.12", "", { "os": "linux", "cpu": "ia32" }, "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA=="], + + "@storybook/core/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng=="], + + "@storybook/core/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw=="], + + "@storybook/core/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.12", "", { "os": "linux", "cpu": "ppc64" }, "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA=="], + + "@storybook/core/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.12", "", { "os": "linux", "cpu": "none" }, 
"sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w=="], + + "@storybook/core/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.12", "", { "os": "linux", "cpu": "s390x" }, "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg=="], + + "@storybook/core/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.12", "", { "os": "linux", "cpu": "x64" }, "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw=="], + + "@storybook/core/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg=="], + + "@storybook/core/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.12", "", { "os": "none", "cpu": "x64" }, "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ=="], + + "@storybook/core/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.12", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A=="], + + "@storybook/core/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.12", "", { "os": "openbsd", "cpu": "x64" }, "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw=="], + + "@storybook/core/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg=="], + + "@storybook/core/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.12", "", { "os": "sunos", "cpu": "x64" }, "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w=="], + + "@storybook/core/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.12", "", { "os": "win32", 
"cpu": "arm64" }, "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg=="], + + "@storybook/core/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.12", "", { "os": "win32", "cpu": "ia32" }, "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ=="], + + "@storybook/core/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.12", "", { "os": "win32", "cpu": "x64" }, "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA=="], + "@storybook/instrumenter/@vitest/utils/@vitest/pretty-format": ["@vitest/pretty-format@2.1.9", "", { "dependencies": { "tinyrainbow": "^1.2.0" } }, "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ=="], "@storybook/instrumenter/@vitest/utils/tinyrainbow": ["tinyrainbow@1.2.0", "", {}, "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ=="], @@ -3583,6 +3682,58 @@ "dmg-license/ajv/json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="], + "electron-vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.12", "", { "os": "aix", "cpu": "ppc64" }, "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA=="], + + "electron-vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.25.12", "", { "os": "android", "cpu": "arm" }, "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg=="], + + "electron-vite/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.12", "", { "os": "android", "cpu": "arm64" }, "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg=="], + + "electron-vite/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.25.12", "", { "os": "android", "cpu": "x64" }, 
"sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg=="], + + "electron-vite/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.12", "", { "os": "darwin", "cpu": "arm64" }, "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg=="], + + "electron-vite/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.12", "", { "os": "darwin", "cpu": "x64" }, "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA=="], + + "electron-vite/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.12", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg=="], + + "electron-vite/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.12", "", { "os": "freebsd", "cpu": "x64" }, "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ=="], + + "electron-vite/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.12", "", { "os": "linux", "cpu": "arm" }, "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw=="], + + "electron-vite/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ=="], + + "electron-vite/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.12", "", { "os": "linux", "cpu": "ia32" }, "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA=="], + + "electron-vite/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng=="], + + "electron-vite/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.12", "", { "os": "linux", "cpu": "none" 
}, "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw=="], + + "electron-vite/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.12", "", { "os": "linux", "cpu": "ppc64" }, "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA=="], + + "electron-vite/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w=="], + + "electron-vite/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.12", "", { "os": "linux", "cpu": "s390x" }, "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg=="], + + "electron-vite/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.12", "", { "os": "linux", "cpu": "x64" }, "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw=="], + + "electron-vite/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg=="], + + "electron-vite/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.12", "", { "os": "none", "cpu": "x64" }, "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ=="], + + "electron-vite/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.12", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A=="], + + "electron-vite/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.12", "", { "os": "openbsd", "cpu": "x64" }, "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw=="], + + "electron-vite/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.12", "", { "os": "none", "cpu": 
"arm64" }, "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg=="], + + "electron-vite/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.12", "", { "os": "sunos", "cpu": "x64" }, "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w=="], + + "electron-vite/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.12", "", { "os": "win32", "cpu": "arm64" }, "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg=="], + + "electron-vite/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.12", "", { "os": "win32", "cpu": "ia32" }, "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ=="], + + "electron-vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.12", "", { "os": "win32", "cpu": "x64" }, "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA=="], + "electron-winstaller/fs-extra/jsonfile": ["jsonfile@4.0.0", "", { "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg=="], "electron-winstaller/fs-extra/universalify": ["universalify@0.1.2", "", {}, "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="], diff --git a/package.json b/package.json index a460f29..b971aba 100644 --- a/package.json +++ b/package.json @@ -16,24 +16,31 @@ "format:check": "biome format .", "check": "biome check .", "check:no-suppressions": "bun scripts/check-no-suppressions.ts", - "ci": "TURBO_NO_UPDATE_NOTIFIER=1 turbo typecheck test --output-logs=errors-only && biome check . && bun run check:no-suppressions", + "typecheck:sandcastle": "tsc --noEmit -p .sandcastle/tsconfig.json", + "ci": "TURBO_NO_UPDATE_NOTIFIER=1 turbo typecheck test --output-logs=errors-only && bun run typecheck:sandcastle && biome check . 
&& bun run check:no-suppressions", "prepare": "husky", - "sandcastle": "bun .sandcastle/main.ts", - "sandcastle:analyze": "bun .sandcastle/analyze.ts", - "sandcastle:build": "bunx sandcastle docker build-image", + "mission": "bun .sandcastle/main.ts", + "mission:analyze": "bun .sandcastle/analyze.ts", + "mission:build": "bunx sandcastle docker build-image", + "convex:dev": "bun --filter @contexture/missions dev", "cleanup": "bun scripts/cleanup-branches.ts" }, "devDependencies": { "@biomejs/biome": "^2.4.9", "@types/bun": "^1.3.13", + "@types/picomatch": "^4.0.2", "husky": "^9.1.7", "lint-staged": "^16.4.0", - "turbo": "^2.9.7" + "turbo": "^2.9.7", + "typescript": "^5.9.3" }, "dependencies": { "@ai-hero/sandcastle": "^0.5.5", "@anthropic-ai/sdk": "^0.91.1", + "@contexture/missions": "workspace:*", "@sindresorhus/slugify": "^3.0.0", + "convex": "^1.37.0", + "picomatch": "^4.0.4", "zod": "^4.3.6" }, "trustedDependencies": [ diff --git a/scripts/check-no-suppressions.ts b/scripts/check-no-suppressions.ts index 6019e17..e04a574 100644 --- a/scripts/check-no-suppressions.ts +++ b/scripts/check-no-suppressions.ts @@ -7,6 +7,7 @@ const ignoredDirectories = new Set([ '.git', '.next', '.turbo', + '_generated', 'build', 'coverage', 'dist', From cca2b3f056774c0dcf93ed10e19d01ad0356e798 Mon Sep 17 00:00:00 2001 From: Dave Hudson Date: Thu, 7 May 2026 14:25:19 +0100 Subject: [PATCH 2/2] fix(missions): auto-load .sandcastle/.env from main.ts; ignore local plan files `bun run mission` runs from the repo root, so Bun's built-in dotenv loader never reads `.sandcastle/.env`. Result: `CONVEX_URL` (and any other env the orchestrator needs) is missing and the CLI fails with "Missing required environment variable" even though the var is configured. Fix: load `.sandcastle/.env` explicitly inside `main.ts` based on the script's own location, so the CLI works regardless of CWD. Existing process env still wins over file values, matching standard dotenv behaviour. 
Also: - gitignore + biome-exclude `smoke-*-plan.json` and `mission-plan-*.json` so local smoke-test plan files don't pollute formatter output or get committed. - Include the Convex `ai-files` artefacts that `convex dev --local` installed into `apps/missions/` (AGENTS.md, CLAUDE.md, convex.json, _generated/ai/, skills-lock.json). They're stable Convex-shipped guidelines that improve agent quality when editing this package. --- .gitignore | 5 + .sandcastle/main.ts | 30 ++ apps/missions/AGENTS.md | 13 + apps/missions/CLAUDE.md | 13 + apps/missions/convex.json | 6 + .../convex/_generated/ai/ai-files.state.json | 6 + .../convex/_generated/ai/guidelines.md | 365 ++++++++++++++++++ apps/missions/skills-lock.json | 41 ++ biome.json | 4 +- 9 files changed, 482 insertions(+), 1 deletion(-) create mode 100644 apps/missions/AGENTS.md create mode 100644 apps/missions/CLAUDE.md create mode 100644 apps/missions/convex.json create mode 100644 apps/missions/convex/_generated/ai/ai-files.state.json create mode 100644 apps/missions/convex/_generated/ai/guidelines.md create mode 100644 apps/missions/skills-lock.json diff --git a/.gitignore b/.gitignore index 4678c5f..3996f61 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,8 @@ playwright-report/ # Sensitive docs migrated to Paperclip issue documents (ONT-49) Marketing/ + +# Local mission plan files for smoke testing / experiments +smoke-*-plan.json +mission-plan-*.json + diff --git a/.sandcastle/main.ts b/.sandcastle/main.ts index 2e2e124..7567e9d 100644 --- a/.sandcastle/main.ts +++ b/.sandcastle/main.ts @@ -1,8 +1,38 @@ +import { dirname, resolve } from "node:path"; +import { fileURLToPath } from "node:url"; import { api } from "@contexture/missions"; import { Orchestrator } from "./missions/orchestrator"; import { planMission, readPlanFromFile } from "./missions/planner"; import { renderStatus } from "./missions/status-view"; +// Load .sandcastle/.env regardless of where `bun run mission` was invoked +// from. 
 Bun's built-in dotenv loader only reads from process.cwd(), so the +// CLI works whether you run it from the repo root or from .sandcastle/. +async function loadSandcastleEnv(): Promise<void> { + const here = dirname(fileURLToPath(import.meta.url)); + const envPath = resolve(here, ".env"); + const file = Bun.file(envPath); + if (!(await file.exists())) return; + const text = await file.text(); + for (const rawLine of text.split("\n")) { + const line = rawLine.trim(); + if (!line || line.startsWith("#")) continue; + const eq = line.indexOf("="); + if (eq === -1) continue; + const key = line.slice(0, eq).trim(); + let value = line.slice(eq + 1).trim(); + if ( + (value.startsWith('"') && value.endsWith('"')) || + (value.startsWith("'") && value.endsWith("'")) + ) { + value = value.slice(1, -1); + } + if (process.env[key] === undefined) process.env[key] = value; + } +} + +await loadSandcastleEnv(); + const HELP = `Usage: bun run mission <command> [args] Commands: diff --git a/apps/missions/AGENTS.md b/apps/missions/AGENTS.md new file mode 100644 index 0000000..85c46a7 --- /dev/null +++ b/apps/missions/AGENTS.md @@ -0,0 +1,13 @@ + + +This project uses [Convex](https://convex.dev) as its backend. + +When working on Convex code, **always read +`convex/_generated/ai/guidelines.md` first** for important guidelines on +how to correctly use Convex APIs and patterns. The file contains rules that +override what you may have learned about Convex from training data. + +Convex agent skills for common tasks can be installed by running +`npx convex ai-files install`. + + diff --git a/apps/missions/CLAUDE.md b/apps/missions/CLAUDE.md new file mode 100644 index 0000000..85c46a7 --- /dev/null +++ b/apps/missions/CLAUDE.md @@ -0,0 +1,13 @@ + + +This project uses [Convex](https://convex.dev) as its backend. + +When working on Convex code, **always read +`convex/_generated/ai/guidelines.md` first** for important guidelines on +how to correctly use Convex APIs and patterns. 
The file contains rules that +override what you may have learned about Convex from training data. + +Convex agent skills for common tasks can be installed by running +`npx convex ai-files install`. + + diff --git a/apps/missions/convex.json b/apps/missions/convex.json new file mode 100644 index 0000000..cf84b5c --- /dev/null +++ b/apps/missions/convex.json @@ -0,0 +1,6 @@ +{ + "$schema": "node_modules/convex/schemas/convex.schema.json", + "aiFiles": { + "enabled": false + } +} diff --git a/apps/missions/convex/_generated/ai/ai-files.state.json b/apps/missions/convex/_generated/ai/ai-files.state.json new file mode 100644 index 0000000..d4f9a0a --- /dev/null +++ b/apps/missions/convex/_generated/ai/ai-files.state.json @@ -0,0 +1,6 @@ +{ + "guidelinesHash": "62d72acb9afcc18f658d88dd772f34b5b1da5fa60ef0402e57a784d97c458e57", + "agentsMdSectionHash": "5934f676ea9a332e7cd4a4f64aa23b59d926e9faca026c758d4b1f87d2101cc3", + "claudeMdHash": "5934f676ea9a332e7cd4a4f64aa23b59d926e9faca026c758d4b1f87d2101cc3", + "agentSkillsSha": "b86618b5c3c4789c9fed98e84bbc34b3e8e70f20" +} diff --git a/apps/missions/convex/_generated/ai/guidelines.md b/apps/missions/convex/_generated/ai/guidelines.md new file mode 100644 index 0000000..e41bedd --- /dev/null +++ b/apps/missions/convex/_generated/ai/guidelines.md @@ -0,0 +1,365 @@ +# Convex guidelines + +## Function guidelines + +### Http endpoint syntax + +- HTTP endpoints are defined in `convex/http.ts` and require an `httpAction` decorator. For example: + +```typescript +import { httpRouter } from "convex/server"; +import { httpAction } from "./_generated/server"; +const http = httpRouter(); +http.route({ + path: "/echo", + method: "POST", + handler: httpAction(async (ctx, req) => { + const body = await req.bytes(); + return new Response(body, { status: 200 }); + }), +}); +``` + +- HTTP endpoints are always registered at the exact path you specify in the `path` field. 
For example, if you specify `/api/someRoute`, the endpoint will be registered at `/api/someRoute`. + +### Validators + +- Below is an example of an array validator: + +```typescript +import { mutation } from "./_generated/server"; +import { v } from "convex/values"; + +export default mutation({ + args: { + simpleArray: v.array(v.union(v.string(), v.number())), + }, + handler: async (ctx, args) => { + //... + }, +}); +``` + +- Below is an example of a schema with validators that codify a discriminated union type: + +```typescript +import { defineSchema, defineTable } from "convex/server"; +import { v } from "convex/values"; + +export default defineSchema({ + results: defineTable( + v.union( + v.object({ + kind: v.literal("error"), + errorMessage: v.string(), + }), + v.object({ + kind: v.literal("success"), + value: v.number(), + }), + ), + ), +}); +``` + +- Here are the valid Convex types along with their respective validators: + Convex Type | TS/JS type | Example Usage | Validator for argument validation and schemas | Notes | + | ----------- | ------------| -----------------------| -----------------------------------------------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | Id | string | `doc._id` | `v.id(tableName)` | | + | Null | null | `null` | `v.null()` | JavaScript's `undefined` is not a valid Convex value. Functions the return `undefined` or do not return will return `null` when called from a client. Use `null` instead. | + | Int64 | bigint | `3n` | `v.int64()` | Int64s only support BigInts between -2^63 and 2^63-1. Convex supports `bigint`s in most modern browsers. | + | Float64 | number | `3.1` | `v.number()` | Convex supports all IEEE-754 double-precision floating point numbers (such as NaNs). Inf and NaN are JSON serialized as strings. 
| + | Boolean | boolean | `true` | `v.boolean()` | + | String | string | `"abc"` | `v.string()` | Strings are stored as UTF-8 and must be valid Unicode sequences. Strings must be smaller than the 1MB total size limit when encoded as UTF-8. | + | Bytes | ArrayBuffer | `new ArrayBuffer(8)` | `v.bytes()` | Convex supports first class bytestrings, passed in as `ArrayBuffer`s. Bytestrings must be smaller than the 1MB total size limit for Convex types. | + | Array | Array | `[1, 3.2, "abc"]` | `v.array(values)` | Arrays can have at most 8192 values. | + | Object | Object | `{a: "abc"}` | `v.object({property: value})` | Convex only supports "plain old JavaScript objects" (objects that do not have a custom prototype). Objects can have at most 1024 entries. Field names must be nonempty and not start with "$" or "_". | +| Record | Record | `{"a": "1", "b": "2"}` | `v.record(keys, values)` | Records are objects at runtime, but can have dynamic keys. Keys must be only ASCII characters, nonempty, and not start with "$" or "\_". | + +### Function registration + +- Use `internalQuery`, `internalMutation`, and `internalAction` to register internal functions. These functions are private and aren't part of an app's API. They can only be called by other Convex functions. These functions are always imported from `./_generated/server`. +- Use `query`, `mutation`, and `action` to register public functions. These functions are part of the public API and are exposed to the public Internet. Do NOT use `query`, `mutation`, or `action` to register sensitive internal functions that should be kept private. +- You CANNOT register a function through the `api` or `internal` objects. +- ALWAYS include argument validators for all Convex functions. This includes all of `query`, `internalQuery`, `mutation`, `internalMutation`, `action`, and `internalAction`. + +### Function calling + +- Use `ctx.runQuery` to call a query from a query, mutation, or action. 
+- Use `ctx.runMutation` to call a mutation from a mutation or action. +- Use `ctx.runAction` to call an action from an action. +- ONLY call an action from another action if you need to cross runtimes (e.g. from V8 to Node). Otherwise, pull out the shared code into a helper async function and call that directly instead. +- Try to use as few calls from actions to queries and mutations as possible. Queries and mutations are transactions, so splitting logic up into multiple calls introduces the risk of race conditions. +- All of these calls take in a `FunctionReference`. Do NOT try to pass the callee function directly into one of these calls. +- When using `ctx.runQuery`, `ctx.runMutation`, or `ctx.runAction` to call a function in the same file, specify a type annotation on the return value to work around TypeScript circularity limitations. For example, + +``` +export const f = query({ + args: { name: v.string() }, + handler: async (ctx, args) => { + return "Hello " + args.name; + }, +}); + +export const g = query({ + args: {}, + handler: async (ctx, args) => { + const result: string = await ctx.runQuery(api.example.f, { name: "Bob" }); + return null; + }, +}); +``` + +### Function references + +- Use the `api` object defined by the framework in `convex/_generated/api.ts` to call public functions registered with `query`, `mutation`, or `action`. +- Use the `internal` object defined by the framework in `convex/_generated/api.ts` to call internal (or private) functions registered with `internalQuery`, `internalMutation`, or `internalAction`. +- Convex uses file-based routing, so a public function defined in `convex/example.ts` named `f` has a function reference of `api.example.f`. +- A private function defined in `convex/example.ts` named `g` has a function reference of `internal.example.g`. +- Functions can also be registered within directories nested within the `convex/` folder. 
For example, a public function `h` defined in `convex/messages/access.ts` has a function reference of `api.messages.access.h`. + +### Pagination + +- Define pagination using the following syntax: + +```ts +import { v } from "convex/values"; +import { query, mutation } from "./_generated/server"; +import { paginationOptsValidator } from "convex/server"; +export const listWithExtraArg = query({ + args: { paginationOpts: paginationOptsValidator, author: v.string() }, + handler: async (ctx, args) => { + return await ctx.db + .query("messages") + .withIndex("by_author", (q) => q.eq("author", args.author)) + .order("desc") + .paginate(args.paginationOpts); + }, +}); +``` + +Note: `paginationOpts` is an object with the following properties: + +- `numItems`: the maximum number of documents to return (the validator is `v.number()`) +- `cursor`: the cursor to use to fetch the next page of documents (the validator is `v.union(v.string(), v.null())`) +- A query that ends in `.paginate()` returns an object that has the following properties: +- page (contains an array of documents that you fetched) +- isDone (a boolean that represents whether or not this is the last page of documents) +- continueCursor (a string that represents the cursor to use to fetch the next page of documents) + +## Schema guidelines + +- Always define your schema in `convex/schema.ts`. +- Always import the schema definition functions from `convex/server`. +- System fields are automatically added to all documents and are prefixed with an underscore. The two system fields that are automatically added to all documents are `_creationTime` which has the validator `v.number()` and `_id` which has the validator `v.id(tableName)`. +- Always include all index fields in the index name. For example, if an index is defined as `["field1", "field2"]`, the index name should be "by_field1_and_field2". +- Index fields must be queried in the same order they are defined. 
If you want to be able to query by "field1" then "field2" and by "field2" then "field1", you must create separate indexes. +- Do not store unbounded lists as an array field inside a document (e.g. `v.array(v.object({...}))`). As the array grows it will hit the 1MB document size limit, and every update rewrites the entire document. Instead, create a separate table for the child items with a foreign key back to the parent. +- Separate high-churn operational data (e.g. heartbeats, online status, typing indicators) from stable profile data. Storing frequently updated fields on a shared document forces every write to contend with reads of the entire document. Instead, create a dedicated table for the high-churn data with a foreign key back to the parent record. + +## Authentication guidelines + +- Convex supports JWT-based authentication through `convex/auth.config.ts`. ALWAYS create this file when using authentication. Without it, `ctx.auth.getUserIdentity()` will always return `null`. +- Example `convex/auth.config.ts`: + +```typescript +export default { + providers: [ + { + domain: "https://your-auth-provider.com", + applicationID: "convex", + }, + ], +}; +``` + +The `domain` must be the issuer URL of the JWT provider. Convex fetches `{domain}/.well-known/openid-configuration` to discover the JWKS endpoint. The `applicationID` is checked against the JWT `aud` (audience) claim. + +- Use `ctx.auth.getUserIdentity()` to get the authenticated user's identity in any query, mutation, or action. This returns `null` if the user is not authenticated, or a `UserIdentity` object with fields like `subject`, `issuer`, `name`, `email`, etc. The `subject` field is the unique user identifier. +- In Convex `UserIdentity`, `tokenIdentifier` is guaranteed and is the canonical stable identifier for the authenticated identity. For any auth-linked database lookup or ownership check, prefer `identity.tokenIdentifier` over `identity.subject`. 
Do NOT use `identity.subject` alone as a global identity key. +- NEVER accept a `userId` or any user identifier as a function argument for authorization purposes. Always derive the user identity server-side via `ctx.auth.getUserIdentity()`. +- When using an external auth provider with Convex on the client, use `ConvexProviderWithAuth` instead of `ConvexProvider`: + +```tsx +import { ConvexProviderWithAuth, ConvexReactClient } from "convex/react"; + +const convex = new ConvexReactClient(process.env.NEXT_PUBLIC_CONVEX_URL!); + +function App({ children }: { children: React.ReactNode }) { + return ( + <ConvexProviderWithAuth client={convex} useAuth={useAuth}> + {children} + </ConvexProviderWithAuth> + ); +} +``` + +The `useAuth` prop must return `{ isLoading, isAuthenticated, fetchAccessToken }`. Do NOT use plain `ConvexProvider` when authentication is needed — it will not send tokens with requests. + +## Typescript guidelines + +- You can use the helper typescript type `Id` imported from './\_generated/dataModel' to get the type of the id for a given table. For example if there is a table called 'users' you can use `Id<'users'>` to get the type of the id for that table. +- Use `Doc<"tableName">` from `./_generated/dataModel` to get the full document type for a table. +- Use `QueryCtx`, `MutationCtx`, `ActionCtx` from `./_generated/server` for typing function contexts. NEVER use `any` for ctx parameters — always use the proper context type. +- If you need to define a `Record` make sure that you correctly provide the type of the key and value in the type. For example a validator `v.record(v.id('users'), v.string())` would have the type `Record<Id<'users'>, string>`. 
Below is an example of using `Record` with an `Id` type in a query: + +```ts +import { query } from "./_generated/server"; +import { Doc, Id } from "./_generated/dataModel"; + +export const exampleQuery = query({ + args: { userIds: v.array(v.id("users")) }, + handler: async (ctx, args) => { + const idToUsername: Record<Id<"users">, string> = {}; + for (const userId of args.userIds) { + const user = await ctx.db.get("users", userId); + if (user) { + idToUsername[user._id] = user.username; + } + } + + return idToUsername; + }, +}); +``` + +- Be strict with types, particularly around id's of documents. For example, if a function takes in an id for a document in the 'users' table, take in `Id<'users'>` rather than `string`. + +## Full text search guidelines + +- A query for "10 messages in channel '#general' that best match the query 'hello hi' in their body" would look like: + +const messages = await ctx.db +.query("messages") +.withSearchIndex("search_body", (q) => +q.search("body", "hello hi").eq("channel", "#general"), +) +.take(10); + +## Query guidelines + +- Do NOT use `filter` in queries. Instead, define an index in the schema and use `withIndex` instead. +- If the user does not explicitly tell you to return all results from a query you should ALWAYS return a bounded collection instead. So that is instead of using `.collect()` you should use `.take()` or paginate on database queries. This prevents future performance issues when tables grow in an unbounded way. +- Never use `.collect().length` to count rows. Convex has no built-in count operator, so if you need a count that stays efficient at scale, maintain a denormalized counter in a separate document and update it in your mutations. +- Convex queries do NOT support `.delete()`. If you need to delete all documents matching a query, use `.take(n)` to read them in batches, iterate over each batch calling `ctx.db.delete(row._id)`, and repeat until no more results are returned. 
+- Convex mutations are transactions with limits on the number of documents read and written. If a mutation needs to process more documents than fit in a single transaction (e.g. bulk deletion on a large table), process a batch with `.take(n)` and then call `ctx.scheduler.runAfter(0, api.myModule.myMutation, args)` to schedule itself to continue. This way each invocation stays within transaction limits. +- Use `.unique()` to get a single document from a query. This method will throw an error if there are multiple documents that match the query. +- When using async iteration, don't use `.collect()` or `.take(n)` on the result of a query. Instead, use the `for await (const row of query)` syntax. + +### Ordering + +- By default Convex always returns documents in ascending `_creationTime` order. +- You can use `.order('asc')` or `.order('desc')` to pick whether a query is in ascending or descending order. If the order isn't specified, it defaults to ascending. +- Document queries that use indexes will be ordered based on the columns in the index and can avoid slow table scans. + +## Mutation guidelines + +- Use `ctx.db.replace` to fully replace an existing document. This method will throw an error if the document does not exist. Syntax: `await ctx.db.replace('tasks', taskId, { name: 'Buy milk', completed: false })` +- Use `ctx.db.patch` to shallow merge updates into an existing document. This method will throw an error if the document does not exist. Syntax: `await ctx.db.patch('tasks', taskId, { completed: true })` + +## Action guidelines + +- Always add `"use node";` to the top of files containing actions that use Node.js built-in modules. +- Never add `"use node";` to a file that also exports queries or mutations. Only actions can run in the Node.js runtime; queries and mutations must stay in the default Convex runtime. If you need Node.js built-ins alongside queries or mutations, put the action in a separate file. 
+- `fetch()` is available in the default Convex runtime. You do NOT need `"use node";` just to use `fetch()`. +- Never use `ctx.db` inside of an action. Actions don't have access to the database. +- Below is an example of the syntax for an action: + +```ts +import { action } from "./_generated/server"; + +export const exampleAction = action({ + args: {}, + handler: async (ctx, args) => { + console.log("This action does not return anything"); + return null; + }, +}); +``` + +## Scheduling guidelines + +### Cron guidelines + +- Only use the `crons.interval` or `crons.cron` methods to schedule cron jobs. Do NOT use the `crons.hourly`, `crons.daily`, or `crons.weekly` helpers. +- Both cron methods take in a FunctionReference. Do NOT try to pass the function directly into one of these methods. +- Define crons by declaring the top-level `crons` object, calling some methods on it, and then exporting it as default. For example, + +```ts +import { cronJobs } from "convex/server"; +import { internal } from "./_generated/api"; +import { internalAction } from "./_generated/server"; + +const empty = internalAction({ + args: {}, + handler: async (ctx, args) => { + console.log("empty"); + }, +}); + +const crons = cronJobs(); + +// Run `internal.crons.empty` every two hours. +crons.interval("delete inactive users", { hours: 2 }, internal.crons.empty, {}); + +export default crons; +``` + +- You can register Convex functions within `crons.ts` just like any other file. +- If a cron calls an internal function, always import the `internal` object from '\_generated/api', even if the internal function is registered in the same file. + +## Testing guidelines + +- Use `convex-test` with `vitest` and `@edge-runtime/vm` to test Convex functions. Always install the latest versions of these packages. Configure vitest with `environment: "edge-runtime"` in `vitest.config.ts`. + +Test files go inside the `convex/` directory. 
You must pass a module map from `import.meta.glob` to `convexTest`: + +```typescript +/// <reference types="vite/client" /> +import { convexTest } from "convex-test"; +import { expect, test } from "vitest"; +import { api } from "./_generated/api"; +import schema from "./schema"; + +const modules = import.meta.glob("./**/*.ts"); + +test("some behavior", async () => { + const t = convexTest(schema, modules); + await t.mutation(api.messages.send, { body: "Hi!", author: "Sarah" }); + const messages = await t.query(api.messages.list); + expect(messages).toMatchObject([{ body: "Hi!", author: "Sarah" }]); +}); +``` + +The `modules` argument is required so convex-test can discover and load function files. The `/// <reference types="vite/client" />` directive is needed for TypeScript to recognize `import.meta.glob`. + +## File storage guidelines + +- The `ctx.storage.getUrl()` method returns a signed URL for a given file. It returns `null` if the file doesn't exist. +- Do NOT use the deprecated `ctx.storage.getMetadata` call for loading a file's metadata. + +Instead, query the `_storage` system table. For example, you can use `ctx.db.system.get` to get an `Id<"_storage">`. + +``` +import { query } from "./_generated/server"; +import { Id } from "./_generated/dataModel"; + +type FileMetadata = { + _id: Id<"_storage">; + _creationTime: number; + contentType?: string; + sha256: string; + size: number; +} + +export const exampleQuery = query({ + args: { fileId: v.id("_storage") }, + handler: async (ctx, args) => { + const metadata: FileMetadata | null = await ctx.db.system.get("_storage", args.fileId); + console.log(metadata); + return null; + }, +}); +``` + +- Convex storage stores items as `Blob` objects. You must convert all items to/from a `Blob` when using Convex storage. 
diff --git a/apps/missions/skills-lock.json b/apps/missions/skills-lock.json new file mode 100644 index 0000000..dc058d9 --- /dev/null +++ b/apps/missions/skills-lock.json @@ -0,0 +1,41 @@ +{ + "version": 1, + "skills": { + "convex": { + "source": "get-convex/agent-skills", + "sourceType": "github", + "skillPath": "skills/convex/SKILL.md", + "computedHash": "c5f3622c64ef550aac27d1dbc041f0c7c40d9119863c9fb8bac180b0498ee8ed" + }, + "convex-create-component": { + "source": "get-convex/agent-skills", + "sourceType": "github", + "skillPath": "skills/convex-create-component/SKILL.md", + "computedHash": "25b6f56cc6afa4237aa191f5bfa5b86f68b70dc7f1195b86d027bd85346cff41" + }, + "convex-migration-helper": { + "source": "get-convex/agent-skills", + "sourceType": "github", + "skillPath": "skills/convex-migration-helper/SKILL.md", + "computedHash": "8da4dee6f36c71b5d899b90ad7bd1d3730cf4dd35118f9ea856075df29809c04" + }, + "convex-performance-audit": { + "source": "get-convex/agent-skills", + "sourceType": "github", + "skillPath": "skills/convex-performance-audit/SKILL.md", + "computedHash": "d4f372ad6bed01b3a83983f5e8386017606e1bf8a97833c016af07f277157f96" + }, + "convex-quickstart": { + "source": "get-convex/agent-skills", + "sourceType": "github", + "skillPath": "skills/convex-quickstart/SKILL.md", + "computedHash": "8735052585ff81bb6ad4b362a7bb599413288e55d071c8ddf4f798b6d989ebac" + }, + "convex-setup-auth": { + "source": "get-convex/agent-skills", + "sourceType": "github", + "skillPath": "skills/convex-setup-auth/SKILL.md", + "computedHash": "b1a940758751c5b2fdc6ced105b19927a1655f0c1d4bd2fd5536dc3264202c00" + } + } +} diff --git a/biome.json b/biome.json index f1795ac..2e8e50e 100644 --- a/biome.json +++ b/biome.json @@ -17,7 +17,9 @@ "!.sandcastle", "!.claude/settings.local.json", "!apps/missions/convex/_generated", - "!apps/missions/.env.local" + "!apps/missions/.env.local", + "!smoke-*-plan.json", + "!mission-plan-*.json" ] }, "formatter": {