From dbdb0db1013eed4eb01356cdebe8720e40bb7a58 Mon Sep 17 00:00:00 2001 From: Julien Goux Date: Wed, 28 Jan 2026 14:30:41 +0100 Subject: [PATCH 1/4] yolo --- .gitignore | 3 + DECLARATIVE_SCHEMA.md | 280 ++++++++ PLAN.md | 983 ++++++++++++++++++++++++++ PULL_REQUEST.md | 441 ++++++++++++ cmd/dev.go | 36 + go.mod | 2 + go.sum | 5 + internal/db/diff/diff.go | 11 +- internal/db/diff/templates/pgdelta.ts | 4 +- internal/dev/debug.go | 15 + internal/dev/dev.go | 392 ++++++++++ internal/dev/differ.go | 270 +++++++ internal/dev/seed.go | 86 +++ internal/dev/shadow.go | 468 ++++++++++++ internal/dev/validator.go | 94 +++ internal/dev/watcher.go | 343 +++++++++ internal/utils/docker.go | 7 +- internal/utils/logger.go | 110 +++ pkg/config/config.go | 57 ++ 19 files changed, 3602 insertions(+), 5 deletions(-) create mode 100644 DECLARATIVE_SCHEMA.md create mode 100644 PLAN.md create mode 100644 PULL_REQUEST.md create mode 100644 cmd/dev.go create mode 100644 internal/dev/debug.go create mode 100644 internal/dev/dev.go create mode 100644 internal/dev/differ.go create mode 100644 internal/dev/seed.go create mode 100644 internal/dev/shadow.go create mode 100644 internal/dev/validator.go create mode 100644 internal/dev/watcher.go diff --git a/.gitignore b/.gitignore index e119d9725..5d4bc8e00 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,6 @@ package-lock.json # Initialized by cli for local testing /supabase + +# Claude +.claude diff --git a/DECLARATIVE_SCHEMA.md b/DECLARATIVE_SCHEMA.md new file mode 100644 index 000000000..184fbf34c --- /dev/null +++ b/DECLARATIVE_SCHEMA.md @@ -0,0 +1,280 @@ +# Declarative Schemas in Supabase CLI + +This document explains how declarative schemas work internally in the Supabase CLI. + +## Overview + +Declarative schemas provide a way to define your desired database state in SQL files. Instead of writing sequential migrations, you declare what your schema should look like, and the CLI computes the necessary changes. 
+ +``` +supabase/ +├── schemas/ # Declarative schema files (desired state) +│ ├── tables.sql +│ ├── functions.sql +│ └── types.sql +└── migrations/ # Traditional migrations (change history) + ├── 20240101000000_initial.sql + └── 20240102000000_add_users.sql +``` + +## Key Concepts + +### Schemas vs Migrations + +| Aspect | Schemas (Declarative) | Migrations (Imperative) | +|--------|----------------------|------------------------| +| **Purpose** | Define desired database state | Record sequential changes over time | +| **Storage** | `supabase/schemas/` | `supabase/migrations/` | +| **Tracking** | Not tracked in migration history | Tracked in `schema_migrations` table | +| **File Format** | Any `.sql` files | Versioned: `YYYYMMDDHHMMSS_name.sql` | +| **Use Case** | Development workflow | Production deployments | + +### The Shadow Database Pattern + +When diffing, the CLI creates a temporary "shadow database" that represents the desired state: + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Shadow Database Flow │ +├─────────────────────────────────────────────────────────────────┤ +│ 1. Create shadow DB container (fresh Postgres) │ +│ 2. Apply all migrations from supabase/migrations/ │ +│ 3. Apply declarative schemas from supabase/schemas/ │ +│ 4. Shadow DB now represents "desired state" │ +│ 5. Diff shadow DB vs local DB → generate migration SQL │ +└─────────────────────────────────────────────────────────────────┘ +``` + +## Internal Implementation + +### Directory Configuration + +Defined in `pkg/config/utils.go`: + +```go +var SchemasDir = filepath.Join(SupabaseDirPath, "schemas") +// Result: "supabase/schemas" +``` + +Can be customized in `config.toml`: + +```toml +[db.migrations] +schema_paths = ["schemas/**/*.sql", "legacy_schemas/core.sql"] +``` + +### Loading Schema Files + +From `internal/db/diff/diff.go`: + +```go +func loadDeclaredSchemas(fsys afero.Fs) ([]string, error) { + // 1. 
Check if schema_paths is configured in config.toml + if schemas := utils.Config.Db.Migrations.SchemaPaths; len(schemas) > 0 { + return schemas.Files(afero.NewIOFS(fsys)) + } + + // 2. Fall back to supabase/schemas/ directory + if exists, err := afero.DirExists(fsys, utils.SchemasDir); !exists { + return nil, nil // No schemas, that's OK + } + + // 3. Walk and collect all .sql files + var declared []string + afero.Walk(fsys, utils.SchemasDir, func(path string, info fs.FileInfo, err error) error { + if info.Mode().IsRegular() && filepath.Ext(info.Name()) == ".sql" { + declared = append(declared, path) + } + return nil + }) + return declared, nil +} +``` + +### Applying Schemas (Without Version Tracking) + +From `pkg/migration/seed.go`: + +```go +func SeedGlobals(ctx context.Context, pending []string, conn *pgx.Conn, fsys fs.FS) error { + for _, path := range pending { + globals, err := NewMigrationFromFile(path, fsys) + if err != nil { + return err + } + + // KEY: Skip inserting to migration history + globals.Version = "" + + // Execute the SQL statements + if err := globals.ExecBatch(ctx, conn); err != nil { + return err + } + } + return nil +} +``` + +The critical line is `globals.Version = ""` which prevents the schema from being recorded in the `schema_migrations` table. + +### The Diff Process + +From `internal/db/diff/diff.go`: + +```go +func DiffDatabase(ctx context.Context, config pgconn.Config, fsys afero.Fs, ...) (string, error) { + // 1. Create shadow database + shadow, err := CreateShadowDatabase(ctx, utils.Config.Db.ShadowPort) + defer utils.DockerRemove(shadow) + + // 2. Apply migrations to shadow + MigrateShadowDatabase(ctx, shadow, fsys) + + // 3. Apply declarative schemas to shadow's contrib_regression DB + if declared, err := loadDeclaredSchemas(fsys); len(declared) > 0 { + shadowConfig.Database = "contrib_regression" + migrateBaseDatabase(ctx, shadowConfig, declared, fsys) + } + + // 4. 
Compute diff: local DB → shadow DB + // Uses migra or pg-delta depending on configuration + diff := computeDiff(localDB, shadowDB) + + return diff, nil +} +``` + +### Why `contrib_regression` Database? + +The shadow database has two databases: +- `postgres` - Where migrations are applied +- `contrib_regression` - Where declarative schemas are applied + +This separation allows the diff tool to compare the full desired state (migrations + schemas) against the local database. + +## Workflow: From Schema to Migration + +### Development Flow + +``` +1. Edit supabase/schemas/tables.sql + ↓ +2. Run: supabase db diff -f add_profiles_table + ↓ +3. CLI creates shadow DB with schemas applied + ↓ +4. CLI diffs shadow vs local → generates SQL + ↓ +5. Migration saved: supabase/migrations/20240115120000_add_profiles_table.sql + ↓ +6. Migration can be pushed to production +``` + +### Dev Mode Flow (Hot Reload) + +``` +1. Run: supabase dev + ↓ +2. Watcher monitors supabase/schemas/ + ↓ +3. File change detected → debounce 500ms + ↓ +4. Validate SQL syntax (pg_query_go) + ↓ +5. Create shadow DB, apply schemas + ↓ +6. Diff shadow vs local + ↓ +7. Apply diff directly (NO migration file created) + ↓ +8. Local DB updated, session marked "dirty" + ↓ +9. On exit: "Run 'supabase db diff -f name' to create migration" +``` + +## File Processing + +### SQL Parsing + +Schema files are parsed using `NewMigrationFromFile` which: + +1. Reads the file content +2. Splits into individual SQL statements (handles `$$` blocks, comments) +3. Creates a `Migration` struct with `Statements` slice +4. 
Each statement is executed in order via `ExecBatch` + +### Supported SQL + +Any valid PostgreSQL SQL is supported: + +```sql +-- supabase/schemas/tables.sql +CREATE TABLE public.profiles ( + id uuid PRIMARY KEY DEFAULT gen_random_uuid(), + user_id uuid REFERENCES auth.users(id), + display_name text, + created_at timestamptz DEFAULT now() +); + +CREATE INDEX idx_profiles_user_id ON public.profiles(user_id); + +-- supabase/schemas/functions.sql +CREATE OR REPLACE FUNCTION public.get_profile(user_uuid uuid) +RETURNS public.profiles AS $$ + SELECT * FROM public.profiles WHERE user_id = user_uuid; +$$ LANGUAGE sql STABLE; + +-- supabase/schemas/policies.sql +CREATE POLICY "Users can view own profile" + ON public.profiles FOR SELECT + USING (auth.uid() = user_id); +``` + +## Configuration Options + +### config.toml + +```toml +[db] +# Shadow database port for diffing +shadow_port = 54320 + +[db.migrations] +# Enable/disable migrations feature +enabled = true + +# Custom schema file paths (glob patterns) +# If not set, defaults to supabase/schemas/**/*.sql +schema_paths = [ + "schemas/**/*.sql", + "shared_schemas/*.sql" +] +``` + +### Glob Pattern Support + +The `schema_paths` option supports: +- `**` - Recursive directory matching +- `*` - Single directory/file matching +- Multiple patterns (deduplicated) +- Results are sorted alphabetically + +## Key Files in Codebase + +| File | Purpose | +|------|---------| +| `pkg/config/utils.go` | Defines `SchemasDir` constant | +| `pkg/config/db.go` | `migrations.SchemaPaths` config | +| `internal/db/diff/diff.go` | `loadDeclaredSchemas`, `DiffDatabase` | +| `pkg/migration/seed.go` | `SeedGlobals` - applies schemas without tracking | +| `pkg/migration/file.go` | `NewMigrationFromFile`, `ExecBatch` | +| `internal/dev/dev.go` | Dev mode schema watching | +| `internal/dev/differ.go` | Hot reload diff/apply logic | + +## Design Philosophy + +1. **Schemas are for development** - Quick iteration without migration overhead +2. 
**Migrations are for deployment** - Immutable, versioned, auditable +3. **No automatic migration generation** - User explicitly runs `db diff` when ready +4. **Shadow database isolation** - Diffing doesn't affect local or remote databases +5. **Same SQL parser** - Schemas use the same parser as migrations for consistency diff --git a/PLAN.md b/PLAN.md new file mode 100644 index 000000000..1bafb6abe --- /dev/null +++ b/PLAN.md @@ -0,0 +1,983 @@ +# Plan: Supabase CLI `dev` Command - Declarative Schema Workflow + +## Overview + +Implement a new `supabase dev` command that provides a reactive development experience. The first workflow watches `supabase/schemas/` for changes and automatically applies them to the local database **without creating migration files**. + +**Core principle**: Migrations are an implementation detail for deployment. During development, users just want to evolve their schema and see changes reflected quickly. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ supabase dev │ +├─────────────────────────────────────────────────────────────────┤ +│ DevSession │ +│ ├── Ensures local DB is running (starts if needed) │ +│ ├── Coordinates multiple watchers │ +│ └── Manages graceful shutdown │ +├─────────────────────────────────────────────────────────────────┤ +│ SchemaWatcher (first workflow) │ +│ ├── Watches supabase/schemas/*.sql │ +│ ├── Debounces file changes (500ms) │ +│ └── Triggers validation → diff → apply pipeline │ +├─────────────────────────────────────────────────────────────────┤ +│ SQLValidator (pre-diff gate) │ +│ ├── Uses pg_query_go (libpg_query bindings) │ +│ ├── Validates ALL .sql files in schemas folder │ +│ └── Blocks diff if any file has syntax errors │ +├─────────────────────────────────────────────────────────────────┤ +│ DevDiffer │ +│ ├── Uses pg-delta CLI (via Bun in Docker) │ +│ ├── Compares schema files vs local DB │ +│ └── Detects DROP statements │ 
+├─────────────────────────────────────────────────────────────────┤ +│ Applier │ +│ ├── Executes SQL directly (no migration file) │ +│ ├── Uses transactions when possible │ +│ └── Shows warnings for destructive changes │ +├─────────────────────────────────────────────────────────────────┤ +│ DevState │ +│ ├── Tracks "dirty" state (local differs from migrations) │ +│ └── Warns on exit if uncommitted changes exist │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### Pipeline Flow + +``` +File save → Debounce (500ms) → Validate ALL *.sql → Diff → Apply + │ + ↓ (if invalid) + Show error: + "Syntax error in users.sql:15:23 + unexpected token 'TABL'" + Wait for next save... +``` + +## Key Design Decisions + +### 1. When to Diff (Handling Mid-Edit Saves) + +- **500ms debounce** - Batch rapid saves (reuse existing pattern from `internal/functions/serve/watcher.go`) +- **SQL validation gate** - After debounce, validate ALL .sql files in schemas folder using Postgres parser before diffing +- **Non-blocking errors** - Parse/diff errors don't crash the watcher; just log and wait for fix + +### 1.1 SQL Validation Step (Pre-Diff Gate) + +After debounce fires, before running the diff: + +``` +File change detected → Debounce (500ms) → Validate ALL schemas/*.sql → Diff & Apply + ↓ + If any file invalid: + - Show error with filename + line + - Skip diff entirely + - Wait for next file change +``` + +**Why validate all files, not just the changed one?** +- The diff applies ALL schema files to a shadow DB +- If any file is invalid, the diff will fail anyway +- Validating all files gives immediate feedback about the actual problem + +**Implementation options for Postgres parser:** +1. **pg_query_go** (recommended) - Go bindings to libpg_query (Postgres's actual parser) + - No DB connection needed + - Exact same parser Postgres uses + - Returns detailed error position +2. 
**Local DB validation** - Connect to local Postgres and use `PREPARE` or parse via function + - Requires DB to be running + - Adds network round-trip latency + +### 2. Migration-less Local Development + +- Changes are applied directly to local DB using `ExecBatch` (reuse from `pkg/migration/file.go`) +- **No version recorded** in `schema_migrations` table +- User runs `supabase db diff -f migration_name` when ready to commit/deploy +- On exit, warn if local DB is "dirty" (has unapplied changes) + +### 3. Differ Strategy + +Use **pg-delta** (`@supabase/pg-delta` npm package) because: +- Supabase's own differ, optimized for Supabase schemas +- Handles Supabase-specific patterns (auth, storage, realtime) +- CLI interface for easy invocation + +**Implementation:** Run pg-delta CLI via Bun in Docker: + +```bash +docker run --rm \ + --network host \ + -v supabase_bun_cache:/bun-cache \ + -e BUN_INSTALL_CACHE_DIR=/bun-cache \ + oven/bun:canary-alpine \ + x @supabase/pg-delta@1.0.0-alpha.2 plan \ + --source "postgres://postgres:postgres@localhost:54321/postgres" \ + --target "postgres://postgres:postgres@localhost:54322/contrib_regression" \ + --integration supabase \ + --format sql \ + --role postgres +``` + +**CLI flags:** +- `--source` - Local database URL (current state) +- `--target` - Shadow database URL (desired state with declared schemas applied) +- `--integration supabase` - Use Supabase-specific schema filtering +- `--format sql` - Output raw SQL statements +- `--role postgres` - Execute as postgres role + +**Why Bun?** +- Much faster startup than edge-runtime (~100ms vs ~2s) +- `bun x` is like `npx` but faster +- Alpine image is lightweight (~50MB) +- `supabase_bun_cache` volume caches pg-delta package (only downloads once) + +### 4. 
Handling Destructive Changes

- Detect DROP statements via regex pattern matching
- Show warnings with affected objects
- Apply anyway (in dev mode, speed > safety)
- Consider `--confirm-drops` flag for stricter mode

## File Structure

```
cmd/
└── dev.go # Cobra command definition

internal/dev/
├── session.go # DevSession orchestrator
├── feedback.go # Console output formatting
├── watcher/
│   ├── watcher.go # Watcher interface
│   └── schema.go # Schema watcher (adapts existing debounce pattern)
├── validator/
│   └── sql.go # SQL syntax validator using pg_query_go
├── differ/
│   └── schema.go # DevDiffer using pg-delta
└── state/
    └── state.go # DevState tracking
```

## Implementation Plan

### Phase 1: Command Scaffolding
1. Create `cmd/dev.go` with Cobra command
2. Create `internal/dev/session.go` with basic lifecycle
3. Integrate with `internal/start/start.go` to ensure DB is running

### Phase 2: Schema Watcher
1. Create `internal/dev/watcher/schema.go`
2. Adapt `debounceFileWatcher` from `internal/functions/serve/watcher.go`
3. Watch `supabase/schemas/*.sql` with 500ms debounce

### Phase 3: SQL Validator (Pre-Diff Gate)
1. Add `github.com/pganalyze/pg_query_go/v6` dependency
2. Create `internal/dev/validator/sql.go`
3. Implement `ValidateSchemaFiles(paths []string) error` that:
   - Parses each file with pg_query_go
   - Returns first error with filename, line, column, and message
   - Returns nil if all files are valid

### Phase 4: Diff and Apply (using pg-delta via Bun)
1. Create `internal/dev/differ.go`
2. 
Implement `runPgDelta()` that: + - Creates `supabase_bun_cache` Docker volume (if not exists) + - Runs `oven/bun:canary-alpine` container with: + - `--network host` to access local databases + - `-v supabase_bun_cache:/bun-cache` for caching + - `-e BUN_INSTALL_CACHE_DIR=/bun-cache` + - Command: `x @supabase/pg-delta@1.0.0-alpha.2 plan --source --target --integration supabase --format sql --role postgres` +3. Parse output SQL and apply directly without version tracking + +### Phase 5: Feedback and State +1. Create `internal/dev/feedback.go` for colored console output +2. Create `internal/dev/state/state.go` for dirty state tracking +3. Show warnings for DROP statements +4. Warn on exit if dirty + +### Phase 6: Polish +1. Add `--no-start` flag (assume DB already running) +2. Handle edge cases (DB stops unexpectedly, etc.) +3. Document the workflow + +## Critical Files to Modify/Reference + +| File | Purpose | +|------|---------| +| `cmd/dev.go` | New file - command definition | +| `internal/dev/dev.go` | Main session orchestration | +| `internal/dev/watcher.go` | File watcher with debounce | +| `internal/dev/validator.go` | SQL syntax validator (pg_query_go v6) | +| `internal/dev/differ.go` | Diff and apply logic (pg-delta via Bun) | +| `internal/functions/serve/watcher.go` | Reference for file watcher pattern | +| `internal/utils/docker.go` | Reference for Docker volume/container patterns | +| `pkg/migration/file.go` | Reference for `ExecBatch` pattern | +| `internal/start/start.go` | Integration point for DB startup | +| `go.mod` | Add `github.com/pganalyze/pg_query_go/v6` dependency | + +## Example User Experience + +``` +$ supabase dev + +[14:32:15] Starting local database... +[14:32:18] Local database ready +[14:32:18] Watching supabase/schemas/ for changes... 
+[14:32:18] Press Ctrl+C to stop + +[14:32:45] Change detected: supabase/schemas/users.sql +[14:32:46] Applied: + CREATE TABLE public.profiles ( + id uuid PRIMARY KEY REFERENCES auth.users(id), + display_name text + ); + +[14:33:12] Change detected: supabase/schemas/users.sql +[14:33:12] Warning: DROP statement detected + DROP COLUMN display_name; +[14:33:13] Applied: + ALTER TABLE public.profiles DROP COLUMN display_name; + ALTER TABLE public.profiles ADD COLUMN full_name text; + +[14:33:45] Change detected: supabase/schemas/users.sql +[14:33:45] Syntax error in supabase/schemas/users.sql + Line 3, Column 8: syntax error at or near "TABL" + Waiting for valid SQL... + +^C +[14:35:00] Stopping... +[14:35:00] Warning: Local database has uncommitted schema changes! + Hint: Run 'supabase db diff -f migration_name' to create a migration +``` + +## Verification + +1. **Manual testing**: + - Run `supabase dev` + - Edit a schema file and save + - Verify change is applied to local DB + - Verify no migration file is created + - Run `supabase db diff` to see the accumulated changes + - Run `supabase db diff -f my_migration` to create migration when ready + +2. **Edge cases to test**: + - Save mid-edit (incomplete SQL) → validation error, no diff attempted + - Syntax error in one file while another is valid → validation catches it + - Rapid saves (debounce working) + - DROP statements (warning shown) + - Ctrl+C with dirty state (warning shown) + - DB not running at start (should start it) + +## Design Decisions (Confirmed) + +1. **Debounce duration**: **500ms** - Match existing pattern for fast feedback +2. **DROP statement handling**: **Apply immediately with warning** - Speed over safety in dev mode +3. 
**Initial state**: **Apply immediately on startup** - Sync local DB to match schema files + +## Future Optimization: Lazy Container Startup + +### Problem + +Currently, `supabase start` pulls and starts **all** containers sequentially, even when only the database is needed: + +``` +postgres, kong, gotrue, postgrest, storage-api, realtime, edge-runtime, +imgproxy, postgres-meta, studio, logflare, vector, mailpit... +``` + +This takes 30-60+ seconds on first run (image pulls) and 10-20 seconds on subsequent runs. For `supabase dev`, we only need Postgres immediately - other services are accessed on-demand. + +### Current Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ supabase start (current) │ +├─────────────────────────────────────────────────────────────────┤ +│ 1. Pull ALL images sequentially │ +│ 2. Start ALL containers sequentially │ +│ 3. Wait for ALL health checks │ +│ 4. Ready (~30-60s first run, ~10-20s subsequent) │ +└─────────────────────────────────────────────────────────────────┘ +``` + +Kong gateway already routes all API requests: +- `/auth/v1/*` → gotrue:9999 +- `/rest/v1/*` → postgrest:3000 +- `/storage/v1/*` → storage:5000 +- `/realtime/v1/*` → realtime:4000 +- `/functions/v1/*` → edge-runtime:8081 +- etc. + +### Proposed Architecture: Lazy Proxy + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ supabase dev (optimized) │ +├─────────────────────────────────────────────────────────────────┤ +│ 1. Start Postgres only (~3-5s) │ +│ 2. Start LazyProxy (replaces Kong initially) │ +│ 3. Ready for schema development immediately │ +├─────────────────────────────────────────────────────────────────┤ +│ LazyProxy (on first request to service) │ +│ ├── Intercept request to /auth/v1/* │ +│ ├── Pull gotrue image (if needed) │ +│ ├── Start gotrue container │ +│ ├── Wait for health check │ +│ ├── Forward request (and all subsequent requests) │ +│ └── Show "Starting auth service..." 
in CLI │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### Implementation Strategy + +#### Option A: Custom Go Proxy (Recommended) + +Build a lightweight reverse proxy in Go that: +1. Listens on Kong's port (8000) +2. Maps routes to container configs +3. On first request to a route: + - Returns "503 Service Starting" or holds the request + - Pulls image + starts container in background + - Once healthy, forwards request +4. Subsequent requests go directly to container + +```go +// internal/dev/proxy/lazy.go +type LazyProxy struct { + services map[string]*ServiceState // route prefix → state + mu sync.RWMutex +} + +type ServiceState struct { + Config ContainerConfig + Started bool + Starting bool + ContainerID string +} + +func (p *LazyProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + service := p.routeToService(r.URL.Path) + if service == nil { + http.Error(w, "Not found", 404) + return + } + + if !service.Started { + p.startService(service) // blocks until healthy + } + + service.Proxy.ServeHTTP(w, r) +} +``` + +#### Option B: Kong with Lazy Backend Plugin + +Use Kong but with a custom plugin that: +1. Catches connection failures to backends +2. Triggers container start via Docker API +3. Retries after container is healthy + +This is more complex (requires Lua/Kong plugin development) but keeps the existing Kong setup. 
+ +### Service Dependency Graph + +Some services have dependencies: +``` +postgres (required first) + ↓ +postgrest (needs postgres) +gotrue (needs postgres) +storage-api (needs postgres, gotrue for auth) +realtime (needs postgres) + ↓ +kong (needs all above for routing) +studio (needs kong, postgres-meta) +``` + +For lazy startup: +- **Immediate**: postgres +- **On-demand**: everything else, respecting dependencies + +### Configuration + +```toml +# config.toml +[dev] +lazy_services = true # default: true for `supabase dev` + +[dev.eager_services] +# Services to start immediately (not lazily) +# Useful if you know you'll need auth immediately +auth = false +rest = false +``` + +### CLI Integration + +``` +$ supabase dev + +Starting Postgres... done (3.2s) +Lazy proxy ready on localhost:54321 + +Watching supabase/schemas/ for changes... + +# User's app makes request to /auth/v1/signup +Starting auth service... done (4.1s) + +# User's app makes request to /rest/v1/profiles +Starting REST API... done (2.3s) +``` + +### Benefits + +1. **Faster iteration**: Schema development starts in ~5s instead of ~30s +2. **Lower resource usage**: Unused services don't consume memory +3. **Better DX**: Clear feedback when services start on-demand +4. **Backwards compatible**: `supabase start` unchanged, `supabase dev` uses lazy mode + +### Challenges + +1. **First request latency**: 2-5s delay on first request to a service +2. **Dependency ordering**: Must start dependencies before dependents +3. **Health check timing**: Need to wait for service to be truly ready +4. 
**WebSocket services**: Realtime needs special handling for persistent connections + +### Files to Create/Modify + +| File | Purpose | +|------|---------| +| `internal/dev/proxy/lazy.go` | Lazy proxy implementation | +| `internal/dev/proxy/routes.go` | Route → container mapping | +| `internal/dev/proxy/health.go` | Health check logic | +| `internal/start/start.go` | Add `--lazy` flag support | +| `pkg/config/config.go` | Add `[dev]` config section | + +### Migration Path + +1. **Phase 1**: Implement for `supabase dev` only (current scope) +2. **Phase 2**: Add `supabase start --lazy` flag for opt-in +3. **Phase 3**: Consider making lazy default for `supabase start` + +## Extensible Workflow Design + +The `dev` command supports multiple workflows, each with its own configuration section. This allows users to customize behavior based on their tooling (Supabase-native, Prisma, Drizzle, etc.). + +### Config Structure + +```toml +[dev.schemas] +# Database schema workflow +enabled = true # Set to false to disable this workflow +watch = ["schemas/**/*.sql"] # Glob patterns to watch (relative to supabase/) +on_change = "" # Custom command to run on change (overrides internal diff) +types = "" # Path for TypeScript types (empty = disabled) +debounce = 500 # Milliseconds to wait before triggering (default: 500) +sync_on_start = true # Apply schema on startup (default: true) + +[dev.functions] +# Edge functions workflow (future) +enabled = true +watch = ["functions/**/*.ts"] +# ... 
function-specific options +``` + +### Configuration Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `enabled` | `bool` | `true` | Enable/disable this workflow | +| `watch` | `string[]` | `["schemas/**/*.sql"]` | Glob patterns for files to watch (relative to `supabase/` directory) | +| `on_change` | `string` | `""` (empty = use internal differ) | Custom command to run when files change | +| `types` | `string` | `""` (empty = disabled) | Output path for TypeScript types | +| `debounce` | `int` | `500` | Milliseconds to wait after file change before triggering | +| `sync_on_start` | `bool` | `true` | Whether to apply schema changes on startup | + +**Important:** All `watch` paths are relative to the `supabase/` directory, not the project root. + +### How It Works + +``` +File change detected + ↓ + Debounce (500ms) + ↓ + on_change set? ─── Yes ──→ Run custom command + │ ↓ + No types set? ─── Yes ──→ Generate types + ↓ │ + Internal differ No + (pg-delta) ↓ + ↓ Done + Apply to local DB + ↓ + types set? ─── Yes ──→ Generate types + │ + No + ↓ + Done +``` + +### Typical Workflows + +#### 1. Supabase Users (Default) + +Users who write SQL directly in `supabase/schemas/`. + +**Config** (default, no config needed): +```toml +# No config needed - defaults work out of the box +``` + +**Workflow**: +``` +1. Edit supabase/schemas/tables.sql +2. Save file +3. CLI validates SQL syntax (pg_query_go) +4. CLI diffs schema files vs local DB (pg-delta) +5. CLI applies changes directly to local DB +6. (Optional) Types generated if configured +7. When ready: `supabase db diff -f migration_name` +``` + +**With TypeScript types**: +```toml +[dev.schemas] +types = "src/types/database.ts" +``` + +--- + +#### 2. Drizzle Users + +Users who define schemas in TypeScript using Drizzle ORM. 
+ +**Config**: +```toml +[dev.schemas] +watch = ["../src/db/schema/**/*.ts"] # Use ../ to reach project root from supabase/ +on_change = "npx drizzle-kit push" +sync_on_start = false # Drizzle manages its own state +``` + +**Workflow**: +``` +1. Edit src/db/schema/users.ts (Drizzle schema) +2. Save file +3. CLI detects change, runs `npx drizzle-kit push` +4. Drizzle pushes changes directly to local DB +5. When ready: `npx drizzle-kit generate` for migrations +``` + +**Note**: Drizzle users typically use `drizzle-kit push` for dev and `drizzle-kit generate` for migrations. The CLI just watches and triggers their existing workflow. + +--- + +#### 3. Prisma Users + +Users who define schemas using Prisma ORM. + +**Config**: +```toml +[dev.schemas] +watch = ["../prisma/schema.prisma"] # Use ../ to reach project root from supabase/ +on_change = "npx prisma db push --skip-generate" +sync_on_start = false # Prisma manages its own state +``` + +**Workflow**: +``` +1. Edit prisma/schema.prisma +2. Save file +3. CLI detects change, runs `npx prisma db push --skip-generate` +4. Prisma pushes changes directly to local DB +5. When ready: `npx prisma migrate dev` for migrations +``` + +**Note**: `--skip-generate` avoids regenerating the Prisma client on every save. Users can run `npx prisma generate` separately when needed. + +--- + +#### 4. External Watch Mode (ORM handles everything) + +Users who prefer their ORM's built-in watch mode and don't need Supabase CLI to watch schemas at all. + +**Config**: +```toml +[dev.schemas] +enabled = false # Disable schema workflow entirely +``` + +**Workflow**: +``` +1. Run `supabase dev` (starts DB, but no schema watching) +2. In another terminal: run ORM's watch mode (e.g., `prisma studio`, custom watcher) +3. ORM handles schema changes and can call `supabase gen types typescript` if needed +4. 
When ready: use ORM's migration tooling +``` + +**Use cases**: +- Prisma users who prefer `prisma studio` or a custom dev script +- Teams with existing watch tooling they don't want to replace +- Users who only want `supabase dev` for edge functions workflow (future) + +**Note**: Even with `enabled = false`, users still benefit from `supabase dev` for: +- Automatic database startup +- Future workflows like edge functions (`[dev.functions]`) +- Unified dev experience across Supabase services + +--- + +### TypeScript Type Generation + +Type generation is **independent** of the schema sync method. It runs after changes are applied to the database, regardless of whether the internal differ or a custom `on_change` command was used. + +**Supported generators** (future): +- `supabase gen types typescript` (built-in) +- Custom command via config + +**Config**: +```toml +[dev.schemas] +types = "src/types/database.ts" +# or for custom generator: +# types_command = "npx prisma generate" +``` + +**When types are generated**: +1. After successful schema application (internal or external) +2. Only if `types` path is configured +3. Uses `supabase gen types typescript --local > ` + +--- + +### DX Improvements + +#### 1. Clear Status Feedback + +The CLI provides clear, structured output during the dev session: + +``` +[dev] Watching schemas/**/*.sql +[dev] On change: (internal differ) +[dev] Status: Applying changes... +[dev] ✓ Schema applied successfully +[dev] Status: Watching for changes... +``` + +#### 2. Validation on Startup + +Before starting the watch loop, the CLI validates: +- Watch patterns are valid glob syntax +- `on_change` command exists (if configured) - warns if not found in PATH +- `types` output directory exists - warns if parent directory missing +- Watch directories exist - creates `supabase/schemas/` if using default pattern + +#### 3. 
Dynamic Directory Watching + +When a new subdirectory is created within a watched path, it's automatically added to the watcher. This handles cases like: +``` +supabase/schemas/ +├── tables.sql +└── new-module/ # Created while dev is running + └── models.sql # Automatically watched +``` + +#### 4. Configurable Debounce + +The `debounce` option allows tuning the delay between file save and action trigger: +- **Lower values (100-300ms)**: Faster feedback, but may trigger on incomplete saves +- **Default (500ms)**: Good balance for most editors +- **Higher values (1000ms+)**: For slower machines or complex operations + +#### 5. Skip Initial Sync + +The `sync_on_start` option controls whether to apply schema on startup: +- **`true` (default)**: Ensures local DB matches schema files immediately +- **`false`**: Useful when using `on_change` with an ORM that's already in sync + +--- + +### Why This Design? + +1. **Backwards compatible** - No config needed for default Supabase workflow +2. **Tool agnostic** - Works with any ORM/tool that has a CLI +3. **Composable** - Type generation works with any schema tool +4. **Extensible** - Easy to add new workflows (`[dev.functions]`, `[dev.seed]`, etc.) + +--- + +## Open Question: "Valid but Incomplete" Schema Problem + +### The Problem + +Current validation only checks SQL syntax. But a statement can be **valid yet incomplete**: + +```sql +-- Step 1: User saves this (valid SQL!) +CREATE TABLE users (id uuid PRIMARY KEY); + +-- Step 2: User continues typing (also valid, but different!) +CREATE TABLE users (id uuid PRIMARY KEY, name text, email text); +``` + +If we diff after step 1, we create a table with 1 column. Then we have to ALTER to add columns. 
This creates: +- Unnecessary churn (multiple diffs for one logical change) +- Potential issues with constraints, foreign keys +- Confusing diff output + +### Replit's Approach (Reference) + +[Replit's automated migrations](https://blog.replit.com/production-databases-automated-migrations) takes a different approach: +- **Don't diff during development** - Let developers make any changes freely +- **Diff at deploy time** - Generate migration only when deploying to production +- **Minimal intervention** - Users shouldn't think about migrations during dev + +This works well for AI agents but may lose the "immediate feedback" benefit for human developers. + +### Proposed Solutions + +#### Option A: Explicit Sync Command +```toml +[dev.schemas] +auto_apply = false # New option, default: true +``` +- Changes are validated but NOT auto-applied +- User runs `supabase db sync` when ready +- **Pro**: User is always in control +- **Con**: Loses reactive feel + +#### Option B: Preview Mode with Confirmation +``` +[dev] Change detected: users.sql +[dev] Will apply: + CREATE TABLE users (id uuid PRIMARY KEY); +[dev] Press Enter to apply, or keep editing... +``` +- Show diff preview, wait for confirmation (Enter) or timeout +- **Pro**: Immediate feedback + user control +- **Con**: Requires interaction + +#### Option C: Smart Incompleteness Detection +- Detect "likely incomplete" patterns: + - Empty tables (0 columns) + - Tables with only PK + - Functions with empty bodies +- Warn but don't auto-apply for these cases +- **Pro**: Catches common cases automatically +- **Con**: Can't catch all cases + +#### Option D: Adaptive Debounce +- Short debounce (500ms) for small edits +- Longer debounce (2-3s) when: + - File was just created + - Major structural changes detected + - Rapid consecutive saves +- **Pro**: Automatic, no config needed +- **Con**: Feels inconsistent + +#### Option E: Hybrid (Recommended) + +Combine the best of all approaches: + +1. 
**Default behavior**: Auto-apply with 500ms debounce (current)
2. **New config option**: `auto_apply = false` for manual control
3. **Smart warnings**: Detect potentially incomplete schemas, show warning but apply
4. **Explicit command**: `supabase db sync` for manual trigger when `auto_apply = false`

```toml
[dev.schemas]
auto_apply = true # Default: auto-apply on save
# auto_apply = false # Alternative: preview only, use `supabase db sync` to apply
```

### Recommendation

**Start with current behavior (auto-apply)** but add:
1. `auto_apply = false` option for users who want explicit control
2. Smart warnings for "likely incomplete" schemas (empty tables, etc.)
3. `supabase db sync` command for manual application

This gives users a choice:
- **Rapid prototyping**: `auto_apply = true` (default) - accept some churn for speed
- **Careful development**: `auto_apply = false` - diff on demand only

---

## Performance Optimization: Persistent Shadow Database

### Problem

Currently, each diff cycle takes ~15s:
- Shadow DB container creation: ~11s (Docker overhead)
- Migration application: ~3s (same migrations every time)
- Schema application + diff: ~500ms

This is too slow for a reactive dev experience.

### Solution: Persistent Shadow with Template Database

Keep the shadow container running and use PostgreSQL's `CREATE DATABASE ... TEMPLATE` for fast resets.

#### Architecture

```
First run (cold start ~14s):
  1. Start persistent shadow container
  2. Apply all migrations → creates baseline state
  3. Snapshot baseline roles: SELECT rolname FROM pg_roles
  4. CREATE DATABASE shadow_template TEMPLATE contrib_regression
  5. Apply declared schemas to contrib_regression
  6. Diff

Subsequent runs (fast path ~500ms):
  1. Clean cluster-wide objects (roles not in baseline)
  2. DROP DATABASE contrib_regression
  3. CREATE DATABASE contrib_regression TEMPLATE shadow_template
  4. Apply declared schemas
  5. 
Diff +``` + +#### Why Template + Role Tracking? + +PostgreSQL template databases only copy **database-scoped objects**: +- Tables, views, functions, triggers ✓ +- Extensions ✓ +- Schemas ✓ + +They do NOT copy **cluster-wide objects**: +- Roles (CREATE ROLE, ALTER ROLE) ✗ +- Role memberships ✗ +- Tablespaces ✗ + +If declared schemas contain `CREATE ROLE`, we must track and clean them explicitly. + +#### Implementation + +```go +// internal/dev/shadow.go + +type ShadowState struct { + ContainerID string + BaselineRoles []string // Roles after migrations, before declared schemas + TemplateReady bool + MigrationsHash string // Invalidate template if migrations change +} + +// EnsureShadowReady prepares the shadow database for diffing +func (s *ShadowState) EnsureShadowReady(ctx context.Context, fsys afero.Fs) error { + // Check if container exists and is healthy + if !s.isContainerHealthy(ctx) { + // Cold start: create container, apply migrations, create template + return s.coldStart(ctx, fsys) + } + + // Check if migrations changed (invalidates template) + currentHash := s.hashMigrations(fsys) + if currentHash != s.MigrationsHash { + return s.rebuildTemplate(ctx, fsys) + } + + // Fast path: reset from template + return s.resetFromTemplate(ctx) +} + +// resetFromTemplate quickly resets the database state +func (s *ShadowState) resetFromTemplate(ctx context.Context) error { + conn := s.connectToShadow(ctx) + defer conn.Close() + + // 1. Clean cluster-wide objects created by declared schemas + currentRoles := s.queryRoles(ctx, conn) + for _, role := range currentRoles { + if !slices.Contains(s.BaselineRoles, role) { + conn.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %q", role)) + } + } + + // 2. 
Reset database from template + conn.Exec(ctx, "DROP DATABASE IF EXISTS contrib_regression") + conn.Exec(ctx, "CREATE DATABASE contrib_regression TEMPLATE shadow_template") + + return nil +} + +// coldStart creates container and builds initial template +func (s *ShadowState) coldStart(ctx context.Context, fsys afero.Fs) error { + // 1. Create and start shadow container + s.ContainerID = createShadowContainer(ctx) + waitForHealthy(ctx, s.ContainerID) + + // 2. Apply migrations + applyMigrations(ctx, s.ContainerID, fsys) + + // 3. Snapshot baseline roles + s.BaselineRoles = s.queryRoles(ctx, conn) + + // 4. Create template from current state + conn.Exec(ctx, "CREATE DATABASE shadow_template TEMPLATE contrib_regression") + s.TemplateReady = true + s.MigrationsHash = s.hashMigrations(fsys) + + return nil +} +``` + +#### Migration Hash Strategy + +Invalidate the template when migrations change: + +```go +func (s *ShadowState) hashMigrations(fsys afero.Fs) string { + h := sha256.New() + + // Walk migrations directory in sorted order + files, _ := afero.ReadDir(fsys, "supabase/migrations") + for _, f := range files { + content, _ := afero.ReadFile(fsys, filepath.Join("supabase/migrations", f.Name())) + h.Write([]byte(f.Name())) + h.Write(content) + } + + return hex.EncodeToString(h.Sum(nil)) +} +``` + +#### Container Lifecycle + +The shadow container is managed separately from the main `supabase start` containers: + +| Event | Action | +|-------|--------| +| `supabase dev` starts | Start shadow if not running | +| `supabase dev` file change | Reuse existing shadow | +| `supabase dev` exits | Keep shadow running (for next session) | +| `supabase stop` | Stop shadow container | +| Migrations change | Rebuild template (keep container) | + +#### Expected Performance + +| Scenario | Time | +|----------|------| +| First run (cold) | ~14s | +| Subsequent runs (warm) | ~500ms | +| After migration change | ~3s (rebuild template) | + +#### Files to Create/Modify + +| File | Purpose 
| +|------|---------| +| `internal/dev/shadow.go` | New - Shadow state management | +| `internal/dev/differ.go` | Modify - Use ShadowState instead of creating new container | +| `internal/stop/stop.go` | Modify - Stop shadow container on `supabase stop` | + +--- + +## Future Workflows (Out of Scope for Now) + +The dev command architecture supports adding more watchers later: +- **Edge functions** (`[dev.functions]`) - Watch and hot-reload edge functions +- **Seed data** (`[dev.seed]`) - Auto-apply seed files on change +- **Type generation** - Already supported via `types` option diff --git a/PULL_REQUEST.md b/PULL_REQUEST.md new file mode 100644 index 000000000..942940267 --- /dev/null +++ b/PULL_REQUEST.md @@ -0,0 +1,441 @@ +# feat: Add `supabase dev` command for reactive schema development + +## Summary + +This PR introduces a new `supabase dev` command that provides a reactive development experience for database schema changes. The command watches `supabase/schemas/` for SQL file changes and automatically validates, diffs, and applies them to the local database **without creating migration files**. + +**Core principle**: Migrations are an implementation detail for deployment. During development, users just want to evolve their schema and see changes reflected quickly. + +## Motivation + +Currently, developers working with Supabase schemas must manually run `supabase db diff` and `supabase db push` after every change. This creates friction in the development workflow, especially during rapid prototyping. The `supabase dev` command eliminates this friction by: + +1. Automatically detecting file changes +2. Validating SQL syntax before attempting to apply +3. Computing diffs and applying changes in real-time +4. Warning about destructive changes (DROP statements) +5. Tracking "dirty" state to remind users to create migrations before deployment + +## Features + +### 1. 
Reactive Schema Watching + +```bash +$ supabase dev + +[dev] Watching: [schemas/**/*.sql] +[dev] On change: (internal differ) +[dev] Press Ctrl+C to stop + +[dev] Applying initial schema state... +[dev] Initial sync complete +[dev] Watching for changes... + +[dev] Change detected: supabase/schemas/users.sql +✓ Schema changes applied successfully +Applied: + CREATE TABLE public.profiles (id uuid PRIMARY KEY, name text); + +[dev] Watching for changes... +``` + +### 2. SQL Validation Gate + +Before attempting to diff, all schema files are validated using `pg_query_go` (Go bindings to Postgres's actual parser). This provides immediate feedback about syntax errors: + +``` +[dev] Change detected: supabase/schemas/users.sql +Syntax error in supabase/schemas/users.sql + Line 3, Column 8: syntax error at or near "TABL" + Waiting for valid SQL... +``` + +### 3. DROP Statement Warnings + +Destructive changes are detected and highlighted: + +``` +Warning: Found DROP statements: + DROP TABLE public.old_table; +✓ Schema changes applied successfully +``` + +### 4. Dirty State Tracking + +On exit, the command warns if the local database has uncommitted schema changes: + +``` +^C +[dev] Stopping dev session... +Warning: Local database has uncommitted schema changes! + Run 'supabase db diff -f migration_name' to create a migration +``` + +### 5. 
Extensible Workflow Configuration + +The command supports multiple workflows through configuration: + +```toml +[dev.schemas] +enabled = true # Enable/disable schema workflow +watch = ["schemas/**/*.sql"] # Glob patterns to watch +on_change = "" # Custom command (e.g., "npx drizzle-kit push") +types = "src/types/database.ts" # Auto-generate TypeScript types +``` + +**Supported workflows:** +- **Supabase native** (default): SQL files in `supabase/schemas/` +- **Drizzle ORM**: `on_change = "npx drizzle-kit push"` +- **Prisma ORM**: `on_change = "npx prisma db push --skip-generate"` +- **Disabled**: `enabled = false` for users with their own watch tooling + +### 6. Automatic Seeding + +The dev command includes automatic seeding support that runs: +- **On startup**: After initial schema sync +- **On seed file changes**: When seed files are modified + +```bash +$ supabase dev + +[dev] Watching: [schemas/**/*.sql] +[dev] On change: (internal differ) +[dev] Seed: (internal) +[dev] Press Ctrl+C to stop + +[dev] Applying initial schema state... +[dev] Initial sync complete +[dev] Running initial seed... +Seeding data from seed.sql... +[dev] Initial seed complete +[dev] Watching for changes... + +# User edits seed.sql +[dev] Seed file change detected: supabase/seed.sql +[dev] Reseeding database... +Seeding data from seed.sql... +[dev] Reseed complete +[dev] Watching for changes... +``` + +**Configuration:** + +```toml +[dev.seed] +enabled = true # Enable/disable seed workflow (default: true) +on_change = "" # Custom command (e.g., "npx prisma db seed") +``` + +- Seed file patterns come from existing `[db.seed].sql_paths` config +- When `on_change` is empty, uses internal seeding from `[db.seed]` config +- Schema changes do NOT trigger reseeding (only seed file changes do) + +### 7. Migration File Detection + +The watcher also monitors the `migrations/` directory. 
When a migration file is created (e.g., by running `supabase db diff -f` in another terminal), the shadow template is invalidated: + +``` +[dev] Migration file changed - shadow template invalidated +[dev] Note: Run 'supabase db reset' or restart dev mode to apply new migrations +[dev] Watching for changes... +``` + +**Why we don't auto-apply migrations:** +- If you create a migration with `db diff -f`, your local DB already has those changes (that's what was diffed) +- Auto-applying would fail or cause conflicts +- The shadow template is invalidated so the next diff cycle uses the updated migrations +- For external migrations (e.g., from `git pull`), restart dev mode or run `db reset` + +### 8. Debug Logging + +Namespaced debug logging (similar to Node.js debug package): + +```bash +DEBUG=supabase:dev:* # All dev logs +DEBUG=supabase:dev:timing # Timing information +DEBUG=supabase:dev:watcher # File watcher logs +DEBUG=supabase:dev:sql # SQL statements being executed +``` + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ supabase dev │ +├─────────────────────────────────────────────────────────────────┤ +│ DevSession │ +│ ├── Ensures local DB is running (starts if needed) │ +│ ├── Coordinates schema and seed watching │ +│ ├── Runs initial schema sync + seed on startup │ +│ └── Manages graceful shutdown with cleanup │ +├─────────────────────────────────────────────────────────────────┤ +│ SchemaWatcher │ +│ ├── Watches configurable glob patterns + migrations/ + seeds │ +│ ├── Debounces file changes (500ms) │ +│ ├── Detects migration vs schema vs seed changes │ +│ ├── Ignores temp files (.swp, ~, .tmp, etc.) 
│ +│ └── Dynamically watches new subdirectories │ +├─────────────────────────────────────────────────────────────────┤ +│ SQLValidator │ +│ ├── Uses pg_query_go (libpg_query bindings) │ +│ ├── Validates ALL .sql files before diffing │ +│ └── Returns detailed error positions │ +├─────────────────────────────────────────────────────────────────┤ +│ Differ (pg-delta) │ +│ ├── Uses @supabase/pg-delta via Bun in Docker │ +│ ├── Compares local DB vs shadow DB with declared schemas │ +│ └── Outputs SQL migration statements │ +├─────────────────────────────────────────────────────────────────┤ +│ SeedRunner │ +│ ├── Runs on startup (after schema sync) + on seed file changes│ +│ ├── Supports custom on_change commands (e.g., Prisma, Drizzle)│ +│ └── Uses internal SeedData() when on_change is empty │ +├─────────────────────────────────────────────────────────────────┤ +│ Shadow Database (optimized) │ +│ ├── Persistent container with PostgreSQL template databases │ +│ ├── Fast reset via CREATE DATABASE ... TEMPLATE (~10ms) │ +│ ├── Tracks cluster-wide objects (roles) separately │ +│ └── Proper cleanup on exit │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### Pipeline Flow + +``` +File save → Debounce (500ms) → Validate ALL *.sql → Diff → Apply + │ + ↓ (if invalid) + Show error with location + Wait for next save... +``` + +## Performance Optimization: Persistent Shadow Database + +### Problem + +Without optimization, each diff cycle would take ~15s: +- Shadow DB container creation: ~11s (Docker overhead) +- Migration application: ~3s (same migrations every time) +- Schema application + diff: ~500ms + +### Solution: Template Database Approach + +The shadow container persists across file changes and uses PostgreSQL's `CREATE DATABASE ... TEMPLATE` for fast resets: + +**First run (cold start ~14s):** +1. Start persistent shadow container (named `supabase_db__shadow`) +2. Apply all migrations → creates baseline state +3. 
Snapshot baseline roles: `SELECT rolname FROM pg_roles` +4. `CREATE DATABASE shadow_template TEMPLATE contrib_regression` +5. Ready for diffing + +**Subsequent runs (fast path ~10ms):** +1. Clean cluster-wide objects (roles not in baseline) +2. `DROP DATABASE contrib_regression` +3. `CREATE DATABASE contrib_regression TEMPLATE shadow_template` +4. Apply declared schemas +5. Diff with pg-delta + +### Why Template + Role Tracking? + +PostgreSQL template databases only copy **database-scoped objects** (tables, views, functions, extensions). They do NOT copy **cluster-wide objects** (roles, role memberships). If declared schemas contain `CREATE ROLE`, we must track and clean them explicitly between diffs. + +## New Files + +| File | Description | +|------|-------------| +| `cmd/dev.go` | Cobra command definition | +| `internal/dev/dev.go` | Main session orchestration and lifecycle | +| `internal/dev/watcher.go` | File watcher with debounce and glob matching | +| `internal/dev/validator.go` | SQL syntax validator using pg_query_go | +| `internal/dev/differ.go` | Diff computation using pg-delta and application | +| `internal/dev/shadow.go` | Persistent shadow database management | +| `internal/dev/seed.go` | Seed runner with custom command support | +| `internal/dev/debug.go` | Namespaced debug loggers | + +## Modified Files + +| File | Changes | +|------|---------| +| `go.mod` / `go.sum` | Added `pg_query_go/v6` and `doublestar/v4` dependencies | +| `pkg/config/config.go` | Added `[dev.schemas]`, `[dev.functions]`, and `[dev.seed]` config sections | +| `internal/db/diff/diff.go` | Added `CreateShadowDatabaseWithName()` for named containers | +| `internal/utils/docker.go` | Fixed third-party image registry handling (e.g., `oven/bun`) | +| `internal/utils/logger.go` | Added `DebugLogger` with namespaced pattern matching | + +## New Dependencies + +- `github.com/pganalyze/pg_query_go/v6` - Go bindings to libpg_query for SQL validation +- `github.com/bmatcuk/doublestar/v4` 
- Glob pattern matching with `**` support + +## CLI Usage + +```bash +# Start dev mode (starts database if not running) +supabase dev + +# With debug logging +DEBUG=supabase:dev:* supabase dev +``` + +## Configuration Examples + +### Default (Supabase users) + +No configuration needed - works out of the box: + +```bash +# Edit supabase/schemas/tables.sql +# Changes are automatically applied to local database +# Run `supabase db diff -f migration_name` when ready +``` + +### Drizzle ORM + +```toml +[dev.schemas] +watch = ["../src/db/schema/**/*.ts"] +on_change = "npx drizzle-kit push" +``` + +### Prisma ORM + +```toml +[dev.schemas] +watch = ["../prisma/schema.prisma"] +on_change = "npx prisma db push --skip-generate" +``` + +### With TypeScript Types + +```toml +[dev.schemas] +types = "src/types/database.ts" +``` + +### Disabled (use external tooling) + +```toml +[dev.schemas] +enabled = false +``` + +### Custom Seed Command (Prisma) + +```toml +[dev.seed] +on_change = "npx prisma db seed" +``` + +### Custom Seed Command (Drizzle) + +```toml +[dev.seed] +on_change = "npx tsx ./scripts/seed.ts" +``` + +### Disable Seeding in Dev Mode + +```toml +[dev.seed] +enabled = false +``` + +## Testing + +### Manual Testing Checklist + +- [ ] Run `supabase dev` - database starts if not running +- [ ] Edit a schema file and save - changes applied automatically +- [ ] Introduce a syntax error - validation error shown, no diff attempted +- [ ] Save rapidly multiple times - debounce prevents multiple diffs +- [ ] Add a DROP statement - warning shown before applying +- [ ] Press Ctrl+C with changes applied - dirty state warning shown +- [ ] Run `supabase db diff` - accumulated changes visible +- [ ] Create new subdirectory in schemas/ - automatically watched +- [ ] Add a new migration file - shadow template invalidated (not auto-applied) +- [ ] Run with `DEBUG=supabase:dev:*` - debug logs visible +- [ ] Initial seed runs after schema sync on startup +- [ ] Edit seed file - database 
is reseeded automatically +- [ ] Schema change does NOT trigger reseed +- [ ] Custom `on_change` seed command works (e.g., Prisma) +- [ ] `[dev.seed] enabled = false` disables seeding + +### Edge Cases + +- Empty schema files (valid, skipped) +- Schema files with only comments (valid) +- Multiple syntax errors (first error shown) +- Shadow container already exists (reused) +- Database stops during dev session (detected, error shown) +- New migration file added while dev mode running (shadow invalidated, not auto-applied) +- Missing seed files (handled gracefully, warning shown) +- Seed errors don't crash dev mode (warning shown, continues watching) + +## Known Limitations + +### Running `db diff` while dev mode is active + +Currently, `supabase db diff -f` cannot run in parallel with `supabase dev` because both use the same shadow database port. If you try to run `db diff` while dev is running, it will fail with a port conflict. + +**Workarounds:** +1. Stop dev mode → run `db diff -f` → restart dev mode +2. Wait for the future "m" shortcut (runs within dev process, no port conflict) + +This will be addressed in a future PR by either: +- Using different ports for dev vs diff shadow databases +- Implementing the "m" shortcut that creates migrations from within dev mode + +## Next Steps (Future PRs) + +The following enhancements are planned for follow-up PRs: + +### 1. Drift Detection on `db push` + +When running `supabase db push`, warn if the local database has schema changes that aren't captured in migration files: + +``` +$ supabase db push + +Warning: Your local database has schema changes not captured in migrations: + - Table: public.profiles (new) + - Column: public.users.full_name (new) + +These changes will NOT be pushed to the remote database. +Run 'supabase db diff -f migration_name' to create a migration first. + +Continue anyway? [y/N] +``` + +This prevents accidentally deploying without the schema changes developed in dev mode. + +### 2. 
Interactive Terminal UI + +Enhance the dev mode terminal with: +- **Keyboard shortcuts**: Press `m` to create a migration, `r` to restart, `q` to quit +- **Project status**: Show linked project ID, current branch, dirty state +- **Better formatting**: Colored output, progress indicators + +### 3. Edge Functions Workflow + +The `[dev.functions]` config structure is already in place. Future work includes: +- Watch edge functions for changes +- Auto-deploy to local edge runtime +- Unified dev experience for schema + functions + +### 4. Additional Workflows + +- **Lazy service startup** - start only database immediately, other services on-demand +- **`auto_apply = false`** - preview mode requiring explicit sync command + +## Breaking Changes + +None. This is a new command that doesn't affect existing functionality. + +## Related Issues + + + +--- + +🤖 Generated with [Claude Code](https://claude.ai/code) diff --git a/cmd/dev.go b/cmd/dev.go new file mode 100644 index 000000000..651546c25 --- /dev/null +++ b/cmd/dev.go @@ -0,0 +1,36 @@ +package cmd + +import ( + "github.com/spf13/afero" + "github.com/spf13/cobra" + "github.com/supabase/cli/internal/dev" +) + +var devCmd = &cobra.Command{ + GroupID: groupLocalDev, + Use: "dev", + Short: "Start reactive development mode with auto-schema sync", + Long: `Start a development session that watches for schema changes +and automatically applies them to your local database. 
+ +This command: +- Starts the local database if not running +- Watches supabase/schemas/ for changes +- Automatically diffs and applies schema changes +- Does NOT create migration files (use 'supabase db diff -f' for that) + +Enable debug logging with DEBUG environment variable: + DEBUG=supabase:dev:* - all dev logs + DEBUG=supabase:dev:timing - timing information + DEBUG=supabase:dev:watcher - file watcher logs + DEBUG=supabase:dev:sql - SQL statements being executed + +Press Ctrl+C to stop the development session.`, + RunE: func(cmd *cobra.Command, args []string) error { + return dev.Run(cmd.Context(), afero.NewOsFs()) + }, +} + +func init() { + rootCmd.AddCommand(devCmd) +} diff --git a/go.mod b/go.mod index 460c1da5a..18091e8af 100644 --- a/go.mod +++ b/go.mod @@ -40,6 +40,7 @@ require ( github.com/muesli/reflow v0.3.0 github.com/oapi-codegen/nullable v1.1.0 github.com/olekukonko/tablewriter v1.1.3 + github.com/pganalyze/pg_query_go/v6 v6.1.0 github.com/slack-go/slack v0.17.3 github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.2 @@ -109,6 +110,7 @@ require ( github.com/bitfield/gotestdox v0.2.2 // indirect github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.10.0 // indirect github.com/bombsimon/wsl/v4 v4.7.0 // indirect github.com/breml/bidichk v0.3.3 // indirect github.com/breml/errchkjson v0.4.1 // indirect diff --git a/go.sum b/go.sum index 2b2e1494f..945649a54 100644 --- a/go.sum +++ b/go.sum @@ -132,6 +132,8 @@ github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8 github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs= 
+github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= @@ -887,6 +889,8 @@ github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0 github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pganalyze/pg_query_go/v6 v6.1.0 h1:jG5ZLhcVgL1FAw4C/0VNQaVmX1SUJx71wBGdtTtBvls= +github.com/pganalyze/pg_query_go/v6 v6.1.0/go.mod h1:nvTHIuoud6e1SfrUaFwHqT0i4b5Nr+1rPWVds3B5+50= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= @@ -1428,6 +1432,7 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/internal/db/diff/diff.go 
b/internal/db/diff/diff.go index 744ad381b..eecc3480e 100644 --- a/internal/db/diff/diff.go +++ b/internal/db/diff/diff.go @@ -90,18 +90,25 @@ func findDropStatements(out string) []string { } func CreateShadowDatabase(ctx context.Context, port uint16) (string, error) { + return CreateShadowDatabaseWithName(ctx, port, "", true) +} + +// CreateShadowDatabaseWithName creates a shadow database container with a specific name. +// If name is empty, Docker will assign a random name. +// If autoRemove is true, the container will be automatically removed when stopped. +func CreateShadowDatabaseWithName(ctx context.Context, port uint16, name string, autoRemove bool) (string, error) { // Disable background workers in shadow database config := start.NewContainerConfig("-c", "max_worker_processes=0") hostPort := strconv.FormatUint(uint64(port), 10) hostConfig := container.HostConfig{ PortBindings: nat.PortMap{"5432/tcp": []nat.PortBinding{{HostPort: hostPort}}}, - AutoRemove: true, + AutoRemove: autoRemove, } networkingConfig := network.NetworkingConfig{} if utils.Config.Db.MajorVersion <= 14 { hostConfig.Tmpfs = map[string]string{"/docker-entrypoint-initdb.d": ""} } - return utils.DockerStart(ctx, config, hostConfig, networkingConfig, "") + return utils.DockerStart(ctx, config, hostConfig, networkingConfig, name) } func ConnectShadowDatabase(ctx context.Context, timeout time.Duration, options ...func(*pgx.ConnConfig)) (conn *pgx.Conn, err error) { diff --git a/internal/db/diff/templates/pgdelta.ts b/internal/db/diff/templates/pgdelta.ts index d809cb57a..cfdac0aa4 100644 --- a/internal/db/diff/templates/pgdelta.ts +++ b/internal/db/diff/templates/pgdelta.ts @@ -1,5 +1,5 @@ -import { createPlan } from "npm:@supabase/pg-delta@1.0.0-alpha.1"; -import { supabase } from "npm:@supabase/pg-delta@1.0.0-alpha.1/integrations/supabase"; +import { createPlan } from "npm:@supabase/pg-delta@1.0.0-alpha.2"; +import { supabase } from 
"npm:@supabase/pg-delta@1.0.0-alpha.2/integrations/supabase"; const source = Deno.env.get("SOURCE"); const target = Deno.env.get("TARGET"); diff --git a/internal/dev/debug.go b/internal/dev/debug.go new file mode 100644 index 000000000..97bb289ae --- /dev/null +++ b/internal/dev/debug.go @@ -0,0 +1,15 @@ +package dev + +import "github.com/supabase/cli/internal/utils" + +// Namespaced debug loggers for the dev package +// Enable via DEBUG environment variable: +// DEBUG=supabase:dev:* - all dev logs +// DEBUG=supabase:dev:timing - only timing logs +// DEBUG=supabase:dev:watcher - only watcher logs +// DEBUG=supabase:dev:sql - SQL statements being executed +var ( + timingLog = utils.NewDebugLogger("supabase:dev:timing") + watcherLog = utils.NewDebugLogger("supabase:dev:watcher") + sqlLog = utils.NewDebugLogger("supabase:dev:sql") +) diff --git a/internal/dev/dev.go b/internal/dev/dev.go new file mode 100644 index 000000000..c5202aef9 --- /dev/null +++ b/internal/dev/dev.go @@ -0,0 +1,392 @@ +package dev + +import ( + "context" + "fmt" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "syscall" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/start" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +// Run starts the dev session +func Run(ctx context.Context, fsys afero.Fs) error { + // Load config first + if err := flags.LoadConfig(fsys); err != nil { + return err + } + + // Ensure local database is running + if err := ensureDbRunning(ctx, fsys); err != nil { + return err + } + + // Create and run the dev session + session := NewSession(ctx, fsys) + return session.Run() +} + +// ensureDbRunning starts the local database if it's not already running +func ensureDbRunning(ctx context.Context, fsys afero.Fs) error { + if err := utils.AssertSupabaseDbIsRunning(); err == nil { + fmt.Fprintln(os.Stderr, "Using existing local database") + return nil + } else if 
!errors.Is(err, utils.ErrNotRunning) {
		// Any failure other than "not running" (e.g. docker unreachable) is fatal.
		return err
	}

	// DB is not running; boot it via the start package.
	// NOTE(review): assumes start.Run(ctx, fsys, nil, false) means "no excluded
	// containers, don't skip health checks" — confirm parameter semantics.
	fmt.Fprintln(os.Stderr, "Starting local database...")
	return start.Run(ctx, fsys, nil, false)
}

// Session manages the dev mode lifecycle.
// It owns the watch/apply loop and remembers whether any schema change was
// applied to the local database so a warning can be shown on shutdown.
type Session struct {
	ctx    context.Context    // session-scoped context; cancelled on shutdown
	cancel context.CancelFunc // cancels ctx; paired with the WithCancel in NewSession
	fsys   afero.Fs           // filesystem abstraction (real FS in production, mock in tests)
	dirty  bool               // tracks whether schema changes have been applied
}

// NewSession creates a new dev session.
// The returned session wraps the parent context so the whole loop can be
// cancelled as one unit.
func NewSession(ctx context.Context, fsys afero.Fs) *Session {
	ctx, cancel := context.WithCancel(ctx)
	return &Session{
		ctx:    ctx,
		cancel: cancel,
		fsys:   fsys,
		dirty:  false,
	}
}


// Run starts the dev session main loop.
// With the schema workflow disabled it only blocks until SIGINT/SIGTERM;
// otherwise it sets up the watcher, performs an initial sync + seed, then
// reacts to file-change events until cancelled.
func (s *Session) Run() error {
	schemasConfig := &utils.Config.Dev.Schemas

	// Check if schemas workflow is enabled
	if !schemasConfig.IsEnabled() {
		fmt.Fprintln(os.Stderr, "[dev] Schema workflow is disabled in config")
		fmt.Fprintln(os.Stderr, "[dev] Press Ctrl+C to stop")

		// Set up signal handling for graceful shutdown
		sigCh := make(chan os.Signal, 1)
		signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
		<-sigCh
		fmt.Fprintln(os.Stderr)
		fmt.Fprintln(os.Stderr, "[dev] Stopping dev session...")
		return nil
	}

	// Get watch globs from config
	watchGlobs := schemasConfig.Watch
	if len(watchGlobs) == 0 {
		// Fallback to default if not configured
		watchGlobs = []string{"schemas/**/*.sql"}
	}

	// Validate config on startup (warnings only; never fails the session)
	s.validateConfig()

	// Create schemas directory if using default pattern and it doesn't exist
	if exists, err := afero.DirExists(s.fsys, utils.SchemasDir); err != nil {
		return errors.Errorf("failed to check schemas directory: %w", err)
	} else if !exists {
		fmt.Fprintf(os.Stderr, "[dev] Creating %s directory...\n", utils.Aqua(utils.SchemasDir))
		if err := s.fsys.MkdirAll(utils.SchemasDir, 0755); err != nil {
			return errors.Errorf("failed to create schemas directory: %w", err)
		}
	}

	// Set up signal handling
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh,
syscall.SIGINT, syscall.SIGTERM) + + // Get seed globs from [db.seed] config (already resolved to absolute paths) + seedConfig := &utils.Config.Dev.Seed + var seedGlobs []string + if seedConfig.IsEnabled() && utils.Config.Db.Seed.Enabled { + seedGlobs = utils.Config.Db.Seed.SqlPaths + } + + // Display configuration + fmt.Fprintln(os.Stderr) + fmt.Fprintf(os.Stderr, "[dev] Watching: %v\n", watchGlobs) + if schemasConfig.OnChange != "" { + fmt.Fprintf(os.Stderr, "[dev] On change: %s\n", utils.Aqua(schemasConfig.OnChange)) + } else { + fmt.Fprintf(os.Stderr, "[dev] On change: %s\n", utils.Aqua("(internal differ)")) + } + if schemasConfig.Types != "" { + fmt.Fprintf(os.Stderr, "[dev] Types output: %s\n", utils.Aqua(schemasConfig.Types)) + } + if seedConfig.IsEnabled() { + if seedConfig.OnChange != "" { + fmt.Fprintf(os.Stderr, "[dev] Seed: %s\n", utils.Aqua(seedConfig.OnChange)) + } else if utils.Config.Db.Seed.Enabled && len(utils.Config.Db.Seed.SqlPaths) > 0 { + fmt.Fprintf(os.Stderr, "[dev] Seed: %s\n", utils.Aqua("(internal)")) + } + } + fmt.Fprintln(os.Stderr, "[dev] Press Ctrl+C to stop") + fmt.Fprintln(os.Stderr) + + // Create the schema watcher + watcher, err := NewSchemaWatcher(s.fsys, watchGlobs, seedGlobs) + if err != nil { + return err + } + defer watcher.Close() + + // Start the watcher + go watcher.Start() + + // Apply initial schema state + fmt.Fprintln(os.Stderr, "[dev] Applying initial schema state...") + if err := s.applySchemaChanges(); err != nil { + fmt.Fprintf(os.Stderr, "[dev] %s %s\n", utils.Yellow("Warning:"), err.Error()) + } else { + fmt.Fprintln(os.Stderr, "[dev] Initial sync complete") + } + + // Run initial seed (after schema sync) + if seedConfig.IsEnabled() { + fmt.Fprintln(os.Stderr, "[dev] Running initial seed...") + if err := s.runSeed(); err != nil { + fmt.Fprintf(os.Stderr, "[dev] %s %s\n", utils.Yellow("Warning:"), err.Error()) + } else { + fmt.Fprintln(os.Stderr, "[dev] Initial seed complete") + } + } + + fmt.Fprintln(os.Stderr, 
"[dev] Watching for changes...") + + // Main event loop + for { + select { + case <-s.ctx.Done(): + CleanupShadow(s.ctx) + return s.ctx.Err() + case <-sigCh: + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "[dev] Stopping dev session...") + CleanupShadow(s.ctx) + s.showDirtyWarning() + return nil + case <-watcher.RestartCh: + // Check if seeds changed - if so, reseed the database + if watcher.SeedsChanged() { + fmt.Fprintln(os.Stderr, "[dev] Reseeding database...") + if err := s.runSeed(); err != nil { + fmt.Fprintf(os.Stderr, "[dev] %s %s\n", utils.Red("Error:"), err.Error()) + } else { + fmt.Fprintln(os.Stderr, "[dev] Reseed complete") + } + fmt.Fprintln(os.Stderr, "[dev] Watching for changes...") + continue + } + + // Check if migrations changed - if so, just invalidate the shadow template + // We do NOT auto-apply migrations because: + // 1. If created by `db diff -f`, local DB already has those changes + // 2. If from external source (git pull), user should restart dev mode + if watcher.MigrationsChanged() { + fmt.Fprintln(os.Stderr, "[dev] Migration file changed - shadow template invalidated") + InvalidateShadowTemplate() + // Don't trigger schema diff - migrations need manual application + // The next schema file change will use the updated shadow + fmt.Fprintln(os.Stderr, "[dev] Note: Run 'supabase db reset' or restart dev mode to apply new migrations") + fmt.Fprintln(os.Stderr, "[dev] Watching for changes...") + continue + } + + fmt.Fprintln(os.Stderr, "[dev] Applying schema changes...") + if err := s.applySchemaChanges(); err != nil { + fmt.Fprintf(os.Stderr, "[dev] %s %s\n", utils.Red("Error:"), err.Error()) + } else { + fmt.Fprintln(os.Stderr, "[dev] Changes applied successfully") + } + fmt.Fprintln(os.Stderr, "[dev] Watching for changes...") + case err := <-watcher.ErrCh: + CleanupShadow(s.ctx) + return errors.Errorf("watcher error: %w", err) + } + } +} + +// validateConfig checks the configuration and warns about potential issues +func (s 
*Session) validateConfig() {
	schemasCfg := &utils.Config.Dev.Schemas
	seedCfg := &utils.Config.Dev.Seed

	// Warn if schema on_change command might not exist
	if schemasCfg.OnChange != "" {
		s.validateOnChangeCommand(schemasCfg.OnChange, "schemas")
	}

	// Warn if seed on_change command might not exist
	if seedCfg.OnChange != "" {
		s.validateOnChangeCommand(seedCfg.OnChange, "seed")
	}

	// Warn if types output directory doesn't exist
	if schemasCfg.Types != "" {
		dir := filepath.Dir(schemasCfg.Types)
		if dir != "." && dir != "" {
			if exists, _ := afero.DirExists(s.fsys, dir); !exists {
				fmt.Fprintf(os.Stderr, "[dev] %s types output directory '%s' does not exist\n", utils.Yellow("Warning:"), dir)
			}
		}
	}
}

// validateOnChangeCommand checks whether the executable named in an
// on_change command resolves in PATH, and prints a warning (never an error)
// when it does not. Package-manager launchers are exempt because they
// resolve their target package at run time, not via PATH.
func (s *Session) validateOnChangeCommand(command, configSection string) {
	cmdParts := strings.Fields(command)
	if len(cmdParts) == 0 {
		return
	}
	cmdName := cmdParts[0]
	// Known package-manager launchers; skip the PATH check for these.
	switch cmdName {
	case "npx", "npm", "yarn", "pnpm", "bunx":
		return
	}
	if _, err := exec.LookPath(cmdName); err != nil {
		fmt.Fprintf(os.Stderr, "[dev] %s %s on_change command '%s' not found in PATH\n", utils.Yellow("Warning:"), configSection, cmdName)
	}
}

// applySchemaChanges validates and applies schema changes to the local database.
// It verifies the DB is still up, delegates to a custom on_change command when
// configured, and otherwise runs validate -> diff -> apply -> (optional) typegen.
func (s *Session) applySchemaChanges() error {
	schemasConfig := &utils.Config.Dev.Schemas

	// Step 0: Verify DB is still running
	if err := utils.AssertSupabaseDbIsRunning(); err != nil {
		return errors.Errorf("local database stopped unexpectedly: %w", err)
	}

	// Check if we should use a custom on_change command
	if schemasConfig.OnChange != "" {
		return s.runCustomOnChange(schemasConfig.OnChange)
	}

	// Step 1: Load all schema files
	schemaFiles, err := loadSchemaFiles(s.fsys)
	if err != nil {
		return err
	}

	if len(schemaFiles) == 0 {
fmt.Fprintln(os.Stderr, "No schema files found") + return nil + } + + // Step 2: Validate SQL syntax of all schema files + if err := ValidateSchemaFiles(schemaFiles, s.fsys); err != nil { + return err + } + + // Step 3: Run diff and apply changes + if err := s.diffAndApply(); err != nil { + return err + } + + // Step 4: Generate types if configured + if schemasConfig.Types != "" { + if err := s.generateTypes(schemasConfig.Types); err != nil { + fmt.Fprintf(os.Stderr, "%s Failed to generate types: %s\n", utils.Yellow("Warning:"), err.Error()) + } + } + + return nil +} + +// runCustomOnChange executes a custom command when files change +func (s *Session) runCustomOnChange(command string) error { + fmt.Fprintf(os.Stderr, "Running: %s\n", utils.Aqua(command)) + + cmd := exec.CommandContext(s.ctx, "sh", "-c", command) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Dir = utils.CurrentDirAbs + + if err := cmd.Run(); err != nil { + return errors.Errorf("on_change command failed: %w", err) + } + + s.dirty = true + + // Generate types if configured (runs after custom command too) + schemasConfig := &utils.Config.Dev.Schemas + if schemasConfig.Types != "" { + if err := s.generateTypes(schemasConfig.Types); err != nil { + fmt.Fprintf(os.Stderr, "%s Failed to generate types: %s\n", utils.Yellow("Warning:"), err.Error()) + } + } + + return nil +} + +// generateTypes generates TypeScript types and writes them to the configured path +func (s *Session) generateTypes(outputPath string) error { + fmt.Fprintf(os.Stderr, "Generating types to %s...\n", utils.Aqua(outputPath)) + + // Run supabase gen types typescript --local + cmd := exec.CommandContext(s.ctx, "supabase", "gen", "types", "typescript", "--local") + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return errors.Errorf("type generation failed: %s", string(exitErr.Stderr)) + } + return errors.Errorf("type generation failed: %w", err) + } + + // Write output to file + if 
err := afero.WriteFile(s.fsys, outputPath, output, 0644); err != nil { + return errors.Errorf("failed to write types file: %w", err) + } + + fmt.Fprintf(os.Stderr, "Types generated: %s\n", utils.Aqua(outputPath)) + return nil +} + +// diffAndApply runs the schema diff and applies changes to local DB +func (s *Session) diffAndApply() error { + applied, err := DiffAndApply(s.ctx, s.fsys, os.Stderr) + if err != nil { + return err + } + if applied { + s.dirty = true + } + return nil +} + +// showDirtyWarning warns if local DB has uncommitted schema changes +func (s *Session) showDirtyWarning() { + if !s.dirty { + return + } + fmt.Fprintf(os.Stderr, "%s Local database has uncommitted schema changes!\n", utils.Yellow("Warning:")) + fmt.Fprintf(os.Stderr, " Run '%s' to create a migration\n", utils.Aqua("supabase db diff -f migration_name")) +} + +// loadSchemaFiles returns all .sql files in the schemas directory +func loadSchemaFiles(fsys afero.Fs) ([]string, error) { + var files []string + err := afero.Walk(fsys, utils.SchemasDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.Mode().IsRegular() && len(path) > 4 && path[len(path)-4:] == ".sql" { + files = append(files, path) + } + return nil + }) + return files, err +} + diff --git a/internal/dev/differ.go b/internal/dev/differ.go new file mode 100644 index 000000000..ccb8e9976 --- /dev/null +++ b/internal/dev/differ.go @@ -0,0 +1,270 @@ +package dev + +import ( + "bytes" + "context" + "fmt" + "io" + "regexp" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/parser" +) + +// DiffResult contains the outcome of a schema diff +type DiffResult struct { + SQL string + HasDrops bool + Drops []string +} + +// 
https://github.com/djrobstep/migra/blob/master/migra/statements.py#L6
// NOTE(review): this pattern is unanchored, so any statement merely
// containing "drop " (e.g. inside a string literal or identifier) will be
// flagged. It is used only to print warnings, so false positives are benign.
var dropStatementPattern = regexp.MustCompile(`(?i)drop\s+`)

// shadowState holds the persistent shadow database state.
// Package-level singleton — assumes a single dev session per process; confirm
// before reusing this package concurrently.
var shadowState = &ShadowState{}

// DiffAndApply computes the diff between declared schemas and local DB, then applies changes.
// Returns true if any changes were applied (marking the session as dirty).
// All human-readable progress goes to w; fine-grained timings go to the
// DEBUG=supabase:dev:timing logger.
func DiffAndApply(ctx context.Context, fsys afero.Fs, w io.Writer) (bool, error) {
	totalStart := time.Now()

	// Step 1: Ensure shadow database is ready (uses template for fast reset)
	fmt.Fprintln(w, "Preparing shadow database...")
	stepStart := time.Now()
	shadowConfig, err := shadowState.EnsureShadowReady(ctx, fsys)
	if err != nil {
		return false, errors.Errorf("failed to prepare shadow database: %w", err)
	}
	timingLog.Printf("Shadow DB ready: %dms", time.Since(stepStart).Milliseconds())

	// Step 2: Apply declared schemas to shadow
	declared, err := loadSchemaFiles(fsys)
	if err != nil {
		return false, err
	}

	if len(declared) > 0 {
		fmt.Fprintln(w, "Applying declared schemas to shadow...")
		stepStart = time.Now()
		if err := shadowState.ApplyDeclaredSchemas(ctx, declared, fsys); err != nil {
			return false, err
		}
		timingLog.Printf("Schemas applied to shadow: %dms", time.Since(stepStart).Milliseconds())
	}

	// Step 3: Diff local DB (current state) vs shadow (desired state) using pg-delta
	localConfig := pgconn.Config{
		Host:     utils.Config.Hostname,
		Port:     utils.Config.Db.Port,
		User:     "postgres",
		Password: utils.Config.Db.Password,
		Database: "postgres",
	}

	fmt.Fprintln(w, "Computing diff with pg-delta...")
	stepStart = time.Now()
	// source = local DB (current state), target = shadow DB (desired state)
	result, err := computeDiffPgDelta(ctx, localConfig, shadowConfig)
	if err != nil {
		return false, errors.Errorf("failed to compute diff: %w", err)
	}
	timingLog.Printf("pg-delta diff: %dms",
time.Since(stepStart).Milliseconds()) + + // Log the computed diff SQL in debug mode + if result.SQL != "" { + sqlLog.Printf("pg-delta computed diff:\n%s", result.SQL) + } + + if result.SQL == "" { + fmt.Fprintf(w, "%s No schema changes detected\n", utils.Green("✓")) + timingLog.Printf("Total: %dms", time.Since(totalStart).Milliseconds()) + return false, nil + } + + // Step 4: Show warnings for DROP statements + if result.HasDrops { + fmt.Fprintf(w, "%s Found DROP statements:\n", utils.Yellow("Warning:")) + for _, drop := range result.Drops { + fmt.Fprintf(w, " %s\n", utils.Yellow(drop)) + } + } + + // Step 5: Apply changes to local database + fmt.Fprintln(w, "Applying changes to local database...") + stepStart = time.Now() + if err := applyDiff(ctx, localConfig, result.SQL); err != nil { + return false, errors.Errorf("failed to apply changes: %w", err) + } + timingLog.Printf("Applied to local DB: %dms", time.Since(stepStart).Milliseconds()) + + fmt.Fprintf(w, "%s Schema changes applied successfully\n", utils.Green("✓")) + showAppliedStatements(w, result.SQL) + + timingLog.Printf("Total: %dms", time.Since(totalStart).Milliseconds()) + return true, nil +} + +// CleanupShadow removes the persistent shadow container +func CleanupShadow(ctx context.Context) { + shadowState.Cleanup(ctx) +} + +// InvalidateShadowTemplate marks the shadow template as needing rebuild +// Call this when migrations change so the shadow rebuilds with new migrations +func InvalidateShadowTemplate() { + shadowState.TemplateReady = false + shadowState.MigrationsHash = "" + timingLog.Printf("Shadow template invalidated - will rebuild on next diff") +} + +const ( + // Bun image for running pg-delta CLI + bunImage = "oven/bun:canary-alpine" + // Volume name for caching Bun packages + bunCacheVolume = "supabase_bun_cache" + // pg-delta package version + pgDeltaPackage = "@supabase/pg-delta@1.0.0-alpha.2" +) + +// computeDiffPgDelta uses pg-delta (via Bun container) to compute the difference +// 
source = current state (local DB), target = desired state (shadow DB) +// +// pg-delta exit codes: +// - 0: No changes detected (stdout: "No changes detected.") +// - 2: Changes detected (stdout: SQL statements) +// - other: Error +func computeDiffPgDelta(ctx context.Context, source, target pgconn.Config) (*DiffResult, error) { + sourceURL := utils.ToPostgresURL(source) + targetURL := utils.ToPostgresURL(target) + + // Build the pg-delta CLI command + cmd := []string{ + "x", pgDeltaPackage, "plan", + "--source", sourceURL, + "--target", targetURL, + "--integration", "supabase", + "--format", "sql", + "--role", "postgres", + } + + var stdout, stderr bytes.Buffer + err := utils.DockerRunOnceWithConfig( + ctx, + container.Config{ + Image: bunImage, + Cmd: cmd, + Env: []string{"BUN_INSTALL_CACHE_DIR=/bun-cache"}, + }, + container.HostConfig{ + Binds: []string{bunCacheVolume + ":/bun-cache:rw"}, + NetworkMode: network.NetworkHost, + }, + network.NetworkingConfig{}, + "", + &stdout, + &stderr, + ) + + // Trim whitespace from output + output := strings.TrimSpace(stdout.String()) + + // Handle pg-delta exit codes: + // - Exit 0: No changes (output may be "No changes detected." 
or similar)
	// - Exit 2: Changes detected (output contains SQL)
	// - Other exits: Real errors
	if err != nil {
		// Check if it's exit code 2 (changes detected) - this is expected.
		// NOTE(review): matching the error *string* for "exit 2" is brittle; if
		// DockerRunOnceWithConfig returns a typed error carrying the exit code,
		// prefer errors.As on that type — confirm what it actually returns.
		if strings.Contains(err.Error(), "exit 2") {
			// Exit 2 means changes were detected, stdout has the SQL
			drops := findDropStatements(output)
			return &DiffResult{
				SQL:      output,
				HasDrops: len(drops) > 0,
				Drops:    drops,
			}, nil
		}
		// Any other error is a real failure
		return nil, errors.Errorf("pg-delta failed: %w\n%s", err, stderr.String())
	}

	// Exit 0: No changes detected
	// Check for "No changes" message or empty output
	if output == "" || strings.Contains(strings.ToLower(output), "no changes") {
		return &DiffResult{
			SQL:      "",
			HasDrops: false,
			Drops:    nil,
		}, nil
	}

	// Exit 0 but has SQL output - treat as changes (shouldn't normally happen)
	drops := findDropStatements(output)
	return &DiffResult{
		SQL:      output,
		HasDrops: len(drops) > 0,
		Drops:    drops,
	}, nil
}

// findDropStatements extracts DROP statements from SQL.
// Statements are split/trimmed first; parse errors are silently treated as
// "no drops" because this feeds warnings only.
func findDropStatements(sql string) []string {
	lines, err := parser.SplitAndTrim(strings.NewReader(sql))
	if err != nil {
		return nil
	}
	var drops []string
	for _, line := range lines {
		if dropStatementPattern.MatchString(line) {
			drops = append(drops, line)
		}
	}
	return drops
}

// applyDiff executes the diff SQL on the target database without recording in migration history.
func applyDiff(ctx context.Context, config pgconn.Config, sql string) error {
	conn, err := utils.ConnectLocalPostgres(ctx, config)
	if err != nil {
		return err
	}
	// Background context so the connection still closes cleanly even if ctx
	// was already cancelled.
	defer conn.Close(context.Background())

	// Parse the SQL into statements
	m, err := migration.NewMigrationFromReader(strings.NewReader(sql))
	if err != nil {
		return errors.Errorf("failed to parse diff SQL: %w", err)
	}

	// Skip inserting to migration history (no version = no history entry)
	m.Version = ""

	// Execute the statements
	return
m.ExecBatch(ctx, conn) +} + +// showAppliedStatements prints the applied SQL statements +func showAppliedStatements(w io.Writer, sql string) { + lines, err := parser.SplitAndTrim(strings.NewReader(sql)) + if err != nil { + return + } + + fmt.Fprintln(w, "Applied:") + for _, line := range lines { + // Skip empty lines and comments + trimmed := strings.TrimSpace(line) + if trimmed == "" || strings.HasPrefix(trimmed, "--") { + continue + } + fmt.Fprintf(w, " %s\n", line) + } +} diff --git a/internal/dev/seed.go b/internal/dev/seed.go new file mode 100644 index 000000000..03ac2bd88 --- /dev/null +++ b/internal/dev/seed.go @@ -0,0 +1,86 @@ +package dev + +import ( + "context" + "fmt" + "os" + "os/exec" + + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +// runSeed executes seeding based on configuration +func (s *Session) runSeed() error { + seedConfig := &utils.Config.Dev.Seed + + if !seedConfig.IsEnabled() { + return nil + } + + // Custom command takes precedence + if seedConfig.OnChange != "" { + return s.runCustomSeed(seedConfig.OnChange) + } + + // Internal seeding using [db.seed] config + return s.runInternalSeed() +} + +// runCustomSeed executes a custom seed command +func (s *Session) runCustomSeed(command string) error { + fmt.Fprintf(os.Stderr, "[dev] Running seed: %s\n", utils.Aqua(command)) + + cmd := exec.CommandContext(s.ctx, "sh", "-c", command) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Dir = utils.CurrentDirAbs + + return cmd.Run() +} + +// runInternalSeed uses the built-in SeedData from pkg/migration +func (s *Session) runInternalSeed() error { + // Check if base seed config is enabled + if !utils.Config.Db.Seed.Enabled { + return nil + } + + // Check if there are any seed paths configured + if len(utils.Config.Db.Seed.SqlPaths) == 0 { + return nil + } + + config := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.Port, + 
User: "postgres", + Password: utils.Config.Db.Password, + Database: "postgres", + } + + conn, err := utils.ConnectLocalPostgres(s.ctx, config) + if err != nil { + return err + } + defer conn.Close(context.Background()) + + seeds, err := migration.GetPendingSeeds( + s.ctx, + utils.Config.Db.Seed.SqlPaths, + conn, + afero.NewIOFS(s.fsys), + ) + if err != nil { + return err + } + + if len(seeds) == 0 { + fmt.Fprintln(os.Stderr, "[dev] No pending seeds") + return nil + } + + return migration.SeedData(s.ctx, seeds, conn, afero.NewIOFS(s.fsys)) +} diff --git a/internal/dev/shadow.go b/internal/dev/shadow.go new file mode 100644 index 000000000..c40a5e5d0 --- /dev/null +++ b/internal/dev/shadow.go @@ -0,0 +1,468 @@ +package dev + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "path/filepath" + "sort" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/migration" +) + +// ShadowState manages a persistent shadow database for fast diffing +type ShadowState struct { + ContainerID string + BaselineRoles []string // Roles after migrations, before declared schemas + TemplateReady bool + MigrationsHash string // Invalidate template if migrations change +} + +// shadowContainerName returns the name for the shadow container +func shadowContainerName() string { + return "supabase_db_" + utils.Config.ProjectId + "_shadow" +} + +// EnsureShadowReady prepares the shadow database for diffing +// Returns the shadow database config for connecting +func (s *ShadowState) EnsureShadowReady(ctx context.Context, fsys afero.Fs) (pgconn.Config, error) { + shadowConfig := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.ShadowPort, + User: "postgres", + Password: 
utils.Config.Db.Password, + Database: "contrib_regression", + } + + // Check if container exists and is healthy + healthy, err := s.isContainerHealthy(ctx) + if err != nil { + return shadowConfig, err + } + + if !healthy { + // Cold start: create container, apply migrations, create template + timingLog.Printf("Shadow container not ready, performing cold start...") + if err := s.coldStart(ctx, fsys); err != nil { + return shadowConfig, err + } + return shadowConfig, nil + } + + // Check if migrations changed (invalidates template) + currentHash, err := s.hashMigrations(fsys) + if err != nil { + return shadowConfig, err + } + + if currentHash != s.MigrationsHash { + timingLog.Printf("Migrations changed, rebuilding template...") + if err := s.rebuildTemplate(ctx, fsys); err != nil { + return shadowConfig, err + } + return shadowConfig, nil + } + + // Fast path: reset from template + timingLog.Printf("Using fast path: reset from template") + if err := s.resetFromTemplate(ctx); err != nil { + return shadowConfig, err + } + + return shadowConfig, nil +} + +// isContainerHealthy checks if the shadow container exists and is healthy +func (s *ShadowState) isContainerHealthy(ctx context.Context) (bool, error) { + if s.ContainerID == "" { + // Try to find existing container by name + containers, err := utils.Docker.ContainerList(ctx, container.ListOptions{All: true}) + if err != nil { + return false, errors.Errorf("failed to list containers: %w", err) + } + + name := "/" + shadowContainerName() + for _, c := range containers { + for _, n := range c.Names { + if n == name { + s.ContainerID = c.ID + break + } + } + } + + if s.ContainerID == "" { + return false, nil + } + } + + // Check if container is running and healthy + inspect, err := utils.Docker.ContainerInspect(ctx, s.ContainerID) + if err != nil { + // Container doesn't exist anymore + s.ContainerID = "" + s.TemplateReady = false + return false, nil + } + + if !inspect.State.Running { + // Container exists but not 
running, start it + if err := utils.Docker.ContainerStart(ctx, s.ContainerID, container.StartOptions{}); err != nil { + return false, errors.Errorf("failed to start shadow container: %w", err) + } + // Wait for healthy + if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, s.ContainerID); err != nil { + return false, errors.Errorf("shadow container unhealthy: %w", err) + } + } + + return s.TemplateReady, nil +} + +// coldStart creates container and builds initial template +func (s *ShadowState) coldStart(ctx context.Context, fsys afero.Fs) error { + // 1. Remove any existing shadow container + if s.ContainerID != "" { + _ = utils.Docker.ContainerRemove(ctx, s.ContainerID, container.RemoveOptions{Force: true}) + } + + // 2. Create and start shadow container with a proper name + name := shadowContainerName() + containerID, err := diff.CreateShadowDatabaseWithName(ctx, utils.Config.Db.ShadowPort, name, false) + if err != nil { + return errors.Errorf("failed to create shadow container: %w", err) + } + s.ContainerID = containerID + timingLog.Printf("Created shadow container: %s (%s)", name, containerID[:12]) + + // 3. Wait for healthy + if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, s.ContainerID); err != nil { + return errors.Errorf("shadow container unhealthy: %w", err) + } + timingLog.Printf("Shadow container started") + + // 4. Apply migrations + if err := diff.MigrateShadowDatabase(ctx, s.ContainerID, fsys); err != nil { + return errors.Errorf("failed to migrate shadow: %w", err) + } + timingLog.Printf("Migrations applied to shadow") + + // 5. Snapshot baseline roles + baselineRoles, err := s.queryRoles(ctx) + if err != nil { + return errors.Errorf("failed to query baseline roles: %w", err) + } + s.BaselineRoles = baselineRoles + timingLog.Printf("Captured %d baseline roles", len(baselineRoles)) + + // 6. 
Create template from current state + if err := s.createTemplate(ctx); err != nil { + return errors.Errorf("failed to create template: %w", err) + } + + // 7. Store migrations hash + hash, err := s.hashMigrations(fsys) + if err != nil { + return err + } + s.MigrationsHash = hash + s.TemplateReady = true + + return nil +} + +// rebuildTemplate rebuilds the template after migrations change +func (s *ShadowState) rebuildTemplate(ctx context.Context, fsys afero.Fs) error { + // Connect and drop existing template + conn, err := s.connectPostgres(ctx) + if err != nil { + return err + } + defer conn.Close(ctx) + + // Drop template if exists + _, _ = conn.Exec(ctx, "DROP DATABASE IF EXISTS shadow_template") + _, _ = conn.Exec(ctx, "DROP DATABASE IF EXISTS contrib_regression") + + // Recreate contrib_regression and apply migrations + _, err = conn.Exec(ctx, "CREATE DATABASE contrib_regression") + if err != nil { + return errors.Errorf("failed to create contrib_regression: %w", err) + } + conn.Close(ctx) + + // Apply migrations + if err := diff.MigrateShadowDatabase(ctx, s.ContainerID, fsys); err != nil { + return errors.Errorf("failed to migrate shadow: %w", err) + } + + // Snapshot baseline roles + baselineRoles, err := s.queryRoles(ctx) + if err != nil { + return err + } + s.BaselineRoles = baselineRoles + + // Create new template + if err := s.createTemplate(ctx); err != nil { + return err + } + + // Update hash + hash, err := s.hashMigrations(fsys) + if err != nil { + return err + } + s.MigrationsHash = hash + s.TemplateReady = true + + return nil +} + +// resetFromTemplate quickly resets the database state +func (s *ShadowState) resetFromTemplate(ctx context.Context) error { + conn, err := s.connectPostgres(ctx) + if err != nil { + return err + } + defer conn.Close(ctx) + + // 1. 
Clean cluster-wide objects created by declared schemas
	// (roles are cluster-scoped, so dropping the database alone won't remove them)
	currentRoles, err := s.queryRolesWithConn(ctx, conn)
	if err != nil {
		return err
	}

	for _, role := range currentRoles {
		if !contains(s.BaselineRoles, role) {
			timingLog.Printf("Dropping role created by declared schema: %s", role)
			// Best-effort: errors deliberately ignored. %q yields a
			// double-quoted SQL identifier for the role name.
			_, _ = conn.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %q", role))
		}
	}

	// 2. Terminate connections to contrib_regression (best-effort; DROP
	// DATABASE fails while sessions are attached)
	_, _ = conn.Exec(ctx, `
		SELECT pg_terminate_backend(pid)
		FROM pg_stat_activity
		WHERE datname = 'contrib_regression' AND pid <> pg_backend_pid()
	`)

	// 3. Reset database from template
	_, err = conn.Exec(ctx, "DROP DATABASE IF EXISTS contrib_regression")
	if err != nil {
		return errors.Errorf("failed to drop contrib_regression: %w", err)
	}

	_, err = conn.Exec(ctx, "CREATE DATABASE contrib_regression TEMPLATE shadow_template")
	if err != nil {
		return errors.Errorf("failed to create from template: %w", err)
	}

	timingLog.Printf("Database reset from template")
	return nil
}

// createTemplate creates the shadow_template database from current state.
// Fails if shadow_template already exists — callers are expected to have
// dropped it (rebuildTemplate) or be running on a fresh container (coldStart).
func (s *ShadowState) createTemplate(ctx context.Context) error {
	conn, err := s.connectPostgres(ctx)
	if err != nil {
		return err
	}
	defer conn.Close(ctx)

	// Terminate connections to contrib_regression before using as template
	// (CREATE DATABASE ... TEMPLATE requires the source to have no other
	// active connections); best-effort, errors ignored.
	_, _ = conn.Exec(ctx, `
		SELECT pg_terminate_backend(pid)
		FROM pg_stat_activity
		WHERE datname = 'contrib_regression' AND pid <> pg_backend_pid()
	`)

	// Create template
	_, err = conn.Exec(ctx, "CREATE DATABASE shadow_template TEMPLATE contrib_regression")
	if err != nil {
		return errors.Errorf("failed to create template: %w", err)
	}

	timingLog.Printf("Template database created")
	return nil
}

// connectPostgres connects to the shadow's postgres database (not contrib_regression)
func (s *ShadowState) connectPostgres(ctx context.Context) (*pgx.Conn, error) {
	config := pgconn.Config{
		Host: utils.Config.Hostname,
Port:     utils.Config.Db.ShadowPort,
		User:     "postgres",
		Password: utils.Config.Db.Password,
		Database: "postgres",
	}
	return utils.ConnectLocalPostgres(ctx, config)
}

// queryRoles returns all non-system roles.
// Convenience wrapper that opens (and closes) its own connection.
func (s *ShadowState) queryRoles(ctx context.Context) ([]string, error) {
	conn, err := s.connectPostgres(ctx)
	if err != nil {
		return nil, err
	}
	defer conn.Close(ctx)
	return s.queryRolesWithConn(ctx, conn)
}

// queryRolesWithConn returns all non-system roles using existing connection.
// "System" roles are those whose name starts with pg_; ordering is fixed so
// results are deterministic.
func (s *ShadowState) queryRolesWithConn(ctx context.Context, conn *pgx.Conn) ([]string, error) {
	rows, err := conn.Query(ctx, `
		SELECT rolname FROM pg_roles
		WHERE rolname NOT LIKE 'pg_%'
		ORDER BY rolname
	`)
	if err != nil {
		return nil, errors.Errorf("failed to query roles: %w", err)
	}
	defer rows.Close()

	var roles []string
	for rows.Next() {
		var role string
		if err := rows.Scan(&role); err != nil {
			return nil, err
		}
		roles = append(roles, role)
	}
	return roles, rows.Err()
}

// hashMigrations computes a hash of all migration files.
// The hash covers file names and contents (sorted by name) plus seed files,
// and is used to detect when the shadow template must be rebuilt.
func (s *ShadowState) hashMigrations(fsys afero.Fs) (string, error) {
	h := sha256.New()

	migrationsDir := filepath.Join(utils.SupabaseDirPath, "migrations")
	files, err := afero.ReadDir(fsys, migrationsDir)
	if err != nil {
		// No migrations directory is valid.
		// NOTE(review): this also swallows other ReadDir failures (e.g.
		// permissions) and returns the empty hash — confirm that is intended.
		return hex.EncodeToString(h.Sum(nil)), nil
	}

	// Sort files by name for consistent ordering
	sort.Slice(files, func(i, j int) bool {
		return files[i].Name() < files[j].Name()
	})

	for _, f := range files {
		if f.IsDir() {
			continue
		}
		content, err := afero.ReadFile(fsys, filepath.Join(migrationsDir, f.Name()))
		if err != nil {
			return "", errors.Errorf("failed to read migration %s: %w", f.Name(), err)
		}
		h.Write([]byte(f.Name()))
		h.Write(content)
	}

	// Also include seed files that affect shadow state
	// (best-effort: read errors here are deliberately ignored)
	seedDir := filepath.Join(utils.SupabaseDirPath, "seed")
	seedFiles, _ := afero.ReadDir(fsys,
seedDir) + for _, f := range seedFiles { + if f.IsDir() { + continue + } + content, _ := afero.ReadFile(fsys, filepath.Join(seedDir, f.Name())) + h.Write([]byte(f.Name())) + h.Write(content) + } + + return hex.EncodeToString(h.Sum(nil)), nil +} + +// Cleanup removes the shadow container +func (s *ShadowState) Cleanup(ctx context.Context) { + // Use a fresh context with timeout for cleanup - the original context + // may be cancelled (e.g., from Ctrl+C) which would cause Docker API calls to fail + cleanupCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // If we have the container ID in memory, use it + if s.ContainerID != "" { + timingLog.Printf("Cleaning up shadow container: %s", s.ContainerID) + if err := utils.Docker.ContainerRemove(cleanupCtx, s.ContainerID, container.RemoveOptions{Force: true}); err != nil { + timingLog.Printf("Failed to remove shadow container: %v", err) + } else { + timingLog.Printf("Shadow container removed successfully") + } + s.ContainerID = "" + s.TemplateReady = false + return + } + + // Otherwise, look up the container by name + name := shadowContainerName() + containers, err := utils.Docker.ContainerList(cleanupCtx, container.ListOptions{All: true}) + if err != nil { + timingLog.Printf("Failed to list containers for cleanup: %v", err) + return + } + + expectedName := "/" + name + for _, c := range containers { + for _, n := range c.Names { + if n == expectedName { + timingLog.Printf("Found shadow container by name, removing: %s", c.ID) + if err := utils.Docker.ContainerRemove(cleanupCtx, c.ID, container.RemoveOptions{Force: true}); err != nil { + timingLog.Printf("Failed to remove shadow container: %v", err) + } else { + timingLog.Printf("Shadow container removed successfully") + } + s.TemplateReady = false + return + } + } + } +} + +// ApplyDeclaredSchemas applies declared schema files to the shadow database +func (s *ShadowState) ApplyDeclaredSchemas(ctx context.Context, schemas []string, 
fsys afero.Fs) error { + if len(schemas) == 0 { + return nil + } + + config := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.ShadowPort, + User: "postgres", + Password: utils.Config.Db.Password, + Database: "contrib_regression", + } + + conn, err := utils.ConnectLocalPostgres(ctx, config) + if err != nil { + return errors.Errorf("failed to connect to shadow: %w", err) + } + defer conn.Close(ctx) + + if err := migration.SeedGlobals(ctx, schemas, conn, afero.NewIOFS(fsys)); err != nil { + return errors.Errorf("failed to apply declared schemas: %w", err) + } + + return nil +} + +// contains checks if a slice contains a string +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/internal/dev/validator.go b/internal/dev/validator.go new file mode 100644 index 000000000..612540038 --- /dev/null +++ b/internal/dev/validator.go @@ -0,0 +1,94 @@ +package dev + +import ( + "fmt" + + "github.com/go-errors/errors" + pg_query "github.com/pganalyze/pg_query_go/v6" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" +) + +// ValidationError represents a SQL syntax error with location info +type ValidationError struct { + File string + Line int + Column int + Message string +} + +func (e *ValidationError) Error() string { + return fmt.Sprintf("Syntax error in %s\n Line %d, Column %d: %s\n Waiting for valid SQL...", + e.File, e.Line, e.Column, e.Message) +} + +// ValidateSchemaFiles validates the SQL syntax of all schema files +// Returns nil if all files are valid, or the first error encountered +func ValidateSchemaFiles(files []string, fsys afero.Fs) error { + for _, file := range files { + if err := validateFile(file, fsys); err != nil { + return err + } + } + return nil +} + +// validateFile validates a single SQL file using pg_query (Postgres's actual parser) +func validateFile(path string, fsys afero.Fs) error { + content, err := 
afero.ReadFile(fsys, path) + if err != nil { + return errors.Errorf("failed to read %s: %w", path, err) + } + + // Empty files are valid + if len(content) == 0 { + return nil + } + + // Parse the SQL using pg_query (Postgres's actual parser) + _, err = pg_query.Parse(string(content)) + if err != nil { + return parseError(path, string(content), err) + } + + fmt.Fprintf(utils.GetDebugLogger(), "Validated: %s\n", path) + return nil +} + +// parseError converts a pg_query error into a ValidationError with location info +func parseError(file, content string, err error) error { + errMsg := err.Error() + + // Default to line 1, column 1 if we can't parse the position + line := 1 + column := 1 + + // Try to extract position from error message + // pg_query errors look like: "syntax error at or near \"xyz\" at position 123" + var pos int + if n, _ := fmt.Sscanf(errMsg, "syntax error at or near %*s at position %d", &pos); n == 1 && pos > 0 { + line, column = offsetToLineCol(content, pos) + } + + return &ValidationError{ + File: file, + Line: line, + Column: column, + Message: errMsg, + } +} + +// offsetToLineCol converts a byte offset to line and column numbers (1-indexed) +func offsetToLineCol(content string, offset int) (line, col int) { + line = 1 + col = 1 + for i := 0; i < len(content) && i < offset; i++ { + if content[i] == '\n' { + line++ + col = 1 + } else { + col++ + } + } + return line, col +} diff --git a/internal/dev/watcher.go b/internal/dev/watcher.go new file mode 100644 index 000000000..f50a773ee --- /dev/null +++ b/internal/dev/watcher.go @@ -0,0 +1,343 @@ +package dev + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "slices" + "strings" + "time" + + "github.com/bmatcuk/doublestar/v4" + "github.com/fsnotify/fsnotify" + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/config" +) + +const ( + // Default debounce duration for file changes + defaultDebounceDuration = 500 * 
time.Millisecond + restartEvents = fsnotify.Write | fsnotify.Create | fsnotify.Remove | fsnotify.Rename + + // Internal glob pattern for migrations (always watched) + migrationsGlob = "migrations/*.sql" +) + +var ( + // Directories to ignore + ignoredDirNames = []string{ + ".git", + "node_modules", + ".vscode", + ".idea", + ".DS_Store", + } + + // Patterns for ignoring file events + ignoredFilePatterns = []struct { + Prefix string + Suffix string + }{ + {Suffix: "~"}, // Common backup files + {Prefix: ".", Suffix: ".swp"}, // Vim swap files + {Prefix: ".", Suffix: ".swx"}, // Vim swap files (extended) + {Suffix: ".tmp"}, // Generic temp files + {Prefix: ".#"}, // Emacs lock files + } +) + +// SchemaWatcher watches for file changes based on configured glob patterns +type SchemaWatcher struct { + fsys afero.Fs + watcher *fsnotify.Watcher + restartTimer *time.Timer + RestartCh <-chan time.Time + ErrCh <-chan error + watchGlobs config.Glob // Glob patterns to match schema files + seedGlobs config.Glob // Glob patterns to match seed files + migrationsChanged bool // Track if migrations changed since last check + seedsChanged bool // Track if seeds changed since last check +} + +// NewSchemaWatcher creates a new watcher for the configured watch patterns +func NewSchemaWatcher(fsys afero.Fs, watchGlobs, seedGlobs config.Glob) (*SchemaWatcher, error) { + restartTimer := time.NewTimer(defaultDebounceDuration) + if !restartTimer.Stop() { + return nil, errors.New("failed to initialise timer") + } + + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, errors.Errorf("failed to create file watcher: %w", err) + } + + sw := &SchemaWatcher{ + fsys: fsys, + watcher: watcher, + restartTimer: restartTimer, + RestartCh: restartTimer.C, + ErrCh: watcher.Errors, + watchGlobs: watchGlobs, + seedGlobs: seedGlobs, + } + + // Add directories containing watched files + if err := sw.addWatchPaths(fsys); err != nil { + watcher.Close() + return nil, err + } + + return sw, nil 
+} + +// addWatchPaths adds directories that may contain watched files +func (w *SchemaWatcher) addWatchPaths(fsys afero.Fs) error { + // Extract base directories from glob patterns + dirs := make(map[string]struct{}) + for _, pattern := range w.watchGlobs { + // Get the base directory before any glob characters + baseDir := getGlobBaseDir(pattern) + if baseDir == "" { + baseDir = "." + } + // Make relative to supabase dir + fullPath := filepath.Join(utils.SupabaseDirPath, baseDir) + dirs[fullPath] = struct{}{} + } + + // Add seed directories (seed globs are already absolute paths from config resolution) + for _, pattern := range w.seedGlobs { + baseDir := getGlobBaseDir(pattern) + if baseDir == "" { + baseDir = "." + } + dirs[baseDir] = struct{}{} + } + + // Always watch migrations directory (internal, not user-configurable) + dirs[utils.MigrationsDir] = struct{}{} + + // Add each unique directory and its subdirectories + for dir := range dirs { + if err := w.watchDirRecursive(fsys, dir); err != nil { + // Skip if directory doesn't exist (will be created later) + if errors.Is(err, os.ErrNotExist) { + watcherLog.Printf("Watch directory does not exist (yet): %s", dir) + continue + } + return err + } + } + + return nil +} + +// watchDirRecursive adds a directory and all subdirectories to the watcher +func (w *SchemaWatcher) watchDirRecursive(fsys afero.Fs, rootDir string) error { + return afero.Walk(fsys, rootDir, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip ignored directories + if info.IsDir() { + if slices.Contains(ignoredDirNames, filepath.Base(path)) { + return filepath.SkipDir + } + if err := w.watcher.Add(path); err != nil { + return errors.Errorf("failed to watch directory %s: %w", path, err) + } + watcherLog.Printf("Watching directory: %s", path) + } + + return nil + }) +} + +// getGlobBaseDir extracts the base directory from a glob pattern +// e.g., "schemas/**/*.sql" -> "schemas" +// e.g., 
"src/db/schema/**/*.ts" -> "src/db/schema" +func getGlobBaseDir(pattern string) string { + // Find first glob metacharacter + for i, c := range pattern { + if c == '*' || c == '?' || c == '[' || c == '{' { + // Return directory up to this point + dir := filepath.Dir(pattern[:i]) + if dir == "." && i > 0 { + // Handle case like "schemas/**" where we want "schemas" + if idx := strings.LastIndex(pattern[:i], string(filepath.Separator)); idx >= 0 { + return pattern[:idx] + } + return pattern[:i] + } + return dir + } + } + // No glob characters, return the directory part + return filepath.Dir(pattern) +} + +// Start begins watching for file changes +func (w *SchemaWatcher) Start() { + for { + event, ok := <-w.watcher.Events + if !ok { + return + } + + // Handle new directory creation - add it to the watcher + if event.Has(fsnotify.Create) { + if info, err := os.Stat(event.Name); err == nil && info.IsDir() { + if !slices.Contains(ignoredDirNames, filepath.Base(event.Name)) { + if err := w.watcher.Add(event.Name); err == nil { + watcherLog.Printf("Added new directory to watch: %s", event.Name) + } + } + continue + } + } + + if isIgnoredFileEvent(event) { + watcherLog.Printf("Ignoring file event: %s (%s)", event.Name, event.Op.String()) + continue + } + + // Check if the file matches any of our watch globs + if !w.matchesWatchGlobs(event.Name) { + watcherLog.Printf("File does not match watch patterns: %s", event.Name) + continue + } + + // Skip empty files (e.g., newly created files with no content yet) + if w.isEmptyFile(event.Name) { + watcherLog.Printf("Skipping empty file: %s", event.Name) + continue + } + + // Track the type of file change + if w.isMigrationFile(event.Name) { + w.migrationsChanged = true + fmt.Fprintf(os.Stderr, "[dev] Migration change detected: %s\n", event.Name) + } else if w.isSeedFile(event.Name) { + w.seedsChanged = true + fmt.Fprintf(os.Stderr, "[dev] Seed file change detected: %s\n", event.Name) + } else { + fmt.Fprintf(os.Stderr, "[dev] Schema 
change detected: %s\n", event.Name) + } + + // Fire immediately when timer is inactive, without blocking this thread + if active := w.restartTimer.Reset(0); active { + w.restartTimer.Reset(defaultDebounceDuration) + } + } +} + +// matchesWatchGlobs checks if a file path matches any of the configured glob patterns +// or the internal migrations pattern +func (w *SchemaWatcher) matchesWatchGlobs(filePath string) bool { + // Convert to relative path from supabase directory for matching + relPath, err := filepath.Rel(utils.SupabaseDirPath, filePath) + if err != nil { + relPath = filePath + } + + // Check schema patterns + for _, pattern := range w.watchGlobs { + matched, err := doublestar.Match(pattern, relPath) + if err != nil { + watcherLog.Printf("Invalid glob pattern %s: %v", pattern, err) + continue + } + if matched { + return true + } + } + + // Check seed patterns (seed globs are already absolute paths from config resolution) + for _, pattern := range w.seedGlobs { + matched, err := doublestar.Match(pattern, filePath) + if err != nil { + watcherLog.Printf("Invalid glob pattern %s: %v", pattern, err) + continue + } + if matched { + return true + } + } + + // Check migrations pattern (always watched internally) + if matched, _ := doublestar.Match(migrationsGlob, relPath); matched { + return true + } + + return false +} + +// isMigrationFile checks if a file path is a migration file +func (w *SchemaWatcher) isMigrationFile(filePath string) bool { + relPath, err := filepath.Rel(utils.SupabaseDirPath, filePath) + if err != nil { + relPath = filePath + } + matched, _ := doublestar.Match(migrationsGlob, relPath) + return matched +} + +// MigrationsChanged returns true if migrations changed since last check and resets the flag +func (w *SchemaWatcher) MigrationsChanged() bool { + changed := w.migrationsChanged + w.migrationsChanged = false + return changed +} + +// isSeedFile checks if a file path matches seed patterns +func (w *SchemaWatcher) isSeedFile(filePath 
string) bool { + for _, pattern := range w.seedGlobs { + matched, _ := doublestar.Match(pattern, filePath) + if matched { + return true + } + } + return false +} + +// SeedsChanged returns true if seeds changed since last check and resets the flag +func (w *SchemaWatcher) SeedsChanged() bool { + changed := w.seedsChanged + w.seedsChanged = false + return changed +} + +// Close stops the watcher and releases resources +func (w *SchemaWatcher) Close() error { + return w.watcher.Close() +} + +// isEmptyFile checks if a file has no content (or only whitespace) +func (w *SchemaWatcher) isEmptyFile(path string) bool { + content, err := afero.ReadFile(w.fsys, path) + if err != nil { + // File might have been deleted, let the event through + return false + } + return len(strings.TrimSpace(string(content))) == 0 +} + +// isIgnoredFileEvent checks if a file event should be ignored +func isIgnoredFileEvent(event fsnotify.Event) bool { + if !event.Has(restartEvents) { + return true + } + + baseName := filepath.Base(event.Name) + for _, p := range ignoredFilePatterns { + if strings.HasPrefix(baseName, p.Prefix) && strings.HasSuffix(baseName, p.Suffix) { + return true + } + } + + return false +} diff --git a/internal/utils/docker.go b/internal/utils/docker.go index 6da0d5aa6..50d900169 100644 --- a/internal/utils/docker.go +++ b/internal/utils/docker.go @@ -199,8 +199,13 @@ func GetRegistryImageUrl(imageName string) string { if registry == "docker.io" { return imageName } - // Configure mirror registry + // Check if this is a third-party image (not supabase/) + // Third-party images like "oven/bun:tag" should be pulled from Docker Hub directly parts := strings.Split(imageName, "/") + if len(parts) >= 2 && parts[0] != "supabase" { + return imageName + } + // Configure mirror registry for Supabase images imageName = parts[len(parts)-1] return registry + "/supabase/" + imageName } diff --git a/internal/utils/logger.go b/internal/utils/logger.go index 0cde1df38..aca381919 100644 --- 
a/internal/utils/logger.go +++ b/internal/utils/logger.go @@ -1,8 +1,11 @@ package utils import ( + "fmt" "io" "os" + "strings" + "sync" "github.com/spf13/viper" ) @@ -13,3 +16,110 @@ func GetDebugLogger() io.Writer { } return io.Discard } + +// DebugLogger provides namespaced debug logging similar to the Node.js debug package. +// Enable via DEBUG environment variable with patterns: +// - DEBUG=supabase:dev:timing - only timing logs +// - DEBUG=supabase:dev:* - all dev logs +// - DEBUG=supabase:* - all supabase logs +// - DEBUG=* - all debug logs +type DebugLogger struct { + namespace string + enabled bool +} + +var ( + debugPatterns []string + debugPatternsOnce sync.Once +) + +// loadDebugPatterns parses the DEBUG environment variable once +func loadDebugPatterns() { + debugPatternsOnce.Do(func() { + debug := os.Getenv("DEBUG") + if debug == "" { + return + } + for _, pattern := range strings.Split(debug, ",") { + pattern = strings.TrimSpace(pattern) + if pattern != "" { + debugPatterns = append(debugPatterns, pattern) + } + } + }) +} + +// NewDebugLogger creates a namespaced debug logger. 
+// The namespace should use colon-separated segments, e.g., "supabase:dev:timing" +func NewDebugLogger(namespace string) *DebugLogger { + loadDebugPatterns() + return &DebugLogger{ + namespace: namespace, + enabled: isDebugEnabled(namespace), + } +} + +// isDebugEnabled checks if a namespace matches any DEBUG pattern +func isDebugEnabled(namespace string) bool { + for _, pattern := range debugPatterns { + if matchDebugPattern(pattern, namespace) { + return true + } + } + return false +} + +// matchDebugPattern checks if a namespace matches a debug pattern +// Supports * as wildcard at the end of a pattern +func matchDebugPattern(pattern, namespace string) bool { + // Exact match + if pattern == namespace { + return true + } + // Wildcard match: "supabase:*" matches "supabase:dev:timing" + if strings.HasSuffix(pattern, "*") { + prefix := strings.TrimSuffix(pattern, "*") + return strings.HasPrefix(namespace, prefix) + } + return false +} + +// Printf prints a formatted debug message if the namespace is enabled +func (d *DebugLogger) Printf(format string, args ...interface{}) { + if d.enabled { + fmt.Fprintf(os.Stderr, "[%s] %s\n", d.namespace, fmt.Sprintf(format, args...)) + } +} + +// Println prints a debug message if the namespace is enabled +func (d *DebugLogger) Println(args ...interface{}) { + if d.enabled { + fmt.Fprintf(os.Stderr, "[%s] %s\n", d.namespace, fmt.Sprint(args...)) + } +} + +// Writer returns an io.Writer that writes to stderr if enabled, otherwise discards +func (d *DebugLogger) Writer() io.Writer { + if d.enabled { + return &debugWriter{namespace: d.namespace} + } + return io.Discard +} + +// Enabled returns whether this logger is enabled +func (d *DebugLogger) Enabled() bool { + return d.enabled +} + +// debugWriter wraps writes with namespace prefix +type debugWriter struct { + namespace string +} + +func (w *debugWriter) Write(p []byte) (n int, err error) { + msg := strings.TrimSpace(string(p)) + if msg != "" { + fmt.Fprintf(os.Stderr, "[%s] 
%s\n", w.namespace, msg) + } + return len(p), nil +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 8cf2e4a27..1cc1748f7 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -151,6 +151,7 @@ type ( Functions FunctionConfig `toml:"functions"` Analytics analytics `toml:"analytics"` Experimental experimental `toml:"experimental"` + Dev dev `toml:"dev"` } config struct { @@ -245,6 +246,30 @@ type ( Webhooks *webhooks `toml:"webhooks"` Inspect inspect `toml:"inspect"` } + + // Dev workflow configuration + dev struct { + Schemas devSchemas `toml:"schemas"` + Functions devFunctions `toml:"functions"` + Seed devSeed `toml:"seed"` + } + + devSchemas struct { + Enabled *bool `toml:"enabled"` + Watch Glob `toml:"watch"` + OnChange string `toml:"on_change"` + Types string `toml:"types"` + } + + devFunctions struct { + Enabled *bool `toml:"enabled"` + Watch Glob `toml:"watch"` + } + + devSeed struct { + Enabled *bool `toml:"enabled"` + OnChange string `toml:"on_change"` + } ) func (a *auth) Clone() auth { @@ -418,6 +443,14 @@ func NewConfig(editors ...ConfigEditor) config { EdgeRuntime: edgeRuntime{ Image: Images.EdgeRuntime, }, + Dev: dev{ + Schemas: devSchemas{ + Watch: Glob{"schemas/**/*.sql"}, + }, + Functions: devFunctions{ + Watch: Glob{"functions/**/*.ts"}, + }, + }, }} for _, apply := range editors { apply(&initial) @@ -1616,3 +1649,27 @@ func (e *experimental) validate() error { } return nil } + +// IsEnabled returns whether the schemas workflow is enabled (defaults to true) +func (d *devSchemas) IsEnabled() bool { + if d.Enabled == nil { + return true + } + return *d.Enabled +} + +// IsEnabled returns whether the functions workflow is enabled (defaults to true) +func (d *devFunctions) IsEnabled() bool { + if d.Enabled == nil { + return true + } + return *d.Enabled +} + +// IsEnabled returns whether the seed workflow is enabled (defaults to true) +func (d *devSeed) IsEnabled() bool { + if d.Enabled == nil { + return true + } + return 
*d.Enabled +} From e775f7c372736e44b2553ac65975b6e8cb48d677 Mon Sep 17 00:00:00 2001 From: Julien Goux Date: Wed, 28 Jan 2026 15:04:15 +0100 Subject: [PATCH 2/4] gofmt --- internal/dev/debug.go | 9 +++++---- internal/dev/dev.go | 2 -- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/internal/dev/debug.go b/internal/dev/debug.go index 97bb289ae..3b0c29cf1 100644 --- a/internal/dev/debug.go +++ b/internal/dev/debug.go @@ -4,10 +4,11 @@ import "github.com/supabase/cli/internal/utils" // Namespaced debug loggers for the dev package // Enable via DEBUG environment variable: -// DEBUG=supabase:dev:* - all dev logs -// DEBUG=supabase:dev:timing - only timing logs -// DEBUG=supabase:dev:watcher - only watcher logs -// DEBUG=supabase:dev:sql - SQL statements being executed +// +// DEBUG=supabase:dev:* - all dev logs +// DEBUG=supabase:dev:timing - only timing logs +// DEBUG=supabase:dev:watcher - only watcher logs +// DEBUG=supabase:dev:sql - SQL statements being executed var ( timingLog = utils.NewDebugLogger("supabase:dev:timing") watcherLog = utils.NewDebugLogger("supabase:dev:watcher") diff --git a/internal/dev/dev.go b/internal/dev/dev.go index c5202aef9..8b049b901 100644 --- a/internal/dev/dev.go +++ b/internal/dev/dev.go @@ -66,7 +66,6 @@ func NewSession(ctx context.Context, fsys afero.Fs) *Session { } } - // Run starts the dev session main loop func (s *Session) Run() error { schemasConfig := &utils.Config.Dev.Schemas @@ -389,4 +388,3 @@ func loadSchemaFiles(fsys afero.Fs) ([]string, error) { }) return files, err } - From 0b92817a142c55f080516fb0892a820ec7c31ff2 Mon Sep 17 00:00:00 2001 From: Julien Goux Date: Wed, 28 Jan 2026 15:43:06 +0100 Subject: [PATCH 3/4] seed --- cmd/dev.go | 51 +++++++++++++++++++++--------- internal/dev/seed.go | 75 +++++++++++++++++++++++++++++++++++++------- 2 files changed, 100 insertions(+), 26 deletions(-) diff --git a/cmd/dev.go b/cmd/dev.go index 651546c25..f700e3fba 100644 --- a/cmd/dev.go +++ b/cmd/dev.go @@ -9,21 
+9,42 @@ import ( var devCmd = &cobra.Command{ GroupID: groupLocalDev, Use: "dev", - Short: "Start reactive development mode with auto-schema sync", - Long: `Start a development session that watches for schema changes -and automatically applies them to your local database. - -This command: -- Starts the local database if not running -- Watches supabase/schemas/ for changes -- Automatically diffs and applies schema changes -- Does NOT create migration files (use 'supabase db diff -f' for that) - -Enable debug logging with DEBUG environment variable: - DEBUG=supabase:dev:* - all dev logs - DEBUG=supabase:dev:timing - timing information - DEBUG=supabase:dev:watcher - file watcher logs - DEBUG=supabase:dev:sql - SQL statements being executed + Short: "Start reactive development mode with multiple workflows", + Long: `Start a development session that watches for file changes and +automatically applies them to your local environment. + +WORKFLOWS: + + schemas Watch schema files and auto-apply changes to local database + Configure via [dev.schemas] in config.toml + + seed Auto-run seeds on startup and when seed files change + Configure via [dev.seed] in config.toml + + functions (coming soon) Watch and auto-deploy edge functions + +The dev command starts the local database if not running, then enables +all configured workflows. Schema changes are applied directly without +creating migration files - use 'supabase db diff -f' when ready to commit. 
+ +CONFIGURATION: + + [dev.schemas] + enabled = true # Enable schema workflow (default: true) + watch = ["schemas/**/*.sql"] # Glob patterns to watch + on_change = "" # Custom command (e.g., "npx drizzle-kit push") + types = "src/types/database.ts" # Auto-generate TypeScript types + + [dev.seed] + enabled = true # Enable seed workflow (default: true) + on_change = "" # Custom command (e.g., "npx prisma db seed") + +DEBUG LOGGING: + + DEBUG=supabase:dev:* All dev logs + DEBUG=supabase:dev:timing Timing information + DEBUG=supabase:dev:watcher File watcher logs + DEBUG=supabase:dev:sql SQL statements being executed Press Ctrl+C to stop the development session.`, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/internal/dev/seed.go b/internal/dev/seed.go index 03ac2bd88..4c1e23c0e 100644 --- a/internal/dev/seed.go +++ b/internal/dev/seed.go @@ -6,10 +6,13 @@ import ( "os" "os/exec" + "github.com/go-errors/errors" "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/pkg/migration" + "github.com/supabase/cli/pkg/parser" ) // runSeed executes seeding based on configuration @@ -41,7 +44,10 @@ func (s *Session) runCustomSeed(command string) error { return cmd.Run() } -// runInternalSeed uses the built-in SeedData from pkg/migration +// runInternalSeed always executes seed files in dev mode. +// Unlike pkg/migration.SeedData which skips already-applied seeds (only updating the hash), +// this function always re-executes the SQL. This is the expected behavior for dev mode +// where users want their seed changes to be applied immediately. 
func (s *Session) runInternalSeed() error { // Check if base seed config is enabled if !utils.Config.Db.Seed.Enabled { @@ -67,20 +73,67 @@ func (s *Session) runInternalSeed() error { } defer conn.Close(context.Background()) - seeds, err := migration.GetPendingSeeds( - s.ctx, - utils.Config.Db.Seed.SqlPaths, - conn, - afero.NewIOFS(s.fsys), - ) - if err != nil { + // Create seed table if needed (for hash tracking) + if err := migration.CreateSeedTable(s.ctx, conn); err != nil { return err } - if len(seeds) == 0 { - fmt.Fprintln(os.Stderr, "[dev] No pending seeds") + // Get seed file paths from config + seedPaths, err := utils.Config.Db.Seed.SqlPaths.Files(afero.NewIOFS(s.fsys)) + if err != nil { + fmt.Fprintln(os.Stderr, "WARN:", err) + } + + if len(seedPaths) == 0 { + fmt.Fprintln(os.Stderr, "[dev] No seed files found") return nil } - return migration.SeedData(s.ctx, seeds, conn, afero.NewIOFS(s.fsys)) + // For dev mode: always execute all seed files (don't use GetPendingSeeds) + for _, seedPath := range seedPaths { + fmt.Fprintf(os.Stderr, "Seeding data from %s...\n", seedPath) + + // Create seed file with hash + seed, err := migration.NewSeedFile(seedPath, afero.NewIOFS(s.fsys)) + if err != nil { + return err + } + + // Always execute the seed SQL (not just update hash like SeedData does for dirty seeds) + if err := executeSeedForDev(s.ctx, conn, seed, s.fsys); err != nil { + return err + } + } + + return nil +} + +// executeSeedForDev always executes the seed SQL and updates the hash. +// This differs from SeedFile.ExecBatchWithCache which skips SQL execution for "dirty" seeds. 
+func executeSeedForDev(ctx context.Context, conn *pgx.Conn, seed *migration.SeedFile, fsys afero.Fs) error { + // Open and parse the seed file + f, err := fsys.Open(seed.Path) + if err != nil { + return errors.Errorf("failed to open seed file: %w", err) + } + defer f.Close() + + lines, err := parser.SplitAndTrim(f) + if err != nil { + return errors.Errorf("failed to parse seed file: %w", err) + } + + // Build batch: all SQL statements + hash update + batch := pgx.Batch{} + for _, line := range lines { + batch.Queue(line) + } + // Update hash in seed_files table + batch.Queue(migration.UPSERT_SEED_FILE, seed.Path, seed.Hash) + + if err := conn.SendBatch(ctx, &batch).Close(); err != nil { + return errors.Errorf("failed to execute seed: %w", err) + } + + return nil } From 46bfe9ba6f88332f655d0e9ab9912086cbf5d037 Mon Sep 17 00:00:00 2001 From: Julien Goux Date: Thu, 29 Jan 2026 11:34:12 +0100 Subject: [PATCH 4/4] drift detection in push and onboarding flow in dev --- PLAN.md | 1072 +++++-------------------- PULL_REQUEST.md | 236 +++++- SEED.md | 155 ++++ cmd/db.go | 12 +- cmd/dev.go | 20 +- internal/bootstrap/bootstrap.go | 3 +- internal/db/push/drift.go | 202 +++++ internal/db/push/push.go | 53 +- internal/db/push/push_test.go | 18 +- internal/dev/dev.go | 31 +- internal/dev/onboarding/conflict.go | 44 + internal/dev/onboarding/detect.go | 67 ++ internal/dev/onboarding/flows.go | 134 ++++ internal/dev/onboarding/onboarding.go | 113 +++ 14 files changed, 1228 insertions(+), 932 deletions(-) create mode 100644 SEED.md create mode 100644 internal/db/push/drift.go create mode 100644 internal/dev/onboarding/conflict.go create mode 100644 internal/dev/onboarding/detect.go create mode 100644 internal/dev/onboarding/flows.go create mode 100644 internal/dev/onboarding/onboarding.go diff --git a/PLAN.md b/PLAN.md index 1bafb6abe..d5d7e02ef 100644 --- a/PLAN.md +++ b/PLAN.md @@ -1,983 +1,269 @@ -# Plan: Supabase CLI `dev` Command - Declarative Schema Workflow +# Plan: 
Streamlined `supabase dev` Onboarding Experience -## Overview +## Vision -Implement a new `supabase dev` command that provides a reactive development experience. The first workflow watches `supabase/schemas/` for changes and automatically applies them to the local database **without creating migration files**. - -**Core principle**: Migrations are an implementation detail for deployment. During development, users just want to evolve their schema and see changes reflected quickly. - -## Architecture - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ supabase dev │ -├─────────────────────────────────────────────────────────────────┤ -│ DevSession │ -│ ├── Ensures local DB is running (starts if needed) │ -│ ├── Coordinates multiple watchers │ -│ └── Manages graceful shutdown │ -├─────────────────────────────────────────────────────────────────┤ -│ SchemaWatcher (first workflow) │ -│ ├── Watches supabase/schemas/*.sql │ -│ ├── Debounces file changes (500ms) │ -│ └── Triggers validation → diff → apply pipeline │ -├─────────────────────────────────────────────────────────────────┤ -│ SQLValidator (pre-diff gate) │ -│ ├── Uses pg_query_go (libpg_query bindings) │ -│ ├── Validates ALL .sql files in schemas folder │ -│ └── Blocks diff if any file has syntax errors │ -├─────────────────────────────────────────────────────────────────┤ -│ DevDiffer │ -│ ├── Uses pg-delta CLI (via Bun in Docker) │ -│ ├── Compares schema files vs local DB │ -│ └── Detects DROP statements │ -├─────────────────────────────────────────────────────────────────┤ -│ Applier │ -│ ├── Executes SQL directly (no migration file) │ -│ ├── Uses transactions when possible │ -│ └── Shows warnings for destructive changes │ -├─────────────────────────────────────────────────────────────────┤ -│ DevState │ -│ ├── Tracks "dirty" state (local differs from migrations) │ -│ └── Warns on exit if uncommitted changes exist │ 
-└─────────────────────────────────────────────────────────────────┘ -``` - -### Pipeline Flow - -``` -File save → Debounce (500ms) → Validate ALL *.sql → Diff → Apply - │ - ↓ (if invalid) - Show error: - "Syntax error in users.sql:15:23 - unexpected token 'TABL'" - Wait for next save... -``` - -## Key Design Decisions - -### 1. When to Diff (Handling Mid-Edit Saves) - -- **500ms debounce** - Batch rapid saves (reuse existing pattern from `internal/functions/serve/watcher.go`) -- **SQL validation gate** - After debounce, validate ALL .sql files in schemas folder using Postgres parser before diffing -- **Non-blocking errors** - Parse/diff errors don't crash the watcher; just log and wait for fix - -### 1.1 SQL Validation Step (Pre-Diff Gate) - -After debounce fires, before running the diff: - -``` -File change detected → Debounce (500ms) → Validate ALL schemas/*.sql → Diff & Apply - ↓ - If any file invalid: - - Show error with filename + line - - Skip diff entirely - - Wait for next file change -``` - -**Why validate all files, not just the changed one?** -- The diff applies ALL schema files to a shadow DB -- If any file is invalid, the diff will fail anyway -- Validating all files gives immediate feedback about the actual problem - -**Implementation options for Postgres parser:** -1. **pg_query_go** (recommended) - Go bindings to libpg_query (Postgres's actual parser) - - No DB connection needed - - Exact same parser Postgres uses - - Returns detailed error position -2. **Local DB validation** - Connect to local Postgres and use `PREPARE` or parse via function - - Requires DB to be running - - Adds network round-trip latency - -### 2. 
Migration-less Local Development - -- Changes are applied directly to local DB using `ExecBatch` (reuse from `pkg/migration/file.go`) -- **No version recorded** in `schema_migrations` table -- User runs `supabase db diff -f migration_name` when ready to commit/deploy -- On exit, warn if local DB is "dirty" (has unapplied changes) - -### 3. Differ Strategy - -Use **pg-delta** (`@supabase/pg-delta` npm package) because: -- Supabase's own differ, optimized for Supabase schemas -- Handles Supabase-specific patterns (auth, storage, realtime) -- CLI interface for easy invocation - -**Implementation:** Run pg-delta CLI via Bun in Docker: - -```bash -docker run --rm \ - --network host \ - -v supabase_bun_cache:/bun-cache \ - -e BUN_INSTALL_CACHE_DIR=/bun-cache \ - oven/bun:canary-alpine \ - x @supabase/pg-delta@1.0.0-alpha.2 plan \ - --source "postgres://postgres:postgres@localhost:54321/postgres" \ - --target "postgres://postgres:postgres@localhost:54322/contrib_regression" \ - --integration supabase \ - --format sql \ - --role postgres -``` - -**CLI flags:** -- `--source` - Local database URL (current state) -- `--target` - Shadow database URL (desired state with declared schemas applied) -- `--integration supabase` - Use Supabase-specific schema filtering -- `--format sql` - Output raw SQL statements -- `--role postgres` - Execute as postgres role - -**Why Bun?** -- Much faster startup than edge-runtime (~100ms vs ~2s) -- `bun x` is like `npx` but faster -- Alpine image is lightweight (~50MB) -- `supabase_bun_cache` volume caches pg-delta package (only downloads once) - -### 4. 
Handling Destructive Changes - -- Detect DROP statements via regex pattern matching -- Show warnings with affected objects -- Apply anyway (in dev mode, speed > safety) -- Consider `--confirm-drops` flag for stricter mode - -## File Structure - -``` -cmd/ -└── dev.go # Cobra command definition - -internal/dev/ -├── session.go # DevSession orchestrator -├── feedback.go # Console output formatting -├── watcher/ -│ ├── watcher.go # Watcher interface -│ └── schema.go # Schema watcher (adapts existing debounce pattern) -├── validator/ -│ └── sql.go # SQL syntax validator using pg_query_go -├── differ/ -│ └── schema.go # DevDiffer using pg-schema-diff -└── state/ - └── state.go # DevState tracking -``` - -## Implementation Plan - -### Phase 1: Command Scaffolding -1. Create `cmd/dev.go` with Cobra command -2. Create `internal/dev/session.go` with basic lifecycle -3. Integrate with `internal/start/start.go` to ensure DB is running - -### Phase 2: Schema Watcher -1. Create `internal/dev/watcher/schema.go` -2. Adapt `debounceFileWatcher` from `internal/functions/serve/watcher.go` -3. Watch `supabase/schemas/*.sql` with 500ms debounce - -### Phase 3: SQL Validator (Pre-Diff Gate) -1. Add `github.com/pganalyze/pg_query_go/v6` dependency -2. Create `internal/dev/validator/sql.go` -3. Implement `ValidateSchemaFiles(paths []string) error` that: - - Parses each file with pg_query_go - - Returns first error with filename, line, column, and message - - Returns nil if all files are valid - -### Phase 4: Diff and Apply (using pg-delta via Bun) -1. Create `internal/dev/differ.go` -2. 
Implement `runPgDelta()` that: - - Creates `supabase_bun_cache` Docker volume (if not exists) - - Runs `oven/bun:canary-alpine` container with: - - `--network host` to access local databases - - `-v supabase_bun_cache:/bun-cache` for caching - - `-e BUN_INSTALL_CACHE_DIR=/bun-cache` - - Command: `x @supabase/pg-delta@1.0.0-alpha.2 plan --source --target --integration supabase --format sql --role postgres` -3. Parse output SQL and apply directly without version tracking - -### Phase 5: Feedback and State -1. Create `internal/dev/feedback.go` for colored console output -2. Create `internal/dev/state/state.go` for dirty state tracking -3. Show warnings for DROP statements -4. Warn on exit if dirty - -### Phase 6: Polish -1. Add `--no-start` flag (assume DB already running) -2. Handle edge cases (DB stops unexpectedly, etc.) -3. Document the workflow - -## Critical Files to Modify/Reference - -| File | Purpose | -|------|---------| -| `cmd/dev.go` | New file - command definition | -| `internal/dev/dev.go` | Main session orchestration | -| `internal/dev/watcher.go` | File watcher with debounce | -| `internal/dev/validator.go` | SQL syntax validator (pg_query_go v6) | -| `internal/dev/differ.go` | Diff and apply logic (pg-delta via Bun) | -| `internal/functions/serve/watcher.go` | Reference for file watcher pattern | -| `internal/utils/docker.go` | Reference for Docker volume/container patterns | -| `pkg/migration/file.go` | Reference for `ExecBatch` pattern | -| `internal/start/start.go` | Integration point for DB startup | -| `go.mod` | Add `github.com/pganalyze/pg_query_go/v6` dependency | - -## Example User Experience +Transform `supabase dev` into the single entry point for onboarding and development: ``` $ supabase dev - -[14:32:15] Starting local database... -[14:32:18] Local database ready -[14:32:18] Watching supabase/schemas/ for changes... 
-[14:32:18] Press Ctrl+C to stop - -[14:32:45] Change detected: supabase/schemas/users.sql -[14:32:46] Applied: - CREATE TABLE public.profiles ( - id uuid PRIMARY KEY REFERENCES auth.users(id), - display_name text - ); - -[14:33:12] Change detected: supabase/schemas/users.sql -[14:33:12] Warning: DROP statement detected - DROP COLUMN display_name; -[14:33:13] Applied: - ALTER TABLE public.profiles DROP COLUMN display_name; - ALTER TABLE public.profiles ADD COLUMN full_name text; - -[14:33:45] Change detected: supabase/schemas/users.sql -[14:33:45] Syntax error in supabase/schemas/users.sql - Line 3, Column 8: syntax error at or near "TABL" - Waiting for valid SQL... - -^C -[14:35:00] Stopping... -[14:35:00] Warning: Local database has uncommitted schema changes! - Hint: Run 'supabase db diff -f migration_name' to create a migration +# Handles: init → link → pull → start → reactive dev mode ``` -## Verification - -1. **Manual testing**: - - Run `supabase dev` - - Edit a schema file and save - - Verify change is applied to local DB - - Verify no migration file is created - - Run `supabase db diff` to see the accumulated changes - - Run `supabase db diff -f my_migration` to create migration when ready - -2. **Edge cases to test**: - - Save mid-edit (incomplete SQL) → validation error, no diff attempted - - Syntax error in one file while another is valid → validation catches it - - Rapid saves (debounce working) - - DROP statements (warning shown) - - Ctrl+C with dirty state (warning shown) - - DB not running at start (should start it) - -## Design Decisions (Confirmed) - -1. **Debounce duration**: **500ms** - Match existing pattern for fast feedback -2. **DROP statement handling**: **Apply immediately with warning** - Speed over safety in dev mode -3. 
**Initial state**: **Apply immediately on startup** - Sync local DB to match schema files - -## Future Optimization: Lazy Container Startup - -### Problem - -Currently, `supabase start` pulls and starts **all** containers sequentially, even when only the database is needed: - -``` -postgres, kong, gotrue, postgrest, storage-api, realtime, edge-runtime, -imgproxy, postgres-meta, studio, logflare, vector, mailpit... -``` +And `supabase push` as the single command to deploy local changes to remote. -This takes 30-60+ seconds on first run (image pulls) and 10-20 seconds on subsequent runs. For `supabase dev`, we only need Postgres immediately - other services are accessed on-demand. - -### Current Architecture - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ supabase start (current) │ -├─────────────────────────────────────────────────────────────────┤ -│ 1. Pull ALL images sequentially │ -│ 2. Start ALL containers sequentially │ -│ 3. Wait for ALL health checks │ -│ 4. Ready (~30-60s first run, ~10-20s subsequent) │ -└─────────────────────────────────────────────────────────────────┘ -``` +--- -Kong gateway already routes all API requests: -- `/auth/v1/*` → gotrue:9999 -- `/rest/v1/*` → postgrest:3000 -- `/storage/v1/*` → storage:5000 -- `/realtime/v1/*` → realtime:4000 -- `/functions/v1/*` → edge-runtime:8081 -- etc. +## Implementation Overview -### Proposed Architecture: Lazy Proxy +### Phase 1: Create Onboarding Module -``` -┌─────────────────────────────────────────────────────────────────┐ -│ supabase dev (optimized) │ -├─────────────────────────────────────────────────────────────────┤ -│ 1. Start Postgres only (~3-5s) │ -│ 2. Start LazyProxy (replaces Kong initially) │ -│ 3. 
Ready for schema development immediately │ -├─────────────────────────────────────────────────────────────────┤ -│ LazyProxy (on first request to service) │ -│ ├── Intercept request to /auth/v1/* │ -│ ├── Pull gotrue image (if needed) │ -│ ├── Start gotrue container │ -│ ├── Wait for health check │ -│ ├── Forward request (and all subsequent requests) │ -│ └── Show "Starting auth service..." in CLI │ -└─────────────────────────────────────────────────────────────────┘ -``` +**New file: `internal/dev/onboarding/onboarding.go`** -### Implementation Strategy - -#### Option A: Custom Go Proxy (Recommended) - -Build a lightweight reverse proxy in Go that: -1. Listens on Kong's port (8000) -2. Maps routes to container configs -3. On first request to a route: - - Returns "503 Service Starting" or holds the request - - Pulls image + starts container in background - - Once healthy, forwards request -4. Subsequent requests go directly to container +Create a dedicated module to orchestrate the setup flow: ```go -// internal/dev/proxy/lazy.go -type LazyProxy struct { - services map[string]*ServiceState // route prefix → state - mu sync.RWMutex -} +package onboarding -type ServiceState struct { - Config ContainerConfig - Started bool - Starting bool - ContainerID string +type State struct { + ConfigExists bool // supabase/config.toml exists + ProjectLinked bool // .temp/project-ref exists + HasMigrations bool // migrations/*.sql exist + HasFunctions bool // functions/* exist } -func (p *LazyProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { - service := p.routeToService(r.URL.Path) - if service == nil { - http.Error(w, "Not found", 404) - return - } - - if !service.Started { - p.startService(service) // blocks until healthy - } - - service.Proxy.ServeHTTP(w, r) +type Options struct { + Interactive bool + SkipInit bool + SkipLink bool + SkipPull bool } -``` - -#### Option B: Kong with Lazy Backend Plugin - -Use Kong but with a custom plugin that: -1. 
Catches connection failures to backends -2. Triggers container start via Docker API -3. Retries after container is healthy - -This is more complex (requires Lua/Kong plugin development) but keeps the existing Kong setup. - -### Service Dependency Graph -Some services have dependencies: +func DetectState(ctx context.Context, fsys afero.Fs) *State +func Run(ctx context.Context, fsys afero.Fs, opts Options) error ``` -postgres (required first) - ↓ -postgrest (needs postgres) -gotrue (needs postgres) -storage-api (needs postgres, gotrue for auth) -realtime (needs postgres) - ↓ -kong (needs all above for routing) -studio (needs kong, postgres-meta) -``` - -For lazy startup: -- **Immediate**: postgres -- **On-demand**: everything else, respecting dependencies - -### Configuration -```toml -# config.toml -[dev] -lazy_services = true # default: true for `supabase dev` +### Phase 2: State Detection Functions -[dev.eager_services] -# Services to start immediately (not lazily) -# Useful if you know you'll need auth immediately -auth = false -rest = false -``` +**New file: `internal/dev/onboarding/detect.go`** -### CLI Integration +Reuse existing detection patterns: +- `ConfigExists()` - check `utils.ConfigPath` ("supabase/config.toml") +- `ProjectLinked()` - check `utils.ProjectRefPath` (".temp/project-ref") +- `HasMigrations()` - check `utils.MigrationsDir` +- `HasFunctions()` - check `utils.FunctionsDir` -``` -$ supabase dev +### Phase 3: Flow Integration Functions -Starting Postgres... done (3.2s) -Lazy proxy ready on localhost:54321 +**New file: `internal/dev/onboarding/flows.go`** -Watching supabase/schemas/ for changes... +Wrap existing commands as callable functions: -# User's app makes request to /auth/v1/signup -Starting auth service... done (4.1s) - -# User's app makes request to /rest/v1/profiles -Starting REST API... done (2.3s) -``` - -### Benefits - -1. **Faster iteration**: Schema development starts in ~5s instead of ~30s -2. 
**Lower resource usage**: Unused services don't consume memory -3. **Better DX**: Clear feedback when services start on-demand -4. **Backwards compatible**: `supabase start` unchanged, `supabase dev` uses lazy mode - -### Challenges - -1. **First request latency**: 2-5s delay on first request to a service -2. **Dependency ordering**: Must start dependencies before dependents -3. **Health check timing**: Need to wait for service to be truly ready -4. **WebSocket services**: Realtime needs special handling for persistent connections +```go +// Init flow - reuses internal/init.Run() +func RunInitFlow(ctx context.Context, fsys afero.Fs, interactive bool) error -### Files to Create/Modify +// Link flow - prompts for project, reuses internal/link.Run() +func PromptLinkChoice(ctx context.Context) (LinkChoice, error) +func RunLinkFlow(ctx context.Context, fsys afero.Fs) error -| File | Purpose | -|------|---------| -| `internal/dev/proxy/lazy.go` | Lazy proxy implementation | -| `internal/dev/proxy/routes.go` | Route → container mapping | -| `internal/dev/proxy/health.go` | Health check logic | -| `internal/start/start.go` | Add `--lazy` flag support | -| `pkg/config/config.go` | Add `[dev]` config section | - -### Migration Path - -1. **Phase 1**: Implement for `supabase dev` only (current scope) -2. **Phase 2**: Add `supabase start --lazy` flag for opt-in -3. **Phase 3**: Consider making lazy default for `supabase start` - -## Extensible Workflow Design - -The `dev` command supports multiple workflows, each with its own configuration section. This allows users to customize behavior based on their tooling (Supabase-native, Prisma, Drizzle, etc.). 
- -### Config Structure - -```toml -[dev.schemas] -# Database schema workflow -enabled = true # Set to false to disable this workflow -watch = ["schemas/**/*.sql"] # Glob patterns to watch (relative to supabase/) -on_change = "" # Custom command to run on change (overrides internal diff) -types = "" # Path for TypeScript types (empty = disabled) -debounce = 500 # Milliseconds to wait before triggering (default: 500) -sync_on_start = true # Apply schema on startup (default: true) - -[dev.functions] -# Edge functions workflow (future) -enabled = true -watch = ["functions/**/*.ts"] -# ... function-specific options +// Pull flow - pulls everything: schema, functions, storage config, auth config +func RunPullFlow(ctx context.Context, fsys afero.Fs) error ``` -### Configuration Options +### Phase 4: Modify Dev Command -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `enabled` | `bool` | `true` | Enable/disable this workflow | -| `watch` | `string[]` | `["schemas/**/*.sql"]` | Glob patterns for files to watch (relative to `supabase/` directory) | -| `on_change` | `string` | `""` (empty = use internal differ) | Custom command to run when files change | -| `types` | `string` | `""` (empty = disabled) | Output path for TypeScript types | -| `debounce` | `int` | `500` | Milliseconds to wait after file change before triggering | -| `sync_on_start` | `bool` | `true` | Whether to apply schema changes on startup | +**Modify: `internal/dev/dev.go`** -**Important:** All `watch` paths are relative to the `supabase/` directory, not the project root. - -### How It Works - -``` -File change detected - ↓ - Debounce (500ms) - ↓ - on_change set? ─── Yes ──→ Run custom command - │ ↓ - No types set? ─── Yes ──→ Generate types - ↓ │ - Internal differ No - (pg-delta) ↓ - ↓ Done - Apply to local DB - ↓ - types set? 
─── Yes ──→ Generate types - │ - No - ↓ - Done -``` +Update `Run()` to orchestrate onboarding before starting dev session: -### Typical Workflows +```go +func Run(ctx context.Context, fsys afero.Fs, opts RunOptions) error { + // 1. Detect current state + state := onboarding.DetectState(ctx, fsys) -#### 1. Supabase Users (Default) + // 2. Init if needed + if !state.ConfigExists && opts.Interactive { + onboarding.RunInitFlow(ctx, fsys, true) + } -Users who write SQL directly in `supabase/schemas/`. + // 3. Offer to link ONLY after fresh init (not on every run) + if !state.ProjectLinked && opts.Interactive && justInitialized { + if choice == LinkChoiceYes { + onboarding.RunLinkFlow(ctx, fsys) + // 4. Pull everything from remote after linking + onboarding.RunPullFlow(ctx, fsys) // pulls schema, functions, storage, auth + } + } -**Config** (default, no config needed): -```toml -# No config needed - defaults work out of the box + // 5. Existing dev flow: ensure DB running, start session + ensureDbRunning(ctx, fsys) + return session.Run() +} ``` -**Workflow**: -``` -1. Edit supabase/schemas/tables.sql -2. Save file -3. CLI validates SQL syntax (pg_query_go) -4. CLI diffs schema files vs local DB (pg-delta) -5. CLI applies changes directly to local DB -6. (Optional) Types generated if configured -7. When ready: `supabase db diff -f migration_name` -``` +**Modify: `cmd/dev.go`** -**With TypeScript types**: -```toml -[dev.schemas] -types = "src/types/database.ts" +Add new flag: +```go +devFlags.BoolVar(&skipOnboarding, "skip-onboarding", false, "Skip interactive setup wizard") ``` ---- +### Phase 5: Handle Conflicts -#### 2. Drizzle Users +**New file: `internal/dev/onboarding/conflict.go`** -Users who define schemas in TypeScript using Drizzle ORM. 
+When linking to a project that has existing local migrations: -**Config**: -```toml -[dev.schemas] -watch = ["../src/db/schema/**/*.ts"] # Use ../ to reach project root from supabase/ -on_change = "npx drizzle-kit push" -sync_on_start = false # Drizzle manages its own state -``` +```go +type ConflictAction int +const ( + ConflictMerge // Pull remote, keep local migrations + ConflictReplace // Replace local with remote + ConflictKeepLocal // Skip pull, keep local +) -**Workflow**: +func PromptConflictResolution(ctx context.Context) (ConflictAction, error) ``` -1. Edit src/db/schema/users.ts (Drizzle schema) -2. Save file -3. CLI detects change, runs `npx drizzle-kit push` -4. Drizzle pushes changes directly to local DB -5. When ready: `npx drizzle-kit generate` for migrations -``` - -**Note**: Drizzle users typically use `drizzle-kit push` for dev and `drizzle-kit generate` for migrations. The CLI just watches and triggers their existing workflow. --- -#### 3. Prisma Users +## User Experience Flow -Users who define schemas using Prisma ORM. - -**Config**: -```toml -[dev.schemas] -watch = ["../prisma/schema.prisma"] # Use ../ to reach project root from supabase/ -on_change = "npx prisma db push --skip-generate" -sync_on_start = false # Prisma manages its own state ``` +$ supabase dev -**Workflow**: +┌─────────────────────────────────────────────────────────┐ +│ No Supabase project found. Let's set one up! │ +│ │ +│ [Creating supabase/config.toml...] │ +│ [Creating supabase/.gitignore...] │ +│ │ +│ Generate VS Code settings for Deno? [Y/n] │ +└─────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────┐ +│ Do you have a remote Supabase project to connect? 
│ +│ │ +│ > Yes, link to existing project │ +│ No, I'm starting fresh │ +└─────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────┐ +│ Select a project to link: │ +│ │ +│ > my-app (org: acme, region: us-east-1) │ +│ other-project (org: acme, region: eu-west-1) │ +└─────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────┐ +│ Pulling from remote project... │ +│ │ +│ ✓ Schema pulled │ +│ Created: supabase/migrations/20240115_remote_schema.sql│ +│ │ +│ ✓ Edge Functions pulled (3 found) │ +│ Created: supabase/functions/hello-world/index.ts │ +│ Created: supabase/functions/auth-hook/index.ts │ +│ Created: supabase/functions/stripe-webhook/index.ts │ +│ │ +│ ✓ Storage config synced (2 buckets) │ +│ ✓ Auth config synced │ +└─────────────────────────────────────────────────────────┘ + +┌─────────────────────────────────────────────────────────┐ +│ Starting local database... │ +│ Starting Supabase services... │ +│ │ +│ ✓ Local development environment ready! │ +│ │ +│ API URL: http://127.0.0.1:54321 │ +│ Studio: http://127.0.0.1:54323 │ +│ │ +│ Watching for changes... │ +└─────────────────────────────────────────────────────────┘ ``` -1. Edit prisma/schema.prisma -2. Save file -3. CLI detects change, runs `npx prisma db push --skip-generate` -4. Prisma pushes changes directly to local DB -5. When ready: `npx prisma migrate dev` for migrations -``` - -**Note**: `--skip-generate` avoids regenerating the Prisma client on every save. Users can run `npx prisma generate` separately when needed. --- -#### 4. External Watch Mode (ORM handles everything) - -Users who prefer their ORM's built-in watch mode and don't need Supabase CLI to watch schemas at all. +## Files to Create/Modify -**Config**: -```toml -[dev.schemas] -enabled = false # Disable schema workflow entirely -``` - -**Workflow**: -``` -1. Run `supabase dev` (starts DB, but no schema watching) -2. 
In another terminal: run ORM's watch mode (e.g., `prisma studio`, custom watcher) -3. ORM handles schema changes and can call `supabase gen types typescript` if needed -4. When ready: use ORM's migration tooling -``` - -**Use cases**: -- Prisma users who prefer `prisma studio` or a custom dev script -- Teams with existing watch tooling they don't want to replace -- Users who only want `supabase dev` for edge functions workflow (future) +### New Files +| File | Purpose | +|------|---------| +| `internal/dev/onboarding/onboarding.go` | Main orchestration and State type | +| `internal/dev/onboarding/detect.go` | State detection functions | +| `internal/dev/onboarding/flows.go` | Init, Link, Pull flow wrappers | +| `internal/dev/onboarding/conflict.go` | Conflict resolution prompts | -**Note**: Even with `enabled = false`, users still benefit from `supabase dev` for: -- Automatic database startup -- Future workflows like edge functions (`[dev.functions]`) -- Unified dev experience across Supabase services +### Modified Files +| File | Changes | +|------|---------| +| `internal/dev/dev.go` | Add onboarding call at start of Run() | +| `cmd/dev.go` | Add `--skip-onboarding` flag | --- -### TypeScript Type Generation +## Pull Scope -Type generation is **independent** of the schema sync method. It runs after changes are applied to the database, regardless of whether the internal differ or a custom `on_change` command was used. 
+When pulling from a linked remote project, pull **everything available**: -**Supported generators** (future): -- `supabase gen types typescript` (built-in) -- Custom command via config +**Always pulled:** +- Schema/migrations (via `db pull`) -**Config**: -```toml -[dev.schemas] -types = "src/types/database.ts" -# or for custom generator: -# types_command = "npx prisma generate" -``` +**Pulled if found on remote (with progress indication):** +- Edge Functions (via `functions download`) +- Storage bucket configurations (write to config.toml) +- Auth provider configurations (write to config.toml) -**When types are generated**: -1. After successful schema application (internal or external) -2. Only if `types` path is configured -3. Uses `supabase gen types typescript --local > ` +This creates a complete local replica of the remote project configuration. --- -### DX Improvements - -#### 1. Clear Status Feedback - -The CLI provides clear, structured output during the dev session: - -``` -[dev] Watching schemas/**/*.sql -[dev] On change: (internal differ) -[dev] Status: Applying changes... -[dev] ✓ Schema applied successfully -[dev] Status: Watching for changes... -``` - -#### 2. Validation on Startup +## Non-Interactive Mode -Before starting the watch loop, the CLI validates: -- Watch patterns are valid glob syntax -- `on_change` command exists (if configured) - warns if not found in PATH -- `types` output directory exists - warns if parent directory missing -- Watch directories exist - creates `supabase/schemas/` if using default pattern +For CI/CD and scripts: -#### 3. Dynamic Directory Watching +```bash +# Fails fast if not initialized +supabase dev --skip-onboarding -When a new subdirectory is created within a watched path, it's automatically added to the watcher. 
This handles cases like: -``` -supabase/schemas/ -├── tables.sql -└── new-module/ # Created while dev is running - └── models.sql # Automatically watched +# Or use environment variables +SUPABASE_PROJECT_ID=xyz supabase dev --skip-onboarding ``` -#### 4. Configurable Debounce - -The `debounce` option allows tuning the delay between file save and action trigger: -- **Lower values (100-300ms)**: Faster feedback, but may trigger on incomplete saves -- **Default (500ms)**: Good balance for most editors -- **Higher values (1000ms+)**: For slower machines or complex operations - -#### 5. Skip Initial Sync - -The `sync_on_start` option controls whether to apply schema on startup: -- **`true` (default)**: Ensures local DB matches schema files immediately -- **`false`**: Useful when using `on_change` with an ORM that's already in sync - --- -### Why This Design? +## Error Handling -1. **Backwards compatible** - No config needed for default Supabase workflow -2. **Tool agnostic** - Works with any ORM/tool that has a CLI -3. **Composable** - Type generation works with any schema tool -4. **Extensible** - Easy to add new workflows (`[dev.functions]`, `[dev.seed]`, etc.) +1. **No config + non-interactive**: Clear error with suggestion to run `supabase init` +2. **Docker not running**: Detected in `ensureDbRunning`, suggest starting Docker +3. **API errors during link**: Show error, allow retry or continue without linking +4. **Pull failures**: Log warning, continue to dev mode (partial setup > complete failure) +5. **User cancellation (Ctrl+C)**: Graceful exit, can resume on next `supabase dev` --- -## Open Question: "Valid but Incomplete" Schema Problem - -### The Problem +## Verification Plan -Current validation only checks SQL syntax. But a statement can be **valid yet incomplete**: - -```sql --- Step 1: User saves this (valid SQL!) -CREATE TABLE users (id uuid PRIMARY KEY); - --- Step 2: User continues typing (also valid, but different!) 
-CREATE TABLE users (id uuid PRIMARY KEY, name text, email text); -``` - -If we diff after step 1, we create a table with 1 column. Then we have to ALTER to add columns. This creates: -- Unnecessary churn (multiple diffs for one logical change) -- Potential issues with constraints, foreign keys -- Confusing diff output - -### Replit's Approach (Reference) - -[Replit's automated migrations](https://blog.replit.com/production-databases-automated-migrations) takes a different approach: -- **Don't diff during development** - Let developers make any changes freely -- **Diff at deploy time** - Generate migration only when deploying to production -- **Minimal intervention** - Users shouldn't think about migrations during dev - -This works well for AI agents but may lose the "immediate feedback" benefit for human developers. - -### Proposed Solutions - -#### Option A: Explicit Sync Command -```toml -[dev.schemas] -auto_apply = false # New option, default: true -``` -- Changes are validated but NOT auto-applied -- User runs `supabase db sync` when ready -- **Pro**: User is always in control -- **Con**: Loses reactive feel - -#### Option B: Preview Mode with Confirmation -``` -[dev] Change detected: users.sql -[dev] Will apply: - CREATE TABLE users (id uuid PRIMARY KEY); -[dev] Press Enter to apply, or keep editing... 
-``` -- Show diff preview, wait for confirmation (Enter) or timeout -- **Pro**: Immediate feedback + user control -- **Con**: Requires interaction - -#### Option C: Smart Incompleteness Detection -- Detect "likely incomplete" patterns: - - Empty tables (0 columns) - - Tables with only PK - - Functions with empty bodies -- Warn but don't auto-apply for these cases -- **Pro**: Catches common cases automatically -- **Con**: Can't catch all cases - -#### Option D: Adaptive Debounce -- Short debounce (500ms) for small edits -- Longer debounce (2-3s) when: - - File was just created - - Major structural changes detected - - Rapid consecutive saves -- **Pro**: Automatic, no config needed -- **Con**: Feels inconsistent - -#### Option E: Hybrid (Recommended) - -Combine the best of all approaches: - -1. **Default behavior**: Auto-apply with 500ms debounce (current) -2. **New config option**: `auto_apply = false` for manual control -3. **Smart warnings**: Detect potentially incomplete schemas, show warning but apply -4. **Explicit command**: `supabase db sync` for manual trigger when `auto_apply = false` - -```toml -[dev.schemas] -auto_apply = true # Default: auto-apply on save -# auto_apply = false # Alternative: preview only, use `supabase db sync` to apply -``` - -### Recommendation - -**Start with current behavior (auto-apply)** but add: -1. `auto_apply = false` option for users who want explicit control -2. Smart warnings for "likely incomplete" schemas (empty tables, etc.) -3. `supabase db sync` command for manual application - -This gives users a choice: -- **Rapid prototyping**: `auto_apply = true` (default) - accept some churn for speed -- **Careful development**: `auto_apply = false` - diff on demand only +1. **Fresh directory test**: Run `supabase dev` in empty directory, verify full onboarding flow +2. **Existing project test**: Run `supabase dev` in initialized project, verify it skips init +3. 
**Linked project test**: Run `supabase dev` in linked project, verify it skips link prompt +4. **Non-interactive test**: Run `supabase dev --skip-onboarding` without config, verify error +5. **Cancellation test**: Cancel at each step, verify can resume +6. **Conflict test**: Have local migrations, link to project with different schema, verify conflict prompt --- -## Performance Optimization: Persistent Shadow Database - -### Problem - -Currently, each diff cycle takes ~15s: -- Shadow DB container creation: ~11s (Docker overhead) -- Migration application: ~3s (same migrations every time) -- Schema application + diff: ~500ms - -This is too slow for a reactive dev experience. - -### Solution: Persistent Shadow with Template Database +## Design Decisions -Keep the shadow container running and use PostgreSQL's `CREATE DATABASE ... TEMPLATE` for fast resets. +1. **Link prompt only after init**: The "do you want to link?" prompt appears only immediately after a fresh init, not on every dev run. If user skips, they must run `supabase link` manually. -#### Architecture - -``` -First run (cold start ~14s): - 1. Start persistent shadow container - 2. Apply all migrations → creates baseline state - 3. Snapshot baseline roles: SELECT rolname FROM pg_roles - 4. CREATE DATABASE shadow_template AS TEMPLATE - 5. Apply declared schemas to contrib_regression - 6. Diff - -Subsequent runs (fast path ~500ms): - 1. Clean cluster-wide objects (roles not in baseline) - 2. DROP DATABASE contrib_regression - 3. CREATE DATABASE contrib_regression TEMPLATE shadow_template - 4. Apply declared schemas - 5. Diff -``` - -#### Why Template + Role Tracking? 
- -PostgreSQL template databases only copy **database-scoped objects**: -- Tables, views, functions, triggers ✓ -- Extensions ✓ -- Schemas ✓ - -They do NOT copy **cluster-wide objects**: -- Roles (CREATE ROLE, ALTER ROLE) ✗ -- Role memberships ✗ -- Tablespaces ✗ - -If declared schemas contain `CREATE ROLE`, we must track and clean them explicitly. - -#### Implementation - -```go -// internal/dev/shadow.go - -type ShadowState struct { - ContainerID string - BaselineRoles []string // Roles after migrations, before declared schemas - TemplateReady bool - MigrationsHash string // Invalidate template if migrations change -} - -// EnsureShadowReady prepares the shadow database for diffing -func (s *ShadowState) EnsureShadowReady(ctx context.Context, fsys afero.Fs) error { - // Check if container exists and is healthy - if !s.isContainerHealthy(ctx) { - // Cold start: create container, apply migrations, create template - return s.coldStart(ctx, fsys) - } - - // Check if migrations changed (invalidates template) - currentHash := s.hashMigrations(fsys) - if currentHash != s.MigrationsHash { - return s.rebuildTemplate(ctx, fsys) - } - - // Fast path: reset from template - return s.resetFromTemplate(ctx) -} - -// resetFromTemplate quickly resets the database state -func (s *ShadowState) resetFromTemplate(ctx context.Context) error { - conn := s.connectToShadow(ctx) - defer conn.Close() - - // 1. Clean cluster-wide objects created by declared schemas - currentRoles := s.queryRoles(ctx, conn) - for _, role := range currentRoles { - if !slices.Contains(s.BaselineRoles, role) { - conn.Exec(ctx, fmt.Sprintf("DROP ROLE IF EXISTS %q", role)) - } - } - - // 2. 
Reset database from template - conn.Exec(ctx, "DROP DATABASE IF EXISTS contrib_regression") - conn.Exec(ctx, "CREATE DATABASE contrib_regression TEMPLATE shadow_template") - - return nil -} - -// coldStart creates container and builds initial template -func (s *ShadowState) coldStart(ctx context.Context, fsys afero.Fs) error { - // 1. Create and start shadow container - s.ContainerID = createShadowContainer(ctx) - waitForHealthy(ctx, s.ContainerID) - - // 2. Apply migrations - applyMigrations(ctx, s.ContainerID, fsys) - - // 3. Snapshot baseline roles - s.BaselineRoles = s.queryRoles(ctx, conn) - - // 4. Create template from current state - conn.Exec(ctx, "CREATE DATABASE shadow_template TEMPLATE contrib_regression") - s.TemplateReady = true - s.MigrationsHash = s.hashMigrations(fsys) - - return nil -} -``` - -#### Migration Hash Strategy - -Invalidate the template when migrations change: - -```go -func (s *ShadowState) hashMigrations(fsys afero.Fs) string { - h := sha256.New() - - // Walk migrations directory in sorted order - files, _ := afero.ReadDir(fsys, "supabase/migrations") - for _, f := range files { - content, _ := afero.ReadFile(fsys, filepath.Join("supabase/migrations", f.Name())) - h.Write([]byte(f.Name())) - h.Write(content) - } - - return hex.EncodeToString(h.Sum(nil)) -} -``` - -#### Container Lifecycle - -The shadow container is managed separately from the main `supabase start` containers: - -| Event | Action | -|-------|--------| -| `supabase dev` starts | Start shadow if not running | -| `supabase dev` file change | Reuse existing shadow | -| `supabase dev` exits | Keep shadow running (for next session) | -| `supabase stop` | Stop shadow container | -| Migrations change | Rebuild template (keep container) | - -#### Expected Performance - -| Scenario | Time | -|----------|------| -| First run (cold) | ~14s | -| Subsequent runs (warm) | ~500ms | -| After migration change | ~3s (rebuild template) | - -#### Files to Create/Modify - -| File | Purpose 
| -|------|---------| -| `internal/dev/shadow.go` | New - Shadow state management | -| `internal/dev/differ.go` | Modify - Use ShadowState instead of creating new container | -| `internal/stop/stop.go` | Modify - Stop shadow container on `supabase stop` | +2. **Push stays simple**: `supabase push` will NOT have onboarding. If not linked, it fails with a clear message to run `supabase link` first. This keeps push predictable for CI/CD. --- -## Future Workflows (Out of Scope for Now) +## Future Enhancements (Out of Scope) -The dev command architecture supports adding more watchers later: -- **Edge functions** (`[dev.functions]`) - Watch and hot-reload edge functions -- **Seed data** (`[dev.seed]`) - Auto-apply seed files on change -- **Type generation** - Already supported via `types` option +1. **Project creation**: Offer to create a new remote project if user doesn't have one +2. **Template selection**: Offer starter templates during init +3. **Selective pull**: Let user choose what to pull via checkboxes diff --git a/PULL_REQUEST.md b/PULL_REQUEST.md index 942940267..630901914 100644 --- a/PULL_REQUEST.md +++ b/PULL_REQUEST.md @@ -84,9 +84,11 @@ types = "src/types/database.ts" # Auto-generate TypeScript types ``` **Supported workflows:** -- **Supabase native** (default): SQL files in `supabase/schemas/` -- **Drizzle ORM**: `on_change = "npx drizzle-kit push"` -- **Prisma ORM**: `on_change = "npx prisma db push --skip-generate"` +- **Supabase native** (default): SQL files in `supabase/schemas/`, uses internal differ +- **Custom tooling**: Any external command via `on_change`: + - Drizzle: `on_change = "npx drizzle-kit push"` + - Prisma: `on_change = "npx prisma db push --skip-generate"` + - Or any custom script/command - **Disabled**: `enabled = false` for users with their own watch tooling ### 6. 
Automatic Seeding @@ -199,14 +201,166 @@ DEBUG=supabase:dev:sql # SQL statements being executed └─────────────────────────────────────────────────────────────────┘ ``` -### Pipeline Flow +### Workflow Diagrams +#### Dev Session Lifecycle + +```mermaid +flowchart TD + Start([supabase dev]) --> CheckDB{Database running?} + CheckDB -->|No| StartDB[Start local database] + CheckDB -->|Yes| UseExisting[Use existing database] + StartDB --> InitSchema + UseExisting --> InitSchema + + InitSchema[Initial schema sync] --> InitSeed{Seed enabled?} + InitSeed -->|Yes| RunSeed[Run initial seed] + InitSeed -->|No| Watch + RunSeed --> Watch + + Watch[Watch for file changes] --> Event{Event type?} + + Event -->|Schema file| SchemaFlow[Schema workflow] + Event -->|Seed file| SeedFlow[Seed workflow] + Event -->|Migration file| MigrationFlow[Invalidate shadow template] + Event -->|Ctrl+C| Shutdown + + SchemaFlow --> Watch + SeedFlow --> Watch + MigrationFlow --> Watch + + Shutdown{Dirty state?} -->|Yes| WarnDirty[Warn: uncommitted changes] + Shutdown -->|No| Cleanup + WarnDirty --> Cleanup + Cleanup[Cleanup shadow container] --> End([Exit]) +``` + +#### Schema Workflow + +```mermaid +flowchart TD + FileChange([Schema file changed]) --> Debounce[Debounce 500ms] + Debounce --> CheckEnabled{schemas.enabled?} + + CheckEnabled -->|No| Skip([Skip]) + CheckEnabled -->|Yes| CheckOnChange{on_change set?} + + %% Custom command path + CheckOnChange -->|Yes| RunCustom[Run custom command] + RunCustom --> CustomResult{Success?} + CustomResult -->|Yes| MarkDirty[Mark session dirty] + CustomResult -->|No| ShowError1[Show error] + ShowError1 --> Done + + %% Internal differ path + CheckOnChange -->|No| LoadFiles[Load all schema files] + LoadFiles --> Validate[Validate SQL syntax] + + Validate --> ValidResult{Valid?} + ValidResult -->|No| ShowSyntaxError[Show syntax error with location] + ShowSyntaxError --> Done([Wait for next change]) + + ValidResult -->|Yes| PrepareShadow[Prepare shadow DB] + 
PrepareShadow --> ApplyShadow[Apply schemas to shadow] + ApplyShadow --> Diff[Diff local vs shadow] + + Diff --> HasChanges{Changes detected?} + HasChanges -->|No| NoChanges[No schema changes] + NoChanges --> Done + + HasChanges -->|Yes| CheckDrops{DROP statements?} + CheckDrops -->|Yes| WarnDrops[Show DROP warning] + CheckDrops -->|No| Apply + WarnDrops --> Apply + + Apply[Apply changes to local DB] --> MarkDirty + MarkDirty --> GenTypes{types configured?} + GenTypes -->|Yes| GenerateTypes[Generate TypeScript types] + GenTypes -->|No| Done + GenerateTypes --> Done + + style RunCustom fill:#e1f5fe + style Diff fill:#fff3e0 + style Apply fill:#e8f5e9 +``` + +#### Seed Workflow + +```mermaid +flowchart TD + Trigger([Seed triggered]) --> Source{Trigger source?} + + Source -->|Startup| AfterSchema[After initial schema sync] + Source -->|File change| FileChange[Seed file modified] + + AfterSchema --> CheckEnabled + FileChange --> CheckEnabled{seed.enabled?} + + CheckEnabled -->|No| Skip([Skip]) + CheckEnabled -->|Yes| CheckOnChange{on_change set?} + + %% Custom command path + CheckOnChange -->|Yes| RunCustom[Run custom command] + RunCustom --> CustomResult{Success?} + CustomResult -->|Yes| Done([Done]) + CustomResult -->|No| ShowError[Show error, continue watching] + ShowError --> Done + + %% Internal seed path + CheckOnChange -->|No| CheckDbSeed{db.seed.enabled?} + CheckDbSeed -->|No| NoSeed[No seed config] + NoSeed --> Done + + CheckDbSeed -->|Yes| LoadPaths[Load seed file paths] + LoadPaths --> HasFiles{Files found?} + HasFiles -->|No| NoFiles[No seed files found] + NoFiles --> Done + + HasFiles -->|Yes| ForEach[For each seed file] + ForEach --> ParseSQL[Parse SQL statements] + ParseSQL --> Execute[Execute all statements] + Execute --> UpdateHash[Update hash in seed_files table] + UpdateHash --> MoreFiles{More files?} + MoreFiles -->|Yes| ForEach + MoreFiles -->|No| Done + + style RunCustom fill:#e1f5fe + style Execute fill:#e8f5e9 ``` -File save → Debounce (500ms) → 
Validate ALL *.sql → Diff → Apply - │ - ↓ (if invalid) - Show error with location - Wait for next save... + +#### Shadow Database: Cold Start (~14s) + +```mermaid +flowchart TD + Start([Diff requested]) --> CheckContainer{Shadow container exists?} + CheckContainer -->|Yes| FastPath([Use fast path]) + CheckContainer -->|No| CreateContainer[Create shadow container] + CreateContainer --> ApplyMigrations[Apply all migrations] + ApplyMigrations --> SnapshotRoles[Snapshot baseline roles] + SnapshotRoles --> CreateTemplate[CREATE DATABASE shadow_template] + CreateTemplate --> Ready([Template ready]) + + style CreateContainer fill:#fff3e0 + style ApplyMigrations fill:#fff3e0 + style CreateTemplate fill:#fff3e0 +``` + +#### Shadow Database: Fast Path (~10ms) + +```mermaid +flowchart TD + Start([Diff requested]) --> CleanRoles[Clean non-baseline roles] + CleanRoles --> DropDB[DROP DATABASE contrib_regression] + DropDB --> CloneTemplate[CREATE DATABASE ... TEMPLATE shadow_template] + CloneTemplate --> ApplySchemas[Apply declared schemas] + ApplySchemas --> RunDiff[Run pg-delta diff] + RunDiff --> Done([Diff complete]) + + style CleanRoles fill:#e8f5e9 + style DropDB fill:#e8f5e9 + style CloneTemplate fill:#e8f5e9 + style ApplySchemas fill:#e8f5e9 + style RunDiff fill:#e8f5e9 ``` ## Performance Optimization: Persistent Shadow Database @@ -271,11 +425,20 @@ PostgreSQL template databases only copy **database-scoped objects** (tables, vie ## CLI Usage ```bash -# Start dev mode (starts database if not running) supabase dev +``` + +**Workflows** (configured via `config.toml`): +- `schemas` - Watch schema files, auto-apply to local database +- `seed` - Run seeds on startup and when seed files change +- `functions` - (coming soon) Watch and auto-deploy edge functions + +**Flags**: None currently. All configuration is done via `config.toml`. 
-# With debug logging -DEBUG=supabase:dev:* supabase dev +**Debug logging** (via environment variable): +```bash +DEBUG=supabase:dev:* supabase dev # All dev logs +DEBUG=supabase:dev:timing supabase dev # Timing information only ``` ## Configuration Examples @@ -423,10 +586,53 @@ The `[dev.functions]` config structure is already in place. Future work includes - Auto-deploy to local edge runtime - Unified dev experience for schema + functions -### 4. Additional Workflows +### 4. Interactive Setup Wizard + +On first run of `supabase dev`, offer an interactive setup flow to configure the dev workflow: + +``` +$ supabase dev + +Welcome to Supabase Dev Mode! Let's configure your workflow. + +? Are you using supabase-js in your project? (Y/n) Y +? Generate TypeScript types automatically? (Y/n) Y +? Where should types be saved? src/types/database.ts + +? How do you manage your database schema? + > Supabase SQL files (supabase/schemas/*.sql) + Drizzle ORM + Prisma ORM + Other / I'll configure manually + +Configuration saved to config.toml: + [dev.schemas] + types = "src/types/database.ts" + +Starting dev mode... +``` + +This would: +- Detect existing project setup (package.json for supabase-js, prisma/schema.prisma, drizzle config) +- Pre-fill sensible defaults based on detection +- Write configuration to `config.toml` +- Only run on first invocation (or with `--setup` flag) + +## Platform Enhancement Ideas + +Beyond the `dev` command, this work highlighted a broader DX improvement opportunity: + +### Lazy Service Startup with Proxy + +Currently, `supabase start` spins up all services (postgres, auth, storage, realtime, etc.), which can take significant time. A more efficient approach: + +1. **Start only postgres initially** - The database is the core dependency +2. **Add a lightweight proxy** (Kong or similar) in front of other services +3. 
**Lazy-start services on first request** - Proxy holds the request, starts the container, then forwards + +This would dramatically reduce cold start time for users who only need the database (common during schema development). Services like Auth, Storage, and Realtime would start on-demand when actually accessed. -- **Lazy service startup** - start only database immediately, other services on-demand -- **`auto_apply = false`** - preview mode requiring explicit sync command +This is a platform-wide architectural change, not specific to the `dev` command. ## Breaking Changes diff --git a/SEED.md b/SEED.md new file mode 100644 index 000000000..c833e2bc8 --- /dev/null +++ b/SEED.md @@ -0,0 +1,155 @@ +# Seed Behavior in `supabase dev` + +**Status: FIXED** - See `internal/dev/seed.go` for implementation. + +## How Seeding Works in Dev Mode + +When you run `supabase dev`, seeds are executed: +1. **On startup** - After initial schema sync +2. **On seed file change** - When any file matching `[db.seed].sql_paths` is modified + +## How Database State is Handled Before Reseeding + +**Important:** The dev command does NOT automatically truncate or erase database state before reseeding. The seed file itself is responsible for managing existing data. + +### Recommended Patterns + +#### Pattern 1: TRUNCATE at the start (recommended for dev) + +```sql +-- seed.sql +TRUNCATE public.users, public.posts RESTART IDENTITY CASCADE; + +INSERT INTO public.users (id, email, name) VALUES + ('11111111-1111-1111-1111-111111111111', 'alice@example.com', 'Alice'), + ('22222222-2222-2222-2222-222222222222', 'bob@example.com', 'Bob'); + +INSERT INTO public.posts (user_id, title) VALUES + ('11111111-1111-1111-1111-111111111111', 'Hello World'), + ('22222222-2222-2222-2222-222222222222', 'My First Post'); +``` + +This is the simplest approach for development - it clears all data and re-inserts fresh. + +#### Pattern 2: Upsert (INSERT ... 
ON CONFLICT) + +```sql +-- seed.sql +INSERT INTO public.users (id, email, name) VALUES + ('11111111-1111-1111-1111-111111111111', 'alice@example.com', 'Alice'), + ('22222222-2222-2222-2222-222222222222', 'bob@example.com', 'Bob') +ON CONFLICT (id) DO UPDATE SET + email = EXCLUDED.email, + name = EXCLUDED.name; +``` + +This approach updates existing rows and inserts new ones, preserving any additional data. + +#### Pattern 3: Delete then Insert + +```sql +-- seed.sql +DELETE FROM public.posts WHERE user_id IN ( + '11111111-1111-1111-1111-111111111111', + '22222222-2222-2222-2222-222222222222' +); +DELETE FROM public.users WHERE id IN ( + '11111111-1111-1111-1111-111111111111', + '22222222-2222-2222-2222-222222222222' +); + +INSERT INTO public.users (id, email, name) VALUES + ('11111111-1111-1111-1111-111111111111', 'alice@example.com', 'Alice'), + ('22222222-2222-2222-2222-222222222222', 'bob@example.com', 'Bob'); +``` + +This approach is more surgical - only removes specific seed data before re-inserting. + +### Why We Don't Auto-Truncate + +1. **User data preservation** - You might have manually added data you want to keep +2. **Flexibility** - Different projects need different strategies +3. **Explicit is better** - The seed file clearly shows what will happen +4. **Production safety** - Same seed files can be used in different contexts + +--- + +## Original Investigation: Why Seeds Weren't Re-Applying + +### Problem + +When editing `seed.sql` during `supabase dev`, the file change was detected but the seed data was not actually re-applied. The output showed: + +``` +[dev] Seed file change detected: supabase/seed.sql +[dev] Reseeding database... +Updating seed hash to supabase/seed.sql... +[dev] Reseed complete +``` + +Notice "Updating seed hash" instead of "Seeding data from" - this was the key symptom. 
+ +### Root Cause + +The issue was in `pkg/migration/file.go`: + +```go +func (m *SeedFile) ExecBatchWithCache(ctx context.Context, conn *pgx.Conn, fsys fs.FS) error { + lines, err := parseFile(m.Path, fsys) + // ... + batch := pgx.Batch{} + if !m.Dirty { // <-- KEY LINE + for _, line := range lines { + batch.Queue(line) // SQL only queued if NOT dirty + } + } + batch.Queue(UPSERT_SEED_FILE, m.Path, m.Hash) // Hash always updated + // ... +} +``` + +**When `Dirty` is `true` (file was modified), the SQL statements were NOT executed - only the hash was updated.** + +### Why This Design Exists + +This behavior is intentional for `supabase db push` and `supabase start`: + +- When you pull a project that was already seeded on another machine, you don't want to re-run seeds +- You just want to mark them as "known" by updating the hash +- This prevents duplicate data or conflicts + +### The Fix + +For `supabase dev`, we bypass `GetPendingSeeds` entirely and always execute seed SQL: + +```go +// internal/dev/seed.go + +// executeSeedForDev always executes the seed SQL and updates the hash. +// This differs from SeedFile.ExecBatchWithCache which skips SQL execution for "dirty" seeds. +func executeSeedForDev(ctx context.Context, conn *pgx.Conn, seed *migration.SeedFile, fsys afero.Fs) error { + f, err := fsys.Open(seed.Path) + // ... + lines, err := parser.SplitAndTrim(f) + // ... + + // Build batch: all SQL statements + hash update + batch := pgx.Batch{} + for _, line := range lines { + batch.Queue(line) // Always execute SQL + } + batch.Queue(migration.UPSERT_SEED_FILE, seed.Path, seed.Hash) + + return conn.SendBatch(ctx, &batch).Close() +} +``` + +This keeps the existing `pkg/migration` code unchanged for `db push` and `start`, while giving dev mode the "always re-seed" behavior users expect. 
+ +## Behavior Comparison + +| Command | Seed Behavior | +|---------|---------------| +| `supabase start` | Runs seeds once, skips if already applied (hash matches) | +| `supabase db push` | Runs seeds once, skips if already applied (hash matches) | +| `supabase dev` | Always re-runs seeds on file change | diff --git a/cmd/db.go b/cmd/db.go index 94af92908..9b419213b 100644 --- a/cmd/db.go +++ b/cmd/db.go @@ -137,16 +137,17 @@ var ( }, } - dryRun bool - includeAll bool - includeRoles bool - includeSeed bool + dryRun bool + includeAll bool + includeRoles bool + includeSeed bool + skipDriftCheck bool dbPushCmd = &cobra.Command{ Use: "push", Short: "Push new migrations to the remote database", RunE: func(cmd *cobra.Command, args []string) error { - return push.Run(cmd.Context(), dryRun, includeAll, includeRoles, includeSeed, flags.DbConfig, afero.NewOsFs()) + return push.Run(cmd.Context(), dryRun, includeAll, includeRoles, includeSeed, skipDriftCheck, flags.DbConfig, afero.NewOsFs()) }, } @@ -289,6 +290,7 @@ func init() { pushFlags.BoolVar(&includeRoles, "include-roles", false, "Include custom roles from "+utils.CustomRolesPath+".") pushFlags.BoolVar(&includeSeed, "include-seed", false, "Include seed data from your config.") pushFlags.BoolVar(&dryRun, "dry-run", false, "Print the migrations that would be applied, but don't actually apply them.") + pushFlags.BoolVar(&skipDriftCheck, "skip-drift-check", false, "Skip checking for uncommitted local schema changes.") pushFlags.String("db-url", "", "Pushes to the database specified by the connection string (must be percent-encoded).") pushFlags.Bool("linked", true, "Pushes to the linked project.") pushFlags.Bool("local", false, "Pushes to the local database.") diff --git a/cmd/dev.go b/cmd/dev.go index f700e3fba..732ca65c6 100644 --- a/cmd/dev.go +++ b/cmd/dev.go @@ -1,11 +1,16 @@ package cmd import ( + "os" + "github.com/spf13/afero" "github.com/spf13/cobra" "github.com/supabase/cli/internal/dev" + "golang.org/x/term" ) 
+var skipOnboarding bool + var devCmd = &cobra.Command{ GroupID: groupLocalDev, Use: "dev", @@ -13,6 +18,14 @@ var devCmd = &cobra.Command{ Long: `Start a development session that watches for file changes and automatically applies them to your local environment. +If no Supabase project exists, dev will guide you through setup: + 1. Initialize config.toml if missing + 2. Optionally link to a remote Supabase project + 3. Pull schema/functions from remote if linked + 4. Start local development environment + +Use --skip-onboarding to bypass the setup wizard. + WORKFLOWS: schemas Watch schema files and auto-apply changes to local database @@ -48,10 +61,15 @@ DEBUG LOGGING: Press Ctrl+C to stop the development session.`, RunE: func(cmd *cobra.Command, args []string) error { - return dev.Run(cmd.Context(), afero.NewOsFs()) + opts := dev.RunOptions{ + SkipOnboarding: skipOnboarding, + Interactive: term.IsTerminal(int(os.Stdin.Fd())), + } + return dev.Run(cmd.Context(), afero.NewOsFs(), opts) }, } func init() { + devCmd.Flags().BoolVar(&skipOnboarding, "skip-onboarding", false, "Skip the interactive setup wizard") rootCmd.AddCommand(devCmd) } diff --git a/internal/bootstrap/bootstrap.go b/internal/bootstrap/bootstrap.go index 8a07e2530..cf0ca71fb 100644 --- a/internal/bootstrap/bootstrap.go +++ b/internal/bootstrap/bootstrap.go @@ -121,7 +121,8 @@ func Run(ctx context.Context, starter StarterTemplate, fsys afero.Fs, options .. 
} policy.Reset() if err := backoff.RetryNotify(func() error { - return push.Run(ctx, false, false, true, true, config, fsys) + // Skip drift check during bootstrap since we're setting up a new project + return push.Run(ctx, false, false, true, true, true, config, fsys) }, policy, utils.NewErrorCallback()); err != nil { return err } diff --git a/internal/db/push/drift.go b/internal/db/push/drift.go new file mode 100644 index 000000000..b717fe555 --- /dev/null +++ b/internal/db/push/drift.go @@ -0,0 +1,202 @@ +package push + +import ( + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + tea "github.com/charmbracelet/bubbletea" + "github.com/go-errors/errors" + "github.com/jackc/pgconn" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/diff" + "github.com/supabase/cli/internal/db/start" + "github.com/supabase/cli/internal/migration/new" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/pkg/parser" +) + +// DriftResult contains drift detection outcome +type DriftResult struct { + HasDrift bool + DiffSQL string + Drops []string // DROP statements found +} + +// DriftAction represents user's choice when drift is detected +type DriftAction int + +const ( + DriftActionCreateMigration DriftAction = iota + DriftActionContinue + DriftActionCancel +) + +// https://github.com/djrobstep/migra/blob/master/migra/statements.py#L6 +var dropStatementPattern = regexp.MustCompile(`(?i)drop\s+`) + +// CheckLocalDrift compares local database state against what migrations would produce. +// It creates a shadow database, applies all migrations, then diffs local DB against shadow. +// Returns the diff SQL (empty if no drift). +func CheckLocalDrift(ctx context.Context, fsys afero.Fs) (*DriftResult, error) { + fmt.Fprintln(os.Stderr, "Checking for uncommitted schema changes...") + + // 1. 
Create shadow database + shadow, err := diff.CreateShadowDatabase(ctx, utils.Config.Db.ShadowPort) + if err != nil { + return nil, errors.Errorf("failed to create shadow database: %w", err) + } + defer utils.DockerRemove(shadow) + + // 2. Wait for shadow to be healthy + if err := start.WaitForHealthyService(ctx, utils.Config.Db.HealthTimeout, shadow); err != nil { + return nil, errors.Errorf("shadow database unhealthy: %w", err) + } + + // 3. Apply migrations to shadow (this is the "expected" state) + if err := diff.MigrateShadowDatabase(ctx, shadow, fsys); err != nil { + return nil, errors.Errorf("failed to migrate shadow: %w", err) + } + + // 4. Configure connections + localConfig := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.Port, + User: "postgres", + Password: utils.Config.Db.Password, + Database: "postgres", + } + + shadowConfig := pgconn.Config{ + Host: utils.Config.Hostname, + Port: utils.Config.Db.ShadowPort, + User: "postgres", + Password: utils.Config.Db.Password, + Database: "postgres", + } + + // 5. 
Diff: shadow (source/expected) vs local (target/actual) + // This gives us SQL to transform shadow -> local + // i.e., the changes that exist in local but not in migrations + diffSQL, err := diff.DiffPgDelta(ctx, shadowConfig, localConfig, nil) + if err != nil { + return nil, errors.Errorf("failed to compute drift: %w", err) + } + + result := &DriftResult{ + HasDrift: strings.TrimSpace(diffSQL) != "", + DiffSQL: diffSQL, + } + + if result.HasDrift { + result.Drops = findDropStatements(diffSQL) + } + + return result, nil +} + +// findDropStatements extracts DROP statements from SQL +func findDropStatements(sql string) []string { + lines, err := parser.SplitAndTrim(strings.NewReader(sql)) + if err != nil { + return nil + } + var drops []string + for _, line := range lines { + if dropStatementPattern.MatchString(line) { + drops = append(drops, line) + } + } + return drops +} + +// FormatDriftWarning formats the warning message for display +func FormatDriftWarning(result *DriftResult) string { + var sb strings.Builder + + sb.WriteString("\n") + sb.WriteString(utils.Yellow("Warning:") + " Local database has uncommitted schema changes!\n\n") + sb.WriteString("The following changes exist in your local database but NOT in your migration files:\n\n") + + // Format the SQL with indentation + for _, line := range strings.Split(strings.TrimSpace(result.DiffSQL), "\n") { + if strings.TrimSpace(line) != "" { + sb.WriteString(" " + line + "\n") + } + } + + sb.WriteString("\n") + sb.WriteString("These changes will NOT be applied to the remote database.\n") + + return sb.String() +} + +// PromptDriftAction asks user what to do about detected drift +func PromptDriftAction(ctx context.Context) (DriftAction, error) { + items := []utils.PromptItem{ + {Summary: "Create a migration with these changes", Index: int(DriftActionCreateMigration)}, + {Summary: "Continue pushing without these changes", Index: int(DriftActionContinue)}, + {Summary: "Cancel", Index: int(DriftActionCancel)}, + } + 
+ choice, err := utils.PromptChoice(ctx, "What would you like to do?", items, tea.WithOutput(os.Stderr)) + if err != nil { + if errors.Is(err, context.Canceled) { + return DriftActionCancel, nil + } + return DriftActionCancel, err + } + + return DriftAction(choice.Index), nil +} + +// CreateMigrationFromDrift creates a migration file using the already-computed SQL. +// Returns the path to the created migration file. +func CreateMigrationFromDrift(ctx context.Context, sql string, fsys afero.Fs) (string, error) { + // Prompt for migration name + console := utils.NewConsole() + name, err := console.PromptText(ctx, "Migration name: ") + if err != nil { + return "", errors.Errorf("failed to read migration name: %w", err) + } + + name = strings.TrimSpace(name) + if name == "" { + return "", errors.New("migration name cannot be empty") + } + + // Sanitize the name (replace spaces with underscores, remove special chars) + name = sanitizeMigrationName(name) + + // Generate the migration path + path := new.GetMigrationPath(utils.GetCurrentTimestamp(), name) + + // Ensure migrations directory exists + if err := utils.MkdirIfNotExistFS(fsys, filepath.Dir(path)); err != nil { + return "", errors.Errorf("failed to create migrations directory: %w", err) + } + + // Write the migration file + if err := utils.WriteFile(path, []byte(sql), fsys); err != nil { + return "", errors.Errorf("failed to write migration file: %w", err) + } + + return path, nil +} + +// sanitizeMigrationName cleans up a migration name for use in a filename +func sanitizeMigrationName(name string) string { + // Replace spaces with underscores + name = strings.ReplaceAll(name, " ", "_") + // Remove any characters that aren't alphanumeric or underscores + var result strings.Builder + for _, r := range name { + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_' { + result.WriteRune(r) + } + } + return result.String() +} diff --git a/internal/db/push/push.go 
b/internal/db/push/push.go index 6960702d1..c067ecbad 100644 --- a/internal/db/push/push.go +++ b/internal/db/push/push.go @@ -17,10 +17,56 @@ import ( "github.com/supabase/cli/pkg/vault" ) -func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, includeSeed bool, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { +func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles, includeSeed, skipDriftCheck bool, config pgconn.Config, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { if dryRun { fmt.Fprintln(os.Stderr, "DRY RUN: migrations will *not* be pushed to the database.") } + + // Check for local drift before connecting to remote + // Only check when: + // 1. Not skipped via flag + // 2. Not pushing to local database (drift is only meaningful for remote) + // 3. Local database is running + var newMigration string + if !skipDriftCheck && !utils.IsLocalDatabase(config) { + if err := utils.AssertSupabaseDbIsRunning(); err == nil { + result, err := CheckLocalDrift(ctx, fsys) + if err != nil { + // Non-fatal warning - don't block push if drift check fails + fmt.Fprintf(os.Stderr, "%s Failed to check for drift: %s\n\n", utils.Yellow("Warning:"), err.Error()) + } else if result.HasDrift { + fmt.Fprint(os.Stderr, FormatDriftWarning(result)) + + if dryRun { + // In dry-run mode, just show what would happen + fmt.Fprintln(os.Stderr, "\nWould prompt to create migration or continue.") + } else { + action, err := PromptDriftAction(ctx) + if err != nil { + return err + } + switch action { + case DriftActionCreateMigration: + // Create migration using SQL already in memory (no redundant diff!) 
+						path, err := CreateMigrationFromDrift(ctx, result.DiffSQL, fsys)
+						if err != nil {
+							return err
+						}
+						newMigration = path
+						fmt.Fprintf(os.Stderr, "\nCreated migration: %s\n\n", utils.Bold(path))
+					case DriftActionContinue:
+						// Continue without creating migration
+						fmt.Fprintln(os.Stderr)
+					case DriftActionCancel:
+						return errors.New(context.Canceled)
+					}
+				}
+			} else {
+				fmt.Fprintf(os.Stderr, "%s No uncommitted schema changes detected.\n\n", utils.Green("✓"))
+			}
+		}
+	}
+
 	conn, err := utils.ConnectByConfig(ctx, config, options...)
 	if err != nil {
 		return err
@@ -32,6 +78,11 @@ func Run(ctx context.Context, dryRun, ignoreVersionMismatch bool, includeRoles,
 	} else if pending, err = up.GetPendingMigrations(ctx, ignoreVersionMismatch, conn, fsys); err != nil {
 		return err
 	}
+
+	// If we created a new migration from drift, add it to the pending list
+	if newMigration != "" {
+		pending = append(pending, newMigration)
+	}
 	var seeds []migration.SeedFile
 	if includeSeed {
 		// TODO: flag should override config but we don't resolve glob paths when seed is disabled.
diff --git a/internal/db/push/push_test.go b/internal/db/push/push_test.go
index b5d0d2c7f..3c6279475 100644
--- a/internal/db/push/push_test.go
+++ b/internal/db/push/push_test.go
@@ -40,7 +40,7 @@ func TestMigrationPush(t *testing.T) {
 		conn.Query(migration.LIST_MIGRATION_VERSION).
 			Reply("SELECT 0")
 		// Run test
-		err := Run(context.Background(), true, false, true, true, dbConfig, fsys, conn.Intercept)
+		err := Run(context.Background(), true, false, true, true, true, dbConfig, fsys, conn.Intercept)
 		// Check error
 		assert.NoError(t, err)
 	})
@@ -54,7 +54,7 @@ func TestMigrationPush(t *testing.T) {
 		conn.Query(migration.LIST_MIGRATION_VERSION).
Reply("SELECT 0") // Run test - err := Run(context.Background(), false, false, false, false, dbConfig, fsys, conn.Intercept) + err := Run(context.Background(), false, false, false, false, true, dbConfig, fsys, conn.Intercept) // Check error assert.NoError(t, err) }) @@ -63,7 +63,7 @@ func TestMigrationPush(t *testing.T) { // Setup in-memory fs fsys := afero.NewMemMapFs() // Run test - err := Run(context.Background(), false, false, false, false, pgconn.Config{}, fsys) + err := Run(context.Background(), false, false, false, false, true, pgconn.Config{}, fsys) // Check error assert.ErrorContains(t, err, "invalid port (outside range)") }) @@ -77,7 +77,7 @@ func TestMigrationPush(t *testing.T) { conn.Query(migration.LIST_MIGRATION_VERSION). ReplyError(pgerrcode.InvalidCatalogName, `database "target" does not exist`) // Run test - err := Run(context.Background(), false, false, false, false, pgconn.Config{ + err := Run(context.Background(), false, false, false, false, true, pgconn.Config{ Host: "db.supabase.co", Port: 5432, User: "admin", @@ -104,7 +104,7 @@ func TestMigrationPush(t *testing.T) { Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). ReplyError(pgerrcode.NotNullViolation, `null value in column "version" of relation "schema_migrations"`) // Run test - err := Run(context.Background(), false, false, false, false, dbConfig, fsys, conn.Intercept) + err := Run(context.Background(), false, false, false, false, true, dbConfig, fsys, conn.Intercept) // Check error assert.ErrorContains(t, err, `ERROR: null value in column "version" of relation "schema_migrations" (SQLSTATE 23502)`) assert.ErrorContains(t, err, "At statement: 0\n"+migration.INSERT_MIGRATION_VERSION) @@ -128,7 +128,7 @@ func TestPushAll(t *testing.T) { Query(migration.INSERT_MIGRATION_VERSION, "0", "test", nil). 
Reply("INSERT 0 1") // Run test - err := Run(context.Background(), false, false, true, true, dbConfig, fsys, conn.Intercept) + err := Run(context.Background(), false, false, true, true, true, dbConfig, fsys, conn.Intercept) // Check error assert.NoError(t, err) }) @@ -145,7 +145,7 @@ func TestPushAll(t *testing.T) { conn.Query(migration.LIST_MIGRATION_VERSION). Reply("SELECT 0") // Run test - err := Run(context.Background(), false, false, true, true, dbConfig, fsys, conn.Intercept) + err := Run(context.Background(), false, false, true, true, true, dbConfig, fsys, conn.Intercept) // Check error assert.ErrorIs(t, err, context.Canceled) }) @@ -161,7 +161,7 @@ func TestPushAll(t *testing.T) { conn.Query(migration.LIST_MIGRATION_VERSION). Reply("SELECT 0") // Run test - err := Run(context.Background(), false, false, true, false, dbConfig, fsys, conn.Intercept) + err := Run(context.Background(), false, false, true, false, true, dbConfig, fsys, conn.Intercept) // Check error assert.ErrorIs(t, err, os.ErrPermission) }) @@ -191,7 +191,7 @@ func TestPushAll(t *testing.T) { Query(migration.UPSERT_SEED_FILE, seedPath, digest). 
ReplyError(pgerrcode.NotNullViolation, `null value in column "hash" of relation "seed_files"`) // Run test - err := Run(context.Background(), false, false, false, true, dbConfig, fsys, conn.Intercept) + err := Run(context.Background(), false, false, false, true, true, dbConfig, fsys, conn.Intercept) // Check error assert.ErrorContains(t, err, `ERROR: null value in column "hash" of relation "seed_files" (SQLSTATE 23502)`) }) diff --git a/internal/dev/dev.go b/internal/dev/dev.go index 8b049b901..3711ed8af 100644 --- a/internal/dev/dev.go +++ b/internal/dev/dev.go @@ -12,24 +12,41 @@ import ( "github.com/go-errors/errors" "github.com/spf13/afero" + "github.com/supabase/cli/internal/dev/onboarding" "github.com/supabase/cli/internal/start" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/flags" ) -// Run starts the dev session -func Run(ctx context.Context, fsys afero.Fs) error { - // Load config first - if err := flags.LoadConfig(fsys); err != nil { - return err +// RunOptions configures the dev command behavior +type RunOptions struct { + SkipOnboarding bool + Interactive bool +} + +// Run starts the dev session with optional onboarding +func Run(ctx context.Context, fsys afero.Fs, opts RunOptions) error { + // Step 1: Run onboarding if not skipped + if !opts.SkipOnboarding { + onboardingOpts := onboarding.Options{ + Interactive: opts.Interactive, + } + if _, err := onboarding.Run(ctx, fsys, onboardingOpts); err != nil { + return err + } + } else { + // Skip onboarding, just load config directly + if err := flags.LoadConfig(fsys); err != nil { + return err + } } - // Ensure local database is running + // Step 2: Ensure local database is running if err := ensureDbRunning(ctx, fsys); err != nil { return err } - // Create and run the dev session + // Step 3: Create and run the dev session session := NewSession(ctx, fsys) return session.Run() } diff --git a/internal/dev/onboarding/conflict.go b/internal/dev/onboarding/conflict.go new file 
mode 100644 index 000000000..40d2e47a6 --- /dev/null +++ b/internal/dev/onboarding/conflict.go @@ -0,0 +1,44 @@ +package onboarding + +import ( + "context" + + "github.com/supabase/cli/internal/utils" +) + +// ConflictAction represents how to handle local/remote conflicts +type ConflictAction int + +const ( + ConflictMerge ConflictAction = iota // Pull remote, keep local migrations + ConflictReplace // Replace local with remote + ConflictKeepLocal // Skip pull, keep local +) + +// PromptConflictResolution asks user how to handle conflicts between local and remote +func PromptConflictResolution(ctx context.Context) (ConflictAction, error) { + items := []utils.PromptItem{ + { + Summary: "Pull remote schema as new migration", + Details: "keeps existing local migrations", + Index: int(ConflictMerge), + }, + { + Summary: "Replace local migrations with remote", + Details: "removes existing local migrations", + Index: int(ConflictReplace), + }, + { + Summary: "Keep local, skip remote pull", + Details: "no changes to local files", + Index: int(ConflictKeepLocal), + }, + } + + choice, err := utils.PromptChoice(ctx, "Local migrations already exist. 
How would you like to handle the remote schema?", items) + if err != nil { + return ConflictKeepLocal, err + } + + return ConflictAction(choice.Index), nil +} diff --git a/internal/dev/onboarding/detect.go b/internal/dev/onboarding/detect.go new file mode 100644 index 000000000..e017731c5 --- /dev/null +++ b/internal/dev/onboarding/detect.go @@ -0,0 +1,67 @@ +package onboarding + +import ( + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +// DetectState checks the current project state +func DetectState(fsys afero.Fs) *State { + return &State{ + ConfigExists: ConfigExists(fsys), + ProjectLinked: ProjectLinked(fsys), + HasMigrations: HasMigrations(fsys), + HasFunctions: HasFunctions(fsys), + } +} + +// ConfigExists checks if supabase/config.toml exists +func ConfigExists(fsys afero.Fs) bool { + exists, _ := afero.Exists(fsys, utils.ConfigPath) + return exists +} + +// ProjectLinked checks if project is linked to remote +func ProjectLinked(fsys afero.Fs) bool { + err := flags.LoadProjectRef(fsys) + return err == nil +} + +// HasMigrations checks if local migrations exist +func HasMigrations(fsys afero.Fs) bool { + exists, err := afero.DirExists(fsys, utils.MigrationsDir) + if err != nil || !exists { + return false + } + entries, err := afero.ReadDir(fsys, utils.MigrationsDir) + if err != nil { + return false + } + // Check for .sql files + for _, entry := range entries { + if !entry.IsDir() && len(entry.Name()) > 4 && entry.Name()[len(entry.Name())-4:] == ".sql" { + return true + } + } + return false +} + +// HasFunctions checks if local functions exist +func HasFunctions(fsys afero.Fs) bool { + exists, err := afero.DirExists(fsys, utils.FunctionsDir) + if err != nil || !exists { + return false + } + entries, err := afero.ReadDir(fsys, utils.FunctionsDir) + if err != nil { + return false + } + // Check for function directories (excluding import_map.json and .env) + for _, entry := range 
entries { + if entry.IsDir() { + return true + } + } + return false +} diff --git a/internal/dev/onboarding/flows.go b/internal/dev/onboarding/flows.go new file mode 100644 index 000000000..e1e77b862 --- /dev/null +++ b/internal/dev/onboarding/flows.go @@ -0,0 +1,134 @@ +package onboarding + +import ( + "context" + "fmt" + "os" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/db/pull" + "github.com/supabase/cli/internal/functions/download" + _init "github.com/supabase/cli/internal/init" + "github.com/supabase/cli/internal/link" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +// LinkChoice represents user's choice about linking +type LinkChoice int + +const ( + LinkChoiceYes LinkChoice = iota + LinkChoiceNo +) + +// RunInitFlow runs the project initialization +func RunInitFlow(ctx context.Context, fsys afero.Fs) error { + params := utils.InitParams{} + return _init.Run(ctx, fsys, true, params) +} + +// PromptLinkChoice asks if user wants to link to remote project +func PromptLinkChoice(ctx context.Context) (LinkChoice, error) { + items := []utils.PromptItem{ + {Summary: "Yes, link to existing project", Index: int(LinkChoiceYes)}, + {Summary: "No, I'm starting fresh", Index: int(LinkChoiceNo)}, + } + + choice, err := utils.PromptChoice(ctx, "Do you have a remote Supabase project to connect?", items) + if err != nil { + return LinkChoiceNo, err + } + + return LinkChoice(choice.Index), nil +} + +// RunLinkFlow prompts for project selection and links +func RunLinkFlow(ctx context.Context, fsys afero.Fs) error { + // Use existing project selection from flags package + if err := flags.PromptProjectRef(ctx, "Select a project to link:"); err != nil { + return err + } + + // Run link with selected project + return link.Run(ctx, flags.ProjectRef, false, fsys) +} + +// RunPullFlow pulls everything from the linked remote project: +// - Schema/migrations +// - Edge Functions +// - 
Storage config (via link, already done) +// - Auth config (via link, already done) +func RunPullFlow(ctx context.Context, fsys afero.Fs) error { + projectRef := flags.ProjectRef + if projectRef == "" { + return errors.New("No project linked. Run 'supabase link' first.") + } + + // Pull schema/migrations + fmt.Fprintln(os.Stderr, "Pulling schema from remote database...") + if err := pullSchema(ctx, fsys); err != nil { + fmt.Fprintf(os.Stderr, " %s Schema pull: %v\n", utils.Yellow("Warning:"), err) + } else { + fmt.Fprintln(os.Stderr, " "+utils.Green("✓")+" Schema pulled") + } + + // Pull Edge Functions + fmt.Fprintln(os.Stderr, "Pulling Edge Functions...") + if count, err := pullFunctions(ctx, fsys); err != nil { + fmt.Fprintf(os.Stderr, " %s Functions pull: %v\n", utils.Yellow("Warning:"), err) + } else if count > 0 { + fmt.Fprintf(os.Stderr, " %s Edge Functions pulled (%d found)\n", utils.Green("✓"), count) + } else { + fmt.Fprintln(os.Stderr, " "+utils.Green("✓")+" No Edge Functions found") + } + + // Storage and Auth config are already synced during link.Run() + fmt.Fprintln(os.Stderr, " "+utils.Green("✓")+" Storage config synced") + fmt.Fprintln(os.Stderr, " "+utils.Green("✓")+" Auth config synced") + + return nil +} + +// pullSchema pulls the database schema from remote +func pullSchema(ctx context.Context, fsys afero.Fs) error { + // Get database config for connection + config := flags.DbConfig + + // Run pull with default name + return pull.Run(ctx, nil, config, "remote_schema", fsys) +} + +// pullFunctions pulls all Edge Functions from remote +func pullFunctions(ctx context.Context, fsys afero.Fs) (int, error) { + projectRef := flags.ProjectRef + + // List functions from remote + resp, err := utils.GetSupabase().V1ListAllFunctionsWithResponse(ctx, projectRef) + if err != nil { + return 0, errors.Errorf("failed to list functions: %w", err) + } + if resp.JSON200 == nil { + return 0, errors.Errorf("unexpected response: %s", string(resp.Body)) + } + + functions 
:= *resp.JSON200 + if len(functions) == 0 { + return 0, nil + } + + // Download each function + downloaded := 0 + for _, fn := range functions { + fmt.Fprintf(os.Stderr, " Downloading %s...\n", utils.Aqua(fn.Slug)) + // Use server-side unbundle (no Docker required) + if err := download.Run(ctx, fn.Slug, projectRef, false, false, fsys); err != nil { + fmt.Fprintf(os.Stderr, " %s Failed to download %s: %v\n", utils.Yellow("Warning:"), fn.Slug, err) + continue + } + downloaded++ + } + + return downloaded, nil +} diff --git a/internal/dev/onboarding/onboarding.go b/internal/dev/onboarding/onboarding.go new file mode 100644 index 000000000..3c2b12cb4 --- /dev/null +++ b/internal/dev/onboarding/onboarding.go @@ -0,0 +1,113 @@ +package onboarding + +import ( + "context" + "fmt" + "os" + + "github.com/go-errors/errors" + "github.com/spf13/afero" + "github.com/supabase/cli/internal/utils" + "github.com/supabase/cli/internal/utils/flags" +) + +// State represents the current project setup state +type State struct { + ConfigExists bool // supabase/config.toml exists + ProjectLinked bool // .temp/project-ref exists + HasMigrations bool // migrations/*.sql exist + HasFunctions bool // functions/* exist +} + +// Options configures onboarding behavior +type Options struct { + Interactive bool +} + +// Result contains the outcome of onboarding +type Result struct { + JustInitialized bool // Whether we just ran init + JustLinked bool // Whether we just ran link +} + +// Run executes the onboarding flow based on current state +// Returns a Result indicating what actions were taken +func Run(ctx context.Context, fsys afero.Fs, opts Options) (*Result, error) { + result := &Result{} + + // Step 1: Detect current state + state := DetectState(fsys) + + // Step 2: Init if needed + if !state.ConfigExists { + if !opts.Interactive { + return nil, errors.New("No Supabase project found. 
Run 'supabase init' first or use 'supabase dev' interactively.") + } + + fmt.Fprintln(os.Stderr, utils.Bold("No Supabase project found. Let's set one up!")) + fmt.Fprintln(os.Stderr) + + if err := RunInitFlow(ctx, fsys); err != nil { + return nil, err + } + + result.JustInitialized = true + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, "Finished "+utils.Aqua("supabase init")+".") + fmt.Fprintln(os.Stderr) + } + + // Reload config after potential init + if err := flags.LoadConfig(fsys); err != nil { + return nil, err + } + + // Step 3: Offer to link ONLY after fresh init (not on every run) + // Re-detect state since we may have just initialized + state = DetectState(fsys) + + if !state.ProjectLinked && opts.Interactive && result.JustInitialized { + choice, err := PromptLinkChoice(ctx) + if err != nil { + return nil, err + } + + if choice == LinkChoiceYes { + if err := RunLinkFlow(ctx, fsys); err != nil { + return nil, err + } + result.JustLinked = true + + // Step 4: Pull everything from remote after linking + fmt.Fprintln(os.Stderr) + fmt.Fprintln(os.Stderr, utils.Bold("Pulling from remote project...")) + fmt.Fprintln(os.Stderr) + + // Detect conflicts before pulling + if state.HasMigrations { + action, err := PromptConflictResolution(ctx) + if err != nil { + return nil, err + } + + switch action { + case ConflictKeepLocal: + fmt.Fprintln(os.Stderr, "Keeping local migrations, skipping remote pull.") + return result, nil + case ConflictReplace: + // TODO: Clear local migrations before pulling + fmt.Fprintln(os.Stderr, "Replacing local migrations with remote schema.") + case ConflictMerge: + fmt.Fprintln(os.Stderr, "Pulling remote schema as new migration.") + } + } + + if err := RunPullFlow(ctx, fsys); err != nil { + // Log warning but continue - partial setup is better than failure + fmt.Fprintf(os.Stderr, "%s Failed to pull from remote: %v\n", utils.Yellow("Warning:"), err) + } + } + } + + return result, nil +}