diff --git a/packages/1-framework/1-core/framework-components/src/control-migration-types.ts b/packages/1-framework/1-core/framework-components/src/control-migration-types.ts index bf802d5235..bdad9e2832 100644 --- a/packages/1-framework/1-core/framework-components/src/control-migration-types.ts +++ b/packages/1-framework/1-core/framework-components/src/control-migration-types.ts @@ -23,8 +23,65 @@ import type { TargetBoundComponentDescriptor } from './framework-components'; * - 'additive': Adds new structures without modifying existing ones (safe) * - 'widening': Relaxes constraints or expands types (generally safe) * - 'destructive': Removes or alters existing structures (potentially unsafe) + * - 'data': Data transformation operation (e.g., backfill, type conversion) */ -export type MigrationOperationClass = 'additive' | 'widening' | 'destructive'; +export type MigrationOperationClass = 'additive' | 'widening' | 'destructive' | 'data'; + +// ============================================================================ +// Data Transform Operation +// ============================================================================ + +/** + * A lowered query statement as stored in ops.json. + * Contains the SQL string and parameter values — ready for execution. + * Lowering from query builder AST to SQL happens at verify time. + */ +export interface SerializedQueryPlan { + readonly sql: string; + readonly params: readonly unknown[]; +} + +/** + * A data transform operation within a migration edge. + * + * Data transforms are authored in TypeScript using the query builder, + * serialized to JSON ASTs at verification time, and rendered to SQL + * by the target adapter at apply time. + * + * The `name` serves as the invariant identity — it's recorded in the + * ledger and used for invariant-aware routing via environment refs. + * + * In draft state (before verification), `check` and `run` are null. + * After verification, they contain the serialized query ASTs. 
+ */ +export interface DataTransformOperation extends MigrationPlanOperation { + readonly operationClass: 'data'; + /** + * The invariant name for this data transform. + * Recorded in the ledger on successful edge completion. + * Used by environment refs to declare required invariants. + */ + readonly name: string; + /** + * Path to the TypeScript source file that produced this operation. + * Not part of edgeId computation — for traceability only. + */ + readonly source: string; + /** + * Serialized check query plan, or a boolean literal. + * - SerializedQueryPlan: describes violations; empty result = already applied. + * - false: always run (no check). + * - true: always skip. + * - null: not yet serialized (draft state). + */ + readonly check: SerializedQueryPlan | boolean | null; + /** + * Serialized run query plans. + * - Array of serialized query plans to execute sequentially. + * - null: not yet serialized (draft state). + */ + readonly run: readonly SerializedQueryPlan[] | null; +} /** * Policy defining which operation classes are allowed during a migration. @@ -37,6 +94,16 @@ export interface MigrationOperationPolicy { // Plan Types (Display-Oriented) // ============================================================================ +/** + * Minimal shape for operation descriptors at the framework level. + * Targets produce richer types; this captures just enough for the + * framework to scaffold migration.ts files and pass descriptors through. + */ +export interface OperationDescriptor { + readonly kind: string; + readonly [key: string]: unknown; +} + /** * A single migration operation for display purposes. * Contains only the fields needed for CLI output (tree view, JSON envelope). @@ -270,4 +337,43 @@ export interface TargetMigrationsCapability< contract: Contract | null, frameworkComponents?: ReadonlyArray>, ): unknown; + + /** + * Plans a migration using the descriptor-based planner. + * Returns operation descriptors and whether data migration is needed. 
+ * The caller decides whether to resolve immediately or scaffold migration.ts. + */ + planWithDescriptors?(context: { + readonly fromContract: Contract | null; + readonly toContract: Contract; + readonly frameworkComponents?: ReadonlyArray< + TargetBoundComponentDescriptor + >; + }): + | { + readonly ok: true; + readonly descriptors: readonly OperationDescriptor[]; + readonly needsDataMigration: boolean; + } + | { + readonly ok: false; + readonly conflicts: readonly MigrationPlannerConflict[]; + }; + + /** + * Resolves operation descriptors into target-specific migration plan operations + * with SQL/DDL, prechecks, and postchecks. Called by `migration verify` to + * serialize migration.ts into ops.json. + */ + resolveDescriptors?( + descriptors: readonly OperationDescriptor[], + context: { + readonly fromContract: Contract | null; + readonly toContract: Contract; + readonly schemaName?: string; + readonly frameworkComponents?: ReadonlyArray< + TargetBoundComponentDescriptor + >; + }, + ): readonly MigrationPlanOperation[]; } diff --git a/packages/1-framework/1-core/framework-components/src/control-result-types.ts b/packages/1-framework/1-core/framework-components/src/control-result-types.ts index 94f969ed8e..71eadbbfe9 100644 --- a/packages/1-framework/1-core/framework-components/src/control-result-types.ts +++ b/packages/1-framework/1-core/framework-components/src/control-result-types.ts @@ -31,7 +31,7 @@ export interface VerifyDatabaseResult { }; } -export interface SchemaIssue { +export interface BaseSchemaIssue { readonly kind: | 'missing_table' | 'missing_column' @@ -57,11 +57,22 @@ export interface SchemaIssue { readonly column?: string; readonly indexOrConstraint?: string; readonly typeName?: string; + readonly dependencyId?: string; readonly expected?: string; readonly actual?: string; readonly message: string; } +export interface EnumValuesChangedIssue { + readonly kind: 'enum_values_changed'; + readonly typeName: string; + readonly addedValues: readonly 
string[]; + readonly removedValues: readonly string[]; + readonly message: string; +} + +export type SchemaIssue = BaseSchemaIssue | EnumValuesChangedIssue; + export interface SchemaVerificationNode { readonly status: 'pass' | 'warn' | 'fail'; readonly kind: string; diff --git a/packages/1-framework/1-core/framework-components/src/exports/control.ts b/packages/1-framework/1-core/framework-components/src/exports/control.ts index fe95de0972..82ccc44c0c 100644 --- a/packages/1-framework/1-core/framework-components/src/exports/control.ts +++ b/packages/1-framework/1-core/framework-components/src/exports/control.ts @@ -15,6 +15,7 @@ export type { ControlTargetInstance, } from '../control-instances'; export type { + DataTransformOperation, MigrationOperationClass, MigrationOperationPolicy, MigrationPlan, @@ -29,10 +30,14 @@ export type { MigrationRunnerFailure, MigrationRunnerResult, MigrationRunnerSuccessValue, + OperationDescriptor, + SerializedQueryPlan, TargetMigrationsCapability, } from '../control-migration-types'; export type { + BaseSchemaIssue, EmitContractResult, + EnumValuesChangedIssue, IntrospectSchemaResult, OperationContext, SchemaIssue, diff --git a/packages/1-framework/3-tooling/cli/package.json b/packages/1-framework/3-tooling/cli/package.json index 81f2b3c423..58f57df0f3 100644 --- a/packages/1-framework/3-tooling/cli/package.json +++ b/packages/1-framework/3-tooling/cli/package.json @@ -53,6 +53,7 @@ "@prisma-next/tsconfig": "workspace:*", "@prisma-next/tsdown": "workspace:*", "@types/node": "catalog:", + "@vitest/ui": "4.0.17", "tsdown": "catalog:", "typescript": "catalog:", "vitest": "catalog:" @@ -94,6 +95,10 @@ "types": "./dist/commands/contract-infer.d.mts", "import": "./dist/commands/contract-infer.mjs" }, + "./commands/migration-new": { + "types": "./dist/commands/migration-new.d.mts", + "import": "./dist/commands/migration-new.mjs" + }, "./commands/migration-plan": { "types": "./dist/commands/migration-plan.d.mts", "import": 
"./dist/commands/migration-plan.mjs" diff --git a/packages/1-framework/3-tooling/cli/src/cli.ts b/packages/1-framework/3-tooling/cli/src/cli.ts index 583b24d00d..cc83a19844 100644 --- a/packages/1-framework/3-tooling/cli/src/cli.ts +++ b/packages/1-framework/3-tooling/cli/src/cli.ts @@ -12,6 +12,7 @@ import { createDbSignCommand } from './commands/db-sign'; import { createDbUpdateCommand } from './commands/db-update'; import { createDbVerifyCommand } from './commands/db-verify'; import { createMigrationApplyCommand } from './commands/migration-apply'; +import { createMigrationNewCommand } from './commands/migration-new'; import { createMigrationPlanCommand } from './commands/migration-plan'; import { createMigrationRefCommand } from './commands/migration-ref'; import { createMigrationShowCommand } from './commands/migration-show'; @@ -227,6 +228,9 @@ migrationCommand.configureHelp({ const migrationPlanCommand = createMigrationPlanCommand(); migrationCommand.addCommand(migrationPlanCommand); +const migrationNewCommand = createMigrationNewCommand(); +migrationCommand.addCommand(migrationNewCommand); + const migrationShowCommand = createMigrationShowCommand(); migrationCommand.addCommand(migrationShowCommand); diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts index b7ddaaf994..593aa5d579 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-apply.ts @@ -1,7 +1,7 @@ import { EMPTY_CONTRACT_HASH } from '@prisma-next/migration-tools/constants'; import { findPathWithDecision } from '@prisma-next/migration-tools/dag'; import { readRefs, resolveRef } from '@prisma-next/migration-tools/refs'; -import type { AttestedMigrationBundle, MigrationGraph } from '@prisma-next/migration-tools/types'; +import type { AttestedMigrationBundle } from '@prisma-next/migration-tools/types'; import { 
MigrationToolsError } from '@prisma-next/migration-tools/types'; import { notOk, ok, type Result } from '@prisma-next/utils/result'; import { Command } from 'commander'; @@ -20,7 +20,8 @@ import { } from '../utils/cli-errors'; import { addGlobalOptions, - loadMigrationBundles, + loadAllBundles, + type MigrationBundleSet, maskConnectionUrl, readContractEnvelope, resolveMigrationPaths, @@ -194,10 +195,15 @@ async function executeMigrationApplyCommand( } // Read migrations and build migration chain model (offline — no DB needed) - let bundles: readonly AttestedMigrationBundle[]; - let graph: MigrationGraph; + let migrations: MigrationBundleSet; try { - ({ bundles, graph } = await loadMigrationBundles(migrationsDir)); + migrations = await loadAllBundles(migrationsDir); + if (migrations.drafts.length > 0 && !flags.quiet) { + ui.warn( + `${migrations.drafts.length} draft migration(s) found: ${migrations.drafts.map((d) => d.dirName).join(', ')}. ` + + "Run 'prisma-next migration verify --dir ' to attest before applying.", + ); + } } catch (error) { if (MigrationToolsError.is(error)) { return notOk(mapMigrationToolsError(error)); @@ -218,7 +224,7 @@ async function executeMigrationApplyCommand( const marker = await client.readMarker(); // --- No attested migrations on disk --- - if (bundles.length === 0) { + if (migrations.attested.length === 0) { if (marker?.storageHash) { return notOk( errorRuntime('Database has state but no migrations exist', { @@ -266,22 +272,27 @@ async function executeMigrationApplyCommand( const markerHash = marker?.storageHash; - if (markerHash !== undefined && !graph.nodes.has(markerHash)) { + if (markerHash !== undefined && !migrations.graph.nodes.has(markerHash)) { return notOk( errorRuntime('Database marker does not match any known migration', { why: `The database marker hash "${markerHash}" is not found in the migration history at ${migrationsRelative}`, fix: 'Ensure the migrations directory matches this database. 
If the database was managed with `db init` or `db update`, run `prisma-next db sign` to update the marker.', - meta: { markerHash, knownNodes: [...graph.nodes] }, + meta: { markerHash, knownNodes: [...migrations.graph.nodes] }, }), ); } - if (!graph.nodes.has(destinationHash)) { + if (!migrations.graph.nodes.has(destinationHash)) { + const matchingDraft = migrations.drafts.find((d) => d.manifest.to === destinationHash); return notOk( errorRuntime('Current contract has no planned migration path', { - why: `Current contract hash "${destinationHash}" is not present in the migration history at ${migrationsRelative}`, - fix: 'Run `prisma-next migration plan` to create a migration for the current contract, then re-run apply.', - meta: { destinationHash, knownNodes: [...graph.nodes] }, + why: matchingDraft + ? `A draft migration exists at "${matchingDraft.dirName}" but has not been attested` + : `Current contract hash "${destinationHash}" is not present in the migration history at ${migrationsRelative}`, + fix: matchingDraft + ? `Run 'prisma-next migration verify --dir ${migrationsRelative}/${matchingDraft.dirName}' to attest, then re-run apply.` + : 'Run `prisma-next migration plan` to create a migration for the current contract, then re-run apply.', + meta: { destinationHash, knownNodes: [...migrations.graph.nodes] }, }), ); } @@ -291,7 +302,7 @@ async function executeMigrationApplyCommand( // "No marker" means the database is fresh — start from the empty contract hash. const originHash = markerHash ?? 
EMPTY_CONTRACT_HASH; - const decision = findPathWithDecision(graph, originHash, destinationHash, refName); + const decision = findPathWithDecision(migrations.graph, originHash, destinationHash, refName); if (!decision) { return notOk( errorRuntime('No migration path from current state to target', { @@ -318,7 +329,7 @@ async function executeMigrationApplyCommand( }); } - const bundleByDir = new Map(bundles.map((b) => [b.dirName, b])); + const bundleByDir = new Map(migrations.attested.map((b) => [b.dirName, b])); const pendingMigrations: MigrationApplyStep[] = []; for (const migration of pendingPath) { const pkg = bundleByDir.get(migration.dirName); @@ -389,7 +400,7 @@ export function createMigrationApplyCommand(): Command { addGlobalOptions(command) .option('--db ', 'Database connection string') .option('--config ', 'Path to prisma-next.config.ts') - .option('--ref ', 'Target ref name from migrations/refs.json') + .option('--ref ', 'Target ref name from migrations/refs/') .action(async (options: MigrationApplyCommandOptions) => { const flags = parseGlobalFlags(options); const startTime = Date.now(); diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts new file mode 100644 index 0000000000..ce56146dff --- /dev/null +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-new.ts @@ -0,0 +1,260 @@ +/** + * `migration new` — scaffolds a migration package with a migration.ts file + * for manual authoring. The user writes operation descriptors and data + * transforms; `migration verify` resolves them to ops.json. 
+ */ + +import { readFileSync } from 'node:fs'; +import type { Contract } from '@prisma-next/contract/types'; +import { EMPTY_CONTRACT_HASH } from '@prisma-next/migration-tools/constants'; +import { findLatestMigration, reconstructGraph } from '@prisma-next/migration-tools/dag'; +import { + formatMigrationDirName, + readMigrationsDir, + writeMigrationPackage, +} from '@prisma-next/migration-tools/io'; +import { scaffoldMigrationTs } from '@prisma-next/migration-tools/migration-ts'; +import type { MigrationManifest } from '@prisma-next/migration-tools/types'; +import { isAttested, MigrationToolsError } from '@prisma-next/migration-tools/types'; +import { notOk, ok, type Result } from '@prisma-next/utils/result'; +import { Command } from 'commander'; +import { join, relative, resolve } from 'pathe'; +import { loadConfig } from '../config-loader'; +import { type CliStructuredError, errorRuntime, errorUnexpected } from '../utils/cli-errors'; +import { + addGlobalOptions, + resolveMigrationPaths, + setCommandDescriptions, + setCommandExamples, +} from '../utils/command-helpers'; +import { formatStyledHeader } from '../utils/formatters/styled'; +import type { CommonCommandOptions } from '../utils/global-flags'; +import { parseGlobalFlags } from '../utils/global-flags'; +import { handleResult } from '../utils/result-handler'; +import { TerminalUI } from '../utils/terminal-ui'; + +interface MigrationNewOptions extends CommonCommandOptions { + readonly name?: string; + readonly from?: string; + readonly config?: string; +} + +interface MigrationNewResult { + readonly ok: true; + readonly dir: string; + readonly from: string; + readonly to: string; + readonly summary: string; +} + +async function executeMigrationNewCommand( + options: MigrationNewOptions, +): Promise> { + const config = await loadConfig(options.config); + const { migrationsDir, migrationsRelative } = resolveMigrationPaths(options.config, config); + + // Read the emitted contract (destination) + const 
contractPath = config.contract?.output ?? 'contract.json'; + const contractPathAbsolute = resolve( + options.config ? resolve(options.config, '..') : process.cwd(), + contractPath, + ); + + let contractJsonContent: string; + try { + contractJsonContent = readFileSync(contractPathAbsolute, 'utf-8'); + } catch (error) { + if (error instanceof Error && (error as { code?: string }).code === 'ENOENT') { + return notOk( + errorRuntime(`Contract file not found at ${contractPathAbsolute}`, { + why: `Contract file not found at ${contractPathAbsolute}`, + fix: 'Run `prisma-next contract emit` first to generate the contract', + }), + ); + } + throw error; + } + + let toContractJson: Contract; + try { + toContractJson = JSON.parse(contractJsonContent) as Contract; + } catch (error) { + return notOk( + errorRuntime('Contract JSON is invalid', { + why: `Failed to parse ${contractPathAbsolute}: ${error instanceof Error ? error.message : String(error)}`, + fix: 'Run `prisma-next contract emit` to regenerate the contract', + }), + ); + } + + const toStorageHash = ( + (toContractJson as unknown as Record)['storage'] as + | Record + | undefined + )?.['storageHash'] as string | undefined; + if (!toStorageHash) { + return notOk( + errorRuntime('Contract is missing storageHash', { + why: `Contract at ${contractPathAbsolute} has no storageHash`, + fix: 'Run `prisma-next contract emit` to regenerate the contract', + }), + ); + } + + // Determine "from" hash + let fromContract: Contract | null = null; + let fromHash: string = EMPTY_CONTRACT_HASH; + + try { + const packages = await readMigrationsDir(migrationsDir); + const attested = packages.filter(isAttested); + + if (attested.length > 0) { + const graph = reconstructGraph(attested); + + if (options.from) { + // Explicit --from: find the migration with matching to hash + const match = attested.find((p) => p.manifest.to.startsWith(options.from!)); + if (!match) { + return notOk( + errorRuntime('Starting contract not found', { + why: `No 
migration with to hash matching "${options.from}" exists in ${migrationsRelative}`, + fix: 'Check that the --from hash matches a known migration target hash.', + }), + ); + } + fromHash = match.manifest.to; + fromContract = match.manifest.toContract; + } else { + const latestMigration = findLatestMigration(graph); + if (latestMigration) { + fromHash = latestMigration.to; + const leafPkg = attested.find( + (p) => p.manifest.migrationId === latestMigration.migrationId, + ); + if (leafPkg) { + fromContract = leafPkg.manifest.toContract; + } + } + } + } + } catch (error) { + if (MigrationToolsError.is(error)) { + return notOk( + errorRuntime(error.message, { + why: error.why, + fix: error.fix, + meta: { code: error.code }, + }), + ); + } + throw error; + } + + // Check for no-op + if (fromHash === toStorageHash) { + return notOk( + errorRuntime('No changes detected', { + why: 'The from and to contract hashes are identical — there is nothing to migrate.', + fix: 'Change the contract and run `prisma-next contract emit` before creating a new migration.', + }), + ); + } + + // Build manifest and write package + const timestamp = new Date(); + const slug = options.name ?? 
'migration'; + const dirName = formatMigrationDirName(timestamp, slug); + const packageDir = join(migrationsDir, dirName); + + const manifest: MigrationManifest = { + from: fromHash, + to: toStorageHash, + migrationId: null, + kind: 'regular', + fromContract, + toContract: toContractJson, + hints: { + used: [], + applied: [], + plannerVersion: '1.0.0', + planningStrategy: 'manual', + }, + labels: [], + createdAt: timestamp.toISOString(), + }; + + try { + // Write package with empty ops (draft) + await writeMigrationPackage(packageDir, manifest, []); + + // Scaffold migration.ts + await scaffoldMigrationTs(packageDir); + + return ok({ + ok: true as const, + dir: relative(process.cwd(), packageDir), + from: fromHash, + to: toStorageHash, + summary: `Scaffolded migration at ${relative(process.cwd(), packageDir)}`, + }); + } catch (error) { + return notOk( + errorUnexpected(error instanceof Error ? error.message : String(error), { + why: `Failed to scaffold migration: ${error instanceof Error ? 
error.message : String(error)}`, + }), + ); + } +} + +export function createMigrationNewCommand(): Command { + const command = new Command('new'); + setCommandDescriptions( + command, + 'Scaffold a new migration for manual authoring', + 'Creates a migration package with a migration.ts file for manual authoring.\n' + + 'Write operation descriptors and data transforms in migration.ts, then run\n' + + '`migration verify` to resolve and attest the package.', + ); + setCommandExamples(command, [ + 'prisma-next migration new --name split-name', + 'prisma-next migration new --name custom-fk --from sha256:abc...', + ]); + addGlobalOptions(command) + .option('--name ', 'Migration name (used in directory name)') + .option('--from ', 'Starting contract hash (default: latest migration target)') + .option('--config ', 'Path to prisma-next.config.ts') + .action(async (options: MigrationNewOptions) => { + const flags = parseGlobalFlags(options); + const ui = new TerminalUI({ color: flags.color, interactive: flags.interactive }); + + if (!flags.json && !flags.quiet) { + const header = formatStyledHeader({ + command: 'migration new', + description: 'Scaffold a new migration', + details: [], + flags, + }); + ui.stderr(header); + } + + const result = await executeMigrationNewCommand(options); + + const exitCode = handleResult(result, flags, ui, (value) => { + if (flags.json) { + ui.output(JSON.stringify(value, null, 2)); + } else if (!flags.quiet) { + ui.output(`\nScaffolded migration at ${value.dir}`); + ui.output(` from: ${value.from}`); + ui.output(` to: ${value.to}`); + ui.output( + `\nEdit migration.ts, then run \`prisma-next migration verify --dir ${value.dir}\` to attest.`, + ); + } + }); + + process.exit(exitCode); + }); + + return command; +} diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts index e2823b794e..93b48c769e 100644 --- 
a/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts @@ -1,16 +1,24 @@ import { readFile } from 'node:fs/promises'; import type { Contract } from '@prisma-next/contract/types'; -import { createControlStack } from '@prisma-next/framework-components/control'; +import type { OperationDescriptor } from '@prisma-next/framework-components/control'; import { attestMigration } from '@prisma-next/migration-tools/attestation'; import { EMPTY_CONTRACT_HASH } from '@prisma-next/migration-tools/constants'; import { findLatestMigration } from '@prisma-next/migration-tools/dag'; -import { formatMigrationDirName, writeMigrationPackage } from '@prisma-next/migration-tools/io'; +import { + formatMigrationDirName, + writeMigrationOps, + writeMigrationPackage, +} from '@prisma-next/migration-tools/io'; +import { + evaluateMigrationTs, + scaffoldMigrationTs, +} from '@prisma-next/migration-tools/migration-ts'; import { type MigrationManifest, MigrationToolsError } from '@prisma-next/migration-tools/types'; import { notOk, ok, type Result } from '@prisma-next/utils/result'; import { Command } from 'commander'; import { join, relative } from 'pathe'; import { loadConfig } from '../config-loader'; -import { extractOperationStatements } from '../control-api/operations/extract-operation-statements'; +import { extractSqlDdl } from '../control-api/operations/extract-sql-ddl'; import { type CliErrorConflict, type CliStructuredError, @@ -24,7 +32,7 @@ import { import { addGlobalOptions, getTargetMigrations, - loadMigrationBundles, + loadAllBundles, resolveContractPath, resolveMigrationPaths, setCommandDescriptions, @@ -55,7 +63,7 @@ export interface MigrationPlanResult { readonly label: string; readonly operationClass: string; }[]; - readonly sql?: readonly string[] | undefined; + readonly sql?: readonly string[]; readonly summary: string; readonly timings: { readonly total: number; @@ -159,7 +167,18 @@ async 
function executeMigrationPlanCommand( let fromHash: string = EMPTY_CONTRACT_HASH; try { - const { bundles, graph } = await loadMigrationBundles(migrationsDir); + const { attested: bundles, drafts, graph } = await loadAllBundles(migrationsDir); + + // Check if a draft migration already targets this contract + const existingDraft = drafts.find((d) => d.manifest.to === toStorageHash); + if (existingDraft) { + return notOk( + errorRuntime('A draft migration to this contract already exists', { + why: `Draft migration at "${existingDraft.dirName}" already targets ${toStorageHash}`, + fix: `Run 'prisma-next migration verify --dir ${migrationsRelative}/${existingDraft.dirName}' to attest it, or delete it and re-plan.`, + }), + ); + } if (options.from) { const resolved = resolveBundleByPrefix(bundles, options.from); @@ -219,98 +238,142 @@ async function executeMigrationPlanCommand( }), ); } - const stack = createControlStack({ - family: config.family, - target: config.target, - adapter: config.adapter, - extensionPacks: config.extensionPacks ?? [], - }); - const familyInstance = config.family.create(stack); const frameworkComponents = assertFrameworkComponentsCompatible( config.family.familyId, config.target.targetId, [config.target, config.adapter, ...(config.extensionPacks ?? 
[])], ); - const planner = migrations.createPlanner(familyInstance); - const fromSchemaIR = migrations.contractToSchema(fromContract, frameworkComponents); - const plannerResult = planner.plan({ - contract: toContractJson, - schema: fromSchemaIR, - policy: { allowedOperationClasses: ['additive', 'widening', 'destructive'] }, - frameworkComponents, - }); - if (plannerResult.kind === 'failure') { - return notOk( - errorMigrationPlanningFailed({ - conflicts: plannerResult.conflicts as readonly CliErrorConflict[], - }), - ); - } + // Use descriptor-based planner if available, fall back to old planner + if (migrations.planWithDescriptors) { + const descriptorResult = migrations.planWithDescriptors({ + fromContract, + toContract: toContractJson, + frameworkComponents, + }); - if (plannerResult.plan.operations.length === 0) { - return notOk( - errorMigrationPlanningFailed({ - conflicts: [ - { - kind: 'unsupportedChange', - summary: - 'Contract changed but planner produced no operations. ' + - 'This indicates unsupported or ignored changes (e.g. removals, type changes, or a planner/contract mismatch).', - }, - ], - }), - ); - } + if (!descriptorResult.ok) { + return notOk( + errorMigrationPlanningFailed({ + conflicts: descriptorResult.conflicts as readonly CliErrorConflict[], + }), + ); + } - // Build manifest and write migration package - const timestamp = new Date(); - const slug = options.name ?? 
'migration'; - const dirName = formatMigrationDirName(timestamp, slug); - const packageDir = join(migrationsDir, dirName); - - const manifest: MigrationManifest = { - from: fromHash, - to: toStorageHash, - migrationId: null, - kind: 'regular', - fromContract, - toContract: toContractJson, - hints: { - used: [], - applied: [], - plannerVersion: '1.0.0', - planningStrategy: 'diff', - }, - labels: [], - createdAt: timestamp.toISOString(), - }; + if (descriptorResult.descriptors.length === 0) { + return notOk( + errorMigrationPlanningFailed({ + conflicts: [ + { + kind: 'unsupportedChange', + summary: + 'Contract changed but planner produced no operations. ' + + 'This indicates unsupported or ignored changes.', + }, + ], + }), + ); + } - try { - await writeMigrationPackage(packageDir, manifest, plannerResult.plan.operations); - const migrationId = await attestMigration(packageDir); + // Build manifest and write migration package + const timestamp = new Date(); + const slug = options.name ?? 'migration'; + const dirName = formatMigrationDirName(timestamp, slug); + const packageDir = join(migrationsDir, dirName); - const sql = extractOperationStatements(config.family.familyId, plannerResult.plan.operations); - const result: MigrationPlanResult = { - ok: true, - noOp: false, + const manifest: MigrationManifest = { from: fromHash, to: toStorageHash, - migrationId, - dir: relative(process.cwd(), packageDir), - operations: plannerResult.plan.operations.map((op) => ({ - id: op.id, - label: op.label, - operationClass: op.operationClass, - })), - sql, - summary: `Planned ${plannerResult.plan.operations.length} operation(s)`, - timings: { total: Date.now() - startTime }, + migrationId: null, + kind: 'regular', + fromContract, + toContract: toContractJson, + hints: { + used: [], + applied: [], + plannerVersion: '2.0.0', + planningStrategy: 'descriptors', + }, + labels: [], + createdAt: timestamp.toISOString(), }; - return ok(result); - } catch (error) { - return 
notOk(mapMigrationToolsError(error)); + + try { + // Always write migration.ts with the descriptors + // Write package with empty ops first (draft) + await writeMigrationPackage(packageDir, manifest, []); + await scaffoldMigrationTs(packageDir, { + descriptors: descriptorResult.descriptors, + contractJsonPath: contractPathAbsolute, + }); + + if (descriptorResult.needsDataMigration) { + // Draft — user must fill in dataTransform and run verify + const result: MigrationPlanResult = { + ok: true, + noOp: false, + from: fromHash, + to: toStorageHash, + dir: relative(process.cwd(), packageDir), + operations: descriptorResult.descriptors.map((d) => ({ + id: (d as { kind: string }).kind, + label: (d as { kind: string }).kind, + operationClass: 'data' as const, + })), + sql: [], + summary: `Planned ${descriptorResult.descriptors.length} operation(s) — data migration required. Edit migration.ts and run \`migration verify --dir ${relative(process.cwd(), packageDir)}\` to attest.`, + timings: { total: Date.now() - startTime }, + }; + return ok(result); + } + + // No data migration — evaluate, resolve, write ops, attest + const evaluatedDescriptors = await evaluateMigrationTs(packageDir); + + if (!migrations.resolveDescriptors) { + throw new Error('Target does not support resolveDescriptors'); + } + + const resolvedOps = migrations.resolveDescriptors( + evaluatedDescriptors as OperationDescriptor[], + { + fromContract, + toContract: toContractJson, + frameworkComponents, + }, + ); + + await writeMigrationOps(packageDir, resolvedOps); + const migrationId = await attestMigration(packageDir); + + const sql = extractSqlDdl(resolvedOps); + const result: MigrationPlanResult = { + ok: true, + noOp: false, + from: fromHash, + to: toStorageHash, + migrationId, + dir: relative(process.cwd(), packageDir), + operations: resolvedOps.map((op) => ({ + id: op.id, + label: op.label, + operationClass: op.operationClass, + })), + sql, + summary: `Planned ${resolvedOps.length} operation(s)`, + 
timings: { total: Date.now() - startTime }, + }; + return ok(result); + } catch (error) { + return notOk(mapMigrationToolsError(error)); + } } + + return notOk( + errorTargetMigrationNotSupported({ + why: `Target "${config.target.id}" does not support planWithDescriptors`, + }), + ); } export function createMigrationPlanCommand(): Command { diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts index efadab7303..ce62310894 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-status.ts @@ -5,10 +5,11 @@ import { findPathWithDecision, findReachableLeaves, } from '@prisma-next/migration-tools/dag'; +import type { Refs } from '@prisma-next/migration-tools/refs'; import { readRefs, resolveRef } from '@prisma-next/migration-tools/refs'; import type { AttestedMigrationBundle, - MigrationBundle, + DraftMigrationBundle, MigrationChainEntry, MigrationGraph, } from '@prisma-next/migration-tools/types'; @@ -23,7 +24,7 @@ import { createControlClient } from '../control-api/client'; import { type CliStructuredError, errorRuntime, errorUnexpected } from '../utils/cli-errors'; import { addGlobalOptions, - loadMigrationBundles, + loadAllBundles, maskConnectionUrl, readContractEnvelope, resolveMigrationPaths, @@ -94,7 +95,8 @@ export interface MigrationStatusResult { readonly summary: string; readonly diagnostics: readonly StatusDiagnostic[]; readonly graph?: MigrationGraph; - readonly bundles?: readonly MigrationBundle[]; + readonly bundles?: readonly AttestedMigrationBundle[]; + readonly drafts?: readonly DraftMigrationBundle[]; readonly edgeStatuses?: readonly EdgeStatus[]; readonly activeRefHash?: string; readonly activeRefName?: string; @@ -225,7 +227,7 @@ export function deriveEdgeStatuses( */ function buildMigrationEntries( chain: readonly MigrationChainEntry[], - packages: readonly 
MigrationBundle[], + packages: readonly AttestedMigrationBundle[], mode: 'online' | 'offline', markerHash: string | undefined, edgeStatuses?: readonly EdgeStatus[], @@ -355,7 +357,7 @@ async function executeMigrationStatusCommand( let activeRefName: string | undefined; let activeRefHash: string | undefined; - let allRefs: Record = {}; + let allRefs: Refs = {}; try { allRefs = await readRefs(refsPath); } catch (error) { @@ -373,23 +375,27 @@ async function executeMigrationStatusCommand( if (options.ref) { activeRefName = options.ref; - try { - activeRefHash = resolveRef(allRefs, activeRefName); - } catch (error) { - if (MigrationToolsError.is(error)) { - return notOk( - errorRuntime(error.message, { - why: error.why, - fix: error.fix, - meta: { code: error.code }, - }), - ); + const refHash = allRefs[activeRefName]; + if (refHash) { + activeRefHash = refHash; + } else { + try { + activeRefHash = resolveRef(allRefs, activeRefName); + } catch (error) { + if (MigrationToolsError.is(error)) { + return notOk( + errorRuntime(error.message, { + why: error.why, + fix: error.fix, + meta: { code: error.code }, + }), + ); + } + throw error; } - throw error; } } - // todo: can't we derive this without modifying the StatusRef obj const statusRefs: StatusRef[] = Object.entries(allRefs).map(([name, hash]) => ({ name, hash, @@ -431,9 +437,10 @@ async function executeMigrationStatusCommand( } let attested: readonly AttestedMigrationBundle[]; + let drafts: readonly DraftMigrationBundle[]; let graph: MigrationGraph; try { - ({ bundles: attested, graph } = await loadMigrationBundles(migrationsDir)); + ({ attested, drafts, graph } = await loadAllBundles(migrationsDir)); } catch (error) { if (MigrationToolsError.is(error)) { return notOk( @@ -447,6 +454,17 @@ async function executeMigrationStatusCommand( ); } + if (drafts.length > 0) { + diagnostics.push({ + code: 'MIGRATION.DRAFTS', + severity: 'warn', + message: `${drafts.length} draft migration(s) found: ${drafts.map((d) => 
d.dirName).join(', ')}`, + hints: [ + "Run 'prisma-next migration verify --dir ' to attest draft migrations before applying", + ], + }); + } + if (attested.length === 0) { if (contractHash !== EMPTY_CONTRACT_HASH) { diagnostics.push({ @@ -689,6 +707,7 @@ async function executeMigrationStatusCommand( ...ifDefined('pathDecision', pathDecision), graph, bundles: attested, + ...(drafts.length > 0 ? { drafts } : {}), edgeStatuses, ...ifDefined('activeRefHash', activeRefHash), ...ifDefined('activeRefName', activeRefName), @@ -712,7 +731,7 @@ export function createMigrationStatusCommand(): Command { addGlobalOptions(command) .option('--db ', 'Database connection string') .option('--config ', 'Path to prisma-next.config.ts') - .option('--ref ', 'Target ref name from migrations/refs.json') + .option('--ref ', 'Target ref name from migrations/refs/') .option('--graph', 'Show the full migration graph with all branches') .option('--limit ', 'Maximum number of migrations to display (default: 10)') .option('--all', 'Show full history (disables truncation)') @@ -749,17 +768,24 @@ export function createMigrationStatusCommand(): Command { activeRefHash: statusResult.activeRefHash, activeRefName: statusResult.activeRefName, edgeStatuses: statusResult.edgeStatuses, + draftEdges: statusResult.drafts?.map((d) => ({ + from: d.manifest.from, + to: d.manifest.to, + dirName: d.dirName, + })), }); const graphToRender = options.graph || statusResult.diverged ? renderInput.graph : extractRelevantSubgraph(renderInput.graph, renderInput.relevantPaths); + const dagreOptions = + !options.graph && isLinearGraph(graphToRender) ? { ranksep: 1 } : undefined; const renderOptions = { ...renderInput.options, colorize, ...ifDefined('limit', limit), - ...(isLinearGraph(graphToRender) ? 
{ dagreOptions: { ranksep: 1 } } : {}), + ...ifDefined('dagreOptions', dagreOptions), }; const graphOutput = graphRenderer.render(graphToRender, renderOptions); ui.log(graphOutput); diff --git a/packages/1-framework/3-tooling/cli/src/commands/migration-verify.ts b/packages/1-framework/3-tooling/cli/src/commands/migration-verify.ts index 1294ae88fc..b23f29a378 100644 --- a/packages/1-framework/3-tooling/cli/src/commands/migration-verify.ts +++ b/packages/1-framework/3-tooling/cli/src/commands/migration-verify.ts @@ -1,16 +1,22 @@ +import type { OperationDescriptor } from '@prisma-next/framework-components/control'; import { attestMigration, verifyMigration } from '@prisma-next/migration-tools/attestation'; +import { readMigrationPackage, writeMigrationOps } from '@prisma-next/migration-tools/io'; +import { evaluateMigrationTs, hasMigrationTs } from '@prisma-next/migration-tools/migration-ts'; import { MigrationToolsError } from '@prisma-next/migration-tools/types'; import { ifDefined } from '@prisma-next/utils/defined'; import { notOk, ok, type Result } from '@prisma-next/utils/result'; import { Command } from 'commander'; +import { loadConfig } from '../config-loader'; import { type CliStructuredError, errorRuntime, errorUnexpected } from '../utils/cli-errors'; import { addGlobalOptions, + getTargetMigrations, setCommandDescriptions, setCommandExamples, } from '../utils/command-helpers'; import { formatMigrationVerifyCommandOutput } from '../utils/formatters/migrations'; import { formatStyledHeader } from '../utils/formatters/styled'; +import { assertFrameworkComponentsCompatible } from '../utils/framework-components'; import type { CommonCommandOptions } from '../utils/global-flags'; import { type GlobalFlags, parseGlobalFlags } from '../utils/global-flags'; import { handleResult } from '../utils/result-handler'; @@ -18,6 +24,7 @@ import { TerminalUI } from '../utils/terminal-ui'; interface MigrationVerifyOptions extends CommonCommandOptions { readonly dir: 
string; + readonly config?: string; } export interface MigrationVerifyResult { @@ -48,6 +55,37 @@ async function executeMigrationVerifyCommand( } try { + // If migration.ts exists, always evaluate and resolve to ops.json. + // This ensures ops.json is always fresh relative to migration.ts. + if (await hasMigrationTs(dir)) { + const pkg = await readMigrationPackage(dir); + const descriptors = await evaluateMigrationTs(dir); + + const config = await loadConfig(options.config); + const migrations = getTargetMigrations(config.target); + if (!migrations?.resolveDescriptors) { + throw new Error( + 'Target does not support resolveDescriptors. ' + + 'Cannot verify a migration package with migration.ts.', + ); + } + + const frameworkComponents = assertFrameworkComponentsCompatible( + config.family.familyId, + config.target.targetId, + [config.target, config.adapter, ...(config.extensionPacks ?? [])], + ); + + const resolvedOps = migrations.resolveDescriptors(descriptors as OperationDescriptor[], { + fromContract: pkg.manifest.fromContract, + toContract: pkg.manifest.toContract, + frameworkComponents, + }); + + await writeMigrationOps(dir, resolvedOps); + } + + // Now verify/attest with the (potentially updated) ops.json const result = await verifyMigration(dir); if (result.ok) { @@ -73,16 +111,16 @@ async function executeMigrationVerifyCommand( }); } - return notOk( - errorRuntime('migrationId mismatch — migration has been modified', { - why: `stored=${result.storedMigrationId}, computed=${result.computedMigrationId}`, - fix: 'If the change was intentional, set "migrationId" to null in migration.json and rerun `migration verify` to re-attest. Otherwise, restore the original migration.', - meta: { - storedMigrationId: result.storedMigrationId, - computedMigrationId: result.computedMigrationId, - }, - }), - ); + // Mismatch — ops.json was just rewritten by migration.ts evaluation above, + // so this means the stored migrationId is stale. Re-attest. 
+ const migrationId = await attestMigration(dir); + return ok({ + ok: true, + status: 'attested', + dir, + migrationId, + summary: `Migration re-attested with migrationId: ${migrationId}`, + }); } catch (error) { if (MigrationToolsError.is(error)) { return notOk( @@ -108,11 +146,12 @@ export function createMigrationVerifyCommand(): Command { 'Verify a migration package migrationId', 'Recomputes the content-addressed migrationId for a migration package and compares\n' + 'it against the stored value. Draft migrations (migrationId: null) are automatically\n' + - 'attested.', + 'attested. If migration.ts exists, it is always re-evaluated and ops.json is refreshed.', ); setCommandExamples(command, ['prisma-next migration verify --dir migrations/20250101-add-users']); addGlobalOptions(command) .requiredOption('--dir ', 'Path to the migration package directory') + .option('--config ', 'Path to prisma-next.config.ts (required when migration.ts exists)') .action(async (options: MigrationVerifyOptions) => { const flags = parseGlobalFlags(options); const ui = new TerminalUI({ color: flags.color, interactive: flags.interactive }); diff --git a/packages/1-framework/3-tooling/cli/src/control-api/operations/migration-apply.ts b/packages/1-framework/3-tooling/cli/src/control-api/operations/migration-apply.ts index f651467fee..01c22cf345 100644 --- a/packages/1-framework/3-tooling/cli/src/control-api/operations/migration-apply.ts +++ b/packages/1-framework/3-tooling/cli/src/control-api/operations/migration-apply.ts @@ -117,7 +117,7 @@ export async function executeMigrationApply { + const all = await readMigrationsDir(migrationsDir); + const attested = all.filter(isAttested); + const drafts = all.filter(isDraft); + const graph = reconstructGraph(attested); + return { attested, drafts, graph }; +} + /** * The subset of the emitted contract.json that the framework layer can * safely type. 
The emitter adds these fields on top of the family-specific diff --git a/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts b/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts index ce6663777c..9a4625b634 100644 --- a/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts +++ b/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts @@ -39,6 +39,12 @@ export interface EdgeStatus { readonly status: EdgeStatusKind; } +export interface DraftEdge { + readonly from: string; + readonly to: string; + readonly dirName: string; +} + export interface MigrationGraphInput { readonly graph: MigrationGraph; readonly mode: 'online' | 'offline'; @@ -52,6 +58,8 @@ export interface MigrationGraphInput { * icons (✓/⧗) are baked into edge labels. Undefined in offline mode. */ readonly edgeStatuses?: readonly EdgeStatus[] | undefined; + /** Draft migrations to render as dashed edges. */ + readonly draftEdges?: readonly DraftEdge[] | undefined; } export interface MigrationRenderInput { @@ -99,22 +107,9 @@ export function migrationGraphToRenderInput(input: MigrationGraphInput): Migrati }); } - // Detached contract node (not in graph) - if (contractHash !== EMPTY_CONTRACT_HASH && !graph.nodes.has(contractHash)) { - const detachedMarkers: NodeMarker[] = []; - if (mode === 'online' && markerHash === contractHash) { - detachedMarkers.push({ kind: 'db' }); - } - detachedMarkers.push({ kind: 'contract', planned: false }); - nodeList.push({ - id: shortHash(contractHash), - markers: detachedMarkers, - style: 'detached', - }); - } - // Build edges const edgeList: GraphEdge[] = []; + for (const [, entries] of graph.forwardChain) { for (const entry of entries) { const status = statusByDirName.get(entry.dirName); @@ -209,6 +204,30 @@ export function migrationGraphToRenderInput(input: MigrationGraphInput): Migrati spineTargetHash = lastEdge?.to ?? 
EMPTY_CONTRACT_HASH; } + // Contract not in attested graph — connect from spine target with a dashed edge + if (contractHash !== EMPTY_CONTRACT_HASH && !graph.nodes.has(contractHash)) { + const contractMarkers: NodeMarker[] = []; + if (mode === 'online' && markerHash === contractHash) { + contractMarkers.push({ kind: 'db' }); + } + contractMarkers.push({ kind: 'contract', planned: false }); + nodeList.push({ + id: shortHash(contractHash), + markers: contractMarkers, + }); + + const matchingDraft = input.draftEdges?.find((d) => d.to === contractHash); + const fromHash = matchingDraft?.from ?? spineTargetHash; + if (graph.nodes.has(fromHash) || fromHash === spineTargetHash) { + edgeList.push({ + from: toShortId(fromHash), + to: shortHash(contractHash), + ...ifDefined('label', matchingDraft ? `${matchingDraft.dirName} [draft]` : undefined), + style: 'dashed', + }); + } + } + return { graph: new RenderGraph(nodeList, edgeList), options: { diff --git a/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts b/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts index bd29e30985..9733e7df83 100644 --- a/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts +++ b/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts @@ -155,6 +155,7 @@ const DIR = { down: 2, left: 4, right: 8, + dashed: 16, } as const; /** Arrow characters for edge termination (one cell before the target node). 
*/ @@ -178,6 +179,13 @@ const BOX_CHAR: Record = { [DIR.left | DIR.right | DIR.down]: '┬', [DIR.left | DIR.right | DIR.up]: '┴', [DIR.up | DIR.down | DIR.left | DIR.right]: '┼', + // Dashed variants — straight segments only, corners fall back to solid + [DIR.up | DIR.dashed]: '┊', + [DIR.down | DIR.dashed]: '┊', + [DIR.up | DIR.down | DIR.dashed]: '┊', + [DIR.left | DIR.dashed]: '┈', + [DIR.right | DIR.dashed]: '┈', + [DIR.left | DIR.right | DIR.dashed]: '┈', }; // --------------------------------------------------------------------------- @@ -303,26 +311,41 @@ class CharGrid { } /** Stamp a horizontal edge segment from x1 to x2 at row y. */ - markHorizontal(y: number, x1: number, x2: number, color?: ColorFn, priority?: number): void { + markHorizontal( + y: number, + x1: number, + x2: number, + color?: ColorFn, + priority?: number, + extraBits = 0, + ): void { const lo = Math.min(x1, x2); const hi = Math.max(x1, x2); /* v8 ignore next -- @preserve */ if (lo === hi) return; - this.addConnection(lo, y, DIR.right, color, priority); + this.addConnection(lo, y, DIR.right | extraBits, color, priority); for (let x = lo + 1; x < hi; x++) - this.addConnection(x, y, DIR.left | DIR.right, color, priority); - this.addConnection(hi, y, DIR.left, color, priority); + this.addConnection(x, y, DIR.left | DIR.right | extraBits, color, priority); + this.addConnection(hi, y, DIR.left | extraBits, color, priority); } /** Stamp a vertical edge segment from y1 to y2 at column x. 
*/ - markVertical(x: number, y1: number, y2: number, color?: ColorFn, priority?: number): void { + markVertical( + x: number, + y1: number, + y2: number, + color?: ColorFn, + priority?: number, + extraBits = 0, + ): void { const lo = Math.min(y1, y2); const hi = Math.max(y1, y2); /* v8 ignore next -- @preserve */ if (lo === hi) return; - this.addConnection(x, lo, DIR.down, color, priority); - for (let y = lo + 1; y < hi; y++) this.addConnection(x, y, DIR.up | DIR.down, color, priority); - this.addConnection(x, hi, DIR.up, color, priority); + this.addConnection(x, lo, DIR.down | extraBits, color, priority); + for (let y = lo + 1; y < hi; y++) + this.addConnection(x, y, DIR.up | DIR.down | extraBits, color, priority); + this.addConnection(x, hi, DIR.up | extraBits, color, priority); } /** Place literal text at (x, y). Each character occupies one cell. Text stamps override connections. */ @@ -394,7 +417,8 @@ class CharGrid { color = label.color; } else { const conn = this.connections.get(k) ?? 0; - ch = BOX_CHAR[conn] ?? ' '; + // Dashed corners don't exist — strip the bit and fall back to solid + ch = BOX_CHAR[conn] ?? BOX_CHAR[conn & ~DIR.dashed] ?? ' '; color = conn === 0 ? 
undefined : this.cellColors.get(k)?.color; } @@ -840,8 +864,17 @@ function selectBestVariant( */ export function extractSubgraph(graph: RenderGraph, path: readonly string[]): RenderGraph { const pathIndex = new Map(path.map((id, i) => [id, i])); - const filteredNodes = graph.nodes.filter((n) => pathIndex.has(n.id) || n.style === 'detached'); + const nodeSet = new Set(path); + // Always keep dashed edges and their endpoints + for (const e of graph.edges) { + if (e.style === 'dashed') { + nodeSet.add(e.from); + nodeSet.add(e.to); + } + } + const filteredNodes = graph.nodes.filter((n) => nodeSet.has(n.id)); const filteredEdges = graph.edges.filter((e) => { + if (e.style === 'dashed') return true; const fromIdx = pathIndex.get(e.from); const toIdx = pathIndex.get(e.to); return fromIdx !== undefined && toIdx !== undefined && fromIdx < toIdx; @@ -877,8 +910,17 @@ export function extractRelevantSubgraph( } } - const filteredNodes = graph.nodes.filter((n) => nodeSet.has(n.id) || n.style === 'detached'); - const filteredEdges = graph.edges.filter((e) => edgePairs.has(`${e.from}\0${e.to}`)); + // Always keep dashed (draft) edges and their endpoints + const dashedEdges = graph.edges.filter((e) => e.style === 'dashed'); + for (const e of dashedEdges) { + nodeSet.add(e.from); + nodeSet.add(e.to); + } + + const filteredNodes = graph.nodes.filter((n) => nodeSet.has(n.id)); + const filteredEdges = graph.edges.filter( + (e) => edgePairs.has(`${e.from}\0${e.to}`) || e.style === 'dashed', + ); return new RenderGraph(filteredNodes, filteredEdges); } @@ -950,11 +992,6 @@ export function truncateGraph( } } - // Also include detached nodes (they're appended at the bottom, not in the graph) - for (const n of graph.nodes) { - if (n.style === 'detached') reachable.add(n.id); - } - const truncatedNodes = graph.nodes.filter((n) => reachable.has(n.id)); const truncatedEdges = graph.edges.filter((e) => reachable.has(e.from) && reachable.has(e.to)); const elidedCount = spine.length - 1 - 
effectiveEdges; @@ -995,7 +1032,7 @@ function layoutAndRender(graph: RenderGraph, options: GraphRenderOptions, elided const colorize = options.colorize ?? true; const colors = buildColors(colorize); - const layoutNodes = graph.nodes.filter((n) => n.style !== 'detached'); + const layoutNodes = graph.nodes; const layoutNodeIds = new Set(layoutNodes.map((n) => n.id)); const requestedRoot = options.rootId ?? layoutNodes[0]?.id ?? '∅'; const rootId = layoutNodeIds.has(requestedRoot) @@ -1018,13 +1055,6 @@ function layoutAndRender(graph: RenderGraph, options: GraphRenderOptions, elided const edgeNames: string[] = []; for (let i = 0; i < graph.edges.length; i++) { const edge = graph.edges[i]!; - const fromDetached = graph.nodeById.get(edge.from)?.style === 'detached'; - const toDetached = graph.nodeById.get(edge.to)?.style === 'detached'; - /* v8 ignore next 3 -- @preserve */ - if (fromDetached || toDetached) { - edgeNames.push(''); - continue; - } const name = `e${i}`; edgeNames.push(name); g.setEdge(edge.from, edge.to, { label: edge.label ?? '' }, name); @@ -1095,11 +1125,15 @@ function layoutAndRender(graph: RenderGraph, options: GraphRenderOptions, elided const { poly } = selectBestVariant(src, dagrePoints, tgt, edge.label, grid); + const dashedBit = edge.style === 'dashed' ? 
DIR.dashed : 0; for (let j = 0; j < poly.length - 1; j++) { const a = poly[j]!; const b = poly[j + 1]!; - if (a.y === b.y) grid.markHorizontal(a.y, a.x, b.x, edgeColor, priority); - else if (a.x === b.x) grid.markVertical(a.x, a.y, b.y, edgeColor, priority); + if (a.y === b.y) { + grid.markHorizontal(a.y, a.x, b.x, edgeColor, priority, dashedBit); + } else if (a.x === b.x) { + grid.markVertical(a.x, a.y, b.y, edgeColor, priority, dashedBit); + } } drawnEdges.push({ edge, poly, role: entry.role, srcY: src.y }); @@ -1212,42 +1246,6 @@ function layoutAndRender(graph: RenderGraph, options: GraphRenderOptions, elided } } - // --- Detached nodes --- - const detachedNodes = graph.nodes.filter((n) => n.style === 'detached'); - if (detachedNodes.length > 0) { - // Align detached nodes with the bottom-most node in the graph so the - // dashed connector visually continues from the last rendered node. - let bottomNodeX = nodePos.values().next().value?.x ?? 0; - let bottomNodeY = -1; - for (const [, pos] of nodePos) { - if (pos.y > bottomNodeY) { - bottomNodeY = pos.y; - bottomNodeX = pos.x; - } - } - const spineX = bottomNodeX; - let bottomY = grid.getMaxY() + 1; - - for (const node of detachedNodes) { - grid.stampText(spineX, bottomY, '┊', colors.branch); - bottomY++; - grid.stampText(spineX, bottomY, '◇', colors.branch); - grid.stampText(spineX + 2, bottomY, node.id, dim); - - const tags = buildInlineTags(node.markers ?? [], colors); - if (tags.length > 0) { - let bx = spineX + 2 + node.id.length; - for (const tag of tags) { - grid.stampText(bx, bottomY, ' '); - bx++; - grid.stampText(bx, bottomY, tag.text, tag.color); - bx += tag.text.length; - } - } - bottomY++; - } - } - return grid.render(); } @@ -1315,11 +1313,11 @@ export const graphRenderer: GraphRenderer = { render, }; -/** True if the graph is a single linear chain (no branching), ignoring detached nodes. */ +/** True if the graph is a single linear chain (no branching), ignoring dashed edges. 
*/ export function isLinearGraph(graph: RenderGraph): boolean { for (const node of graph.nodes) { - if (node.style === 'detached') continue; - if (graph.outgoing(node.id).length > 1) return false; + const solidOutgoing = graph.outgoing(node.id).filter((e) => e.style !== 'dashed'); + if (solidOutgoing.length > 1) return false; } return true; } diff --git a/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts b/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts index 20f118c586..5bf5ae83d4 100644 --- a/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts +++ b/packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts @@ -33,6 +33,12 @@ export interface GraphEdge { * - `'unreachable'` — magenta (CVD-safe: DB on a different branch) */ readonly colorHint?: 'applied' | 'pending' | 'unreachable'; + /** + * Edge rendering style. + * - `'solid'` (default) — normal edge with `│` connector + * - `'dashed'` — draft/phantom edge with `┊` connector, excluded from path resolution + */ + readonly style?: 'solid' | 'dashed'; } /** diff --git a/packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap b/packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap index 26e362d378..f0fe204ea0 100644 --- a/packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap +++ b/packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap @@ -136,7 +136,10 @@ exports[`Graph renderer — render full graph > renders Detached contract node 1 ▾ ○ B prod ◆ db ┊ -◇ planned ◆ contract" +┊ +┊ +▾ +○ planned ◇ contract" `; exports[`Graph renderer — render full graph > renders Diamond (convergence) 1`] = ` @@ -418,7 +421,10 @@ exports[`Graph renderer — render full graph > renders Long spine with varied b ▾ ○ S14 prod ┊ - ◇ planned ◆ contract" + ┊ + ┊ + ▾ + ○ planned ◇ contract" `; 
exports[`Graph renderer — render full graph > renders Multi-branch (3 branches) 1`] = ` @@ -497,7 +503,10 @@ exports[`Graph renderer — render full graph > renders Multi-team workflow (7 r └─────────▾─────────┘ ▾ ○ qa00001 qa ○ re1ea5e prod ◆ db ┊ - ◇ c0n7rac ◆ contract" + ┊ + ┊ + ▾ + ○ c0n7rac ◇ contract" `; exports[`Graph renderer — render full graph > renders Multiple rollbacks + branch 1`] = ` @@ -609,7 +618,10 @@ exports[`Graph renderer — render full graph > renders Realistic team workflow ▾ ▾ ○ t9u0v1w staging ○ p6q7r8s prod ◆ db ┊ - ◇ x2y3z4a ◇ contract" + ┊ + ┊ + ▾ + ○ x2y3z4a ◇ contract" `; exports[`Graph renderer — render full graph > renders Refs + DB combined 1`] = ` @@ -903,7 +915,10 @@ exports[`render with truncation > renders truncated graph with detached contract ▾ ○ S14 prod ┊ - ◇ planned ◆ contract" + ┊ + ┊ + ▾ + ○ planned ◇ contract" `; exports[`render with truncation > renders truncated graph with ⋮ indicator 1`] = ` diff --git a/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-migration-mapper.test.ts b/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-migration-mapper.test.ts index d125eb3f13..a655f0156b 100644 --- a/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-migration-mapper.test.ts +++ b/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-migration-mapper.test.ts @@ -180,8 +180,8 @@ describe('migrationGraphToRenderInput', () => { } }); - // Contract hash not in graph — a detached node is created with - // planned:false (contract exists but no migration targets it yet). + // Contract hash not in graph — a dashed edge connects the spine target + // to the contract node (planned:false). 
it('detached contract node — contract not in graph', () => { const graph = buildGraph([entry(ROOT, 'A', 'm1')]); const result = migrationGraphToRenderInput( @@ -193,13 +193,16 @@ describe('migrationGraphToRenderInput', () => { }), ); - const detached = result.graph.nodes.find((n) => n.style === 'detached'); - expect(detached).toBeDefined(); - expect(detached!.markers).toContainEqual({ kind: 'contract', planned: false }); + const contractNode = result.graph.nodes.find((n) => n.id === sid('DETACHED_HASH')); + expect(contractNode).toBeDefined(); + expect(contractNode!.markers).toContainEqual({ kind: 'contract', planned: false }); + const dashedEdge = result.graph.edges.find((e) => e.style === 'dashed'); + expect(dashedEdge).toBeDefined(); + expect(dashedEdge!.to).toBe(sid('DETACHED_HASH')); }); - // marker === contract, both off-graph. The single detached node should - // carry both db and contract markers. + // marker === contract, both off-graph. The contract node carries + // both db and contract markers, connected by a dashed edge. 
it('detached contract matching db marker — both markers on one node', () => { const graph = buildGraph([entry(ROOT, 'A', 'm1')]); const result = migrationGraphToRenderInput( @@ -211,10 +214,12 @@ describe('migrationGraphToRenderInput', () => { }), ); - const detached = result.graph.nodes.find((n) => n.style === 'detached'); - expect(detached).toBeDefined(); - expect(detached!.markers).toContainEqual({ kind: 'db' }); - expect(detached!.markers).toContainEqual({ kind: 'contract', planned: false }); + const contractNode = result.graph.nodes.find((n) => n.id === sid('OFF_GRAPH')); + expect(contractNode).toBeDefined(); + expect(contractNode!.markers).toContainEqual({ kind: 'db' }); + expect(contractNode!.markers).toContainEqual({ kind: 'contract', planned: false }); + const dashedEdge = result.graph.edges.find((e) => e.style === 'dashed'); + expect(dashedEdge).toBeDefined(); }); it('sha256:-prefixed hashes are shortened correctly', () => { diff --git a/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts b/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts index d1a3303479..3991e3dd40 100644 --- a/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts +++ b/packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts @@ -133,14 +133,18 @@ describe('extractRelevantSubgraph', () => { expect(multi.edges).toHaveLength(single.edges.length); }); - it('preserves detached nodes', () => { + it('preserves dashed edges and their targets', () => { const path = ['∅', 'A']; const graph = new RenderGraph( - [{ id: '∅' }, { id: 'A' }, { id: 'detached', style: 'detached' }], - [{ from: '∅', to: 'A' }], + [{ id: '∅' }, { id: 'A' }, { id: 'contract' }], + [ + { from: '∅', to: 'A' }, + { from: 'A', to: 'contract', style: 'dashed' }, + ], ); const sub = extractRelevantSubgraph(graph, [path]); - expect(sub.nodes.some((n) => n.id === 'detached')).toBe(true); + expect(sub.nodes.some((n) => n.id === 
'contract')).toBe(true); + expect(sub.edges.some((e) => e.style === 'dashed')).toBe(true); }); it('returns empty for no paths', () => { @@ -222,10 +226,10 @@ describe('truncateGraph', () => { expect(result.graph.nodes.some((n) => n.id === 'B4a')).toBe(false); }); - it('preserves detached nodes even when truncated', () => { + it('preserves dashed edges and their targets when truncated', () => { const result = truncateGraph(longSpineWithBranches.graph, longSpine, 3); - expect(result.graph.nodes.some((n) => n.style === 'detached')).toBe(true); expect(result.graph.nodes.some((n) => n.id === 'planned')).toBe(true); + expect(result.graph.edges.some((e) => e.style === 'dashed')).toBe(true); }); it('handles single-node spine', () => { @@ -264,7 +268,7 @@ describe('render with truncation', { timeout: GRAPH_TIMEOUT }, () => { }); expect(output).toContain('earlier migrations'); expect(output).toContain('planned'); - expect(output).toContain('◆ contract'); + expect(output).toContain('◇ contract'); expect(output).toMatchSnapshot(); }); @@ -404,12 +408,12 @@ describe('Graph renderer — colorize and markers', { timeout: GRAPH_TIMEOUT }, expect(output).toContain('B'); }); - it('renders detached node without markers', () => { + it('renders dashed edge to node without markers', () => { const graph = new RenderGraph( - [{ id: '∅' }, { id: 'A' }, { id: 'orphan', style: 'detached' }], + [{ id: '∅' }, { id: 'A' }, { id: 'orphan' }], [ { from: '∅', to: 'A' }, - { from: 'A', to: 'orphan', label: 'to-detached' }, + { from: 'A', to: 'orphan', label: 'to-detached', style: 'dashed' }, ], ); const output = graphRenderer.render(graph, { @@ -418,7 +422,7 @@ describe('Graph renderer — colorize and markers', { timeout: GRAPH_TIMEOUT }, colorize: false, }); expect(output).toContain('orphan'); - expect(output).toContain('◇'); + expect(output).toContain('┊'); }); it('renders with default colorize and rootId', () => { diff --git a/packages/1-framework/3-tooling/cli/test/utils/formatters/test-graphs.ts 
b/packages/1-framework/3-tooling/cli/test/utils/formatters/test-graphs.ts index 1d87819874..9c812c5cb1 100644 --- a/packages/1-framework/3-tooling/cli/test/utils/formatters/test-graphs.ts +++ b/packages/1-framework/3-tooling/cli/test/utils/formatters/test-graphs.ts @@ -25,6 +25,10 @@ function edge(from: string, to: string, label?: string): GraphEdge { return label !== undefined ? { from, to, label } : { from, to }; } +function dashedEdge(from: string, to: string, label?: string): GraphEdge { + return { from, to, style: 'dashed', ...(label !== undefined ? { label } : {}) }; +} + const db: NodeMarker = { kind: 'db' }; const contract: NodeMarker = { kind: 'contract', planned: true }; const unplanned: NodeMarker = { kind: 'contract', planned: false }; @@ -295,11 +299,11 @@ const complex = testGraph( { spineTarget: 'F', rootId: '∅' }, ); -// 19. Detached contract node +// 19. Detached contract node (connected by dashed edge) const detachedContract = testGraph( 'Detached contract node', - [...nodes('∅', 'A'), node('B', [ref('prod', true), db]), node('planned', [contract], 'detached')], - [edge('∅', 'A', 'init'), edge('A', 'B', 'add_users')], + [...nodes('∅', 'A'), node('B', [ref('prod', true), db]), node('planned', [unplanned])], + [edge('∅', 'A', 'init'), edge('A', 'B', 'add_users'), dashedEdge('B', 'planned')], { spineTarget: 'B', rootId: '∅' }, ); @@ -498,7 +502,7 @@ const teamWorkflow = testGraph( ...nodes('sha256:∅', 'a1b2c3d', 'e4f5g6h', 'f7a8b9c', 'g0h1i2j', 'k3l4m5n'), node('p6q7r8s', [ref('prod', true), db]), node('t9u0v1w', [ref('staging')]), - node('x2y3z4a', [unplanned], 'detached'), + node('x2y3z4a', [unplanned]), ], [ edge('sha256:∅', 'a1b2c3d', 'init'), @@ -509,6 +513,7 @@ const teamWorkflow = testGraph( edge('g0h1i2j', 'k3l4m5n', 'merge_bob'), edge('k3l4m5n', 'p6q7r8s', 'add_tags'), edge('k3l4m5n', 't9u0v1w', 'staging_experiment'), + dashedEdge('p6q7r8s', 'x2y3z4a'), ], { spineTarget: 'p6q7r8s', rootId: 'sha256:∅' }, ); @@ -531,7 +536,7 @@ const 
multiTeamWorkflow = testGraph( node('dev0002', [ref('dev-bob')]), node('pre0001', [ref('preview-1')]), node('pre0002', [ref('preview-2')]), - node('c0n7rac', [contract], 'detached'), + node('c0n7rac', [unplanned]), ], [ edge('∅', 'abc1234', 'init'), @@ -550,6 +555,7 @@ const multiTeamWorkflow = testGraph( edge('dev0002', 'pre0002', 'preview_bob'), edge('5ta9e01', 'mer9e01', 'rollback_staging'), edge('qa00001', 'f1x0001', 'rollback_qa'), + dashedEdge('re1ea5e', 'c0n7rac'), ], { spineTarget: 're1ea5e', rootId: '∅' }, ); @@ -585,8 +591,7 @@ const longSpineWithBranches = testGraph( ...nodes('B4a', 'B4b', 'B4c'), ...nodes('R1'), node('B5a', [ref('staging')]), - // detached - node('planned', [contract], 'detached'), + node('planned', [unplanned]), ], [ // spine @@ -621,6 +626,8 @@ const longSpineWithBranches = testGraph( edge('R1', 'S09', '20260110_complete_rollback'), // branch: 1-edge from S12 edge('S12', 'B5a', '20260112_staging_deploy'), + // dashed edge to contract + dashedEdge('S14', 'planned'), ], { spineTarget: 'S14', rootId: '∅' }, ); diff --git a/packages/1-framework/3-tooling/cli/tsdown.config.ts b/packages/1-framework/3-tooling/cli/tsdown.config.ts index fe9571f121..c15c4801d6 100644 --- a/packages/1-framework/3-tooling/cli/tsdown.config.ts +++ b/packages/1-framework/3-tooling/cli/tsdown.config.ts @@ -13,6 +13,7 @@ export default defineConfig({ 'src/commands/db-verify.ts', 'src/commands/contract-emit.ts', 'src/commands/migration-apply.ts', + 'src/commands/migration-new.ts', 'src/commands/migration-plan.ts', 'src/commands/migration-ref.ts', 'src/commands/migration-show.ts', diff --git a/packages/1-framework/3-tooling/migration/package.json b/packages/1-framework/3-tooling/migration/package.json index 9751b4f4fb..749776ff3e 100644 --- a/packages/1-framework/3-tooling/migration/package.json +++ b/packages/1-framework/3-tooling/migration/package.json @@ -60,6 +60,10 @@ "types": "./dist/exports/constants.d.mts", "import": "./dist/exports/constants.mjs" }, + 
"./migration-ts": { + "types": "./dist/exports/migration-ts.d.mts", + "import": "./dist/exports/migration-ts.mjs" + }, "./package.json": "./package.json" }, "repository": { diff --git a/packages/1-framework/3-tooling/migration/src/attestation.ts b/packages/1-framework/3-tooling/migration/src/attestation.ts index a2deec3a8f..9cbc745d28 100644 --- a/packages/1-framework/3-tooling/migration/src/attestation.ts +++ b/packages/1-framework/3-tooling/migration/src/attestation.ts @@ -1,9 +1,7 @@ import { createHash } from 'node:crypto'; -import { writeFile } from 'node:fs/promises'; import { canonicalizeContract } from '@prisma-next/contract/hashing'; -import { join } from 'pathe'; import { canonicalizeJson } from './canonicalize-json'; -import { readMigrationPackage } from './io'; +import { readMigrationPackage, writeMigrationManifest } from './io'; import type { MigrationManifest, MigrationOps } from './types'; export interface VerifyResult { @@ -49,7 +47,7 @@ export async function attestMigration(dir: string): Promise { const migrationId = computeMigrationId(pkg.manifest, pkg.ops); const updated = { ...pkg.manifest, migrationId }; - await writeFile(join(dir, 'migration.json'), JSON.stringify(updated, null, 2)); + await writeMigrationManifest(dir, updated); return migrationId; } diff --git a/packages/1-framework/3-tooling/migration/src/errors.ts b/packages/1-framework/3-tooling/migration/src/errors.ts index 82c8e0b777..6ffc24cbd3 100644 --- a/packages/1-framework/3-tooling/migration/src/errors.ts +++ b/packages/1-framework/3-tooling/migration/src/errors.ts @@ -135,6 +135,14 @@ export function errorInvalidRefs(refsPath: string, reason: string): MigrationToo }); } +export function errorInvalidRefFile(filePath: string, reason: string): MigrationToolsError { + return new MigrationToolsError('MIGRATION.INVALID_REF_FILE', 'Invalid ref file', { + why: `Ref file at "${filePath}" is invalid: ${reason}`, + fix: 'Ensure the ref file contains valid JSON with { "hash": "sha256:<64 hex 
chars>", "invariants": ["..."] }.', + details: { path: filePath, reason }, + }); +} + export function errorInvalidRefName(refName: string): MigrationToolsError { return new MigrationToolsError('MIGRATION.INVALID_REF_NAME', 'Invalid ref name', { why: `Ref name "${refName}" is invalid. Names must be lowercase alphanumeric with hyphens or forward slashes (no "." or ".." segments).`, diff --git a/packages/1-framework/3-tooling/migration/src/exports/io.ts b/packages/1-framework/3-tooling/migration/src/exports/io.ts index 44ab15c400..c9aa0069f1 100644 --- a/packages/1-framework/3-tooling/migration/src/exports/io.ts +++ b/packages/1-framework/3-tooling/migration/src/exports/io.ts @@ -2,5 +2,7 @@ export { formatMigrationDirName, readMigrationPackage, readMigrationsDir, + writeMigrationManifest, + writeMigrationOps, writeMigrationPackage, } from '../io'; diff --git a/packages/1-framework/3-tooling/migration/src/exports/migration-ts.ts b/packages/1-framework/3-tooling/migration/src/exports/migration-ts.ts new file mode 100644 index 0000000000..4041a0b1d1 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/exports/migration-ts.ts @@ -0,0 +1,6 @@ +export type { ScaffoldOptions } from '../migration-ts'; +export { + evaluateMigrationTs, + hasMigrationTs, + scaffoldMigrationTs, +} from '../migration-ts'; diff --git a/packages/1-framework/3-tooling/migration/src/exports/types.ts b/packages/1-framework/3-tooling/migration/src/exports/types.ts index f2af694d8b..0e9d5ba227 100644 --- a/packages/1-framework/3-tooling/migration/src/exports/types.ts +++ b/packages/1-framework/3-tooling/migration/src/exports/types.ts @@ -2,13 +2,15 @@ export { MigrationToolsError } from '../errors'; export type { AttestedMigrationBundle, AttestedMigrationManifest, + BaseMigrationBundle, + BaseMigrationBundle as MigrationBundle, + BaseMigrationBundle as MigrationPackage, + DraftMigrationBundle, DraftMigrationManifest, - MigrationBundle, - MigrationBundle as MigrationPackage, 
MigrationChainEntry, MigrationGraph, MigrationHints, MigrationManifest, MigrationOps, } from '../types'; -export { isAttested } from '../types'; +export { isAttested, isDraft } from '../types'; diff --git a/packages/1-framework/3-tooling/migration/src/io.ts b/packages/1-framework/3-tooling/migration/src/io.ts index 940234223d..fc594413fd 100644 --- a/packages/1-framework/3-tooling/migration/src/io.ts +++ b/packages/1-framework/3-tooling/migration/src/io.ts @@ -8,7 +8,7 @@ import { errorInvalidSlug, errorMissingFile, } from './errors'; -import type { MigrationBundle, MigrationManifest, MigrationOps } from './types'; +import type { BaseMigrationBundle, MigrationManifest, MigrationOps } from './types'; const MANIFEST_FILE = 'migration.json'; const OPS_FILE = 'ops.json'; @@ -48,7 +48,7 @@ const MigrationManifestSchema = type({ const MigrationOpSchema = type({ id: 'string', label: 'string', - operationClass: "'additive' | 'widening' | 'destructive'", + operationClass: "'additive' | 'widening' | 'destructive' | 'data'", }); // Intentionally shallow: operation-specific payload validation is owned by planner/runner layers. 
@@ -74,7 +74,18 @@ export async function writeMigrationPackage( await writeFile(join(dir, OPS_FILE), JSON.stringify(ops, null, 2), { flag: 'wx' }); } -export async function readMigrationPackage(dir: string): Promise { +export async function writeMigrationManifest( + dir: string, + manifest: MigrationManifest, +): Promise { + await writeFile(join(dir, MANIFEST_FILE), `${JSON.stringify(manifest, null, 2)}\n`); +} + +export async function writeMigrationOps(dir: string, ops: MigrationOps): Promise { + await writeFile(join(dir, OPS_FILE), `${JSON.stringify(ops, null, 2)}\n`); +} + +export async function readMigrationPackage(dir: string): Promise { const manifestPath = join(dir, MANIFEST_FILE); const opsPath = join(dir, OPS_FILE); @@ -142,7 +153,7 @@ function validateOps(ops: unknown, filePath: string): asserts ops is MigrationOp export async function readMigrationsDir( migrationsRoot: string, -): Promise { +): Promise { let entries: string[]; try { entries = await readdir(migrationsRoot); @@ -153,7 +164,7 @@ export async function readMigrationsDir( throw error; } - const packages: MigrationBundle[] = []; + const packages: BaseMigrationBundle[] = []; for (const entry of entries.sort()) { const entryPath = join(migrationsRoot, entry); diff --git a/packages/1-framework/3-tooling/migration/src/migration-ts.ts b/packages/1-framework/3-tooling/migration/src/migration-ts.ts new file mode 100644 index 0000000000..a12cb31a94 --- /dev/null +++ b/packages/1-framework/3-tooling/migration/src/migration-ts.ts @@ -0,0 +1,196 @@ +/** + * Utilities for scaffolding and evaluating migration.ts files. + * + * - scaffoldMigrationTs: writes a migration.ts file with boilerplate + * - evaluateMigrationTs: loads migration.ts via native Node import, returns descriptors + * + * Shared by migration plan (scaffold), migration new (scaffold), and + * migration verify (evaluate). 
+ */ + +import { stat, writeFile } from 'node:fs/promises'; +import type { OperationDescriptor } from '@prisma-next/framework-components/control'; +import { join, relative, resolve } from 'pathe'; + +const MIGRATION_TS_FILE = 'migration.ts'; + +/** + * Options for scaffolding a migration.ts file. + */ +export interface ScaffoldOptions { + /** Operation descriptors to serialize as builder calls. */ + readonly descriptors?: readonly OperationDescriptor[]; + /** Absolute path to contract.json — used to derive contract.d.ts import for typed builders. */ + readonly contractJsonPath?: string; +} + +function serializeQueryInput(input: unknown): string { + if (typeof input === 'boolean') return String(input); + if (typeof input === 'symbol') return 'TODO /* fill in using db.sql.from(...) */'; + if (input === null || input === undefined) return 'null'; + if (Array.isArray(input)) { + if (input.length === 0) return '[]'; + if (input.every((item) => typeof item === 'symbol')) + return '[TODO /* fill in using db.sql.from(...) */]'; + return `[${input.map(serializeQueryInput).join(', ')}]`; + } + return JSON.stringify(input); +} + +function descriptorToBuilderCall(desc: OperationDescriptor): string { + switch (desc.kind) { + case 'createTable': + return `createTable(${JSON.stringify(desc['table'])})`; + case 'dropTable': + return `dropTable(${JSON.stringify(desc['table'])})`; + case 'addColumn': { + const args = [JSON.stringify(desc['table']), JSON.stringify(desc['column'])]; + if (desc['overrides']) { + args.push(JSON.stringify(desc['overrides'])); + } + return `addColumn(${args.join(', ')})`; + } + case 'dropColumn': + return `dropColumn(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])})`; + case 'alterColumnType': { + const opts: Record = {}; + if (desc['using']) opts['using'] = desc['using']; + if (desc['toType']) opts['toType'] = desc['toType']; + const hasOpts = Object.keys(opts).length > 0; + return hasOpts + ? 
`alterColumnType(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])}, ${JSON.stringify(opts)})` + : `alterColumnType(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])})`; + } + case 'setNotNull': + return `setNotNull(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])})`; + case 'dropNotNull': + return `dropNotNull(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])})`; + case 'setDefault': + return `setDefault(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])})`; + case 'dropDefault': + return `dropDefault(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['column'])})`; + case 'addPrimaryKey': + return `addPrimaryKey(${JSON.stringify(desc['table'])})`; + case 'addUnique': + return `addUnique(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['columns'])})`; + case 'addForeignKey': + return `addForeignKey(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['columns'])})`; + case 'dropConstraint': + return `dropConstraint(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['constraintName'])})`; + case 'createIndex': + return `createIndex(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['columns'])})`; + case 'dropIndex': + return `dropIndex(${JSON.stringify(desc['table'])}, ${JSON.stringify(desc['indexName'])})`; + case 'createEnumType': + return desc['values'] + ? 
`createEnumType(${JSON.stringify(desc['typeName'])}, ${JSON.stringify(desc['values'])})` + : `createEnumType(${JSON.stringify(desc['typeName'])})`; + case 'addEnumValues': + return `addEnumValues(${JSON.stringify(desc['typeName'])}, ${JSON.stringify(desc['values'])})`; + case 'dropEnumType': + return `dropEnumType(${JSON.stringify(desc['typeName'])})`; + case 'renameType': + return `renameType(${JSON.stringify(desc['fromName'])}, ${JSON.stringify(desc['toName'])})`; + case 'createDependency': + return `createDependency(${JSON.stringify(desc['dependencyId'])})`; + case 'dataTransform': + return `dataTransform(${JSON.stringify(desc['name'])}, {\n check: ${serializeQueryInput(desc['check'])},\n run: ${serializeQueryInput(desc['run'])},\n })`; + default: + throw new Error(`Unknown descriptor kind: ${desc.kind}`); + } +} + +/** + * Scaffolds a migration.ts file in the given package directory. + * Serializes operation descriptors as builder calls that the user can edit. + * On verify, this file is re-evaluated to produce the final ops. + */ +export async function scaffoldMigrationTs( + packageDir: string, + options: ScaffoldOptions = {}, +): Promise { + const filePath = join(packageDir, MIGRATION_TS_FILE); + + const descriptors = options.descriptors ?? 
[]; + const hasDataTransform = descriptors.some((d) => d.kind === 'dataTransform'); + + const lines: string[] = []; + + if (hasDataTransform && options.contractJsonPath) { + const relativeContractDts = relative(packageDir, options.contractJsonPath).replace( + /\.json$/, + '.d', + ); + lines.push(`import type { Contract } from "${relativeContractDts}"`); + lines.push(`import { createBuilders } from "@prisma-next/target-postgres/migration-builders"`); + lines.push(''); + const importList = [...new Set(descriptors.map((d) => d.kind))]; + importList.push('TODO'); + lines.push(`const { ${importList.join(', ')} } = createBuilders()`); + } else { + const importList = [...new Set(descriptors.map((d) => d.kind))]; + if (importList.length === 0) { + importList.push('createTable'); + } + lines.push( + `import { ${importList.join(', ')} } from "@prisma-next/target-postgres/migration-builders"`, + ); + } + + const calls = descriptors.map((d) => ` ${descriptorToBuilderCall(d)},`).join('\n'); + const body = calls.length > 0 ? `\n${calls}\n` : ''; + + lines.push(''); + lines.push(`export default () => [${body}]`); + lines.push(''); + + await writeFile(filePath, lines.join('\n')); +} + +/** + * Checks whether a migration.ts file exists in the package directory. + */ +export async function hasMigrationTs(packageDir: string): Promise { + try { + const s = await stat(join(packageDir, MIGRATION_TS_FILE)); + return s.isFile(); + } catch { + return false; + } +} + +/** + * Evaluates a migration.ts file by loading it via native Node import. + * Returns the result of calling the default export (expected to be a + * function returning an array of operation descriptors). + * + * Requires Node ≥24 for native TypeScript support. 
+ */ +export async function evaluateMigrationTs(packageDir: string): Promise { + const filePath = resolve(join(packageDir, MIGRATION_TS_FILE)); + + try { + await stat(filePath); + } catch { + throw new Error(`migration.ts not found at "${filePath}"`); + } + + // Use native Node TS import (Node ≥24, stable type stripping) + const mod = (await import(filePath)) as { default?: unknown }; + + if (typeof mod.default !== 'function') { + throw new Error( + `migration.ts must export a default function returning an operation list. Got: ${typeof mod.default}`, + ); + } + + const result: unknown = mod.default(); + + if (!Array.isArray(result)) { + throw new Error( + `migration.ts default export must return an array of operations. Got: ${typeof result}`, + ); + } + + return result; +} diff --git a/packages/1-framework/3-tooling/migration/src/types.ts b/packages/1-framework/3-tooling/migration/src/types.ts index 835b4cf441..6bf3016ee7 100644 --- a/packages/1-framework/3-tooling/migration/src/types.ts +++ b/packages/1-framework/3-tooling/migration/src/types.ts @@ -54,7 +54,7 @@ export type MigrationOps = readonly MigrationPlanOperation[]; * An on-disk migration directory containing a manifest and operations. * The manifest may be draft or attested. */ -export interface MigrationBundle { +export interface BaseMigrationBundle { readonly dirName: string; readonly dirPath: string; readonly manifest: MigrationManifest; @@ -65,10 +65,14 @@ export interface MigrationBundle { * A bundle known to be attested (migrationId is a string). * Use this after filtering bundles to attested-only. */ -export interface AttestedMigrationBundle extends MigrationBundle { +export interface AttestedMigrationBundle extends BaseMigrationBundle { readonly manifest: AttestedMigrationManifest; } +export interface DraftMigrationBundle extends BaseMigrationBundle { + readonly manifest: DraftMigrationManifest; +} + /** * An entry in the migration graph. 
Only attested migrations appear in the * graph, so `migrationId` is always a string. @@ -93,6 +97,10 @@ export interface MigrationGraph { * Type guard that narrows a MigrationBundle to an AttestedMigrationBundle. * Use with `.filter(isAttested)` to get a typed array of attested bundles. */ -export function isAttested(bundle: MigrationBundle): bundle is AttestedMigrationBundle { +export function isAttested(bundle: BaseMigrationBundle): bundle is AttestedMigrationBundle { return typeof bundle.manifest.migrationId === 'string'; } + +export function isDraft(bundle: BaseMigrationBundle): bundle is DraftMigrationBundle { + return bundle.manifest.migrationId === null; +} diff --git a/packages/1-framework/3-tooling/migration/tsdown.config.ts b/packages/1-framework/3-tooling/migration/tsdown.config.ts index faaaea1b23..b834cd40cd 100644 --- a/packages/1-framework/3-tooling/migration/tsdown.config.ts +++ b/packages/1-framework/3-tooling/migration/tsdown.config.ts @@ -8,6 +8,7 @@ export default defineConfig({ 'exports/dag': 'src/exports/dag.ts', 'exports/refs': 'src/exports/refs.ts', 'exports/constants': 'src/exports/constants.ts', + 'exports/migration-ts': 'src/exports/migration-ts.ts', }, exports: { enabled: false }, }); diff --git a/packages/2-sql/4-lanes/sql-builder/package.json b/packages/2-sql/4-lanes/sql-builder/package.json index 7ff09e5cab..5c1f545545 100644 --- a/packages/2-sql/4-lanes/sql-builder/package.json +++ b/packages/2-sql/4-lanes/sql-builder/package.json @@ -6,7 +6,7 @@ "description": "SQL builder lane for Prisma Next", "scripts": { "build": "tsdown", - "emit": "node ../../../1-framework/3-tooling/cli/dist/cli.js contract emit --config test/fixtures/prisma-next.config.ts", + "emit": "cd ../../../../test/integration && node ../../packages/1-framework/3-tooling/cli/dist/cli.js contract emit --config test/sql-builder/fixtures/prisma-next.config.ts && cp test/sql-builder/fixtures/generated/contract.json test/sql-builder/fixtures/generated/contract.d.ts 
../../packages/2-sql/4-lanes/sql-builder/test/fixtures/generated/", "emit:check": "pnpm emit && git diff --exit-code test/fixtures/generated/", "test": "vitest run --passWithNoTests", "~test:coverage": "vitest run --coverage --passWithNoTests", @@ -23,22 +23,15 @@ }, "devDependencies": { "@prisma-next/adapter-postgres": "workspace:*", - "@prisma-next/cli": "workspace:*", "@prisma-next/contract": "workspace:*", - "@prisma-next/driver-postgres": "workspace:*", "@prisma-next/extension-pgvector": "workspace:*", - "@prisma-next/family-sql": "workspace:*", "@prisma-next/ids": "workspace:*", "@prisma-next/sql-contract": "workspace:*", - "@prisma-next/sql-runtime": "workspace:*", "@prisma-next/sql-contract-ts": "workspace:*", - "@prisma-next/target-postgres": "workspace:*", "@prisma-next/test-utils": "workspace:*", "@prisma-next/utils": "workspace:*", "@prisma-next/tsconfig": "workspace:*", "@prisma-next/tsdown": "workspace:*", - "@types/pg": "catalog:", - "pg": "catalog:", "tsdown": "catalog:", "typescript": "catalog:", "vitest": "catalog:" diff --git a/packages/2-sql/4-lanes/sql-builder/src/exports/types.ts b/packages/2-sql/4-lanes/sql-builder/src/exports/types.ts index 0b9f526105..5e6e1f91a7 100644 --- a/packages/2-sql/4-lanes/sql-builder/src/exports/types.ts +++ b/packages/2-sql/4-lanes/sql-builder/src/exports/types.ts @@ -1,7 +1,7 @@ export type { AggregateFunctions, Expression, Functions } from '../expression'; export type { ResolveRow } from '../resolve'; export type { GatedMethod, QueryContext, Scope, ScopeField, Subquery } from '../scope'; -export type { Db } from '../types/db'; +export type { Db, TableProxyContract } from '../types/db'; export type { GroupedQuery } from '../types/grouped-query'; export type { DeleteQuery, InsertQuery, UpdateQuery } from '../types/mutation-query'; export type { SelectQuery } from '../types/select-query'; diff --git a/packages/2-sql/4-lanes/sql-builder/src/runtime/index.ts b/packages/2-sql/4-lanes/sql-builder/src/runtime/index.ts 
index be62a74ce4..ac47d601c0 100644 --- a/packages/2-sql/4-lanes/sql-builder/src/runtime/index.ts +++ b/packages/2-sql/4-lanes/sql-builder/src/runtime/index.ts @@ -1,3 +1,4 @@ +export type { Db } from '../types/db'; export { ExpressionImpl } from './expression-impl'; export { createFieldProxy } from './field-proxy'; export { createAggregateFunctions, createFunctions } from './functions'; diff --git a/packages/2-sql/4-lanes/sql-builder/test/fixtures/contract.ts b/packages/2-sql/4-lanes/sql-builder/test/fixtures/contract.ts index 7172c2e6ea..deaa96a822 100644 --- a/packages/2-sql/4-lanes/sql-builder/test/fixtures/contract.ts +++ b/packages/2-sql/4-lanes/sql-builder/test/fixtures/contract.ts @@ -1,95 +1 @@ -import { int4Column, textColumn } from '@prisma-next/adapter-postgres/column-types'; -import { vectorColumn } from '@prisma-next/extension-pgvector/column-types'; -import pgvector from '@prisma-next/extension-pgvector/pack'; -import sqlFamily from '@prisma-next/family-sql/pack'; -import { uuidv4 } from '@prisma-next/ids'; -import { defineContract, field, model, rel } from '@prisma-next/sql-contract-ts/contract-builder'; -import postgresPack from '@prisma-next/target-postgres/pack'; - -const UserBase = model('User', { - fields: { - id: field.column(int4Column).id(), - name: field.column(textColumn), - email: field.column(textColumn), - invitedById: field.column(int4Column).optional().column('invited_by_id'), - }, -}); - -const Post = model('Post', { - fields: { - id: field.column(int4Column).id(), - title: field.column(textColumn), - userId: field.column(int4Column).column('user_id'), - views: field.column(int4Column), - embedding: field.column(vectorColumn).optional(), - }, - relations: { - comments: rel.hasMany(() => Comment, { by: 'postId' }), - author: rel.belongsTo(UserBase, { from: 'userId', to: 'id' }), - }, -}).sql({ table: 'posts' }); - -const Comment = model('Comment', { - fields: { - id: field.column(int4Column).id(), - body: field.column(textColumn), - 
postId: field.column(int4Column).column('post_id'), - }, -}).sql({ table: 'comments' }); - -const Profile = model('Profile', { - fields: { - id: field.column(int4Column).id(), - userId: field.column(int4Column).column('user_id'), - bio: field.column(textColumn), - }, -}).sql({ table: 'profiles' }); - -const Article = model('Article', { - fields: { - id: field.generated(uuidv4()).id(), - title: field.column(textColumn), - }, -}).sql({ table: 'articles' }); - -const User = UserBase.relations({ - invitedUsers: rel.hasMany(() => UserBase, { by: 'invitedById' }), - invitedBy: rel.belongsTo(UserBase, { from: 'invitedById', to: 'id' }), - posts: rel.hasMany(() => Post, { by: 'userId' }), - profile: rel.hasOne(Profile, { by: 'userId' }), -}).sql({ table: 'users' }); - -export const contract = defineContract({ - family: sqlFamily, - target: postgresPack, - extensionPacks: { pgvector }, - capabilities: { - sql: { - lateral: true, - returning: true, - jsonAgg: true, - enums: true, - foreignKeys: true, - autoIndexesForeignKeys: false, - }, - postgres: { - partialIndex: true, - deferrableConstraints: true, - savepoints: true, - transactionalDDL: true, - distinctOn: true, - }, - pgvector: { - ivfflat: true, - hnsw: true, - vector: true, - }, - }, - models: { - User, - Post, - Comment, - Profile, - Article, - }, -}); +export { default as contract } from './generated/contract.json' with { type: 'json' }; diff --git a/packages/2-sql/9-family/package.json b/packages/2-sql/9-family/package.json index ed3855eb11..a62e1d5647 100644 --- a/packages/2-sql/9-family/package.json +++ b/packages/2-sql/9-family/package.json @@ -13,7 +13,6 @@ "clean": "rm -rf dist dist-tsc dist-tsc-prod coverage .tmp-output" }, "dependencies": { - "@prisma-next/cli": "workspace:*", "@prisma-next/contract": "workspace:*", "@prisma-next/emitter": "workspace:*", "@prisma-next/framework-components": "workspace:*", @@ -47,6 +46,7 @@ "exports": { "./control": "./dist/control.mjs", "./control-adapter": 
"./dist/control-adapter.mjs", + "./operation-descriptors": "./dist/operation-descriptors.mjs", "./pack": "./dist/pack.mjs", "./runtime": "./dist/runtime.mjs", "./schema-verify": "./dist/schema-verify.mjs", diff --git a/packages/2-sql/9-family/src/core/migrations/contract-to-schema-ir.ts b/packages/2-sql/9-family/src/core/migrations/contract-to-schema-ir.ts index f5cc64c1cf..3e5e9be989 100644 --- a/packages/2-sql/9-family/src/core/migrations/contract-to-schema-ir.ts +++ b/packages/2-sql/9-family/src/core/migrations/contract-to-schema-ir.ts @@ -260,5 +260,10 @@ function deriveAnnotations( annotationNamespace: string, ): SqlAnnotations | undefined { if (!storage.types || Object.keys(storage.types).length === 0) return undefined; - return { [annotationNamespace]: { storageTypes: storage.types } }; + // Re-key by nativeType to match the structure produced by introspection + const byNativeType: Record = {}; + for (const typeInstance of Object.values(storage.types)) { + byNativeType[typeInstance.nativeType] = typeInstance; + } + return { [annotationNamespace]: { storageTypes: byNativeType } }; } diff --git a/packages/2-sql/9-family/src/core/migrations/descriptor-schemas.ts b/packages/2-sql/9-family/src/core/migrations/descriptor-schemas.ts new file mode 100644 index 0000000000..1c94474f57 --- /dev/null +++ b/packages/2-sql/9-family/src/core/migrations/descriptor-schemas.ts @@ -0,0 +1,172 @@ +/** + * Arktype schemas for SQL migration operation descriptors. + * + * These schemas are the source of truth for descriptor shapes. + * TypeScript types are derived via `typeof schema.infer`. 
+ */ + +import { type } from 'arktype'; + +// ============================================================================ +// Table descriptors +// ============================================================================ + +export const CreateTableSchema = type({ kind: "'createTable'", table: 'string' }); +export const DropTableSchema = type({ kind: "'dropTable'", table: 'string' }); + +// ============================================================================ +// Column descriptors +// ============================================================================ + +export const AddColumnSchema = type({ + kind: "'addColumn'", + table: 'string', + column: 'string', + 'overrides?': { 'nullable?': 'boolean' }, +}); + +export const DropColumnSchema = type({ kind: "'dropColumn'", table: 'string', column: 'string' }); + +export const AlterColumnTypeSchema = type({ + kind: "'alterColumnType'", + table: 'string', + column: 'string', + 'using?': 'string', + 'toType?': 'string', +}); + +export const SetNotNullSchema = type({ kind: "'setNotNull'", table: 'string', column: 'string' }); +export const DropNotNullSchema = type({ kind: "'dropNotNull'", table: 'string', column: 'string' }); +export const SetDefaultSchema = type({ kind: "'setDefault'", table: 'string', column: 'string' }); +export const DropDefaultSchema = type({ kind: "'dropDefault'", table: 'string', column: 'string' }); + +// ============================================================================ +// Constraint descriptors +// ============================================================================ + +export const AddPrimaryKeySchema = type({ kind: "'addPrimaryKey'", table: 'string' }); +export const AddUniqueSchema = type({ kind: "'addUnique'", table: 'string', columns: 'string[]' }); +export const AddForeignKeySchema = type({ + kind: "'addForeignKey'", + table: 'string', + columns: 'string[]', +}); +export const DropConstraintSchema = type({ + kind: "'dropConstraint'", + table: 'string', + 
constraintName: 'string', +}); + +// ============================================================================ +// Index descriptors +// ============================================================================ + +export const CreateIndexSchema = type({ + kind: "'createIndex'", + table: 'string', + columns: 'string[]', +}); +export const DropIndexSchema = type({ + kind: "'dropIndex'", + table: 'string', + indexName: 'string', +}); + +// ============================================================================ +// Type descriptors +// ============================================================================ + +export const CreateEnumTypeSchema = type({ + kind: "'createEnumType'", + typeName: 'string', + 'values?': 'string[]', +}); +export const AddEnumValuesSchema = type({ + kind: "'addEnumValues'", + typeName: 'string', + values: 'string[]', +}); +export const DropEnumTypeSchema = type({ kind: "'dropEnumType'", typeName: 'string' }); +export const RenameTypeSchema = type({ + kind: "'renameType'", + fromName: 'string', + toName: 'string', +}); + +// ============================================================================ +// Dependency descriptors +// ============================================================================ + +export const CreateDependencySchema = type({ + kind: "'createDependency'", + dependencyId: 'string', +}); + +// ============================================================================ +// Data transform descriptor +// ============================================================================ + +export const DataTransformSchema = type({ + kind: "'dataTransform'", + name: 'string', + source: 'string', + check: 'boolean | Function | symbol | object', + run: '(Function | symbol | object)[]', +}); + +// ============================================================================ +// Union schema +// ============================================================================ + +export const SqlMigrationOpDescriptorSchema = 
type.or( + CreateTableSchema, + DropTableSchema, + AddColumnSchema, + DropColumnSchema, + AlterColumnTypeSchema, + SetNotNullSchema, + DropNotNullSchema, + SetDefaultSchema, + DropDefaultSchema, + AddPrimaryKeySchema, + AddUniqueSchema, + AddForeignKeySchema, + DropConstraintSchema, + CreateIndexSchema, + DropIndexSchema, + CreateEnumTypeSchema, + AddEnumValuesSchema, + DropEnumTypeSchema, + RenameTypeSchema, + CreateDependencySchema, + DataTransformSchema, +); + +export const MigrationDescriptorArraySchema = SqlMigrationOpDescriptorSchema.array(); + +// ============================================================================ +// Derived types +// ============================================================================ + +export type CreateTableDescriptor = typeof CreateTableSchema.infer; +export type DropTableDescriptor = typeof DropTableSchema.infer; +export type AddColumnDescriptor = typeof AddColumnSchema.infer; +export type DropColumnDescriptor = typeof DropColumnSchema.infer; +export type AlterColumnTypeDescriptor = typeof AlterColumnTypeSchema.infer; +export type SetNotNullDescriptor = typeof SetNotNullSchema.infer; +export type DropNotNullDescriptor = typeof DropNotNullSchema.infer; +export type SetDefaultDescriptor = typeof SetDefaultSchema.infer; +export type DropDefaultDescriptor = typeof DropDefaultSchema.infer; +export type AddPrimaryKeyDescriptor = typeof AddPrimaryKeySchema.infer; +export type AddUniqueDescriptor = typeof AddUniqueSchema.infer; +export type AddForeignKeyDescriptor = typeof AddForeignKeySchema.infer; +export type DropConstraintDescriptor = typeof DropConstraintSchema.infer; +export type CreateIndexDescriptor = typeof CreateIndexSchema.infer; +export type DropIndexDescriptor = typeof DropIndexSchema.infer; +export type CreateEnumTypeDescriptor = typeof CreateEnumTypeSchema.infer; +export type AddEnumValuesDescriptor = typeof AddEnumValuesSchema.infer; +export type DropEnumTypeDescriptor = typeof DropEnumTypeSchema.infer; 
+export type RenameTypeDescriptor = typeof RenameTypeSchema.infer; +export type CreateDependencyDescriptor = typeof CreateDependencySchema.infer; +export type DataTransformDescriptor = typeof DataTransformSchema.infer; +export type SqlMigrationOpDescriptor = typeof SqlMigrationOpDescriptorSchema.infer; diff --git a/packages/2-sql/9-family/src/core/migrations/operation-descriptors.ts b/packages/2-sql/9-family/src/core/migrations/operation-descriptors.ts new file mode 100644 index 0000000000..c54df6ebe5 --- /dev/null +++ b/packages/2-sql/9-family/src/core/migrations/operation-descriptors.ts @@ -0,0 +1,213 @@ +/** + * SQL migration operation descriptors — builder functions and re-exports. + * + * Types are defined by arktype schemas in descriptor-schemas.ts and derived + * via `typeof schema.infer`. This file provides the builder functions that + * construct descriptors, plus re-exports the types and schemas. + */ + +import type { SqlQueryPlan } from '@prisma-next/sql-relational-core/plan'; +import { ifDefined } from '@prisma-next/utils/defined'; + +// Re-export types and schemas from the schema source of truth +export type { + AddColumnDescriptor, + AddEnumValuesDescriptor, + AddForeignKeyDescriptor, + AddPrimaryKeyDescriptor, + AddUniqueDescriptor, + AlterColumnTypeDescriptor, + CreateDependencyDescriptor, + CreateEnumTypeDescriptor, + CreateIndexDescriptor, + CreateTableDescriptor, + DataTransformDescriptor, + DropColumnDescriptor, + DropConstraintDescriptor, + DropDefaultDescriptor, + DropEnumTypeDescriptor, + DropIndexDescriptor, + DropNotNullDescriptor, + DropTableDescriptor, + RenameTypeDescriptor, + SetDefaultDescriptor, + SetNotNullDescriptor, + SqlMigrationOpDescriptor, +} from './descriptor-schemas'; + +export { MigrationDescriptorArraySchema } from './descriptor-schemas'; + +import type { + AddColumnDescriptor, + AddEnumValuesDescriptor, + AddForeignKeyDescriptor, + AddPrimaryKeyDescriptor, + AddUniqueDescriptor, + AlterColumnTypeDescriptor, + 
CreateDependencyDescriptor, + CreateEnumTypeDescriptor, + CreateIndexDescriptor, + CreateTableDescriptor, + DropColumnDescriptor, + DropConstraintDescriptor, + DropDefaultDescriptor, + DropEnumTypeDescriptor, + DropIndexDescriptor, + DropNotNullDescriptor, + DropTableDescriptor, + RenameTypeDescriptor, + SetDefaultDescriptor, + SetNotNullDescriptor, +} from './descriptor-schemas'; + +// ============================================================================ +// Data transform support types (not validated by arktype — runtime values) +// ============================================================================ + +/** Something that can produce a SqlQueryPlan via .build(). */ +export interface Buildable { + build(): SqlQueryPlan; +} + +/** + * Sentinel value for unimplemented data transform queries. + * The scaffold renders this as a TODO comment. The resolver throws if it encounters one. + */ +export const TODO = Symbol.for('prisma-next.migration.todo'); +export type TodoMarker = typeof TODO; + +// ============================================================================ +// Builder functions +// ============================================================================ + +export function createTable(table: string): CreateTableDescriptor { + return { kind: 'createTable', table }; +} + +export function dropTable(table: string): DropTableDescriptor { + return { kind: 'dropTable', table }; +} + +export function addColumn( + table: string, + column: string, + overrides?: { nullable?: boolean }, +): AddColumnDescriptor { + return { kind: 'addColumn', table, column, ...ifDefined('overrides', overrides) }; +} + +export function dropColumn(table: string, column: string): DropColumnDescriptor { + return { kind: 'dropColumn', table, column }; +} + +export function alterColumnType( + table: string, + column: string, + opts?: string | { using?: string; toType?: string }, +): AlterColumnTypeDescriptor { + const using = typeof opts === 'string' ? 
opts : opts?.using; + const toType = typeof opts === 'string' ? undefined : opts?.toType; + return { + kind: 'alterColumnType', + table, + column, + ...ifDefined('using', using), + ...ifDefined('toType', toType), + }; +} + +export function setNotNull(table: string, column: string): SetNotNullDescriptor { + return { kind: 'setNotNull', table, column }; +} + +export function dropNotNull(table: string, column: string): DropNotNullDescriptor { + return { kind: 'dropNotNull', table, column }; +} + +export function setDefault(table: string, column: string): SetDefaultDescriptor { + return { kind: 'setDefault', table, column }; +} + +export function dropDefault(table: string, column: string): DropDefaultDescriptor { + return { kind: 'dropDefault', table, column }; +} + +export function addPrimaryKey(table: string): AddPrimaryKeyDescriptor { + return { kind: 'addPrimaryKey', table }; +} + +export function addUnique(table: string, columns: readonly string[]): AddUniqueDescriptor { + return { kind: 'addUnique', table, columns: [...columns] }; +} + +export function addForeignKey(table: string, columns: readonly string[]): AddForeignKeyDescriptor { + return { kind: 'addForeignKey', table, columns: [...columns] }; +} + +export function dropConstraint(table: string, constraintName: string): DropConstraintDescriptor { + return { kind: 'dropConstraint', table, constraintName }; +} + +export function createIndex(table: string, columns: readonly string[]): CreateIndexDescriptor { + return { kind: 'createIndex', table, columns: [...columns] }; +} + +export function dropIndex(table: string, indexName: string): DropIndexDescriptor { + return { kind: 'dropIndex', table, indexName }; +} + +export function createEnumType( + typeName: string, + values?: readonly string[], +): CreateEnumTypeDescriptor { + return { + kind: 'createEnumType', + typeName, + ...ifDefined('values', values ? 
[...values] : undefined), + }; +} + +export function addEnumValues( + typeName: string, + values: readonly string[], +): AddEnumValuesDescriptor { + return { kind: 'addEnumValues', typeName, values: [...values] }; +} + +export function dropEnumType(typeName: string): DropEnumTypeDescriptor { + return { kind: 'dropEnumType', typeName }; +} + +export function renameType(fromName: string, toName: string): RenameTypeDescriptor { + return { kind: 'renameType', fromName, toName }; +} + +export function createDependency(dependencyId: string): CreateDependencyDescriptor { + return { kind: 'createDependency', dependencyId }; +} + +/** + * All structural builder functions keyed by descriptor kind. + */ +export const builders = { + createTable, + dropTable, + addColumn, + dropColumn, + alterColumnType, + setNotNull, + dropNotNull, + setDefault, + dropDefault, + addPrimaryKey, + addUnique, + addForeignKey, + dropConstraint, + createIndex, + dropIndex, + createEnumType, + addEnumValues, + dropEnumType, + renameType, + createDependency, +} as const; diff --git a/packages/2-sql/9-family/src/core/migrations/types.ts b/packages/2-sql/9-family/src/core/migrations/types.ts index da317645f7..a57bcf3bc5 100644 --- a/packages/2-sql/9-family/src/core/migrations/types.ts +++ b/packages/2-sql/9-family/src/core/migrations/types.ts @@ -22,6 +22,7 @@ import type { SchemaIssue, } from '@prisma-next/framework-components/control'; import type { SqlStorage, StorageTypeInstance } from '@prisma-next/sql-contract/types'; +import type { SqlOperationDescriptor } from '@prisma-next/sql-operations'; import type { SqlSchemaIR } from '@prisma-next/sql-schema-ir/types'; import type { Result } from '@prisma-next/utils/result'; import type { SqlControlFamilyInstance } from '../control-instance'; @@ -119,6 +120,8 @@ export interface SqlControlStaticContributions { readonly pslTypeDescriptors?: () => { readonly scalarTypeDescriptors: ReadonlyMap; }; + /** Query operation descriptors for the migration DSL 
client. Same static data as runtime descriptors. */ + readonly queryOperations?: () => ReadonlyArray; } export interface ComponentDatabaseDependency { diff --git a/packages/2-sql/9-family/src/core/schema-verify/verify-helpers.ts b/packages/2-sql/9-family/src/core/schema-verify/verify-helpers.ts index a8834e0e43..8ef469b890 100644 --- a/packages/2-sql/9-family/src/core/schema-verify/verify-helpers.ts +++ b/packages/2-sql/9-family/src/core/schema-verify/verify-helpers.ts @@ -493,6 +493,7 @@ export function verifyDatabaseDependencies( if (!isSatisfied) { const depIssue: SchemaIssue = { kind: 'dependency_missing', + dependencyId: dependency.id, message: `Dependency "${dependency.id}" is missing from database`, }; issues.push(depIssue); diff --git a/packages/2-sql/9-family/src/exports/operation-descriptors.ts b/packages/2-sql/9-family/src/exports/operation-descriptors.ts new file mode 100644 index 0000000000..55bc02645b --- /dev/null +++ b/packages/2-sql/9-family/src/exports/operation-descriptors.ts @@ -0,0 +1,52 @@ +export type { + AddColumnDescriptor, + AddEnumValuesDescriptor, + AddForeignKeyDescriptor, + AddPrimaryKeyDescriptor, + AddUniqueDescriptor, + AlterColumnTypeDescriptor, + CreateDependencyDescriptor, + CreateEnumTypeDescriptor, + CreateIndexDescriptor, + CreateTableDescriptor, + DataTransformDescriptor, + DropColumnDescriptor, + DropConstraintDescriptor, + DropDefaultDescriptor, + DropEnumTypeDescriptor, + DropIndexDescriptor, + DropNotNullDescriptor, + DropTableDescriptor, + RenameTypeDescriptor, + SetDefaultDescriptor, + SetNotNullDescriptor, + SqlMigrationOpDescriptor, +} from '../core/migrations/descriptor-schemas'; + +export { + addColumn, + addEnumValues, + addForeignKey, + addPrimaryKey, + addUnique, + alterColumnType, + type Buildable, + builders, + createDependency, + createEnumType, + createIndex, + createTable, + dropColumn, + dropConstraint, + dropDefault, + dropEnumType, + dropIndex, + dropNotNull, + dropTable, + MigrationDescriptorArraySchema, 
+ renameType, + setDefault, + setNotNull, + TODO, + type TodoMarker, +} from '../core/migrations/operation-descriptors'; diff --git a/packages/2-sql/9-family/test/contract-to-schema-ir.test.ts b/packages/2-sql/9-family/test/contract-to-schema-ir.test.ts index 587ca04b23..2a148f0665 100644 --- a/packages/2-sql/9-family/test/contract-to-schema-ir.test.ts +++ b/packages/2-sql/9-family/test/contract-to-schema-ir.test.ts @@ -398,7 +398,7 @@ describe('contractToSchemaIR', () => { expect(result.tables['T']!.columns['embedding']!.nativeType).toBe('vector'); expect((result.annotations as Record)?.['pg']).toMatchObject({ storageTypes: { - Embedding: { + vector: { codecId: 'pgvector/vector@1', nativeType: 'vector', typeParams: { dimensions: 1536 }, @@ -431,7 +431,7 @@ describe('contractToSchemaIR', () => { }); expect((result.annotations as Record)?.['custom']).toMatchObject({ storageTypes: { - Embedding: { + vector: { codecId: 'pgvector/vector@1', nativeType: 'vector', typeParams: { dimensions: 1536 }, diff --git a/packages/2-sql/9-family/tsdown.config.ts b/packages/2-sql/9-family/tsdown.config.ts index 5f6b4faa99..fa4dfc4b7d 100644 --- a/packages/2-sql/9-family/tsdown.config.ts +++ b/packages/2-sql/9-family/tsdown.config.ts @@ -4,6 +4,7 @@ export default defineConfig({ entry: [ 'src/exports/control.ts', 'src/exports/control-adapter.ts', + 'src/exports/operation-descriptors.ts', 'src/exports/pack.ts', 'src/exports/runtime.ts', 'src/exports/verify.ts', diff --git a/packages/3-extensions/pgvector/package.json b/packages/3-extensions/pgvector/package.json index 6c0c7ed1c1..5103b28e1c 100644 --- a/packages/3-extensions/pgvector/package.json +++ b/packages/3-extensions/pgvector/package.json @@ -14,7 +14,6 @@ "clean": "rm -rf dist dist-tsc dist-tsc-prod coverage .tmp-output" }, "dependencies": { - "@prisma-next/cli": "workspace:*", "@prisma-next/contract": "workspace:*", "@prisma-next/contract-authoring": "workspace:*", "@prisma-next/family-sql": "workspace:*", diff --git 
a/packages/3-extensions/pgvector/src/exports/control.ts b/packages/3-extensions/pgvector/src/exports/control.ts index 0603e7824c..6ac8d0a72f 100644 --- a/packages/3-extensions/pgvector/src/exports/control.ts +++ b/packages/3-extensions/pgvector/src/exports/control.ts @@ -3,7 +3,7 @@ import type { ComponentDatabaseDependencies, SqlControlExtensionDescriptor, } from '@prisma-next/family-sql/control'; -import { pgvectorPackMeta } from '../core/descriptor-meta'; +import { pgvectorPackMeta, pgvectorQueryOperations } from '../core/descriptor-meta'; const PGVECTOR_CODEC_ID = 'pg/vector@1' as const; @@ -75,6 +75,7 @@ const pgvectorExtensionDescriptor: SqlControlExtensionDescriptor<'postgres'> = { }, }, }, + queryOperations: () => pgvectorQueryOperations, databaseDependencies: pgvectorDatabaseDependencies, create: () => ({ familyId: 'sql' as const, diff --git a/packages/3-targets/3-targets/postgres/package.json b/packages/3-targets/3-targets/postgres/package.json index 5dfd5ef60d..20671e4e5c 100644 --- a/packages/3-targets/3-targets/postgres/package.json +++ b/packages/3-targets/3-targets/postgres/package.json @@ -15,11 +15,12 @@ "clean": "rm -rf dist dist-tsc dist-tsc-prod coverage .tmp-output" }, "dependencies": { - "@prisma-next/cli": "workspace:*", "@prisma-next/contract": "workspace:*", "@prisma-next/family-sql": "workspace:*", "@prisma-next/framework-components": "workspace:*", + "@prisma-next/sql-builder": "workspace:*", "@prisma-next/sql-contract": "workspace:*", + "@prisma-next/sql-operations": "workspace:*", "@prisma-next/sql-errors": "workspace:*", "@prisma-next/sql-relational-core": "workspace:*", "@prisma-next/sql-runtime": "workspace:*", @@ -45,6 +46,7 @@ ], "exports": { "./control": "./dist/control.mjs", + "./migration-builders": "./dist/migration-builders.mjs", "./pack": "./dist/pack.mjs", "./runtime": "./dist/runtime.mjs", "./package.json": "./package.json" diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/descriptor-planner.ts 
b/packages/3-targets/3-targets/postgres/src/core/migrations/descriptor-planner.ts new file mode 100644 index 0000000000..ff0eec93bc --- /dev/null +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/descriptor-planner.ts @@ -0,0 +1,466 @@ +/** + * Descriptor-based migration planner. + * + * Takes schema issues (from verifySqlSchema) and emits MigrationOpDescriptor[]. + * Migration strategies consume issues they recognize and produce specialized op + * sequences (e.g., NOT NULL backfill → addColumn(nullable) + dataTransform + setNotNull). + * Remaining issues get default descriptor mapping. + * + * This planner does NOT produce SqlMigrationPlanOperation — that's the resolver's job. + * The separation means the same descriptors work for both planner-generated and + * user-authored migrations. + */ + +import type { Contract } from '@prisma-next/contract/types'; +import type { SqlPlannerConflict } from '@prisma-next/family-sql/control'; +import type { SchemaIssue } from '@prisma-next/framework-components/control'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import type { Result } from '@prisma-next/utils/result'; +import { notOk, ok } from '@prisma-next/utils/result'; +import { + addColumn, + addForeignKey, + addPrimaryKey, + addUnique, + alterColumnType, + createDependency, + createEnumType, + createIndex, + createTable, + dropColumn, + dropConstraint, + dropDefault, + dropIndex, + dropNotNull, + dropTable, + type MigrationOpDescriptor, + setDefault, + setNotNull, +} from './operation-descriptors'; +import { + type MigrationStrategy, + migrationPlanStrategies, + type StrategyContext, +} from './planner-strategies'; + +export type { MigrationStrategy, StrategyContext }; + +// ============================================================================ +// Issue kind ordering (dependency order) +// ============================================================================ + +const ISSUE_KIND_ORDER: Record = { + // Dependencies and 
types first + dependency_missing: 1, + type_missing: 2, + type_values_mismatch: 3, + enum_values_changed: 3, + + // Drops (reconciliation — clear the way for creates) + // FKs dropped first (they depend on other constraints) + extra_foreign_key: 10, + extra_unique_constraint: 11, + extra_primary_key: 12, + extra_index: 13, + extra_default: 14, + extra_column: 15, + extra_table: 16, + + // Tables before columns + missing_table: 20, + + // Columns before constraints + missing_column: 30, + + // Reconciliation alters (on existing objects) + type_mismatch: 40, + nullability_mismatch: 41, + default_missing: 42, + default_mismatch: 43, + + // Constraints after columns exist + primary_key_mismatch: 50, + unique_constraint_mismatch: 51, + index_mismatch: 52, + foreign_key_mismatch: 60, +}; + +function issueOrder(issue: SchemaIssue): number { + return ISSUE_KIND_ORDER[issue.kind] ?? 99; +} + +// ============================================================================ +// Conflict helpers +// ============================================================================ + +function issueConflict( + kind: SqlPlannerConflict['kind'], + summary: string, + location?: SqlPlannerConflict['location'], +): SqlPlannerConflict { + return { + kind, + summary, + why: 'Use `migration new` to author a custom migration for this change.', + ...(location ? 
{ location } : {}), + }; +} + +// ============================================================================ +// Default issue-to-descriptor mapping +// ============================================================================ + +function isMissing(issue: SchemaIssue): boolean { + if (issue.kind === 'enum_values_changed') return false; + return issue.actual === undefined; +} + +function mapIssue( + issue: SchemaIssue, + ctx: StrategyContext, +): Result { + switch (issue.kind) { + // Additive — missing structures + case 'missing_table': { + if (!issue.table) + return notOk( + issueConflict('unsupportedOperation', 'Missing table issue has no table name'), + ); + const contractTable = ctx.toContract.storage.tables[issue.table]; + if (!contractTable) { + return notOk( + issueConflict( + 'unsupportedOperation', + `Table "${issue.table}" reported missing but not found in destination contract`, + ), + ); + } + const ops: MigrationOpDescriptor[] = [createTable(issue.table)]; + for (const index of contractTable.indexes) { + ops.push(createIndex(issue.table, [...index.columns])); + } + const explicitIndexColumnSets = new Set( + contractTable.indexes.map((idx) => idx.columns.join(',')), + ); + for (const fk of contractTable.foreignKeys) { + if (fk.constraint) { + ops.push(addForeignKey(issue.table, [...fk.columns])); + } + if (fk.index && !explicitIndexColumnSets.has(fk.columns.join(','))) { + ops.push(createIndex(issue.table, [...fk.columns])); + } + } + for (const unique of contractTable.uniques) { + ops.push(addUnique(issue.table, [...unique.columns])); + } + return ok(ops); + } + + case 'missing_column': + if (!issue.table || !issue.column) + return notOk( + issueConflict('unsupportedOperation', 'Missing column issue has no table/column name'), + ); + return ok([addColumn(issue.table, issue.column)]); + + case 'default_missing': + if (!issue.table || !issue.column) + return notOk( + issueConflict('unsupportedOperation', 'Default missing issue has no table/column 
name'), + ); + return ok([setDefault(issue.table, issue.column)]); + + // Destructive — extra structures + case 'extra_table': + if (!issue.table) + return notOk(issueConflict('unsupportedOperation', 'Extra table issue has no table name')); + return ok([dropTable(issue.table)]); + + case 'extra_column': + if (!issue.table || !issue.column) + return notOk( + issueConflict('unsupportedOperation', 'Extra column issue has no table/column name'), + ); + return ok([dropColumn(issue.table, issue.column)]); + + case 'extra_index': + if (!issue.table || !issue.indexOrConstraint) + return notOk( + issueConflict('unsupportedOperation', 'Extra index issue has no table/index name'), + ); + return ok([dropIndex(issue.table, issue.indexOrConstraint)]); + + case 'extra_unique_constraint': + case 'extra_foreign_key': + case 'extra_primary_key': + if (!issue.table || !issue.indexOrConstraint) + return notOk( + issueConflict( + 'unsupportedOperation', + 'Extra constraint issue has no table/constraint name', + ), + ); + return ok([dropConstraint(issue.table, issue.indexOrConstraint)]); + + case 'extra_default': + if (!issue.table || !issue.column) + return notOk( + issueConflict('unsupportedOperation', 'Extra default issue has no table/column name'), + ); + return ok([dropDefault(issue.table, issue.column)]); + + // Nullability changes + case 'nullability_mismatch': { + if (!issue.table || !issue.column) + return notOk( + issueConflict('nullabilityConflict', 'Nullability mismatch has no table/column name'), + ); + const column = ctx.toContract.storage.tables[issue.table]?.columns[issue.column]; + if (!column) + return notOk( + issueConflict( + 'nullabilityConflict', + `Column "${issue.table}"."${issue.column}" not found in destination contract`, + ), + ); + return ok( + column.nullable + ? 
[dropNotNull(issue.table, issue.column)] + : [setNotNull(issue.table, issue.column)], + ); + } + + // Type changes + case 'type_mismatch': + if (!issue.table || !issue.column) + return notOk(issueConflict('typeMismatch', 'Type mismatch has no table/column name')); + return ok([alterColumnType(issue.table, issue.column)]); + + // Default changes + case 'default_mismatch': + if (!issue.table || !issue.column) + return notOk( + issueConflict('unsupportedOperation', 'Default mismatch has no table/column name'), + ); + return ok([setDefault(issue.table, issue.column)]); + + // Constraints — missing (actual undefined) vs mismatched (actual defined) + case 'primary_key_mismatch': + if (!issue.table) + return notOk(issueConflict('indexIncompatible', 'Primary key issue has no table name')); + if (isMissing(issue)) return ok([addPrimaryKey(issue.table)]); + return notOk( + issueConflict( + 'indexIncompatible', + `Primary key on "${issue.table}" has different columns (expected: ${issue.expected}, actual: ${issue.actual})`, + { table: issue.table }, + ), + ); + + case 'unique_constraint_mismatch': + if (!issue.table) + return notOk( + issueConflict('indexIncompatible', 'Unique constraint issue has no table name'), + ); + if (isMissing(issue) && issue.expected) { + const columns = issue.expected.split(', '); + return ok([addUnique(issue.table, columns)]); + } + return notOk( + issueConflict( + 'indexIncompatible', + `Unique constraint on "${issue.table}" differs (expected: ${issue.expected}, actual: ${issue.actual})`, + { table: issue.table }, + ), + ); + + case 'index_mismatch': + if (!issue.table) + return notOk(issueConflict('indexIncompatible', 'Index issue has no table name')); + if (isMissing(issue) && issue.expected) { + const columns = issue.expected.split(', '); + return ok([createIndex(issue.table, columns)]); + } + return notOk( + issueConflict( + 'indexIncompatible', + `Index on "${issue.table}" differs (expected: ${issue.expected}, actual: ${issue.actual})`, + { 
table: issue.table }, + ), + ); + + case 'foreign_key_mismatch': + if (!issue.table) + return notOk(issueConflict('foreignKeyConflict', 'Foreign key issue has no table name')); + if (isMissing(issue) && issue.expected) { + const arrowIdx = issue.expected.indexOf(' -> '); + if (arrowIdx >= 0) { + const columns = issue.expected.slice(0, arrowIdx).split(', '); + return ok([addForeignKey(issue.table, columns)]); + } + } + return notOk( + issueConflict( + 'foreignKeyConflict', + `Foreign key on "${issue.table}" differs (expected: ${issue.expected}, actual: ${issue.actual})`, + { table: issue.table }, + ), + ); + + // Types + case 'type_missing': { + if (!issue.typeName) + return notOk(issueConflict('unsupportedOperation', 'Type missing issue has no typeName')); + const typeInstance = ctx.toContract.storage.types?.[issue.typeName]; + if (!typeInstance) { + return notOk( + issueConflict( + 'unsupportedOperation', + `Type "${issue.typeName}" reported missing but not found in destination contract`, + ), + ); + } + // TODO: codec-specific descriptor dispatch should be driven by a registry, not hardcoded prefix checks + if (typeInstance.codecId.startsWith('pg/enum')) { + return ok([createEnumType(issue.typeName)]); + } + return notOk( + issueConflict( + 'unsupportedOperation', + `Type "${issue.typeName}" uses codec "${typeInstance.codecId}" — only enum types are supported by the descriptor planner`, + ), + ); + } + + case 'type_values_mismatch': + return notOk( + issueConflict( + 'unsupportedOperation', + `Type "${issue.typeName ?? 
'unknown'}" values differ — type alteration not yet supported by descriptor planner`, + ), + ); + + // Dependencies + case 'dependency_missing': + if (!issue.dependencyId) + return notOk( + issueConflict('unsupportedOperation', 'Dependency missing issue has no dependencyId'), + ); + return ok([createDependency(issue.dependencyId)]); + default: + return notOk( + issueConflict( + 'unsupportedOperation', + `Unhandled issue kind: ${(issue as SchemaIssue).kind}`, + ), + ); + } +} + +// ============================================================================ +// Planner entry point +// ============================================================================ + +export interface DescriptorPlannerOptions { + readonly issues: readonly SchemaIssue[]; + readonly toContract: Contract; + readonly fromContract: Contract | null; + readonly strategies?: readonly MigrationStrategy[]; +} + +export interface DescriptorPlannerValue { + readonly descriptors: readonly MigrationOpDescriptor[]; + readonly needsDataMigration: boolean; +} + +export function planDescriptors( + options: DescriptorPlannerOptions, +): Result { + const context: StrategyContext = { + toContract: options.toContract, + fromContract: options.fromContract, + }; + + const strategies = options.strategies ?? 
migrationPlanStrategies; + + // Phase 1: Pattern matching — consume recognized issues + let remaining = options.issues; + const patternOps: MigrationOpDescriptor[] = []; + + for (const strategy of strategies) { + const result = strategy(remaining, context); + if (result.kind === 'match') { + remaining = result.issues; + patternOps.push(...result.ops); + } + } + + // Phase 2: Sort remaining issues by dependency order + const sorted = [...remaining].sort((a, b) => issueOrder(a) - issueOrder(b)); + + // Phase 3: Map remaining issues to descriptors, collecting conflicts + const defaultOps: MigrationOpDescriptor[] = []; + const conflicts: SqlPlannerConflict[] = []; + + for (const issue of sorted) { + const result = mapIssue(issue, context); + if (result.ok) { + defaultOps.push(...result.value); + } else { + conflicts.push(result.failure); + } + } + + if (conflicts.length > 0) { + return notOk(conflicts); + } + + // Phase 4: Order descriptors by operation kind + const depOps = defaultOps.filter( + (op) => + op.kind === 'createDependency' || + op.kind === 'createEnumType' || + op.kind === 'addEnumValues' || + op.kind === 'dropEnumType' || + op.kind === 'renameType', + ); + const dropOps = defaultOps.filter( + (op) => + op.kind === 'dropTable' || + op.kind === 'dropColumn' || + op.kind === 'dropConstraint' || + op.kind === 'dropIndex' || + op.kind === 'dropDefault', + ); + const tableOps = defaultOps.filter((op) => op.kind === 'createTable'); + const columnOps = defaultOps.filter((op) => op.kind === 'addColumn'); + const alterOps = defaultOps.filter( + (op) => + op.kind === 'alterColumnType' || + op.kind === 'setNotNull' || + op.kind === 'dropNotNull' || + op.kind === 'setDefault', + ); + const constraintOps = defaultOps.filter( + (op) => + op.kind === 'addPrimaryKey' || + op.kind === 'addUnique' || + op.kind === 'createIndex' || + op.kind === 'addForeignKey', + ); + + const descriptors: MigrationOpDescriptor[] = [ + ...depOps, + ...dropOps, + ...tableOps, + ...columnOps, 
+ ...patternOps, + ...alterOps, + ...constraintOps, + ]; + + return ok({ + descriptors, + needsDataMigration: descriptors.some((op) => op.kind === 'dataTransform'), + }); +} diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/operation-descriptors.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/operation-descriptors.ts new file mode 100644 index 0000000000..76589d75d0 --- /dev/null +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/operation-descriptors.ts @@ -0,0 +1,169 @@ +/** + * Postgres migration operation descriptors. + * + * Re-exports all structural SQL descriptors from @prisma-next/family-sql + * and adds data transform support with typed query builder callbacks. + */ + +import type { Db, TableProxyContract } from '@prisma-next/sql-builder/types'; +import type { SqlQueryPlan } from '@prisma-next/sql-relational-core/plan'; + +// Re-export structural descriptors from sql-family +export { + type AddColumnDescriptor, + type AddEnumValuesDescriptor, + type AddForeignKeyDescriptor, + type AddPrimaryKeyDescriptor, + type AddUniqueDescriptor, + type AlterColumnTypeDescriptor, + addColumn, + addEnumValues, + addForeignKey, + addPrimaryKey, + addUnique, + alterColumnType, + type Buildable, + type CreateDependencyDescriptor, + type CreateEnumTypeDescriptor, + type CreateIndexDescriptor, + type CreateTableDescriptor, + createDependency, + createEnumType, + createIndex, + createTable, + type DropColumnDescriptor, + type DropConstraintDescriptor, + type DropDefaultDescriptor, + type DropEnumTypeDescriptor, + type DropIndexDescriptor, + type DropNotNullDescriptor, + type DropTableDescriptor, + dropColumn, + dropConstraint, + dropDefault, + dropEnumType, + dropIndex, + dropNotNull, + dropTable, + type RenameTypeDescriptor, + renameType, + type SetDefaultDescriptor, + type SetNotNullDescriptor, + type SqlMigrationOpDescriptor, + setDefault, + setNotNull, + TODO, + type TodoMarker, +} from 
'@prisma-next/family-sql/operation-descriptors'; + +import { + type Buildable, + type DataTransformDescriptor, + type SqlMigrationOpDescriptor, + builders as structuralBuilders, + TODO, + type TodoMarker, +} from '@prisma-next/family-sql/operation-descriptors'; + +export type { DataTransformDescriptor }; + +// ============================================================================ +// Typed data transform inputs (for createBuilders()) +// ============================================================================ + +/** + * A single query plan input — callback, pre-built plan, or TODO placeholder. + * @template TContract - The contract type for the Db client. Defaults to any + * (untyped). Use createBuilders() to get typed callbacks. + */ +// biome-ignore lint/suspicious/noExplicitAny: default is untyped; createBuilders narrows this +export type QueryPlanInput = + | ((db: Db) => Buildable) + | SqlQueryPlan + | TodoMarker; + +/** Run input — a callback returning one or many buildables, or a pre-built plan/TODO. 
*/ +// biome-ignore lint/suspicious/noExplicitAny: default is untyped; createBuilders narrows this +export type RunInput = + | ((db: Db) => Buildable | readonly Buildable[]) + | SqlQueryPlan + | TodoMarker; + +// ============================================================================ +// Postgres descriptor union = SQL structural + data transforms +// ============================================================================ + +export type PostgresMigrationOpDescriptor = SqlMigrationOpDescriptor | DataTransformDescriptor; + +/** @deprecated Use PostgresMigrationOpDescriptor */ +export type MigrationOpDescriptor = PostgresMigrationOpDescriptor; + +// ============================================================================ +// Data transform builder +// ============================================================================ + +function resolveInput(input: QueryPlanInput): QueryPlanInput { + if (typeof input === 'symbol' || typeof input === 'function') return input; + if ('build' in input && typeof (input as Buildable).build === 'function') { + return (input as Buildable).build(); + } + return input; +} + +// biome-ignore lint/suspicious/noExplicitAny: default is untyped; createBuilders narrows this +export function dataTransform( + name: string, + options: { + check: QueryPlanInput | Buildable | boolean; + run: RunInput | Buildable; + }, +): DataTransformDescriptor { + const check = + typeof options.check === 'boolean' + ? 
options.check + : resolveInput(options.check as QueryPlanInput); + + const run: (symbol | object | ((...args: never[]) => unknown))[] = []; + if (typeof options.run === 'function') { + run.push(options.run); + } else if (typeof options.run === 'symbol') { + run.push(options.run); + } else { + run.push(resolveInput(options.run as QueryPlanInput)); + } + return { + kind: 'dataTransform' as const, + name, + source: 'migration.ts', + check, + run, + }; +} + +/** + * Creates typed migration builder functions parameterized by the contract type. + * The dataTransform callback receives a fully typed Db client. + * + * Usage: + * ```typescript + * import type { Contract } from "../../src/prisma/contract.d" + * import { createBuilders } from "@prisma-next/target-postgres/migration-builders" + * + * const { addColumn, dataTransform, setNotNull } = createBuilders() + * ``` + */ +export function createBuilders() { + return { + ...structuralBuilders, + dataTransform: dataTransform, + TODO, + }; +} + +/** + * All builder functions keyed by descriptor kind. + */ +export const builders = { + ...structuralBuilders, + dataTransform, +} as const; diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/operation-resolver.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/operation-resolver.ts new file mode 100644 index 0000000000..8040773a40 --- /dev/null +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/operation-resolver.ts @@ -0,0 +1,929 @@ +/** + * Resolves thin operation descriptors into SqlMigrationPlanOperation objects + * by looking up contract types and calling existing planner SQL helpers. + * + * This is the bridge between the ergonomic builder API (descriptors) and + * the planner's SQL generation pipeline. It runs at verification time. 
+ */ + +import { createPostgresAdapter } from '@prisma-next/adapter-postgres/adapter'; +import type { Contract } from '@prisma-next/contract/types'; +import type { + CodecControlHooks, + ComponentDatabaseDependency, + SqlMigrationPlanOperation, +} from '@prisma-next/family-sql/control'; +import type { + DataTransformOperation, + SerializedQueryPlan, +} from '@prisma-next/framework-components/control'; +import type { SqlStorage, StorageColumn, StorageTable } from '@prisma-next/sql-contract/types'; +import type { SqlQueryPlan } from '@prisma-next/sql-relational-core/plan'; +import { lowerSqlPlan } from '@prisma-next/sql-runtime'; +import { ifDefined } from '@prisma-next/utils/defined'; +import type { + AddColumnDescriptor, + AddEnumValuesDescriptor, + AddForeignKeyDescriptor, + AddPrimaryKeyDescriptor, + AddUniqueDescriptor, + AlterColumnTypeDescriptor, + CreateDependencyDescriptor, + CreateEnumTypeDescriptor, + CreateIndexDescriptor, + CreateTableDescriptor, + DataTransformDescriptor, + DropColumnDescriptor, + DropConstraintDescriptor, + DropDefaultDescriptor, + DropEnumTypeDescriptor, + DropIndexDescriptor, + DropNotNullDescriptor, + DropTableDescriptor, + MigrationOpDescriptor, + RenameTypeDescriptor, + SetDefaultDescriptor, + SetNotNullDescriptor, +} from './operation-descriptors'; +import { + buildAddColumnSql, + buildColumnDefaultSql, + buildCreateTableSql, + buildForeignKeySql, +} from './planner-ddl-builders'; +import { + buildExpectedFormatType, + columnExistsCheck, + columnNullabilityCheck, + columnTypeCheck, + constraintExistsCheck, + qualifyTableName, + toRegclassLiteral, +} from './planner-sql-checks'; +import type { OperationClass, PostgresPlanTargetDetails } from './planner-target-details'; + +export interface OperationResolverContext { + readonly toContract: Contract; + readonly schemaName: string; + readonly codecHooks: Map; + readonly dependencies?: readonly ComponentDatabaseDependency[]; + readonly db?: unknown; +} + +type ResolvedOp = 
SqlMigrationPlanOperation; + +function getTable(contract: Contract, tableName: string): StorageTable | undefined { + return contract.storage.tables[tableName]; +} + +function getColumn( + contract: Contract, + tableName: string, + columnName: string, +): StorageColumn | undefined { + return getTable(contract, tableName)?.columns[columnName]; +} + +function targetDetails( + objectType: OperationClass, + name: string, + schema: string, + table?: string, +): { readonly id: 'postgres'; readonly details: PostgresPlanTargetDetails } { + return { + id: 'postgres', + details: { schema, objectType, name, ...ifDefined('table', table) }, + }; +} + +function step(description: string, sql: string) { + return { description, sql }; +} + +function resolveCreateTable( + desc: CreateTableDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const table = getTable(ctx.toContract, desc.table); + if (!table) throw new Error(`Table "${desc.table}" not found in destination contract`); + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `table.${desc.table}`, + label: `Create table "${desc.table}"`, + summary: `Creates table "${desc.table}"`, + operationClass: 'additive', + target: targetDetails('table', desc.table, ctx.schemaName), + precheck: [ + step( + `ensure table "${desc.table}" does not exist`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, desc.table)}) IS NULL`, + ), + ], + execute: [ + step(`create table "${desc.table}"`, buildCreateTableSql(qualified, table, ctx.codecHooks)), + ], + postcheck: [ + step( + `verify table "${desc.table}" exists`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, desc.table)}) IS NOT NULL`, + ), + ], + }; +} + +function resolveDropTable(desc: DropTableDescriptor, ctx: OperationResolverContext): ResolvedOp { + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `dropTable.${desc.table}`, + label: `Drop table "${desc.table}"`, + operationClass: 'destructive', + 
target: targetDetails('table', desc.table, ctx.schemaName), + precheck: [ + step( + `ensure table "${desc.table}" exists`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, desc.table)}) IS NOT NULL`, + ), + ], + execute: [step(`drop table "${desc.table}"`, `DROP TABLE ${qualified}`)], + postcheck: [ + step( + `verify table "${desc.table}" does not exist`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, desc.table)}) IS NULL`, + ), + ], + }; +} + +function resolveAddColumn(desc: AddColumnDescriptor, ctx: OperationResolverContext): ResolvedOp { + const contractColumn = getColumn(ctx.toContract, desc.table, desc.column); + if (!contractColumn) + throw new Error(`Column "${desc.table}"."${desc.column}" not found in destination contract`); + // Apply overrides — e.g., nullable: true for the add-nullable → backfill → setNotNull pattern + const column: StorageColumn = { + ...contractColumn, + nullable: + desc.overrides?.nullable !== undefined ? desc.overrides.nullable : contractColumn.nullable, + }; + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `column.${desc.table}.${desc.column}`, + label: `Add column "${desc.column}" to "${desc.table}"`, + operationClass: 'additive', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" is missing`, + columnExistsCheck({ + schema: ctx.schemaName, + table: desc.table, + column: desc.column, + exists: false, + }), + ), + ], + execute: [ + step( + `add column "${desc.column}"`, + buildAddColumnSql(qualified, desc.column, column, ctx.codecHooks), + ), + ], + postcheck: [ + step( + `verify column "${desc.column}" exists`, + columnExistsCheck({ schema: ctx.schemaName, table: desc.table, column: desc.column }), + ), + ], + }; +} + +function resolveDropColumn(desc: DropColumnDescriptor, ctx: OperationResolverContext): ResolvedOp { + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: 
`dropColumn.${desc.table}.${desc.column}`, + label: `Drop column "${desc.column}" from "${desc.table}"`, + operationClass: 'destructive', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" exists`, + columnExistsCheck({ schema: ctx.schemaName, table: desc.table, column: desc.column }), + ), + ], + execute: [ + step( + `drop column "${desc.column}"`, + `ALTER TABLE ${qualified} DROP COLUMN ${quoteId(desc.column)}`, + ), + ], + postcheck: [ + step( + `verify column "${desc.column}" does not exist`, + columnExistsCheck({ + schema: ctx.schemaName, + table: desc.table, + column: desc.column, + exists: false, + }), + ), + ], + }; +} + +function resolveAlterColumnType( + desc: AlterColumnTypeDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const column = getColumn(ctx.toContract, desc.table, desc.column); + if (!column) + throw new Error(`Column "${desc.table}"."${desc.column}" not found in destination contract`); + const qualified = qualifyTableName(ctx.schemaName, desc.table); + const qualifiedTargetType = desc.toType + ? qualifyName(ctx.schemaName, desc.toType) + : buildExpectedFormatType(column, ctx.codecHooks); + // format_type() returns unqualified names for types in search_path + const formatTypeExpected = desc.toType ?? buildExpectedFormatType(column, ctx.codecHooks); + return { + id: `alterType.${desc.table}.${desc.column}`, + label: `Alter type of "${desc.table}"."${desc.column}" to ${desc.toType ?? column.nativeType}`, + operationClass: 'destructive', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" exists`, + columnExistsCheck({ schema: ctx.schemaName, table: desc.table, column: desc.column }), + ), + ], + execute: [ + step( + `alter type of "${desc.column}"`, + `ALTER TABLE ${qualified} ALTER COLUMN ${quoteId(desc.column)} TYPE ${qualifiedTargetType}${desc.using ? 
` USING ${desc.using}` : ` USING ${quoteId(desc.column)}::${qualifiedTargetType}`}`, + ), + ], + postcheck: [ + step( + `verify column "${desc.column}" has type "${formatTypeExpected}"`, + columnTypeCheck({ + schema: ctx.schemaName, + table: desc.table, + column: desc.column, + expectedType: formatTypeExpected, + }), + ), + ], + meta: { warning: 'TABLE_REWRITE' }, + }; +} + +function resolveSetNotNull(desc: SetNotNullDescriptor, ctx: OperationResolverContext): ResolvedOp { + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `alterNullability.${desc.table}.${desc.column}`, + label: `Set NOT NULL on "${desc.table}"."${desc.column}"`, + operationClass: 'destructive', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" exists`, + columnExistsCheck({ schema: ctx.schemaName, table: desc.table, column: desc.column }), + ), + step( + `ensure no NULL values in "${desc.column}"`, + `SELECT NOT EXISTS (SELECT 1 FROM ${qualified} WHERE ${quoteId(desc.column)} IS NULL)`, + ), + ], + execute: [ + step( + `set NOT NULL on "${desc.column}"`, + `ALTER TABLE ${qualified} ALTER COLUMN ${quoteId(desc.column)} SET NOT NULL`, + ), + ], + postcheck: [ + step( + `verify column "${desc.column}" is NOT NULL`, + columnNullabilityCheck({ + schema: ctx.schemaName, + table: desc.table, + column: desc.column, + nullable: false, + }), + ), + ], + }; +} + +function resolveDropNotNull( + desc: DropNotNullDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `alterNullability.${desc.table}.${desc.column}`, + label: `Drop NOT NULL on "${desc.table}"."${desc.column}"`, + operationClass: 'widening', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" exists`, + columnExistsCheck({ schema: ctx.schemaName, table: desc.table, 
column: desc.column }), + ), + ], + execute: [ + step( + `drop NOT NULL on "${desc.column}"`, + `ALTER TABLE ${qualified} ALTER COLUMN ${quoteId(desc.column)} DROP NOT NULL`, + ), + ], + postcheck: [ + step( + `verify column "${desc.column}" is nullable`, + columnNullabilityCheck({ + schema: ctx.schemaName, + table: desc.table, + column: desc.column, + nullable: true, + }), + ), + ], + }; +} + +function resolveSetDefault(desc: SetDefaultDescriptor, ctx: OperationResolverContext): ResolvedOp { + const column = getColumn(ctx.toContract, desc.table, desc.column); + if (!column) + throw new Error(`Column "${desc.table}"."${desc.column}" not found in destination contract`); + const defaultSql = buildColumnDefaultSql(column.default, column); + if (!defaultSql) + throw new Error( + `Column "${desc.table}"."${desc.column}" has no default in destination contract`, + ); + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `setDefault.${desc.table}.${desc.column}`, + label: `Set default on "${desc.table}"."${desc.column}"`, + operationClass: 'additive', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" exists`, + columnExistsCheck({ schema: ctx.schemaName, table: desc.table, column: desc.column }), + ), + ], + execute: [ + step( + `set default on "${desc.column}"`, + `ALTER TABLE ${qualified} ALTER COLUMN ${quoteId(desc.column)} ${defaultSql}`, + ), + ], + postcheck: [], + }; +} + +function resolveDropDefault( + desc: DropDefaultDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `dropDefault.${desc.table}.${desc.column}`, + label: `Drop default on "${desc.table}"."${desc.column}"`, + operationClass: 'destructive', + target: targetDetails('column', desc.column, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure column "${desc.column}" exists`, + columnExistsCheck({ 
schema: ctx.schemaName, table: desc.table, column: desc.column }), + ), + ], + execute: [ + step( + `drop default on "${desc.column}"`, + `ALTER TABLE ${qualified} ALTER COLUMN ${quoteId(desc.column)} DROP DEFAULT`, + ), + ], + postcheck: [], + }; +} + +function resolveAddPrimaryKey( + desc: AddPrimaryKeyDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const table = getTable(ctx.toContract, desc.table); + if (!table?.primaryKey) + throw new Error(`Table "${desc.table}" has no primary key in destination contract`); + const constraintName = table.primaryKey.name ?? `${desc.table}_pkey`; + const qualified = qualifyTableName(ctx.schemaName, desc.table); + const columnList = table.primaryKey.columns.map(quoteId).join(', '); + return { + id: `primaryKey.${desc.table}.${constraintName}`, + label: `Add primary key on "${desc.table}"`, + operationClass: 'additive', + target: targetDetails('primaryKey', constraintName, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure primary key "${constraintName}" does not exist`, + constraintExistsCheck({ + constraintName, + schema: ctx.schemaName, + table: desc.table, + exists: false, + }), + ), + ], + execute: [ + step( + `add primary key "${constraintName}"`, + `ALTER TABLE ${qualified} ADD CONSTRAINT ${quoteId(constraintName)} PRIMARY KEY (${columnList})`, + ), + ], + postcheck: [ + step( + `verify primary key "${constraintName}" exists`, + constraintExistsCheck({ constraintName, schema: ctx.schemaName, table: desc.table }), + ), + ], + }; +} + +function resolveAddUnique(desc: AddUniqueDescriptor, ctx: OperationResolverContext): ResolvedOp { + const table = getTable(ctx.toContract, desc.table); + const unique = table?.uniques?.find((u) => u.columns.join(',') === desc.columns.join(',')); + const constraintName = unique?.name ?? 
`${desc.table}_${desc.columns.join('_')}_key`; + const qualified = qualifyTableName(ctx.schemaName, desc.table); + const columnList = desc.columns.map(quoteId).join(', '); + return { + id: `unique.${desc.table}.${constraintName}`, + label: `Add unique constraint on "${desc.table}" (${desc.columns.join(', ')})`, + operationClass: 'additive', + target: targetDetails('unique', constraintName, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure constraint "${constraintName}" does not exist`, + constraintExistsCheck({ + constraintName, + schema: ctx.schemaName, + table: desc.table, + exists: false, + }), + ), + ], + execute: [ + step( + `add unique constraint "${constraintName}"`, + `ALTER TABLE ${qualified} ADD CONSTRAINT ${quoteId(constraintName)} UNIQUE (${columnList})`, + ), + ], + postcheck: [ + step( + `verify constraint "${constraintName}" exists`, + constraintExistsCheck({ constraintName, schema: ctx.schemaName, table: desc.table }), + ), + ], + }; +} + +function resolveAddForeignKey( + desc: AddForeignKeyDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const table = getTable(ctx.toContract, desc.table); + const fk = table?.foreignKeys?.find((f) => f.columns.join(',') === desc.columns.join(',')); + + if (!fk) { + throw new Error( + `Foreign key on "${desc.table}" (${desc.columns.join(', ')}) not found in destination contract. ` + + 'Ensure the FK is declared in the contract before authoring a migration that adds it.', + ); + } + + const fkName = fk.name ?? 
`${desc.table}_${desc.columns.join('_')}_fkey`; + + return { + id: `foreignKey.${desc.table}.${fkName}`, + label: `Add foreign key "${fkName}" on "${desc.table}"`, + operationClass: 'additive', + target: targetDetails('foreignKey', fkName, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure FK "${fkName}" does not exist`, + constraintExistsCheck({ + constraintName: fkName, + schema: ctx.schemaName, + table: desc.table, + exists: false, + }), + ), + ], + execute: [ + step(`add FK "${fkName}"`, buildForeignKeySql(ctx.schemaName, desc.table, fkName, fk)), + ], + postcheck: [ + step( + `verify FK "${fkName}" exists`, + constraintExistsCheck({ + constraintName: fkName, + schema: ctx.schemaName, + table: desc.table, + }), + ), + ], + }; +} + +function resolveDropConstraint( + desc: DropConstraintDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const qualified = qualifyTableName(ctx.schemaName, desc.table); + return { + id: `dropConstraint.${desc.table}.${desc.constraintName}`, + label: `Drop constraint "${desc.constraintName}" on "${desc.table}"`, + operationClass: 'destructive', + target: targetDetails('unique', desc.constraintName, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure constraint "${desc.constraintName}" exists`, + constraintExistsCheck({ + constraintName: desc.constraintName, + schema: ctx.schemaName, + table: desc.table, + }), + ), + ], + execute: [ + step( + `drop constraint "${desc.constraintName}"`, + `ALTER TABLE ${qualified} DROP CONSTRAINT ${quoteId(desc.constraintName)}`, + ), + ], + postcheck: [ + step( + `verify constraint "${desc.constraintName}" does not exist`, + constraintExistsCheck({ + constraintName: desc.constraintName, + schema: ctx.schemaName, + table: desc.table, + exists: false, + }), + ), + ], + }; +} + +function resolveCreateIndex( + desc: CreateIndexDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const table = getTable(ctx.toContract, desc.table); + const index = 
table?.indexes?.find((i) => i.columns.join(',') === desc.columns.join(',')); + const indexName = index?.name ?? `${desc.table}_${desc.columns.join('_')}_idx`; + const qualified = qualifyTableName(ctx.schemaName, desc.table); + const columnList = desc.columns.map(quoteId).join(', '); + return { + id: `index.${desc.table}.${indexName}`, + label: `Create index "${indexName}" on "${desc.table}"`, + operationClass: 'additive', + target: targetDetails('index', indexName, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure index "${indexName}" does not exist`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, indexName)}) IS NULL`, + ), + ], + execute: [ + step( + `create index "${indexName}"`, + `CREATE INDEX ${quoteId(indexName)} ON ${qualified} (${columnList})`, + ), + ], + postcheck: [ + step( + `verify index "${indexName}" exists`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, indexName)}) IS NOT NULL`, + ), + ], + }; +} + +function resolveDropIndex(desc: DropIndexDescriptor, ctx: OperationResolverContext): ResolvedOp { + return { + id: `dropIndex.${desc.table}.${desc.indexName}`, + label: `Drop index "${desc.indexName}"`, + operationClass: 'destructive', + target: targetDetails('index', desc.indexName, ctx.schemaName, desc.table), + precheck: [ + step( + `ensure index "${desc.indexName}" exists`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, desc.indexName)}) IS NOT NULL`, + ), + ], + execute: [ + step( + `drop index "${desc.indexName}"`, + `DROP INDEX ${qualifyTableName(ctx.schemaName, desc.indexName)}`, + ), + ], + postcheck: [ + step( + `verify index "${desc.indexName}" does not exist`, + `SELECT to_regclass(${toRegclassLiteral(ctx.schemaName, desc.indexName)}) IS NULL`, + ), + ], + }; +} + +function enumTypeExistsCheck(schemaName: string, nativeType: string, exists = true): string { + const clause = exists ? 
'EXISTS' : 'NOT EXISTS'; + return `SELECT ${clause} ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON t.typnamespace = n.oid + WHERE n.nspname = '${escapeLiteral(schemaName)}' + AND t.typname = '${escapeLiteral(nativeType)}' +)`; +} + +function resolveCreateEnumType( + desc: CreateEnumTypeDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + // When explicit values are provided (e.g., temp type in rebuild recipe), use them directly. + // The typeName may be a temp name not in the contract. + const nativeType = desc.typeName; + let values: readonly string[]; + if (desc.values) { + values = desc.values; + } else { + const typeInstance = ctx.toContract.storage.types?.[desc.typeName]; + if (!typeInstance) { + throw new Error(`Type "${desc.typeName}" not found in destination contract storage.types`); + } + const typeValues = typeInstance.typeParams?.['values']; + if ( + !Array.isArray(typeValues) || + !typeValues.every((v): v is string => typeof v === 'string') + ) { + throw new Error(`Type "${desc.typeName}" has no valid enum values in typeParams`); + } + values = typeValues; + } + const qualifiedType = qualifyName(ctx.schemaName, nativeType); + const literalValues = values.map((v) => `'${escapeLiteral(v)}'`).join(', '); + return { + id: `type.${nativeType}`, + label: `Create enum type "${nativeType}"`, + operationClass: 'additive', + target: targetDetails('type', nativeType, ctx.schemaName), + precheck: [ + step( + `ensure type "${nativeType}" does not exist`, + enumTypeExistsCheck(ctx.schemaName, nativeType, false), + ), + ], + execute: [ + step( + `create enum type "${nativeType}"`, + `CREATE TYPE ${qualifiedType} AS ENUM (${literalValues})`, + ), + ], + postcheck: [ + step(`verify type "${nativeType}" exists`, enumTypeExistsCheck(ctx.schemaName, nativeType)), + ], + }; +} + +function resolveAddEnumValues( + desc: AddEnumValuesDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const typeInstance = 
ctx.toContract.storage.types?.[desc.typeName]; + if (!typeInstance) { + throw new Error(`Type "${desc.typeName}" not found in destination contract storage.types`); + } + const qualifiedType = qualifyName(ctx.schemaName, typeInstance.nativeType); + return { + id: `type.${desc.typeName}.addValues`, + label: `Add values to enum type "${desc.typeName}": ${desc.values.join(', ')}`, + operationClass: 'additive', + target: targetDetails('type', desc.typeName, ctx.schemaName), + precheck: [ + step( + `ensure type "${typeInstance.nativeType}" exists`, + enumTypeExistsCheck(ctx.schemaName, typeInstance.nativeType), + ), + ], + execute: desc.values.map((value) => + step( + `add value '${value}' to enum "${typeInstance.nativeType}"`, + `ALTER TYPE ${qualifiedType} ADD VALUE '${escapeLiteral(value)}'`, + ), + ), + postcheck: [ + step( + `verify type "${typeInstance.nativeType}" exists`, + enumTypeExistsCheck(ctx.schemaName, typeInstance.nativeType), + ), + ], + }; +} + +function resolveDropEnumType( + desc: DropEnumTypeDescriptor, + ctx: OperationResolverContext, +): ResolvedOp { + const qualified = qualifyName(ctx.schemaName, desc.typeName); + return { + id: `type.${desc.typeName}.drop`, + label: `Drop enum type "${desc.typeName}"`, + operationClass: 'destructive', + target: targetDetails('type', desc.typeName, ctx.schemaName), + precheck: [ + step( + `ensure type "${desc.typeName}" exists`, + enumTypeExistsCheck(ctx.schemaName, desc.typeName), + ), + ], + execute: [step(`drop enum type "${desc.typeName}"`, `DROP TYPE ${qualified}`)], + postcheck: [ + step( + `verify type "${desc.typeName}" removed`, + enumTypeExistsCheck(ctx.schemaName, desc.typeName, false), + ), + ], + }; +} + +function resolveRenameType(desc: RenameTypeDescriptor, ctx: OperationResolverContext): ResolvedOp { + const qualifiedFrom = qualifyName(ctx.schemaName, desc.fromName); + return { + id: `type.${desc.fromName}.rename`, + label: `Rename type "${desc.fromName}" to "${desc.toName}"`, + operationClass: 
'destructive', + target: targetDetails('type', desc.fromName, ctx.schemaName), + precheck: [ + step( + `ensure type "${desc.fromName}" exists`, + enumTypeExistsCheck(ctx.schemaName, desc.fromName), + ), + ], + execute: [ + step( + `rename type "${desc.fromName}" to "${desc.toName}"`, + `ALTER TYPE ${qualifiedFrom} RENAME TO ${quoteId(desc.toName)}`, + ), + ], + postcheck: [ + step(`verify type "${desc.toName}" exists`, enumTypeExistsCheck(ctx.schemaName, desc.toName)), + ], + }; +} + +function resolveCreateDependency( + desc: CreateDependencyDescriptor, + ctx: OperationResolverContext, +): readonly ResolvedOp[] { + const dep = ctx.dependencies?.find((d) => d.id === desc.dependencyId); + if (!dep) { + throw new Error( + `Dependency "${desc.dependencyId}" not found in resolver context. ` + + 'Ensure frameworkComponents are passed to resolveDescriptors.', + ); + } + return dep.install as readonly ResolvedOp[]; +} + +const postgresAdapter = createPostgresAdapter(); + +function lowerToSql(plan: SqlQueryPlan, contract: Contract): SerializedQueryPlan { + const lowered = lowerSqlPlan(postgresAdapter, contract, plan); + return { sql: lowered.sql, params: lowered.params }; +} + +function resolveBuildable(input: unknown, contract: Contract): SerializedQueryPlan { + if ( + typeof input === 'object' && + input !== null && + 'build' in input && + typeof (input as { build: unknown }).build === 'function' + ) { + return lowerToSql((input as { build(): unknown }).build() as SqlQueryPlan, contract); + } + return lowerToSql(input as SqlQueryPlan, contract); +} + +/** Resolves a single QueryPlanInput to one or more lowered SQL statements. */ +function resolvePlanInput( + input: symbol | object | ((...args: never[]) => unknown), + db: unknown, + contract: Contract, +): readonly SerializedQueryPlan[] { + if (typeof input === 'symbol') { + throw new Error( + 'Data transform contains an unimplemented TODO placeholder. 
' + + 'Fill in the check/run queries in migration.ts before running verify.', + ); + } + if (typeof input === 'function') { + const result = input(db as never); + if (Array.isArray(result)) { + return result.map((item) => resolveBuildable(item, contract)); + } + return [resolveBuildable(result, contract)]; + } + return [resolveBuildable(input, contract)]; +} + +function resolveCheck( + check: DataTransformDescriptor['check'], + db: unknown, + contract: Contract, +): SerializedQueryPlan | boolean | null { + if (typeof check === 'boolean') return check; + const resolved = resolvePlanInput(check, db, contract); + const first = resolved[0]; + if (!first) return null; + return first; +} + +function resolveDataTransform( + desc: DataTransformDescriptor, + ctx: OperationResolverContext, +): DataTransformOperation { + const { db, toContract } = ctx; + return { + id: `data_migration.${desc.name}`, + label: `Data transform: ${desc.name}`, + operationClass: 'data', + name: desc.name, + source: desc.source, + check: resolveCheck(desc.check, db, toContract), + run: desc.run.flatMap((input) => resolvePlanInput(input, db, toContract)), + }; +} + +import { + escapeLiteral, + qualifyName, + quoteIdentifier as quoteId, +} from '@prisma-next/adapter-postgres/control'; + +/** + * Resolves an array of operation descriptors into SqlMigrationPlanOperation objects. + * Most descriptors resolve 1:1, but createType and createDependency may expand to multiple ops. 
+ */ +export function resolveOperations( + descriptors: readonly MigrationOpDescriptor[], + context: OperationResolverContext, +): readonly (ResolvedOp | DataTransformOperation)[] { + return descriptors.flatMap((desc) => resolveOperation(desc, context)); +} + +function resolveOperation( + desc: MigrationOpDescriptor, + ctx: OperationResolverContext, +): readonly (ResolvedOp | DataTransformOperation)[] { + switch (desc.kind) { + case 'createTable': + return [resolveCreateTable(desc, ctx)]; + case 'dropTable': + return [resolveDropTable(desc, ctx)]; + case 'addColumn': + return [resolveAddColumn(desc, ctx)]; + case 'dropColumn': + return [resolveDropColumn(desc, ctx)]; + case 'alterColumnType': + return [resolveAlterColumnType(desc, ctx)]; + case 'setNotNull': + return [resolveSetNotNull(desc, ctx)]; + case 'dropNotNull': + return [resolveDropNotNull(desc, ctx)]; + case 'setDefault': + return [resolveSetDefault(desc, ctx)]; + case 'dropDefault': + return [resolveDropDefault(desc, ctx)]; + case 'addPrimaryKey': + return [resolveAddPrimaryKey(desc, ctx)]; + case 'addUnique': + return [resolveAddUnique(desc, ctx)]; + case 'addForeignKey': + return [resolveAddForeignKey(desc, ctx)]; + case 'dropConstraint': + return [resolveDropConstraint(desc, ctx)]; + case 'createIndex': + return [resolveCreateIndex(desc, ctx)]; + case 'dropIndex': + return [resolveDropIndex(desc, ctx)]; + case 'createEnumType': + return [resolveCreateEnumType(desc, ctx)]; + case 'addEnumValues': + return [resolveAddEnumValues(desc, ctx)]; + case 'dropEnumType': + return [resolveDropEnumType(desc, ctx)]; + case 'renameType': + return [resolveRenameType(desc, ctx)]; + case 'createDependency': + return resolveCreateDependency(desc, ctx); + case 'dataTransform': + return [resolveDataTransform(desc, ctx)]; + } +} diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/planner-reconciliation.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/planner-reconciliation.ts index 
928048e9e4..8394a91f84 100644 --- a/packages/3-targets/3-targets/postgres/src/core/migrations/planner-reconciliation.ts +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/planner-reconciliation.ts @@ -100,6 +100,7 @@ function isAdditiveIssue(issue: SchemaIssue): boolean { switch (issue.kind) { case 'type_missing': case 'type_values_mismatch': + case 'enum_values_changed': case 'missing_table': case 'missing_column': case 'dependency_missing': @@ -709,11 +710,12 @@ function convertIssueToConflict(issue: SchemaIssue): SqlPlannerConflict | null { function buildConflict(kind: SqlPlannerConflict['kind'], issue: SchemaIssue): SqlPlannerConflict { const location = buildConflictLocation(issue); + const base = issue.kind !== 'enum_values_changed' ? issue : undefined; const meta = - issue.expected || issue.actual + base?.expected || base?.actual ? Object.freeze({ - ...ifDefined('expected', issue.expected), - ...ifDefined('actual', issue.actual), + ...ifDefined('expected', base.expected), + ...ifDefined('actual', base.actual), }) : undefined; @@ -735,19 +737,24 @@ function sortSchemaIssues(issues: readonly SchemaIssue[]): readonly SchemaIssue[ if (kindCompare !== 0) { return kindCompare; } - const tableCompare = compareStrings(a.table, b.table); + const aBase = a.kind !== 'enum_values_changed' ? a : undefined; + const bBase = b.kind !== 'enum_values_changed' ? 
b : undefined; + const tableCompare = compareStrings(aBase?.table, bBase?.table); if (tableCompare !== 0) { return tableCompare; } - const columnCompare = compareStrings(a.column, b.column); + const columnCompare = compareStrings(aBase?.column, bBase?.column); if (columnCompare !== 0) { return columnCompare; } - return compareStrings(a.indexOrConstraint, b.indexOrConstraint); + return compareStrings(aBase?.indexOrConstraint, bBase?.indexOrConstraint); }); } function buildConflictLocation(issue: SchemaIssue) { + if (issue.kind === 'enum_values_changed') { + return { type: issue.typeName }; + } const location = { ...ifDefined('table', issue.table), ...ifDefined('column', issue.column), diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/planner-strategies.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/planner-strategies.ts new file mode 100644 index 0000000000..1705e191fb --- /dev/null +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/planner-strategies.ts @@ -0,0 +1,262 @@ +/** + * Migration strategies for the descriptor-based planner. + * + * Each strategy examines the issue list, consumes issues it handles, + * and returns the ops to handle them. The planner chains strategies, + * then handles whatever's left with default issue-to-descriptor mapping. + * + * Different strategy sets are used for different contexts: + * - `migration plan`: data-safe strategies (dataTransform for NOT NULL, type changes, etc.) 
+ * - `db update`: dev-push strategies (temp defaults, destructive type changes, no data transforms) + */ + +import type { Contract } from '@prisma-next/contract/types'; +import type { SchemaIssue } from '@prisma-next/framework-components/control'; +import type { SqlStorage } from '@prisma-next/sql-contract/types'; +import { + addColumn, + addEnumValues, + alterColumnType, + createEnumType, + dataTransform, + dropEnumType, + type MigrationOpDescriptor, + renameType, + setNotNull, + TODO, +} from './operation-descriptors'; + +// ============================================================================ +// Strategy types +// ============================================================================ + +/** Context passed to each migration strategy — the from/to contracts for the migration. */ +export interface StrategyContext { + readonly toContract: Contract; + readonly fromContract: Contract | null; +} + +/** + * A migration strategy examines schema issues, consumes the ones it handles, + * and returns the descriptor ops to address them. Returns `'no_match'` if + * none of the issues are relevant. The planner chains strategies in order — + * earlier strategies consume issues before later ones see them. + */ +export type MigrationStrategy = ( + issues: readonly SchemaIssue[], + context: StrategyContext, +) => + | { kind: 'match'; issues: readonly SchemaIssue[]; ops: readonly MigrationOpDescriptor[] } + | { kind: 'no_match' }; + +// ============================================================================ +// Recipes +// ============================================================================ + +const REBUILD_SUFFIX = '__prisma_next_new'; + +/** + * Produces the descriptor sequence for rebuilding a Postgres enum type: + * createEnumType(temp, values) → alterColumnType(USING cast) per column → dropEnumType(old) → renameType(temp, old) + * + * Used by the enum change strategy for value removal and reorder scenarios. 
+ * Finds all columns referencing the enum via `typeRef` in the destination contract. + */ +function enumRebuildRecipe( + typeName: string, + ctx: StrategyContext, +): readonly MigrationOpDescriptor[] { + const toType = ctx.toContract.storage.types?.[typeName]; + if (!toType) return []; + const nativeType = toType.nativeType; + const desiredValues = (toType.typeParams['values'] ?? []) as readonly string[]; + const tempName = `${nativeType}${REBUILD_SUFFIX}`; + + const columnRefs: { table: string; column: string }[] = []; + for (const [tableName, table] of Object.entries(ctx.toContract.storage.tables)) { + for (const [columnName, column] of Object.entries(table.columns)) { + if (column.typeRef === typeName) { + columnRefs.push({ table: tableName, column: columnName }); + } + } + } + + return [ + createEnumType(tempName, desiredValues), + ...columnRefs.map((ref) => + alterColumnType(ref.table, ref.column, { + toType: tempName, + using: `${ref.column}::text::${tempName}`, + }), + ), + dropEnumType(nativeType), + renameType(tempName, nativeType), + ]; +} + +// ============================================================================ +// Data-safe strategies (for `migration plan`) +// ============================================================================ + +/** + * NOT NULL backfill strategy. + * + * When a missing column is NOT NULL without a default, the planner can't just + * add it — existing rows would violate the constraint. 
Instead, emit: + * addColumn(nullable) → dataTransform (user fills in backfill) → setNotNull + */ +export const notNullBackfillStrategy: MigrationStrategy = (issues, ctx) => { + const matched: SchemaIssue[] = []; + const ops: MigrationOpDescriptor[] = []; + + for (const issue of issues) { + if (issue.kind !== 'missing_column' || !issue.table || !issue.column) continue; + + const column = ctx.toContract.storage.tables[issue.table]?.columns[issue.column]; + if (!column) continue; + if (column.nullable === true || column.default !== undefined) continue; + + matched.push(issue); + ops.push( + addColumn(issue.table, issue.column, { nullable: true }), + dataTransform(`backfill-${issue.table}-${issue.column}`, { + check: TODO, + run: TODO, + }), + setNotNull(issue.table, issue.column), + ); + } + + if (matched.length === 0) return { kind: 'no_match' }; + return { + kind: 'match', + issues: issues.filter((i) => !matched.includes(i)), + ops, + }; +}; + +/** + * Unsafe type change strategy. + * + * Safe widenings (int4 → int8) emit alterColumnType directly. + * Unsafe changes emit dataTransform for user to handle conversion. 
+ */ +export const typeChangeStrategy: MigrationStrategy = (issues, ctx) => { + const matched: SchemaIssue[] = []; + const ops: MigrationOpDescriptor[] = []; + + const SAFE_WIDENINGS = new Set(['int2→int4', 'int2→int8', 'int4→int8', 'float4→float8']); + function isSafeWidening(fromType: string, toType: string): boolean { + return SAFE_WIDENINGS.has(`${fromType}→${toType}`); + } + + for (const issue of issues) { + if (issue.kind !== 'type_mismatch') continue; + if (!issue.table || !issue.column) continue; + const fromColumn = ctx.fromContract?.storage.tables[issue.table]?.columns[issue.column]; + const toColumn = ctx.toContract?.storage.tables[issue.table]?.columns[issue.column]; + if (!fromColumn || !toColumn) continue; + const fromType = fromColumn.nativeType; + const toType = toColumn.nativeType; + if (fromType === toType) continue; + matched.push(issue); + if (isSafeWidening(fromType, toType)) { + ops.push(alterColumnType(issue.table, issue.column)); + } else { + ops.push( + dataTransform(`typechange-${issue.table}-${issue.column}`, { + check: TODO, + run: TODO, + }), + alterColumnType(issue.table, issue.column), + ); + } + } + if (matched.length === 0) return { kind: 'no_match' }; + return { + kind: 'match', + issues: issues.filter((i) => !matched.includes(i)), + ops, + }; +}; + +/** + * Nullable → NOT NULL tightening strategy. + * + * When an existing column changes from nullable to NOT NULL, existing rows + * may have NULLs that violate the constraint. 
Emit: + * dataTransform (user fills in NULL handling) → setNotNull + */ +export const nullableTighteningStrategy: MigrationStrategy = (issues, ctx) => { + const matched: SchemaIssue[] = []; + const ops: MigrationOpDescriptor[] = []; + + for (const issue of issues) { + if (issue.kind !== 'nullability_mismatch' || !issue.table || !issue.column) continue; + + const column = ctx.toContract.storage.tables[issue.table]?.columns[issue.column]; + if (!column) continue; + if (column.nullable === true) continue; + + matched.push(issue); + ops.push( + dataTransform(`handle-nulls-${issue.table}-${issue.column}`, { + check: TODO, + run: TODO, + }), + setNotNull(issue.table, issue.column), + ); + } + + if (matched.length === 0) return { kind: 'no_match' }; + return { + kind: 'match', + issues: issues.filter((i) => !matched.includes(i)), + ops, + }; +}; + +/** + * Enum value change strategy. + * + * When enum values change between contracts: + * - Add only → addEnumValues + * - Reorder (same values, different order) → rebuild recipe (no data transform) + * - Removal → dataTransform (user migrates rows) + rebuild recipe + */ +export const enumChangeStrategy: MigrationStrategy = (issues, ctx) => { + const matched: SchemaIssue[] = []; + const ops: MigrationOpDescriptor[] = []; + + for (const issue of issues) { + if (issue.kind !== 'enum_values_changed') continue; + matched.push(issue); + + if (issue.removedValues.length > 0) { + ops.push( + dataTransform(`migrate-${issue.typeName}-values`, { check: TODO, run: TODO }), + ...enumRebuildRecipe(issue.typeName, ctx), + ); + } else if (issue.addedValues.length === 0) { + // Reorder only — rebuild without data transform + ops.push(...enumRebuildRecipe(issue.typeName, ctx)); + } else { + ops.push(addEnumValues(issue.typeName, issue.addedValues)); + } + } + + if (matched.length === 0) return { kind: 'no_match' }; + return { + kind: 'match', + issues: issues.filter((i) => !matched.includes(i)), + ops, + }; +}; + +/** Default strategy set for 
`migration plan` — data-safe, requires user input for destructive changes. */ +export const migrationPlanStrategies: readonly MigrationStrategy[] = [ + enumChangeStrategy, + notNullBackfillStrategy, + typeChangeStrategy, + nullableTighteningStrategy, +]; diff --git a/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts b/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts index 837abd4093..79208bd142 100644 --- a/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts +++ b/packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts @@ -17,6 +17,7 @@ import type { import { runnerFailure, runnerSuccess } from '@prisma-next/family-sql/control'; import { verifySqlSchema } from '@prisma-next/family-sql/schema-verify'; import { readMarker } from '@prisma-next/family-sql/verify'; +import type { DataTransformOperation } from '@prisma-next/framework-components/control'; import { SqlQueryError } from '@prisma-next/sql-errors'; import { ifDefined } from '@prisma-next/utils/defined'; import type { Result } from '@prisma-next/utils/result'; @@ -46,6 +47,18 @@ const DEFAULT_CONFIG: RunnerConfig = { const LOCK_DOMAIN = 'prisma_next.contract.marker'; +function isDataTransformOperation(op: unknown): op is DataTransformOperation { + return ( + typeof op === 'object' && + op !== null && + 'operationClass' in op && + (op as { operationClass: string }).operationClass === 'data' && + 'name' in op && + 'check' in op && + 'run' in op + ); +} + /** * Deep clones and freezes a record object to prevent mutation. * Recursively clones nested objects and arrays to ensure complete isolation. 
@@ -190,6 +203,19 @@ class PostgresMigrationRunner implements SqlMigrationRunner['driver'], + op: DataTransformOperation, + options: { runIdempotency: boolean }, + ): Promise> { + // Step 1: Check (skip guard) + if (options.runIdempotency && op.check !== null && op.check !== false) { + if (op.check === true) { + // Always skip + return okVoid(); + } + const checkResult = await driver.query(op.check.sql, op.check.params); + if (checkResult.rows.length === 0) { + // No violations — already applied, skip + return okVoid(); + } + } + + // Step 2: Execute run steps + if (op.run) { + for (const plan of op.run) { + try { + await driver.query(plan.sql, plan.params); + } catch (error: unknown) { + if (SqlQueryError.is(error)) { + return runnerFailure( + 'EXECUTION_FAILED', + `Data transform "${op.name}" failed: ${error.message}`, + { + why: error.message, + meta: { + operationId: op.id, + dataTransformName: op.name, + sql: plan.sql, + sqlState: error.sqlState, + }, + }, + ); + } + throw error; + } + } + } + + // Step 3: Post-run validation (check again) + if (op.check !== null && op.check !== false && op.check !== true) { + const checkResult = await driver.query(op.check.sql, op.check.params); + if (checkResult.rows.length > 0) { + return runnerFailure( + 'POSTCHECK_FAILED', + `Data transform "${op.name}" did not resolve all violations (${checkResult.rows.length} remaining)`, + { + why: `After executing the data transform, the check query still returns ${checkResult.rows.length} violation(s).`, + meta: { + operationId: op.id, + dataTransformName: op.name, + remainingViolations: checkResult.rows.length, + }, + }, + ); + } + } + + return okVoid(); + } + private async ensureControlTables( driver: SqlMigrationRunnerExecuteOptions['driver'], ): Promise { diff --git a/packages/3-targets/3-targets/postgres/src/exports/control.ts b/packages/3-targets/3-targets/postgres/src/exports/control.ts index 1dfa3625b2..335010a332 100644 --- 
a/packages/3-targets/3-targets/postgres/src/exports/control.ts +++ b/packages/3-targets/3-targets/postgres/src/exports/control.ts @@ -1,23 +1,87 @@ +import { + normalizeSchemaNativeType, + parsePostgresDefault, +} from '@prisma-next/adapter-postgres/control'; import type { ColumnDefault, Contract } from '@prisma-next/contract/types'; import type { SqlControlFamilyInstance, SqlControlTargetDescriptor, } from '@prisma-next/family-sql/control'; -import { contractToSchemaIR, extractCodecControlHooks } from '@prisma-next/family-sql/control'; +import { + collectInitDependencies, + contractToSchemaIR, + extractCodecControlHooks, +} from '@prisma-next/family-sql/control'; +import { MigrationDescriptorArraySchema } from '@prisma-next/family-sql/operation-descriptors'; +import { verifySqlSchema } from '@prisma-next/family-sql/schema-verify'; import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; import type { ControlTargetInstance, MigrationPlanner, MigrationRunner, + OperationDescriptor, } from '@prisma-next/framework-components/control'; +import { sql } from '@prisma-next/sql-builder/runtime'; import type { SqlStorage, StorageColumn } from '@prisma-next/sql-contract/types'; +import type { SqlOperationEntry } from '@prisma-next/sql-operations'; import { ifDefined } from '@prisma-next/utils/defined'; +import { type } from 'arktype'; import { postgresTargetDescriptorMeta } from '../core/descriptor-meta'; +import { planDescriptors } from '../core/migrations/descriptor-planner'; +import { resolveOperations } from '../core/migrations/operation-resolver'; import { createPostgresMigrationPlanner } from '../core/migrations/planner'; import { renderDefaultLiteral } from '../core/migrations/planner-ddl-builders'; import type { PostgresPlanTargetDetails } from '../core/migrations/planner-target-details'; import { createPostgresMigrationRunner } from '../core/migrations/runner'; +function parseDescriptors(descriptors: readonly 
OperationDescriptor[]) { + const result = MigrationDescriptorArraySchema([...descriptors]); + if (result instanceof type.errors) { + throw new Error(`Invalid migration descriptors:\n${result.summary}`); + } + return result; +} + +function collectQueryOperationTypes( + frameworkComponents?: ReadonlyArray>, +): Readonly> { + const entries: Record = {}; + if (!frameworkComponents) return entries; + for (const component of frameworkComponents) { + const ops = ( + component as { + queryOperations?: () => ReadonlyArray<{ method: string } & SqlOperationEntry>; + } + ).queryOperations?.(); + if (!ops) continue; + for (const { method, ...entry } of ops) { + entries[method] = entry; + } + } + return entries; +} + +/** + * Creates a SQL DSL client for migration authoring. + * Only the fields used by the builder are populated — operations, codecs, + * and types are unused by sql() and stubbed to satisfy the ExecutionContext type. + */ +function createMigrationDsl( + toContract: Contract, + frameworkComponents?: ReadonlyArray>, +) { + const queryOperationTypes = collectQueryOperationTypes(frameworkComponents); + // sql() only reads contract, queryOperations.entries(), and applyMutationDefaults + // from the context. The other fields are for runtime execution, not query building. + return sql({ + context: { + contract: toContract, + queryOperations: { entries: () => queryOperationTypes }, + applyMutationDefaults: () => [], + } as never, + }); +} + function buildNativeTypeExpander( frameworkComponents?: ReadonlyArray>, ) { @@ -77,6 +141,67 @@ const postgresTargetDescriptor: SqlControlTargetDescriptor<'postgres', PostgresP frameworkComponents: frameworkComponents ?? 
[], }); }, + planWithDescriptors(context) { + const toContract = context.toContract as Contract; + const fromContract = context.fromContract as Contract | null; + + // Synthesize schema IR from the fromContract (same as contractToSchema flow) + const expander = buildNativeTypeExpander(context.frameworkComponents); + const fromSchemaIR = contractToSchemaIR(fromContract, { + annotationNamespace: 'pg', + ...ifDefined('expandNativeType', expander), + renderDefault: postgresRenderDefault, + frameworkComponents: context.frameworkComponents ?? [], + }); + + // Collect schema issues via verifier + const verifyResult = verifySqlSchema({ + contract: toContract, + schema: fromSchemaIR, + strict: true, + typeMetadataRegistry: new Map(), + frameworkComponents: context.frameworkComponents ?? [], + normalizeDefault: parsePostgresDefault, + normalizeNativeType: normalizeSchemaNativeType, + }); + + // Run descriptor planner + const planResult = planDescriptors({ + issues: verifyResult.schema.issues, + toContract, + fromContract, + }); + if (!planResult.ok) { + return { ok: false as const, conflicts: planResult.failure }; + } + + return { + ok: true as const, + // Safe — PostgresMigrationOpDescriptor satisfies OperationDescriptor structurally. + // TS can't verify due to discriminated-union-to-index-signature limitation. + descriptors: planResult.value.descriptors as unknown as OperationDescriptor[], + needsDataMigration: planResult.value.needsDataMigration, + }; + }, + + resolveDescriptors(descriptors, context) { + const validated = parseDescriptors(descriptors); + const codecHooks = context.frameworkComponents + ? extractCodecControlHooks(context.frameworkComponents) + : new Map(); + const dependencies = context.frameworkComponents + ? 
collectInitDependencies(context.frameworkComponents) + : []; + const toContract = context.toContract as Contract; + const db = createMigrationDsl(toContract, context.frameworkComponents); + return resolveOperations(validated, { + toContract, + schemaName: context.schemaName ?? 'public', + codecHooks, + dependencies, + db, + }); + }, }, create(): ControlTargetInstance<'sql', 'postgres'> { return { diff --git a/packages/3-targets/3-targets/postgres/src/exports/migration-builders.ts b/packages/3-targets/3-targets/postgres/src/exports/migration-builders.ts new file mode 100644 index 0000000000..9d52ec9e4d --- /dev/null +++ b/packages/3-targets/3-targets/postgres/src/exports/migration-builders.ts @@ -0,0 +1,57 @@ +// Re-export everything from the local operation-descriptors module, +// which itself re-exports structural descriptors from sql-family +// and adds postgres-specific data transform support. +export { + type AddColumnDescriptor, + type AddEnumValuesDescriptor, + type AddForeignKeyDescriptor, + type AddPrimaryKeyDescriptor, + type AddUniqueDescriptor, + type AlterColumnTypeDescriptor, + addColumn, + addEnumValues, + addForeignKey, + addPrimaryKey, + addUnique, + alterColumnType, + type Buildable, + builders, + type CreateDependencyDescriptor, + type CreateEnumTypeDescriptor, + type CreateIndexDescriptor, + type CreateTableDescriptor, + createBuilders, + createDependency, + createEnumType, + createIndex, + createTable, + type DataTransformDescriptor, + type DropColumnDescriptor, + type DropConstraintDescriptor, + type DropDefaultDescriptor, + type DropEnumTypeDescriptor, + type DropIndexDescriptor, + type DropNotNullDescriptor, + type DropTableDescriptor, + dataTransform, + dropColumn, + dropConstraint, + dropDefault, + dropEnumType, + dropIndex, + dropNotNull, + dropTable, + type MigrationOpDescriptor, + type PostgresMigrationOpDescriptor, + type QueryPlanInput, + type RenameTypeDescriptor, + type RunInput, + renameType, + type SetDefaultDescriptor, + type 
SetNotNullDescriptor, + type SqlMigrationOpDescriptor, + setDefault, + setNotNull, + TODO, + type TodoMarker, +} from '../core/migrations/operation-descriptors'; diff --git a/packages/3-targets/3-targets/postgres/test/migrations/descriptor-planner.scenarios.md b/packages/3-targets/3-targets/postgres/test/migrations/descriptor-planner.scenarios.md new file mode 100644 index 0000000000..5407c67ff4 --- /dev/null +++ b/packages/3-targets/3-targets/postgres/test/migrations/descriptor-planner.scenarios.md @@ -0,0 +1,70 @@ +# Descriptor Planner Test Scenarios + +Tests for `planWithDescriptors` covering the full path: contractToSchemaIR → verifySqlSchema → planDescriptors. + +Each test builds a from-contract (or null) and a to-contract, runs the planner, and asserts the descriptors produced. + +## Additive — fresh database (from = null) + +1. **Single table** — creates table with columns + PK +2. **Table with FK** — creates table + addForeignKey + createIndex (backing) +3. **Table with explicit indexes and uniques** — creates table + createIndex + addUnique +4. **Table with enum type column** — createEnumType before createTable +5. **Multiple tables with FK between them** — both tables created, FK at end + +## Additive — existing contract + +6. **New nullable column** — plain addColumn (no pattern match) +7. **New NOT NULL column with default** — plain addColumn (has default, no pattern match) +8. **New NOT NULL column without default** — pattern match: addColumn(nullable) + dataTransform + setNotNull, needsDataMigration = true +9. **Multiple NOT NULL columns without defaults** — multiple dataTransform ops +10. **New table alongside existing** — only new table gets ops, existing table untouched +11. **New FK column on existing table** — addColumn + addForeignKey + createIndex + +## Reconciliation — drops + +12. **Drop table** — dropTable +13. **Drop column** — dropColumn +14. **Drop index** — dropIndex +15. **Drop FK** — dropConstraint +16. 
**Drop unique constraint** — dropConstraint +17. **Drop default** — dropDefault + +## Reconciliation — alters + +18. **Type change** (int4 → int8) — alterColumnType +19. **Nullable → NOT NULL** — setNotNull +20. **NOT NULL → nullable** — dropNotNull +21. **Default changed** — setDefault +22. **Default added** — setDefault +23. **Default removed** — dropDefault + +## Types + +24. **New enum type** — createEnumType descriptor +25. **Enum values added** — conflict (not yet supported) +26. **Enum values removed** — conflict (not yet supported) +27. **Unknown codec type missing** — conflict + +## Dependencies + +28. **Missing database dependency** — createDependency descriptor + +## Ordering + +29. **Drops before creates** — drop ops precede additive ops in descriptor list +30. **Types/deps before tables** — createEnumType and createDependency before createTable +31. **Tables before columns** — createTable before addColumn +32. **Pattern ops (dataTransform) between columns and constraints** — addColumn(nullable) → dataTransform → setNotNull positioned correctly +33. **FK after table it references** — addForeignKey after both tables exist + +## Combined / realistic + +34. **Vertical table split** (S5) — new table with FK to existing, addForeignKey + createIndex +35. **Mixed additive + destructive** — some tables added, some dropped, some columns altered +36. **No-op** — identical contracts produce empty descriptors + +## Old planner parity + +37. **NOT NULL without default produces dataTransform, not temp default** — verify the old hack is gone +38. 
**Column with typeParams** (char(36)) — verify type expansion works in resolver diff --git a/packages/3-targets/3-targets/postgres/test/migrations/descriptor-planner.scenarios.test.ts b/packages/3-targets/3-targets/postgres/test/migrations/descriptor-planner.scenarios.test.ts new file mode 100644 index 0000000000..53fb20e0e5 --- /dev/null +++ b/packages/3-targets/3-targets/postgres/test/migrations/descriptor-planner.scenarios.test.ts @@ -0,0 +1,1183 @@ +/** + * Descriptor planner scenario tests. + * + * Tests the full planWithDescriptors path: contractToSchemaIR → verifySqlSchema → planDescriptors. + * Each test builds a from-contract (or null) and a to-contract, runs the planner, + * and asserts the descriptors produced. + * + * See descriptor-planner.scenarios.md for the full scenario list. + */ + +import postgresAdapterDescriptor from '@prisma-next/adapter-postgres/control'; +import type { Contract } from '@prisma-next/contract/types'; +import { coreHash, profileHash } from '@prisma-next/contract/types'; +import pgvectorDescriptor from '@prisma-next/extension-pgvector/control'; +import type { TargetBoundComponentDescriptor } from '@prisma-next/framework-components/components'; +import type { + ForeignKey, + Index, + SqlStorage, + StorageColumn, + StorageTable, + UniqueConstraint, +} from '@prisma-next/sql-contract/types'; +import { ifDefined } from '@prisma-next/utils/defined'; +import { describe, expect, it } from 'vitest'; +import postgresTargetDescriptor from '../../src/exports/control'; + +// ============================================================================ +// Test helpers +// ============================================================================ + +const defaultComponents: ReadonlyArray> = [ + postgresTargetDescriptor as TargetBoundComponentDescriptor<'sql', 'postgres'>, + postgresAdapterDescriptor as TargetBoundComponentDescriptor<'sql', 'postgres'>, +]; + +function col( + nativeType: string, + codecId: string, + opts?: { + nullable?: 
boolean; + default?: StorageColumn['default']; + typeParams?: Record; + typeRef?: string; + }, +): StorageColumn { + return { + nativeType, + codecId, + nullable: opts?.nullable ?? false, + ...(opts?.default !== undefined ? { default: opts.default } : {}), + ...(opts?.typeParams !== undefined ? { typeParams: opts.typeParams } : {}), + ...(opts?.typeRef !== undefined ? { typeRef: opts.typeRef } : {}), + }; +} + +const textCol = (opts?: { nullable?: boolean; default?: StorageColumn['default'] }) => + col('text', 'pg/text@1', opts); +const intCol = (opts?: { nullable?: boolean; default?: StorageColumn['default'] }) => + col('int4', 'pg/int4@1', opts); +const uuidCol = (opts?: { nullable?: boolean }) => col('uuid', 'pg/uuid@1', opts); +const boolCol = (opts?: { nullable?: boolean; default?: StorageColumn['default'] }) => + col('boolean', 'pg/bool@1', opts); +function table( + columns: Record, + opts?: { + primaryKey?: { columns: string[] }; + uniques?: UniqueConstraint[]; + indexes?: Index[]; + foreignKeys?: ForeignKey[]; + }, +): StorageTable { + return { + columns, + primaryKey: opts?.primaryKey ?? { columns: [Object.keys(columns)[0]!] }, + uniques: opts?.uniques ?? [], + indexes: opts?.indexes ?? [], + foreignKeys: opts?.foreignKeys ?? [], + }; +} + +function contract( + tables: Record, + extras?: { types?: Contract['storage']['types'] }, +): Contract { + return { + target: 'postgres', + targetFamily: 'sql', + profileHash: profileHash('sha256:test'), + storage: { + tables, + storageHash: coreHash(`sha256:${JSON.stringify(tables)}`), + ...ifDefined('types', extras?.types), + }, + roots: {}, + models: {}, + capabilities: {}, + extensionPacks: {}, + meta: {}, + }; +} + +function plan( + from: Contract | null, + to: Contract, + components?: ReadonlyArray>, +) { + const migrations = postgresTargetDescriptor.migrations!; + return migrations.planWithDescriptors!({ + fromContract: from, + toContract: to, + frameworkComponents: components ?? 
defaultComponents, + }); +} + +function descriptorKinds(result: { ok: true; descriptors: readonly { kind: string }[] }) { + return result.descriptors.map((d) => d.kind); +} + +function descriptorSummary(result: { ok: true; descriptors: readonly Record[] }) { + return result.descriptors.map((d) => { + const parts = [d['kind']]; + if (d['table']) parts.push(d['table'] as string); + if (d['column']) parts.push(d['column'] as string); + if (d['typeName']) parts.push(d['typeName'] as string); + if (d['dependencyId']) parts.push(d['dependencyId'] as string); + return parts.join('.'); + }); +} + +// ============================================================================ +// Additive — fresh database (from = null) +// ============================================================================ + +describe('additive — fresh database', () => { + it('1: single table with columns + PK', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['createTable']); + expect(result.needsDataMigration).toBe(false); + }); + + it('2: table with FK + backing index', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + post: table( + { id: uuidCol(), userId: uuidCol(), title: textCol() }, + { + foreignKeys: [ + { + columns: ['userId'], + references: { table: 'user', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual([ + 'createTable.user', + 'createTable.post', + 'addForeignKey.post', + 'createIndex.post', + ]); + expect(result.needsDataMigration).toBe(false); + }); + + it('3: table with explicit indexes and uniques', () => { + const to = contract({ + user: table( + { id: uuidCol(), email: textCol(), name: textCol() }, + { + 
uniques: [{ columns: ['email'] }], + indexes: [{ columns: ['name'] }], + }, + ), + }); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual([ + 'createTable.user', + 'createIndex.user', + 'addUnique.user', + ]); + }); + + it('4: table with enum type column', () => { + const to = contract( + { + user: table({ + id: uuidCol(), + role: col('user_role', 'pg/enum@1'), + }), + }, + { + types: { + UserRole: { + codecId: 'pg/enum@1', + nativeType: 'user_role', + typeParams: { values: ['admin', 'user'] }, + }, + }, + }, + ); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toContain('createEnumType'); + expect(descriptorKinds(result)).toContain('createTable'); + }); + + it('5: multiple tables with FK between them', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + post: table( + { id: uuidCol(), userId: uuidCol(), title: textCol() }, + { + foreignKeys: [ + { + columns: ['userId'], + references: { table: 'user', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + comment: table( + { id: uuidCol(), postId: uuidCol(), body: textCol() }, + { + foreignKeys: [ + { + columns: ['postId'], + references: { table: 'post', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + expect(kinds.filter((k) => k === 'createTable')).toHaveLength(3); + expect(kinds.filter((k) => k === 'addForeignKey')).toHaveLength(2); + expect(kinds.filter((k) => k === 'createIndex')).toHaveLength(2); + }); +}); + +// ============================================================================ +// Additive — existing contract +// ============================================================================ + +describe('additive — existing 
contract', () => { + const base = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + + it('6: new nullable column', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol(), bio: textCol({ nullable: true }) }), + }); + const result = plan(base, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['addColumn.user.bio']); + expect(result.needsDataMigration).toBe(false); + }); + + it('7: new NOT NULL column with default', () => { + const to = contract({ + user: table({ + id: uuidCol(), + email: textCol(), + active: boolCol({ default: { kind: 'literal', value: true } }), + }), + }); + const result = plan(base, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['addColumn.user.active']); + expect(result.needsDataMigration).toBe(false); + }); + + it('8: new NOT NULL column without default → data migration', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol(), name: textCol() }), + }); + const result = plan(base, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual([ + 'addColumn.user.name', + 'dataTransform', + 'setNotNull.user.name', + ]); + expect(result.needsDataMigration).toBe(true); + // addColumn should have nullable override + const addCol = result.descriptors[0] as Record; + expect(addCol['overrides']).toEqual({ nullable: true }); + }); + + it('9: multiple NOT NULL columns without defaults', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol(), firstName: textCol(), lastName: textCol() }), + }); + const result = plan(base, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + expect(kinds.filter((k) => k === 'dataTransform')).toHaveLength(2); + expect(result.needsDataMigration).toBe(true); + }); + + it('10: new table alongside existing (existing 
untouched)', () => { + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + post: table({ id: uuidCol(), title: textCol() }), + }); + const result = plan(base, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['createTable.post']); + }); + + it('11: new FK column on existing table', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }), + post: table({ id: uuidCol(), title: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + post: table( + { id: uuidCol(), title: textCol(), userId: uuidCol() }, + { + foreignKeys: [ + { + columns: ['userId'], + references: { table: 'user', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const summary = descriptorSummary(result); + expect(summary).toContain('addColumn.post.userId'); + expect(summary).toContain('addForeignKey.post'); + expect(summary).toContain('createIndex.post'); + }); +}); + +// ============================================================================ +// Reconciliation — drops +// ============================================================================ + +describe('reconciliation — drops', () => { + it('12: drop table', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }), + post: table({ id: uuidCol(), title: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['dropTable.post']); + }); + + it('13: drop column', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol(), name: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const result = 
plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['dropColumn.user.name']); + }); + + it('14: drop index', () => { + const from = contract({ + user: table( + { id: uuidCol(), email: textCol() }, + { indexes: [{ columns: ['email'], name: 'user_email_idx' }] }, + ), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['dropIndex']); + }); + + it('15: drop FK', () => { + const from = contract({ + user: table({ id: uuidCol() }), + post: table( + { id: uuidCol(), userId: uuidCol() }, + { + foreignKeys: [ + { + columns: ['userId'], + references: { table: 'user', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const to = contract({ + user: table({ id: uuidCol() }), + post: table({ id: uuidCol(), userId: uuidCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + expect(kinds).toContain('dropConstraint'); + expect(kinds).toContain('dropIndex'); + }); + + it('16: drop unique constraint', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }, { uniques: [{ columns: ['email'] }] }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['dropConstraint']); + }); + + it('17: drop default', () => { + const from = contract({ + user: table({ + id: uuidCol(), + active: boolCol({ default: { kind: 'literal', value: true } }), + }), + }); + const to = contract({ + user: table({ id: uuidCol(), active: boolCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + 
expect(descriptorKinds(result)).toEqual(['dropDefault']); + }); +}); + +// ============================================================================ +// Reconciliation — alters +// ============================================================================ + +describe('reconciliation — alters', () => { + it('18: safe widening type change (int4 → int8) — alterColumnType, no data migration', () => { + const from = contract({ + user: table({ id: uuidCol(), age: intCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), age: col('int8', 'pg/int8@1') }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['alterColumnType.user.age']); + expect(result.needsDataMigration).toBe(false); + }); + + it('18b: unsafe type change (text → int4) — dataTransform + alterColumnType', () => { + const from = contract({ + user: table({ id: uuidCol(), score: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), score: intCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['dataTransform', 'alterColumnType']); + expect(result.needsDataMigration).toBe(true); + }); + + it('19: nullable → NOT NULL — dataTransform + setNotNull (existing NULLs may violate)', () => { + const from = contract({ + user: table({ id: uuidCol(), name: textCol({ nullable: true }) }), + }); + const to = contract({ + user: table({ id: uuidCol(), name: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['dataTransform', 'setNotNull']); + expect(result.needsDataMigration).toBe(true); + const dt = result.descriptors[0] as Record; + expect(dt['name']).toBe('handle-nulls-user-name'); + }); + + it('20: NOT NULL → nullable', () => { + const from = contract({ + user: table({ id: uuidCol(), name: textCol() 
}), + }); + const to = contract({ + user: table({ id: uuidCol(), name: textCol({ nullable: true }) }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorSummary(result)).toEqual(['dropNotNull.user.name']); + }); + + it('21: default changed', () => { + const from = contract({ + user: table({ + id: uuidCol(), + active: boolCol({ default: { kind: 'literal', value: true } }), + }), + }); + const to = contract({ + user: table({ + id: uuidCol(), + active: boolCol({ default: { kind: 'literal', value: false } }), + }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['setDefault']); + }); + + it('22: default added', () => { + const from = contract({ + user: table({ id: uuidCol(), active: boolCol() }), + }); + const to = contract({ + user: table({ + id: uuidCol(), + active: boolCol({ default: { kind: 'literal', value: true } }), + }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['setDefault']); + }); + + it('23: default removed', () => { + const from = contract({ + user: table({ + id: uuidCol(), + active: boolCol({ default: { kind: 'literal', value: true } }), + }), + }); + const to = contract({ + user: table({ id: uuidCol(), active: boolCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['dropDefault']); + }); +}); + +// ============================================================================ +// Types +// ============================================================================ + +describe('types', () => { + const enumType = (values: string[]) => ({ + codecId: 'pg/enum@1' as const, + nativeType: 'user_role', + typeParams: { values }, + }); + + it('24: new enum type', () => { + const to = contract( + { user: table({ id: 
uuidCol(), role: col('user_role', 'pg/enum@1') }) }, + { types: { UserRole: enumType(['admin', 'user']) } }, + ); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const enumDesc = result.descriptors.find((d) => d.kind === 'createEnumType'); + expect(enumDesc).toBeDefined(); + expect((enumDesc as Record)['typeName']).toBe('UserRole'); + // createEnumType should come before createTable + const kinds = descriptorKinds(result); + expect(kinds.indexOf('createEnumType')).toBeLessThan(kinds.indexOf('createTable')); + }); + + it('25: enum values added → addEnumValues', () => { + const from = contract( + { user: table({ id: uuidCol(), role: col('user_role', 'pg/enum@1') }) }, + { types: { UserRole: enumType(['admin']) } }, + ); + const to = contract( + { user: table({ id: uuidCol(), role: col('user_role', 'pg/enum@1') }) }, + { types: { UserRole: enumType(['admin', 'user']) } }, + ); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual(['addEnumValues']); + const desc = result.descriptors[0] as unknown as { + typeName: string; + values: readonly string[]; + }; + expect(desc.typeName).toBe('UserRole'); + expect(desc.values).toEqual(['user']); + }); + + it('26: enum values removed → dataTransform + enum rebuild recipe', () => { + const from = contract( + { + user: table({ + id: uuidCol(), + role: col('user_role', 'pg/enum@1', { typeRef: 'UserRole' }), + }), + }, + { types: { UserRole: enumType(['admin', 'user']) } }, + ); + const to = contract( + { + user: table({ + id: uuidCol(), + role: col('user_role', 'pg/enum@1', { typeRef: 'UserRole' }), + }), + }, + { types: { UserRole: enumType(['admin']) } }, + ); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toEqual([ + 'dataTransform', + 'createEnumType', + 'alterColumnType', + 'dropEnumType', + 'renameType', + ]); + }); + + // 
Verifier doesn't produce type_missing for types referenced by a missing_table — the type + // is silently created as part of createTable. Needs verifier-level unknown codec detection. + it.fails('27: unknown codec type missing → conflict', () => { + const to = contract( + { user: table({ id: uuidCol(), data: col('custom_type', 'unknown/codec@1') }) }, + { + types: { + CustomType: { + codecId: 'unknown/codec@1', + nativeType: 'custom_type', + typeParams: { foo: 'bar' }, + }, + }, + }, + ); + const result = plan(null, to); + // Should fail with conflict about unsupported codec type + expect(result.ok).toBe(false); + if (result.ok) return; + expect(result.conflicts[0]?.summary).toContain('codec'); + }); +}); + +// ============================================================================ +// Dependencies +// ============================================================================ + +describe('dependencies', () => { + const componentsWithPgvector: ReadonlyArray> = [ + ...defaultComponents, + pgvectorDescriptor as TargetBoundComponentDescriptor<'sql', 'postgres'>, + ]; + + it('28: missing database dependency → createDependency', () => { + const to = contract({ + post: table({ id: uuidCol(), embedding: col('vector', 'pg/vector@1', { nullable: true }) }), + }); + const result = plan(null, to, componentsWithPgvector); + expect(result.ok).toBe(true); + if (!result.ok) return; + const depDesc = result.descriptors.find((d) => d.kind === 'createDependency'); + expect(depDesc).toBeDefined(); + expect((depDesc as Record)['dependencyId']).toBe('postgres.extension.vector'); + }); + + it('28b: pgvector dependency comes before table using vector column', () => { + const to = contract({ + post: table({ + id: uuidCol(), + title: textCol(), + embedding: col('vector', 'pg/vector@1', { nullable: true }), + }), + }); + const result = plan(null, to, componentsWithPgvector); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + 
expect(kinds).toContain('createDependency'); + expect(kinds).toContain('createTable'); + expect(kinds.indexOf('createDependency')).toBeLessThan(kinds.indexOf('createTable')); + }); +}); + +// ============================================================================ +// Ordering +// ============================================================================ + +describe('ordering', () => { + it('30: types and deps before tables', () => { + const componentsWithPgvector: ReadonlyArray> = + [ + ...defaultComponents, + pgvectorDescriptor as TargetBoundComponentDescriptor<'sql', 'postgres'>, + ]; + const to = contract( + { + user: table({ id: uuidCol(), role: col('user_role', 'pg/enum@1') }), + post: table({ id: uuidCol(), embedding: col('vector', 'pg/vector@1', { nullable: true }) }), + }, + { + types: { + UserRole: { + codecId: 'pg/enum@1', + nativeType: 'user_role', + typeParams: { values: ['admin', 'user'] }, + }, + }, + }, + ); + const result = plan(null, to, componentsWithPgvector); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + // createEnumType and createDependency should come before any createTable + const firstTable = kinds.indexOf('createTable'); + for (let i = 0; i < kinds.length; i++) { + if (kinds[i] === 'createEnumType' || kinds[i] === 'createDependency') { + expect(i).toBeLessThan(firstTable); + } + } + }); + + it('29: drops before creates', () => { + const from = contract({ + old_table: table({ id: uuidCol() }), + }); + const to = contract({ + new_table: table({ id: uuidCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + const dropIdx = kinds.indexOf('dropTable'); + const createIdx = kinds.indexOf('createTable'); + expect(dropIdx).toBeLessThan(createIdx); + }); + + it('31: tables before columns', () => { + const from = contract({ + user: table({ id: uuidCol() }), + }); + const to = contract({ + user: 
table({ id: uuidCol(), email: textCol({ nullable: true }) }), + post: table({ id: uuidCol(), title: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + const tableIdx = kinds.indexOf('createTable'); + const colIdx = kinds.indexOf('addColumn'); + expect(tableIdx).toBeLessThan(colIdx); + }); + + it('32: pattern ops between columns and constraints', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const to = contract({ + user: table( + { id: uuidCol(), email: textCol(), name: textCol() }, + { uniques: [{ columns: ['name'] }] }, + ), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + // name is NOT NULL without default → pattern match + // addColumn(nullable) → dataTransform → setNotNull, then addUnique after + const kinds = descriptorKinds(result); + const dtIdx = kinds.indexOf('dataTransform'); + const uniqueIdx = kinds.indexOf('addUnique'); + expect(dtIdx).toBeLessThan(uniqueIdx); + }); +}); + +// ============================================================================ +// Combined / realistic +// ============================================================================ + +describe('combined / realistic', () => { + it('34: vertical table split (S5) — new table with FK to existing', () => { + const from = contract({ + user: table({ + id: uuidCol(), + email: textCol(), + bio: textCol({ nullable: true }), + avatarUrl: textCol({ nullable: true }), + }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + user_profile: table( + { + id: uuidCol(), + userId: uuidCol(), + bio: textCol({ nullable: true }), + avatarUrl: textCol({ nullable: true }), + }, + { + foreignKeys: [ + { + columns: ['userId'], + references: { table: 'user', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(from, to); + 
expect(result.ok).toBe(true); + if (!result.ok) return; + const summary = descriptorSummary(result); + expect(summary).toContain('dropColumn.user.bio'); + expect(summary).toContain('dropColumn.user.avatarUrl'); + expect(summary).toContain('createTable.user_profile'); + expect(summary).toContain('addForeignKey.user_profile'); + expect(summary).toContain('createIndex.user_profile'); + }); + + it('36: no-op — identical contracts', () => { + const c = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const result = plan(c, c); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(result.descriptors).toEqual([]); + expect(result.needsDataMigration).toBe(false); + }); +}); + +// ============================================================================ +// Old planner parity +// ============================================================================ + +describe('old planner parity', () => { + it('37: NOT NULL without default produces dataTransform, not temp default', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol(), age: intCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(descriptorKinds(result)).toContain('dataTransform'); + expect(result.needsDataMigration).toBe(true); + // Should NOT contain a plain addColumn (that would apply NOT NULL directly) + const plainAddCol = result.descriptors.find( + (d) => d.kind === 'addColumn' && !(d as Record)['overrides'], + ); + expect(plainAddCol).toBeUndefined(); + }); + + it('38: column with typeParams (char(36)) resolves correctly', () => { + const to = contract({ + user: table({ + id: col('character', 'sql/char@1', { typeParams: { length: 36 } }), + email: textCol(), + }), + }); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + 
expect(descriptorKinds(result)).toEqual(['createTable']); + }); +}); + +// ============================================================================ +// Data-safety gaps — scenarios that SHOULD detect data migration needs +// These tests document expected behavior we haven't implemented yet. +// ============================================================================ + +describe('data-safety gaps', () => { + it('S1: computed backfill — NOT NULL column on non-empty table needs data migration', () => { + // This is handled by the notNullBackfillStrategy (test 8) + // Verify the dataTransform has a meaningful structure + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol(), displayName: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(result.needsDataMigration).toBe(true); + const dt = result.descriptors.find((d) => d.kind === 'dataTransform') as + | Record + | undefined; + expect(dt).toBeDefined(); + expect(dt!['name']).toBe('backfill-user-displayName'); + }); + + it('S2: type change (text → int) emits dataTransform + alterColumnType via typeChangeStrategy', () => { + const from = contract({ + user: table({ id: uuidCol(), foo: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), foo: intCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(result.needsDataMigration).toBe(true); + expect(descriptorKinds(result)).toEqual(['dataTransform', 'alterColumnType']); + }); + + it('S2b: lossy type change (float → int) emits dataTransform + alterColumnType', () => { + const from = contract({ + product: table({ id: uuidCol(), price: col('float8', 'pg/float8@1') }), + }); + const to = contract({ + product: table({ id: uuidCol(), price: col('int4', 'pg/int4@1') }), + }); + const result = plan(from, to); + 
expect(result.ok).toBe(true); + if (!result.ok) return; + expect(result.needsDataMigration).toBe(true); + expect(descriptorKinds(result)).toEqual(['dataTransform', 'alterColumnType']); + }); + + // Descriptor ordering: dropColumn comes before pattern ops (dataTransform) instead of after. + // Needs a phase ordering fix so drops happen after pattern ops for column split scenarios. + it.fails('S3: column split (name → firstName + lastName) — new NOT NULL columns detected', () => { + const from = contract({ + user: table({ id: uuidCol(), name: textCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), firstName: textCol(), lastName: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + // Should detect 2 NOT NULL columns + drop of old column + expect(result.needsDataMigration).toBe(true); + const kinds = descriptorKinds(result); + expect(kinds.filter((k) => k === 'dataTransform')).toHaveLength(2); + expect(kinds).toContain('dropColumn'); + // The old column should remain available during the data transform + // so the user can reference it in the backfill (e.g. split_part(name, ...)). + // Drops must come AFTER pattern ops for this to work. + const lastDt = kinds.lastIndexOf('dataTransform'); + const dropIdx = kinds.indexOf('dropColumn'); + expect(dropIdx).toBeGreaterThan(lastDt); + }); + + it('S5: vertical table split — new table starts empty, no auto-detected data migration', () => { + // The profile table is NEW — zero rows. NOT NULL constraints on empty tables are fine. + // The real need (INSERT data from user into profile) isn't inferrable from the structural + // diff — the planner sees "new table + dropped columns" but can't know they're related. + // This is a `migration new` scenario where the user authors the data migration manually. 
+ const from = contract({ + user: table({ id: uuidCol(), email: textCol(), bio: textCol({ nullable: true }) }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol() }), + profile: table( + { id: uuidCol(), userId: uuidCol(), bio: textCol({ nullable: true }) }, + { + foreignKeys: [ + { + columns: ['userId'], + references: { table: 'user', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + expect(result.needsDataMigration).toBe(false); + // Structural ops: drop columns from user, create profile with FK + index + const summary = descriptorSummary(result); + expect(summary).toContain('dropColumn.user.bio'); + expect(summary).toContain('createTable.profile'); + expect(summary).toContain('addForeignKey.profile'); + }); + + it('S13: nullable → NOT NULL on existing column with potential violations', () => { + const from = contract({ + user: table({ id: uuidCol(), phone: textCol({ nullable: true }) }), + }); + const to = contract({ + user: table({ id: uuidCol(), phone: textCol() }), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + // Currently emits just setNotNull — but if table has NULL values, this fails at apply time + // Should detect this as needing a data migration (user decides what to do with NULLs) + expect(result.needsDataMigration).toBe(true); + }); + + it('S14: data seeding — new table with NOT NULL FK to new lookup table', () => { + const from = contract({ + user: table({ id: uuidCol(), countryCode: textCol() }), + }); + const to = contract({ + country: table({ id: uuidCol(), code: textCol() }), + user: table( + { id: uuidCol(), countryId: uuidCol() }, + { + foreignKeys: [ + { + columns: ['countryId'], + references: { table: 'country', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(from, to); + 
expect(result.ok).toBe(true); + if (!result.ok) return; + // countryId is NOT NULL without default on existing user table → data migration + // Also: country table needs seed data before FK can be applied + expect(result.needsDataMigration).toBe(true); + }); + + it('S18: multi-tenant — NOT NULL FK column added to multiple existing tables', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol() }), + order: table({ id: uuidCol(), total: intCol() }), + }); + const to = contract({ + tenant: table({ id: uuidCol(), name: textCol() }), + user: table( + { id: uuidCol(), email: textCol(), tenantId: uuidCol() }, + { + foreignKeys: [ + { + columns: ['tenantId'], + references: { table: 'tenant', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + order: table( + { id: uuidCol(), total: intCol(), tenantId: uuidCol() }, + { + foreignKeys: [ + { + columns: ['tenantId'], + references: { table: 'tenant', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + // tenantId on user and order are both NOT NULL without default → 2 data transforms + expect(result.needsDataMigration).toBe(true); + const kinds = descriptorKinds(result); + expect(kinds.filter((k) => k === 'dataTransform')).toHaveLength(2); + }); +}); + +// ============================================================================ +// Mixed / complex +// ============================================================================ + +describe('mixed / complex', () => { + it('35: mixed additive + destructive in same plan', () => { + const from = contract({ + user: table({ id: uuidCol(), email: textCol(), oldField: textCol({ nullable: true }) }), + legacy: table({ id: uuidCol() }), + }); + const to = contract({ + user: table({ id: uuidCol(), email: textCol(), newField: textCol({ nullable: true }) }), + fresh: table({ id: uuidCol(), name: textCol() }), + }); + 
const result = plan(from, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const kinds = descriptorKinds(result); + // Drops + expect(kinds).toContain('dropColumn'); + expect(kinds).toContain('dropTable'); + // Creates + expect(kinds).toContain('createTable'); + expect(kinds).toContain('addColumn'); + // Drops should come before creates + const lastDrop = Math.max(kinds.lastIndexOf('dropColumn'), kinds.lastIndexOf('dropTable')); + const firstCreate = Math.min( + kinds.indexOf('createTable') === -1 ? Number.POSITIVE_INFINITY : kinds.indexOf('createTable'), + kinds.indexOf('addColumn') === -1 ? Number.POSITIVE_INFINITY : kinds.indexOf('addColumn'), + ); + expect(lastDrop).toBeLessThan(firstCreate); + }); + + it('33: FK references table created in same plan', () => { + const to = contract({ + author: table({ id: uuidCol(), name: textCol() }), + book: table( + { id: uuidCol(), authorId: uuidCol(), title: textCol() }, + { + foreignKeys: [ + { + columns: ['authorId'], + references: { table: 'author', columns: ['id'] }, + constraint: true, + index: true, + }, + ], + }, + ), + }); + const result = plan(null, to); + expect(result.ok).toBe(true); + if (!result.ok) return; + const summary = descriptorSummary(result); + // Both tables created, FK comes after both + const authorIdx = summary.indexOf('createTable.author'); + const bookIdx = summary.indexOf('createTable.book'); + const fkIdx = summary.findIndex((s) => s.startsWith('addForeignKey')); + expect(authorIdx).toBeLessThan(fkIdx); + expect(bookIdx).toBeLessThan(fkIdx); + }); +}); diff --git a/packages/3-targets/3-targets/postgres/tsdown.config.ts b/packages/3-targets/3-targets/postgres/tsdown.config.ts index 374b96f698..91b8baf01a 100644 --- a/packages/3-targets/3-targets/postgres/tsdown.config.ts +++ b/packages/3-targets/3-targets/postgres/tsdown.config.ts @@ -1,5 +1,10 @@ import { defineConfig } from '@prisma-next/tsdown'; export default defineConfig({ - entry: ['src/exports/control.ts', 
'src/exports/runtime.ts', 'src/exports/pack.ts'], + entry: [ + 'src/exports/control.ts', + 'src/exports/runtime.ts', + 'src/exports/pack.ts', + 'src/exports/migration-builders.ts', + ], }); diff --git a/packages/3-targets/6-adapters/postgres/package.json b/packages/3-targets/6-adapters/postgres/package.json index 7059618af6..8178edb8cb 100644 --- a/packages/3-targets/6-adapters/postgres/package.json +++ b/packages/3-targets/6-adapters/postgres/package.json @@ -18,7 +18,6 @@ "clean": "rm -rf dist dist-tsc dist-tsc-prod coverage .tmp-output" }, "dependencies": { - "@prisma-next/cli": "workspace:*", "@prisma-next/contract": "workspace:*", "@prisma-next/contract-authoring": "workspace:*", "@prisma-next/family-sql": "workspace:*", diff --git a/packages/3-targets/6-adapters/postgres/src/core/enum-control-hooks.ts b/packages/3-targets/6-adapters/postgres/src/core/enum-control-hooks.ts index 38f4ab7efc..b4516768de 100644 --- a/packages/3-targets/6-adapters/postgres/src/core/enum-control-hooks.ts +++ b/packages/3-targets/6-adapters/postgres/src/core/enum-control-hooks.ts @@ -130,7 +130,6 @@ function getEnumValues(typeInstance: StorageTypeInstance): readonly string[] | n * Uses optional chaining to simplify navigation through the annotations structure. 
*/ function readExistingEnumValues(schema: SqlSchemaIR, nativeType: string): readonly string[] | null { - // Schema annotations.pg.storageTypes is populated by introspection const storageTypes = (schema.annotations?.['pg'] as Record | undefined)?.[ 'storageTypes' ] as Record | undefined; @@ -698,18 +697,24 @@ export const pgEnumControlHooks: CodecControlHooks = { }, ]; } - if (!arraysEqual(existing, desired)) { - return [ - { - kind: 'type_values_mismatch', - typeName, - expected: desired.join(', '), - actual: existing.join(', '), - message: `Type "${typeName}" values do not match contract`, - }, - ]; - } - return []; + const diff = determineEnumDiff(existing, desired); + if (diff.kind === 'unchanged') return []; + const existingSet = new Set(existing); + const desiredSet = new Set(desired); + const addedValues = desired.filter((v) => !existingSet.has(v)); + const removedValues = existing.filter((v) => !desiredSet.has(v)); + return [ + { + kind: 'enum_values_changed' as const, + typeName, + addedValues, + removedValues, + message: + diff.kind === 'add_values' + ? `Enum type "${typeName}" needs new values: ${addedValues.join(', ')}` + : `Enum type "${typeName}" values changed (requires rebuild): +[${addedValues.join(', ')}] -[${removedValues.join(', ')}]`, + }, + ]; }, introspectTypes: async ({ driver, schemaName }) => { const namespace = schemaName ?? 
'public'; diff --git a/packages/3-targets/6-adapters/postgres/test/enum-control-hooks.basic.test.ts b/packages/3-targets/6-adapters/postgres/test/enum-control-hooks.basic.test.ts index c00066aa29..be2472f081 100644 --- a/packages/3-targets/6-adapters/postgres/test/enum-control-hooks.basic.test.ts +++ b/packages/3-targets/6-adapters/postgres/test/enum-control-hooks.basic.test.ts @@ -546,10 +546,10 @@ describe('pgEnumControlHooks.verifyType', () => { expect(issues).toMatchObject([ { - kind: 'type_values_mismatch', + kind: 'enum_values_changed', typeName: 'Role', - expected: 'ADMIN', - actual: 'USER', + addedValues: ['ADMIN'], + removedValues: ['USER'], }, ]); }); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c7a1b96ea3..4adc62ebd6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -63,7 +63,7 @@ importers: version: 24.10.4 '@vitest/coverage-v8': specifier: ^4.0.17 - version: 4.0.17(vitest@4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1)) + version: 4.0.17(vitest@4.0.17) dependency-cruiser: specifier: ^16.3.3 version: 16.10.4 @@ -90,7 +90,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) examples/mongo-demo: dependencies: @@ -169,7 +169,7 @@ importers: version: 7.3.1(@types/node@24.10.4)(jiti@2.6.1)(tsx@4.20.6)(yaml@2.8.1) vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) examples/prisma-next-demo: dependencies: @@ -296,7 +296,7 @@ importers: version: 7.3.1(@types/node@24.10.4)(jiti@2.6.1)(tsx@4.20.6)(yaml@2.8.1) vitest: specifier: 'catalog:' - version: 
4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/0-config/tsconfig: {} @@ -338,7 +338,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/0-foundation/utils: devDependencies: @@ -356,7 +356,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/1-core/config: dependencies: @@ -387,7 +387,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/1-core/errors: dependencies: @@ -412,7 +412,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/1-core/framework-components: dependencies: @@ -440,7 +440,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 
4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/1-core/operations: devDependencies: @@ -461,7 +461,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/2-authoring/contract: devDependencies: @@ -479,7 +479,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/2-authoring/ids: dependencies: @@ -510,7 +510,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/2-authoring/psl-parser: dependencies: @@ -532,7 +532,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/2-authoring/psl-printer: dependencies: @@ -560,7 +560,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) 
packages/1-framework/3-tooling/cli: dependencies: @@ -652,6 +652,9 @@ importers: '@types/node': specifier: 'catalog:' version: 24.10.4 + '@vitest/ui': + specifier: 4.0.17 + version: 4.0.17(vitest@4.0.17) tsdown: specifier: 'catalog:' version: 0.18.4(typescript@5.9.3) @@ -660,7 +663,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/3-tooling/emitter: dependencies: @@ -703,7 +706,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/3-tooling/migration: dependencies: @@ -737,7 +740,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/3-tooling/vite-plugin-contract-emit: dependencies: @@ -765,7 +768,7 @@ importers: version: 7.3.1(@types/node@24.10.4)(jiti@2.6.1)(tsx@4.20.6)(yaml@2.8.1) vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/1-framework/4-runtime/runtime-executor: dependencies: @@ -799,7 +802,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 
4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/1-foundation/mongo-codec: dependencies: @@ -827,7 +830,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/1-foundation/mongo-contract: dependencies: @@ -855,7 +858,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/1-foundation/mongo-value: devDependencies: @@ -873,7 +876,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/2-authoring/contract-psl: dependencies: @@ -907,7 +910,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/2-authoring/contract-ts: dependencies: @@ -944,7 +947,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) 
+ version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/3-tooling/emitter: dependencies: @@ -978,7 +981,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/3-tooling/mongo-schema-ir: dependencies: @@ -1003,7 +1006,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/4-query/query-ast: dependencies: @@ -1037,7 +1040,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/5-query-builders/orm: dependencies: @@ -1089,7 +1092,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/5-query-builders/pipeline-builder: dependencies: @@ -1120,7 +1123,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 
4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/6-transport/mongo-lowering: dependencies: @@ -1145,7 +1148,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/6-transport/mongo-wire: dependencies: @@ -1167,7 +1170,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/7-runtime: dependencies: @@ -1231,7 +1234,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-mongo-family/9-family: dependencies: @@ -1277,7 +1280,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/1-core/contract: dependencies: @@ -1308,7 +1311,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) 
packages/2-sql/1-core/errors: devDependencies: @@ -1329,7 +1332,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/1-core/operations: dependencies: @@ -1360,7 +1363,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/1-core/schema-ir: dependencies: @@ -1385,7 +1388,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/2-authoring/contract-psl: dependencies: @@ -1431,7 +1434,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/2-authoring/contract-ts: dependencies: @@ -1483,7 +1486,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/3-tooling/emitter: dependencies: @@ -1520,7 +1523,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 
4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/4-lanes/query-builder: devDependencies: @@ -1544,7 +1547,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/4-lanes/relational-core: dependencies: @@ -1593,7 +1596,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/4-lanes/sql-builder: dependencies: @@ -1610,21 +1613,12 @@ importers: '@prisma-next/adapter-postgres': specifier: workspace:* version: link:../../../3-targets/6-adapters/postgres - '@prisma-next/cli': - specifier: workspace:* - version: link:../../../1-framework/3-tooling/cli '@prisma-next/contract': specifier: workspace:* version: link:../../../1-framework/0-foundation/contract - '@prisma-next/driver-postgres': - specifier: workspace:* - version: link:../../../3-targets/7-drivers/postgres '@prisma-next/extension-pgvector': specifier: workspace:* version: link:../../../3-extensions/pgvector - '@prisma-next/family-sql': - specifier: workspace:* - version: link:../../9-family '@prisma-next/ids': specifier: workspace:* version: link:../../../1-framework/2-authoring/ids @@ -1634,12 +1628,6 @@ importers: '@prisma-next/sql-contract-ts': specifier: workspace:* version: link:../../2-authoring/contract-ts - '@prisma-next/sql-runtime': - specifier: workspace:* - version: link:../../5-runtime - 
'@prisma-next/target-postgres': - specifier: workspace:* - version: link:../../../3-targets/3-targets/postgres '@prisma-next/test-utils': specifier: workspace:* version: link:../../../../test/utils @@ -1652,12 +1640,6 @@ importers: '@prisma-next/utils': specifier: workspace:* version: link:../../../1-framework/0-foundation/utils - '@types/pg': - specifier: 'catalog:' - version: 8.16.0 - pg: - specifier: 'catalog:' - version: 8.16.3 tsdown: specifier: 'catalog:' version: 0.18.4(typescript@5.9.3) @@ -1666,7 +1648,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/5-runtime: dependencies: @@ -1724,13 +1706,10 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/2-sql/9-family: dependencies: - '@prisma-next/cli': - specifier: workspace:* - version: link:../../1-framework/3-tooling/cli '@prisma-next/contract': specifier: workspace:* version: link:../../1-framework/0-foundation/contract @@ -1800,7 +1779,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-extensions/paradedb: dependencies: @@ -1825,13 +1804,10 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 
4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-extensions/pgvector: dependencies: - '@prisma-next/cli': - specifier: workspace:* - version: link:../../1-framework/3-tooling/cli '@prisma-next/contract': specifier: workspace:* version: link:../../1-framework/0-foundation/contract @@ -1889,7 +1865,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-extensions/postgres: dependencies: @@ -1947,7 +1923,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-extensions/sql-orm-client: dependencies: @@ -2026,7 +2002,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-extensions/sqlite: dependencies: @@ -2078,7 +2054,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-mongo-target/1-mongo-target: dependencies: @@ -2100,7 +2076,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 
4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-mongo-target/2-mongo-adapter: dependencies: @@ -2167,7 +2143,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-mongo-target/3-mongo-driver: dependencies: @@ -2210,13 +2186,10 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-targets/3-targets/postgres: dependencies: - '@prisma-next/cli': - specifier: workspace:* - version: link:../../../1-framework/3-tooling/cli '@prisma-next/contract': specifier: workspace:* version: link:../../../1-framework/0-foundation/contract @@ -2226,12 +2199,18 @@ importers: '@prisma-next/framework-components': specifier: workspace:* version: link:../../../1-framework/1-core/framework-components + '@prisma-next/sql-builder': + specifier: workspace:* + version: link:../../../2-sql/4-lanes/sql-builder '@prisma-next/sql-contract': specifier: workspace:* version: link:../../../2-sql/1-core/contract '@prisma-next/sql-errors': specifier: workspace:* version: link:../../../2-sql/1-core/errors + '@prisma-next/sql-operations': + specifier: workspace:* + version: link:../../../2-sql/1-core/operations '@prisma-next/sql-relational-core': specifier: workspace:* version: link:../../../2-sql/4-lanes/relational-core @@ -2274,7 +2253,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 
4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-targets/3-targets/sqlite: dependencies: @@ -2314,13 +2293,10 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-targets/6-adapters/postgres: dependencies: - '@prisma-next/cli': - specifier: workspace:* - version: link:../../../1-framework/3-tooling/cli '@prisma-next/contract': specifier: workspace:* version: link:../../../1-framework/0-foundation/contract @@ -2384,7 +2360,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-targets/6-adapters/sqlite: dependencies: @@ -2451,7 +2427,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-targets/7-drivers/postgres: dependencies: @@ -2515,7 +2491,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages/3-targets/7-drivers/sqlite: dependencies: @@ -2561,7 +2537,7 @@ importers: 
version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) test/e2e/framework: dependencies: @@ -2649,7 +2625,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) test/integration: dependencies: @@ -2797,7 +2773,7 @@ importers: version: 7.3.1(@types/node@24.10.4)(jiti@2.6.1)(tsx@4.20.6)(yaml@2.8.1) vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) test/integration/test/fixtures/cli/cli-e2e-test-app: dependencies: @@ -2920,7 +2896,7 @@ importers: version: 5.9.3 vitest: specifier: 'catalog:' - version: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + version: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) packages: @@ -3478,6 +3454,9 @@ packages: '@oxc-project/types@0.106.0': resolution: {integrity: sha512-QdsH3rZq480VnOHSHgPYOhjL8O8LBdcnSjM408BpPCCUc0JYYZPG9Gafl9i3OcGk/7137o+gweb4cCv3WAUykg==} + '@polka/url@1.0.0-next.29': + resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==} + '@prisma/debug@7.1.0': resolution: {integrity: sha512-pPAckG6etgAsEBusmZiFwM9bldLSNkn++YuC4jCTJACdK5hLOVnOzX7eSL2FgaU6Gomd6wIw21snUX2dYroMZQ==} @@ -3986,6 +3965,11 @@ packages: '@vitest/spy@4.0.17': resolution: {integrity: 
sha512-I1bQo8QaP6tZlTomQNWKJE6ym4SHf3oLS7ceNjozxxgzavRAgZDc06T7kD8gb9bXKEgcLNt00Z+kZO6KaJ62Ew==} + '@vitest/ui@4.0.17': + resolution: {integrity: sha512-hRDjg6dlDz7JlZAvjbiCdAJ3SDG+NH8tjZe21vjxfvT2ssYAn72SRXMge3dKKABm3bIJ3C+3wdunIdur8PHEAw==} + peerDependencies: + vitest: 4.0.17 + '@vitest/utils@4.0.17': resolution: {integrity: sha512-RG6iy+IzQpa9SB8HAFHJ9Y+pTzI+h8553MrciN9eC6TFBErqrQaTas4vG+MVj8S4uKk8uTT2p0vgZPnTdxd96w==} @@ -4406,6 +4390,9 @@ packages: picomatch: optional: true + fflate@0.8.2: + resolution: {integrity: sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==} + fill-range@7.1.1: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} @@ -4418,6 +4405,9 @@ packages: resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} engines: {node: '>=8'} + flatted@3.4.2: + resolution: {integrity: sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==} + follow-redirects@1.15.11: resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} engines: {node: '>=4.0'} @@ -4780,6 +4770,10 @@ packages: moo@0.5.2: resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==} + mrmime@2.0.1: + resolution: {integrity: sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==} + engines: {node: '>=10'} + ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} @@ -5154,6 +5148,10 @@ packages: resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} engines: {node: '>=14'} + sirv@3.0.2: + resolution: {integrity: 
sha512-2wcC/oGxHis/BoHkkPwldgiPSYcpZK3JU28WoMVv55yHJgcZ8rlXvuG9iZggz+sU1d4bRgIGASwyWqjxu3FM0g==} + engines: {node: '>=18'} + sisteransi@1.0.5: resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} @@ -5269,6 +5267,10 @@ packages: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} + totalist@3.0.1: + resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} + engines: {node: '>=6'} + tough-cookie@6.0.0: resolution: {integrity: sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==} engines: {node: '>=16'} @@ -5929,6 +5931,8 @@ snapshots: '@oxc-project/types@0.106.0': {} + '@polka/url@1.0.0-next.29': {} + '@prisma/debug@7.1.0': {} '@prisma/dev@0.19.1(typescript@5.9.3)': @@ -6263,7 +6267,7 @@ snapshots: transitivePeerDependencies: - '@swc/helpers' - '@vitest/coverage-v8@4.0.17(vitest@4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1))': + '@vitest/coverage-v8@4.0.17(vitest@4.0.17)': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.17 @@ -6275,7 +6279,7 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) + vitest: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) '@vitest/expect@4.0.17': dependencies: @@ -6311,6 +6315,17 @@ snapshots: '@vitest/spy@4.0.17': {} + '@vitest/ui@4.0.17(vitest@4.0.17)': + dependencies: + '@vitest/utils': 4.0.17 + fflate: 0.8.2 + flatted: 3.4.2 + pathe: 2.0.3 + sirv: 3.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vitest: 4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1) 
+ '@vitest/utils@4.0.17': dependencies: '@vitest/pretty-format': 4.0.17 @@ -6749,6 +6764,8 @@ snapshots: optionalDependencies: picomatch: 4.0.3 + fflate@0.8.2: {} + fill-range@7.1.1: dependencies: to-regex-range: 5.0.1 @@ -6764,6 +6781,8 @@ snapshots: locate-path: 5.0.0 path-exists: 4.0.0 + flatted@3.4.2: {} + follow-redirects@1.15.11(debug@4.4.3): optionalDependencies: debug: 4.4.3 @@ -7130,6 +7149,8 @@ snapshots: moo@0.5.2: {} + mrmime@2.0.1: {} + ms@2.1.3: {} nano-spawn@2.0.0: {} @@ -7496,6 +7517,12 @@ snapshots: signal-exit@4.1.0: {} + sirv@3.0.2: + dependencies: + '@polka/url': 1.0.0-next.29 + mrmime: 2.0.1 + totalist: 3.0.1 + sisteransi@1.0.5: {} slice-ansi@7.1.2: @@ -7613,6 +7640,8 @@ snapshots: dependencies: is-number: 7.0.0 + totalist@3.0.1: {} + tough-cookie@6.0.0: dependencies: tldts: 7.0.25 @@ -7745,7 +7774,7 @@ snapshots: tsx: 4.20.6 yaml: 2.8.1 - vitest@4.0.17(@types/node@24.10.4)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1): + vitest@4.0.17(@types/node@24.10.4)(@vitest/ui@4.0.17)(jiti@2.6.1)(jsdom@28.1.0(@noble/hashes@2.0.1))(tsx@4.20.6)(yaml@2.8.1): dependencies: '@vitest/expect': 4.0.17 '@vitest/mocker': 4.0.17(vite@7.3.1(@types/node@24.10.4)(jiti@2.6.1)(tsx@4.20.6)(yaml@2.8.1)) @@ -7769,6 +7798,7 @@ snapshots: why-is-node-running: 2.3.0 optionalDependencies: '@types/node': 24.10.4 + '@vitest/ui': 4.0.17(vitest@4.0.17) jsdom: 28.1.0(@noble/hashes@2.0.1) transitivePeerDependencies: - jiti diff --git a/projects/graph-based-migrations/plans/data-migrations-plan.md b/projects/graph-based-migrations/plans/data-migrations-plan.md new file mode 100644 index 0000000000..cf1ab19cbe --- /dev/null +++ b/projects/graph-based-migrations/plans/data-migrations-plan.md @@ -0,0 +1,98 @@ +# Data Migrations Plan + +## Summary + +Data migration support for prisma-next's graph-based migration system. 
All migrations (structural + data) are authored as TypeScript operation chains using `createBuilders()`, serialized to SQL at verification time, and executed at apply time. Data transforms are first-class operations with typed query builder callbacks. + +**Spec:** `projects/graph-based-migrations/specs/data-migrations-spec.md` + +## Prerequisites + +- [x] Refs refactored to per-file directory model with invariants +- [x] Phantom `@prisma-next/cli` dependencies removed from target-postgres, adapter-postgres, extension-pgvector, family-sql +- [x] sql-builder integration tests moved to integration test package (breaks cycle, enables target-postgres → sql-builder dependency) + +## Milestone 1: Descriptor-based planner + +- [x] Operation descriptors, resolver, migration strategy pipeline +- [x] migration.ts scaffolded with real builder calls +- [x] Plan → evaluate → resolve → ops.json → attest round-trip +- [x] New tables emit FK, index, unique, enum, dependency descriptors +- [x] Old planner fallback removed from `migration plan` +- [x] Verifier path for types/deps in contract-to-contract planning +- [x] Enum value add/remove/reorder support via primitive descriptors and rebuild recipe +- [x] `SchemaIssue` discriminated union: `BaseSchemaIssue | EnumValuesChangedIssue` +- [x] `contractToSchemaIR` re-keys storage types by nativeType for correct verifier hook resolution +- [x] Structural descriptors moved to `@prisma-next/family-sql/operation-descriptors` (shared across SQL targets) +- [x] New primitives: `addEnumValues`, `dropEnumType`, `renameType`, `alterColumnType` with `using`/`toType` +- [ ] Descriptor ordering — drops after pattern ops for column split (1 failing test) +- [ ] Transaction descriptors — `transaction([...ops])` and `noTransaction(op)` per spec R6 +- [ ] `ALTER TYPE ADD VALUE` requires `noTransaction` — blocked on R6 +- [ ] Delete old planner once `db update` migrated + +## Milestone 2: Data migration detection + +- [x] NOT NULL backfill strategy — 
addColumn(nullable) + dataTransform + setNotNull +- [x] Unsafe type change strategy — safe widenings → alterColumnType, unsafe → dataTransform +- [x] Nullable tightening strategy — nullable → NOT NULL → dataTransform + setNotNull +- [x] Enum change strategy — add-only → addEnumValues, removal → dataTransform + rebuild recipe, reorder → rebuild recipe +- [x] Scenario test suite (48 tests, 2 known failures as gap map) +- [x] Strategies extracted to `planner-strategies.ts`, pluggable via `planDescriptors({ strategies })` +- [ ] Dev push strategies for `db update` (temp defaults, destructive type changes, no data transforms) +- [ ] Unknown codec type detection (test 27 — verifier doesn't flag types with no codec hooks) + +## Milestone 3: Unified TS authoring + +- [x] migration.ts scaffold/evaluate utilities +- [x] `migration new` command +- [x] `migration verify` re-evaluates migration.ts on every run +- [x] Runner data transform lifecycle (check → run → check) +- [x] Query builder integration via typed callbacks: `(db) => db.user.update(...)` +- [x] `createBuilders()` for typed dataTransform callbacks with full autocomplete +- [x] SQL lowered at verify time via postgres adapter, stored in ops.json as `{ sql, params }` +- [x] `queryOperations` on control descriptors for extension function support (e.g., pgvector cosineDistance) +- [x] Scaffold generates `createBuilders()` with contract type import when data transforms detected +- [x] Multi-statement data transforms: `(db) => [insert, update]` from single callback +- [x] Scaffold serializes `createEnumType` values, `alterColumnType` using/toType, `dropEnumType`, `renameType` + +## Milestone 4: CLI polish + +- [x] Draft migration visibility (status/apply/plan + dashed graph edges) +- [ ] migration show for data transforms +- [ ] Verify hardening (error cases, output display) +- [ ] Stale directory cleanup on plan failure — when the non-draft path (evaluate → resolve → write ops → attest) fails after 
`writeMigrationPackage` + `scaffoldMigrationTs`, a partial package is left on disk. Either write to a temp dir and rename on success, or clean up in the catch block. + +## Milestone 5: Graph integration + +- [ ] Migration edges carry data transform metadata +- [ ] Invariant-aware path selection from environment refs +- [ ] Ledger records data transform names + +## E2E verified scenarios + +Tested against real Postgres database with data: +- [x] Initial creation with enum type + data insert +- [x] Enum value removal with data transform (moderator → user) + rebuild recipe +- [x] Enum value reorder (admin,user → user,admin) + rebuild recipe +- [x] Enum value rename (user → member) via USING clause in alterColumnType +- [ ] Enum value addition — blocked on `noTransaction` runner support (ALTER TYPE ADD VALUE can't run in a transaction) + +## Open Items + +1. **Query builder expressiveness**: UPDATE SET column = other_column (column references in SET) not supported. INSERT...SELECT, subqueries with joins also gaps. The SQL builder AST is DML-focused and lacks DDL-oriented nodes (CASE expressions, type casts). Enum USING clauses require raw SQL strings. Tracked in spec. + +2. **Stale serialization**: If migration.ts is edited after verify, ops.json is stale. Re-running verify re-evaluates. No mtime-based detection. + +3. **Editing already-applied migrations**: `migration verify` re-attests silently even if the migration was already applied. Filed in issue-triage.md. + +4. **`db update` migration path**: `db update` and `db init` use the old reconciliation planner. Needs dev-push strategies (temp defaults, destructive type changes, no data transforms) before the old planner can be deleted. Only 2 production call sites: `db-update.ts` and `db-init.ts`. + +5. **Enum value rename UX**: Removal + addition can't be automatically distinguished from a rename. 
The scaffold generates a rebuild recipe with TODO in the USING clause — user must provide the value mapping manually (same pattern as column renames). + +6. **Enum rebuild fails if column has a default**: `ALTER COLUMN TYPE` fails when a default expression is bound to the old enum type. The rebuild recipe must drop defaults before the type change and recreate them after the rename. + +7. **Legacy MigrationBundle/MigrationPackage aliases**: `exports/types.ts` re-exports `BaseMigrationBundle` as `MigrationBundle` and `MigrationPackage` for backwards compat. One consumer (`migration-show.ts`) still uses `MigrationBundle`. Should be migrated to canonical types. + +8. **Draft-only repos show "No migrations found"**: When `attested.length === 0` but drafts exist, `migration status` returns early with no graph — draft edges aren't visualized. The diagnostic warns about drafts but the graph isn't rendered. + +9. **Chained draft edges dropped in graph rendering**: In `graph-migration-mapper.ts`, when a draft's `from` hash references another draft (not in `graph.nodes`), the dashed edge is silently dropped. Fix: traverse `input.draftEdges` chain to find an attested ancestor, or materialize intermediate draft nodes/edges. diff --git a/projects/graph-based-migrations/specs/april-milestone.md b/projects/graph-based-migrations/specs/april-milestone.md new file mode 100644 index 0000000000..1782f7cd82 --- /dev/null +++ b/projects/graph-based-migrations/specs/april-milestone.md @@ -0,0 +1,80 @@ +# April Milestone: Ready for External Contributions + +**Goal**: External authors can meaningfully contribute to Prisma Next — SQL database targets, Postgres extensions, middleware, framework integrations, and query DSL extensions. This accelerates progress towards EA/GA. + +**Key constraint**: System design decisions must be stable, even if user-facing APIs are still changing. Contributors need confidence that the interfaces they build against won't be reworked. 
+ +--- + +## Approach: architectural validation, not polish + +We have five weeks (Mar 31 – May 2, with an offsite in Week 3). The goal for each workstream is to **validate the architecture** — prove that the design decisions hold under real conditions. It is not to polish the experience for users. That's May. + +Each workstream is roughly independent, has a single owner, and that owner should progress through it as fast as possible. Each workstream has a priority-ordered queue of **validation points** (VP). Each VP identifies an architectural assumption to test, describes a user story that would prove it, lists the concrete tasks to get there, and defines a **stop condition** — the minimum result that answers the question. Everything beyond the stop condition is deferred. Work proceeds top-down: finish or explicitly stop VP1 before starting VP2. If you finish your workstream early, move to assist on another. + +The team's instinct is to perfect. The constraint is: prove the architecture works first, then we polish. + +--- + +## Workstreams + +### 1. Migration system + +The migration system uses a graph-based data structure for migration history. This is architecturally powerful but unfamiliar to every user coming from linear migration systems (Rails, Django, Prisma ORM, etc.). + +**Already validated**: Branch, diverge, merge, and conflict resolution — the graph model's core value proposition for team workflows is implemented and working. + +**Key risks**: + +- **Data migrations are the highest architectural risk.** The graph model's core invariant (route equivalence) breaks when data matters. If we can't extend routing to handle data invariants, the graph model may need fundamental rework — and we'd rather discover that now than after stabilizing the API. We have a theoretical model ([data-migrations.md](0-references/data-migrations.md), [data-migrations-solutions.md](0-references/data-migrations-solutions.md)) but it is entirely unproven. 
Prisma ORM had no data migration support, so we have no prior art to lean on. +- The graph-based model is our biggest UX bet. If common use cases aren't dead simple, the power of the graph is irrelevant. + +#### Priority queue + +**VP1: Data migrations work in the graph model** *(highest risk)* + +The entire migration graph is built on the assumption that any migration from contract state A to contract state B is functionally equivalent to any other A→B migration. Data migrations break this assumption — two databases at the same contract hash can have meaningfully different data. The routing model must be extended to define "done" as "contract hash H reached **and** required data invariants satisfied." + +User story: I can define a migration that includes a data transformation (e.g. split `name` into `firstName` + `lastName`). I can apply that migration on my local database and a production database. If there are two paths to the destination contract, I have a simple way to ensure that my CD pipeline for production chooses the path that includes my invariant. `plan`, `apply`, and `status` all operate in a way which is aware of the invariant. + +Tasks: + +1. **Design the data migration representation** — what does a data migration node look like in the graph? What does the runner execute? How is the postcondition checked? Decide between Model A (co-located with structural migrations) and Model B (independent). Key open decisions: routing policy when multiple invariant-satisfying routes exist, and the concrete format of environment refs. Output: a concrete type definition and a sketch of the runner changes, not a document. See [data-migrations.md](0-references/data-migrations.md) and [data-migrations-solutions.md](0-references/data-migrations-solutions.md) for the theoretical model. +2. **Implement a data migration end-to-end** — a migration that splits `name` into `firstName` + `lastName`, with a postcondition invariant. 
`plan`, `apply`, and `status` all understand "done = hash + invariant satisfied." +3. **Invariant-aware routing** — create a graph with two paths to the same contract hash, only one of which includes the data migration. The system selects the path that satisfies the required invariant. The user has a way to declare which invariants are required for a given environment. + +Stop condition: A graph with two paths to the same contract hash. One path includes a data migration (split `name` → `firstName` + `lastName`) with a postcondition invariant. `plan` selects the invariant-satisfying path for an environment that requires it. `apply` executes it. `status` reports "done" only when both the contract hash and the invariant are satisfied. Then stop — routing optimizations, invariant composition, and ref UX are May. + +**VP2: Users can author migrations by hand** *(table-stakes)* + +Manual SQL and data migrations both require this. Every migration system has an escape hatch where the user writes the migration themselves. + +User story: I run a command like `migration new <name>`, and the system scaffolds a migration file for me — pre-populated with the source and destination contract hashes and any boilerplate. I fill in the migration logic (raw SQL, data transformation, or both). The system integrates my hand-authored migration into the graph as a first-class node. I don't create files manually or write raw JSON. + +Tasks: + +1. **Design the manual authoring surface** — a TypeScript file (e.g. `migration.ts`) where the user uses utility functions to describe the migration, producing a migration data structure when executed. Structural migrations (raw DDL) and data migrations may have different authoring shapes — decide whether they share a surface or not. +2. **Scaffold command** — a CLI command (e.g. `migration new <name>`) that creates the migration file with the correct graph coordinates already filled in. 
The user shouldn't have to know how to describe "from this contract state to that contract state" — the CLI resolves hashes and generates the skeleton. This is how systems like Active Record work: you never create migration files by hand. +3. **Implement manual SQL migration** — user writes a `.ts` file with raw SQL, it becomes a node in the graph, the runner executes it. + +Stop condition: A user runs `migration new`, gets a scaffolded `.ts` file, writes raw SQL in it, and the runner executes it as a first-class graph node. Then stop — polish of the authoring API, documentation, and support for exotic migration shapes are May. + +**VP3: The graph scales with large contracts** *(quick pass/fail)* + +Every migration node encodes the full contract content. For projects with large contracts, this could cause performance and storage problems. + +Tasks: + +1. **Generate a 100+ model contract, create a series of migrations, measure** — graph operation time, migration history size on disk, plan/diff time. Pass/fail. + +Stop condition: Numbers in hand. If acceptable, move on. If not, file an issue with the measurements and move on — optimization is May. + +**Deferred (May)**: + +- Ergonomic graph operations (rebase, squash, etc.) +- Polished CLI visualization +- Planner coverage for every schema change case +- Refs UX validation (depends on data migration model existing first) +- Will users understand "refs"? (UX question — refs need to exist before we can test comprehension) + + diff --git a/projects/graph-based-migrations/specs/chat.md b/projects/graph-based-migrations/specs/chat.md new file mode 100644 index 0000000000..9ae5c4e5fb --- /dev/null +++ b/projects/graph-based-migrations/specs/chat.md @@ -0,0 +1,497 @@ +This is a really interesting design problem. Let me think through it carefully. +The core tension +You have content-addressable schema states where the schema hash determines identity. 
Pure structural migrations are path-independent — any sequence of DDL operations that gets you from state A to state B is equivalent. But data migrations break this: the path matters because data was transformed along the way, and different paths may leave data in different shapes even if the schema ends up identical. +So the question is: how do you track "what happened to the data" in a system whose fundamental abstraction is path-independent? +Approach 1: Data invariants as post-conditions on states +Your instinct about invariants is good but I'd refine it. Rather than tracking invariants accumulated along a path, attach data post-conditions to each state node. A post-condition is a predicate about the data that must hold after arriving at that state. For example: + +"All rows in users have a non-null email" +"orders.total equals the sum of order_items.price * quantity for that order" +"accounts.balance is denominated in cents (integer), not dollars (float)" + +The key insight: when you arrive at a state via different paths, the post-conditions must all hold regardless of path. If a data migration is required to establish a post-condition, then every path to that state must include a migration that establishes it. This makes post-conditions a property of the state, not the path, which fits your content-addressable model better. +The downside: this doesn't capture how the data was transformed, only what's true afterward. You lose provenance. +Approach 2: Typed edges with migration semantics +Make edges in your graph carry typed migration descriptors rather than arbitrary code. 
Instead of "run this script," an edge says: +rename_column(users, name -> full_name) +backfill(users.email_verified, default: false) +split_column(users.name -> users.first_name, users.last_name, separator: " ") +merge_tables(user_addresses + user_contacts -> user_info, key: user_id) +transform_column(orders.total, multiply: 100, comment: "dollars to cents") +This is your "recipes" idea, and I think it's the strongest approach. Here's why it solves the path problem: +Commutativity analysis. If your recipe set is small and well-defined, you can reason about which operations commute. rename_column and backfill on different columns commute. split_column and merge_tables on overlapping columns don't. This lets you determine whether two paths are data-equivalent even though they traverse different edges — you check whether the sequence of typed operations on each path produces the same result up to reordering of commutative ops. +Invertibility. Many recipes have natural inverses: rename(a→b) inverts with rename(b→a), split inverts with concat, backfill(default: X) inverts with drop_column. This means you can compute the net data transformation for a path and compare paths by their net effect. +Composition. Two recipes can often be composed into one: rename(a→b) then rename(b→c) collapses to rename(a→c). This gives you a kind of algebra over data migrations. +Approach 3: Hybrid — recipes + escape hatch with content hashing +In practice you'll hit cases that don't fit recipes. My suggestion: + +Recipes for the common 80%. Define a closed set of typed, invertible, composable data migration operations. These are first-class citizens in your graph edges. +Custom migrations as opaque transforms with declared invariants. When someone needs arbitrary code, they write a custom migration but must declare pre-conditions and post-conditions. 
The system can't reason about commutativity for these, so it treats them as barriers — any path containing a custom migration is incompatible with paths that don't contain it (or contain a different one) unless they arrive at a state whose post-conditions are satisfied. +Data state fingerprinting. For custom migrations, after execution, compute a fingerprint of the affected data (e.g., hash a sample or hash the column statistics). Store this on the state node. When two paths converge at a state, compare fingerprints. If they match, the paths were data-equivalent despite using different custom migrations. + +Approach 4: Separate the data migration graph +Another option entirely: don't mix data migrations into the schema graph at all. Maintain two parallel structures: + +Schema graph: content-addressable, path-independent, exactly as you have it. +Data migration log: a linear, append-only sequence of data transformations that have been applied to this specific database instance. + +The data migration log is instance-specific, not universal. When you deploy schema state X to a database, you look at that database's migration log and determine which data migrations still need to run. The schema graph stays clean and the messiness of data migration history lives where it belongs — on the specific database that has specific data. +This is arguably the most pragmatic approach. It acknowledges that data migrations are fundamentally about this data in this database, not about abstract state transitions. +My recommendation +I'd combine approaches 2 and 4: + +Keep your schema graph pure and content-addressable. +Define a recipe algebra for typed data migrations (rename, backfill, split, merge, transform, cast, etc.) with known commutativity and invertibility properties. +Attach recipes to edges in the schema graph as recommended data migrations, but track actual applied migrations per-database-instance in a separate log. 
+When computing a migration plan from state A to state B, find a path, collect the recipes along it, simplify/reorder using commutativity rules, and present a concrete plan. +For custom migrations, require declared post-conditions, treat them as barriers for path equivalence, and fingerprint the result. + +This gives you the theoretical cleanliness of content-addressable schema states, the practical power of typed composable data operations, and the honest acknowledgment that what actually happened to a specific database's data is an instance-level concern. +Want me to dig deeper into any of these? The recipe algebra in particular has some interesting design decisions around what the minimal useful set of operations would be and how to formalize commutativity.Yes I wanted to talk about the recipe algebra. Is there not some academic literature on something like this?Mar 28Good instinct — yes, there's quite a bit of relevant work, though it's scattered across several fields. Let me map out what's out there. +Bidirectional transformations (BX) +This is probably the most directly relevant body of work. The BX community studies "lenses" — pairs of transformations (get/put) that maintain consistency between two data structures. The seminal paper is Foster et al.'s "Combinators for Bidirectional Tree Transformations" (2007) from the Penn PLT group (Boomerang language). The key idea: you define small, composable, invertible transformations and compose them into larger ones, with well-defined laws governing round-tripping. +The lens laws are essentially your commutativity/invertibility requirements formalized: + +GetPut: if you read then write back, nothing changes. +PutGet: if you write then read, you get back what you wrote. + +Bohannon, Pierce, and Vaughan extended this to relational lenses specifically for database schemas in "Relational Lenses: A Language for Updatable Views" (PODS 2006). 
This is very close to what you want — they define combinators like select, project, join, and rename over relational data with bidirectional semantics. +Algebraic approaches to schema evolution +Curino et al.'s PRISM system (SIGMOD 2008, "Schema Evolution in Wikipedia") defines a closed set of Schema Modification Operators (SMOs) that are essentially your recipes: + +ADD COLUMN, DROP COLUMN, RENAME COLUMN +ADD TABLE, DROP TABLE, RENAME TABLE +MERGE TABLES, SPLIT TABLE +COPY COLUMN, MOVE COLUMN + +They proved that this set is complete for relational schema evolution — any schema change can be expressed as a composition of SMOs. More importantly for you, they showed how to automatically derive data migration queries from SMO sequences and how to reason about equivalence of SMO sequences through rewrite rules. This is almost exactly your recipe algebra. +Categorical database theory +David Spivak's work on functorial data migration (2012 and onward) takes a category-theoretic approach. A schema is a category (objects = tables, morphisms = foreign keys), and a migration is a functor between categories. The three fundamental operations are: + +Δ (pullback/restriction): analogous to projection, dropping structure +Σ (left pushforward): analogous to joining/merging, introducing unions +Π (right pushforward): analogous to product/pairing, introducing combinatorial products + +These three operations, composed freely, can express any data migration between relational schemas. The categorical framework gives you commutativity and associativity for free via functorial composition laws. Spivak implemented this in a tool called FQL/CQL (Categorical Query Language). +This is the most theoretically satisfying framework but probably overkill for a practical ORM. Still, the decomposition into Δ/Σ/Π is useful as a mental model for classifying your recipes. +Patch theory (Darcs, Camp, Pijul) +The version control world has studied exactly this problem in a different domain. 
Darcs's patch theory defines a set of operations with explicit commutation rules: for patches A and B, either they commute (AB = B'A' for some transformed B', A') or they conflict. This gives you: + +A formal framework for when reordering is safe +A way to compute "merge" when two branches of your graph reconverge +A theory of inverse patches + +Pijul (and the associated academic work by Samuel Mimram and Cinzia Di Giusto on "A Categorical Theory of Patches") took this further with a category-theoretic foundation. The parallel to your problem is direct: schema states are like file states, migrations are patches, and you want to know when two sequences of patches are equivalent. +What I'd actually use +For a practical recipe algebra in an ORM, I'd draw most heavily from PRISM's SMO framework, borrowing the categorical intuition from Spivak for the type system and the commutation machinery from patch theory. Concretely: +Define your recipes as a small set of typed operations, each with a known inverse and known commutation relations with every other operation. Build a rewrite system that can normalize any sequence of recipes into a canonical form. Two paths are data-equivalent iff their canonical forms are identical. +The PRISM paper is the most practical starting point. Spivak's CQL papers give you the theoretical foundation if you want to prove properties. The Boomerang/lens papers give you the compositional semantics. +Want me to sketch out what a concrete minimal recipe set might look like with explicit commutation rules?Yes, it doesn't feel like many of the SMOs actually have to do with data migrations. Maybe we can discuss or try to nearly exhaustively enumerate the possible scenarios in which a data migration is necessary due to a schema change?Mar 28You're right — most SMOs are pure structural changes where the data just moves mechanically. Let me think about when a schema change actually forces you to make decisions about data. 
+The key question is: when does a schema transition require information that isn't present in the old schema, the new schema, or the data itself? That's when you need a data migration — when there's an information gap that requires human intent. +Cases where data migration is necessary +1. Column added with a non-trivial default +New NOT NULL column where the value can't be a static default. Example: you add users.display_name and want it populated from first_name || ' ' || last_name. The schema change alone doesn't tell you how to derive the value. A static default ("" or "unknown") is mechanical, but a computed backfill is a data migration. +2. Column type change with lossy or ambiguous conversion +Changing price FLOAT to price INTEGER — do you truncate, round, or multiply by 100 (cents)? Changing VARCHAR to ENUM — what happens to values that don't match any variant? Changing timezone-naive to timezone-aware timestamps — what timezone do you assume for existing data? The schema diff says "type changed" but doesn't say how to map values. +3. Column split +One column becomes two or more. name → first_name + last_name. The schema says the old column is gone and new ones exist, but parsing/splitting logic requires intent. Where do you split? What about edge cases (mononyms, multiple spaces)? +4. Column merge +Two or more columns become one. first_name + last_name → full_name. Which goes first? What separator? What if one is null? +5. Table split (vertical or horizontal) +One table becomes two. Vertical: users → users + user_profiles (which columns go where is structural, but if any data transformation happens during the split, that's a migration). Horizontal: orders → active_orders + archived_orders — the partitioning predicate is a data migration decision. +6. Table merge +Two tables become one. Key conflicts, deduplication strategy, which record wins on conflict — these are all data decisions. +7. 
Semantic reinterpretation without structural change +This is the sneaky one. The schema hasn't changed at all (or barely), but the meaning of the data has changed. Example: price was in dollars, now it's in cents. status integer values meant {1: active, 2: inactive}, now they mean {1: pending, 2: active, 3: inactive}. Same column, same type, different semantics. Your content-addressable schema might not even detect this as a state change. +8. Denormalization / materialization +Adding a derived column that needs to be backfilled from a computation over related data. orders.item_count computed from COUNT(order_items). accounts.balance computed from SUM(transactions.amount). The schema says "new column," the data migration is the aggregation query. +9. Normalization / extraction +Pulling repeated data out into a new table. orders.customer_name, orders.customer_email → customers table + orders.customer_id FK. You need deduplication logic: what constitutes "same customer"? Match on email? Name? Both? +10. Key/identity changes +Changing a natural key to a surrogate key or vice versa. Changing a primary key column. Composite key → single key. This requires generating new identifiers and updating all references — the mapping from old identity to new identity is a data decision. +11. Encoding/format changes +Stored JSON blob restructuring. Changing text encoding. Changing date format strings. Changing how NULLs are represented in a domain sense (empty string vs NULL vs sentinel value). +12. Data-dependent constraint enforcement +Adding a UNIQUE constraint when duplicates exist — which duplicate do you keep? Adding a NOT NULL constraint when nulls exist — what value replaces them? Adding a CHECK constraint when violations exist — how do you fix violating rows? 
+What's NOT a data migration +For contrast, these are pure structural operations with no information gap: + +Adding a nullable column (NULL is the default, no decision needed) +Dropping a column (data is just discarded, no ambiguity) +Renaming a column (mechanical — though you might argue the mapping is information, it's typically inferred from the diff) +Adding/dropping an index (no data change) +Adding a FK constraint when data already satisfies it +Lossless type widening (INT → BIGINT, VARCHAR(50) → VARCHAR(100)) + +Patterns in the list +Looking at this, the cases cluster around a few fundamental information gaps: + +Value derivation: where does new data come from? (cases 1, 8) +Value mapping: how do old values map to new representations? (cases 2, 7, 11) +Structural decomposition: how does one thing become many? (cases 3, 5) +Structural composition: how do many things become one, and what about conflicts? (cases 4, 6, 9) +Identity transformation: how do old identities map to new ones? (case 10) +Constraint reconciliation: how do you fix data that violates new rules? (case 12) + +If your recipe set covers these six categories, I think you'd capture nearly everything. Want to work through what the concrete recipes for each category would look like?Sure, but we should aim to think in tiny generic operators that have useful propertiesMar 28Right. Let me think about what the minimal set of atomic operators would be, where each one does exactly one thing and has clear algebraic properties. +The goal is: each operator captures exactly one decision about data, is as small as possible, and composes into the complex scenarios above. +The atoms +backfill(table, column, expression) +Populates a column's values from an expression over the same row. This is the most fundamental operator — it's the one that introduces new information. The expression could reference other columns in the same row. 
+ +Inverse: none in general (it's a lossy overwrite of whatever was there before, typically NULLs) +Commutes with: any backfill on a different column, any operator on a different table +Doesn't commute with: another backfill on the same column (last write wins) + +map(table, column, mapping) +Transforms existing values in a column according to a mapping function. This is distinct from backfill because the input is the column's own current value, not other columns. + +Examples: multiply(100), cast(int, round), lookup({1: "active", 2: "inactive"}), regex_replace(...) +Inverse: only if the mapping is bijective. multiply(100) inverts with divide(100). lookup inverts if the map is bijective. Lossy mappings (like round) have no inverse. +Commutes with: map/backfill on different columns, operators on different tables +Doesn't commute with: another map on the same column (composition order matters unless both are linear) + +copy(source_table, source_column, target_table, target_column, join_condition) +Copies data from one table/column to another via a join. This is the cross-table version of backfill. + +Covers: denormalization, materialization, moving data during table splits +Inverse: copy in the other direction (if data still exists) +Commutes with: copies involving disjoint target columns + +deduplicate(table, match_columns, strategy) +Reduces multiple rows to one based on a match key, with a strategy for which row wins (first, latest, max(column), etc.). + +Covers: normalization/extraction, table merges with conflicts, unique constraint enforcement +Inverse: none (information is destroyed) +Commutes with: operators on different tables + +filter(table, predicate, action) +Handles rows that match a predicate. Action is either delete or move(target_table). + +Covers: horizontal table splits, constraint reconciliation (delete violating rows), archival +Inverse: move is invertible if you track where rows went. delete is not. 
+Commutes with: filters on different tables, filters on same table with disjoint predicates + +generate(table, column, strategy) +Produces new values that don't derive from existing data. Strategy is something like uuid, sequence, hash(columns...). + +Covers: surrogate key introduction, identity changes +Inverse: none (generated values are arbitrary) +Commutes with: almost everything (it's self-contained) + +That might be it +Let me check against the scenarios: +ScenarioDecompositionComputed backfillbackfillLossy type changemapColumn splitbackfill(t, new1, expr1) + backfill(t, new2, expr2)Column mergebackfill(t, merged, expr_over_old_cols)Table split (vertical)copy source→target for moved columnsTable split (horizontal)filter(t, predicate, move(t2))Table mergecopy + deduplicate if conflictsSemantic reinterpretationmapDenormalizationcopy with joinNormalization/extractioncopy + deduplicateKey changegenerate + copy (to update FKs)Encoding changemapConstraint reconciliationfilter(delete) or map to fix values or backfill to fill nulls +Six operators, covers everything I could enumerate. Notably missing from most schema migration tools: deduplicate and filter are rarely first-class, but they capture real decisions people make. +Commutativity matrix +The short version: two operators commute if they touch disjoint (table, column) pairs. When they overlap: + +backfill then map on same column ≠ map then backfill (backfill overwrites what map produced, vs map transforms what backfill produced) +map then map on same column = function composition (order matters, m2 ∘ m1 ≠ m1 ∘ m2 in general) +filter(delete) then backfill ≠ backfill then filter(delete) (you might backfill rows that get deleted, or delete rows before backfill — different work but same result if the filter predicate doesn't reference the backfilled column) + +So the precise rule would be: two operators commute iff their read sets and write sets are disjoint. 
An operator's read set is every (table, column) it reads; write set is every (table, column) it modifies. This is exactly the Bernstein conditions from concurrency theory, which is a nice property — it means you get canonical forms via topological sort of the dependency graph. +What this gives you +With these six operators, you can: + +Normalize any migration path into a canonical sequence by sorting commutative operators into a fixed order +Compare paths by checking if their canonical forms are identical +Detect conflicts at graph merge points by checking if non-commutative operators appear in different orders +Partially invert a migration by inverting the invertible subset and flagging the rest as lossy + +The expression language inside backfill and map is where most of the real complexity hides. You could keep that as opaque SQL/code initially and still get useful algebraic reasoning from the operator structure alone. Or you could define a small expression DSL and get deeper analysis — but that's a separable decision. +What's your reaction? Does this feel like the right granularity?Let's walk through a real complicated example with made up tables and see what would happenMar 28Let's do it. I'll construct something messy enough to be realistic. 
+Starting schema +An e-commerce system that grew organically: +orders ( + id SERIAL PRIMARY KEY, + customer_name VARCHAR, + customer_email VARCHAR, + customer_phone VARCHAR, + shipping_street VARCHAR, + shipping_city VARCHAR, + shipping_zip VARCHAR, + shipping_country VARCHAR(2), + total FLOAT, -- dollars, like 29.99 + status INTEGER, -- 1=pending, 2=paid, 3=shipped, 4=delivered, 5=cancelled + notes TEXT, -- free text, sometimes contains JSON, sometimes plain text + created_at TIMESTAMP, -- timezone-naive, was always stored in US Eastern + updated_at TIMESTAMP +) + +order_items ( + id SERIAL PRIMARY KEY, + order_id INTEGER REFERENCES orders, + product_name VARCHAR, + product_sku VARCHAR, + unit_price FLOAT, + quantity INTEGER +) +Target schema +Someone got their act together: +customers ( + id UUID PRIMARY KEY, + name VARCHAR NOT NULL, + email VARCHAR NOT NULL UNIQUE, + phone VARCHAR +) + +addresses ( + id UUID PRIMARY KEY, + customer_id UUID REFERENCES customers, + street VARCHAR NOT NULL, + city VARCHAR NOT NULL, + zip VARCHAR NOT NULL, + country VARCHAR(2) NOT NULL, + label VARCHAR -- 'shipping', 'billing', etc. 
+) + +products ( + id UUID PRIMARY KEY, + name VARCHAR NOT NULL, + sku VARCHAR NOT NULL UNIQUE +) + +orders ( + id UUID PRIMARY KEY, + legacy_id INTEGER UNIQUE, + customer_id UUID REFERENCES customers NOT NULL, + shipping_address_id UUID REFERENCES addresses NOT NULL, + total_cents BIGINT NOT NULL, + status VARCHAR NOT NULL CHECK (status IN ('pending','paid','shipped','delivered','cancelled')), + created_at TIMESTAMPTZ NOT NULL, + updated_at TIMESTAMPTZ NOT NULL +) + +order_items ( + id UUID PRIMARY KEY, + order_id UUID REFERENCES orders NOT NULL, + product_id UUID REFERENCES products NOT NULL, + unit_price_cents BIGINT NOT NULL, + quantity INTEGER NOT NULL +) + +order_notes ( + id UUID PRIMARY KEY, + order_id UUID REFERENCES orders NOT NULL, + content TEXT NOT NULL, + is_structured BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMPTZ NOT NULL +) +Now let's decompose +The structural diff is huge — new tables, dropped columns, type changes everywhere. But the data migration decisions are the interesting part. Let me walk through each operator needed: +Step 1: Generate surrogate keys +generate(orders, _new_uuid, uuid) +generate(order_items, _new_uuid, uuid) +We need UUIDs for every existing row before we can rewire references. These are pure generation — no decision, no data dependency. +Step 2: Extract customers +This is the hardest part. Multiple orders might be from the "same" customer but with inconsistent data. First we need to decide what "same" means: +deduplicate(orders, [email], strategy: latest(updated_at)) + → produces: customers_staging(name, email, phone) +This is one operator but it encodes a big decision: we're matching on email and taking the most recent name/phone when there are conflicts. Someone might reasonably want match on email+name or keep all phone numbers. The choice of match columns and strategy is the data migration. 
+Then generate identities for the new entities: +generate(customers_staging, id, uuid) +Step 3: Extract addresses +Similar problem — deduplicate and extract: +deduplicate(orders, [shipping_street, shipping_city, shipping_zip, shipping_country], strategy: first) + → produces: addresses_staging(street, city, zip, country) +generate(addresses_staging, id, uuid) +backfill(addresses_staging, customer_id, + lookup via orders.customer_email → customers_staging.email → customers_staging.id) +backfill(addresses_staging, label, literal("shipping")) +That backfill of customer_id is a copy really — it's a cross-table join: +copy(customers_staging, id, addresses_staging, customer_id, + join: addresses_staging._source_email = customers_staging.email) +Step 4: Extract products +deduplicate(order_items, [product_sku], strategy: latest) + → produces: products_staging(name, sku) +generate(products_staging, id, uuid) +Decision here: when the same SKU has different product names across orders, which name wins? +Step 5: Rewire orders +copy(customers_staging, id, orders, customer_id, + join: orders.customer_email = customers_staging.email) +copy(addresses_staging, id, orders, shipping_address_id, + join: orders.shipping_* = addresses_staging.*) +Step 6: Rewire order_items +copy(products_staging, id, order_items, product_id, + join: order_items.product_sku = products_staging.sku) +copy(orders, _new_uuid, order_items, _new_order_id, + join: order_items.order_id = orders.id) +Step 7: Transform values +Now the type/semantic changes: +map(orders, total, multiply(100)) -- dollars → cents +map(order_items, unit_price, multiply(100)) -- dollars → cents +map(orders, status, lookup({ + 1: "pending", 2: "paid", 3: "shipped", + 4: "delivered", 5: "cancelled" +})) +map(orders, created_at, assume_tz("America/New_York")) +map(orders, updated_at, assume_tz("America/New_York")) +Every one of these is a decision. The multiply(100) assumes prices were in whole dollars and cents, not fractional cents. 
The timezone assumption could be wrong for some records. +Step 8: Handle the notes column +This is messy — sometimes JSON, sometimes plain text, going into a separate table: +copy(orders, notes, order_notes, content, + join: order_notes.order_id = orders._new_uuid) +backfill(order_notes, is_structured, expr: is_valid_json(content)) +backfill(order_notes, created_at, expr: source.orders.created_at) +filter(order_notes, predicate: content IS NULL, action: delete) +Full operator sequence +-- identity generation (all commute with each other) +generate(orders, _new_uuid, uuid) +generate(order_items, _new_uuid, uuid) + +-- extraction (deduplicate → generate → copy pattern) +deduplicate(orders, [email], latest(updated_at)) → customers_staging +generate(customers_staging, id, uuid) +deduplicate(orders, [shipping_street, shipping_city, shipping_zip, shipping_country], first) → addresses_staging +generate(addresses_staging, id, uuid) +deduplicate(order_items, [product_sku], latest) → products_staging +generate(products_staging, id, uuid) + +-- cross-references (copies, order matters within a table) +copy(customers_staging, id, addresses_staging, customer_id, ...) +backfill(addresses_staging, label, "shipping") +copy(customers_staging, id, orders, customer_id, ...) +copy(addresses_staging, id, orders, shipping_address_id, ...) +copy(products_staging, id, order_items, product_id, ...) +copy(orders, _new_uuid, order_items, _new_order_id, ...) + +-- value transforms (commute with each other, different columns) +map(orders, total, multiply(100)) +map(order_items, unit_price, multiply(100)) +map(orders, status, lookup({1:"pending", ...})) +map(orders, created_at, assume_tz("America/New_York")) +map(orders, updated_at, assume_tz("America/New_York")) + +-- notes extraction +copy(orders, notes, order_notes, content, ...) 
+backfill(order_notes, is_structured, is_valid_json(content))
+filter(order_notes, content IS NULL, delete)
+What we can see now
+The dependency graph has clear phases. The generate ops have no dependencies. The deduplicate ops depend only on source data. The copy ops depend on generate and deduplicate having run. The map ops on orders are independent of the extraction ops (they touch different columns). So there's significant parallelism and the canonical ordering is a partial order, not a total one.
+If someone took a different path through the schema graph — say, an intermediate state where they extracted customers first, then later did the price conversion in a separate migration — the canonical form would be the same, because deduplicate(orders, [email], ...) commutes with map(orders, total, multiply(100)) (disjoint read/write sets).
+But if one path used deduplicate(orders, [email], latest) and another used deduplicate(orders, [name, email], first), those are genuinely different migrations and the system should flag them as incompatible.
+What jumps out at you? I'm noticing that deduplicate is doing a lot of heavy lifting and might want to be decomposed further.
+
+Perhaps. But I'm wondering whether we can realistically generate these from a diff. We know the before and after state of the database, but without at least asking the user questions like "did you create this column or did you rename from another" and so on, there's no way for us to know. But even just asking the user the right questions — in other words, knowing the set of possible operations that can transform one database to the other — seems hard.
+
+Mar 28
+
+You're hitting the fundamental problem. The structural diff between two schemas is ambiguous — it's a set of column additions and removals, and you can't tell whether a removed column and an added column are actually a rename, a split, a semantic reinterpretation, or genuinely unrelated.
+Let me think about what the diff actually gives us and where the ambiguity lives. +What a structural diff produces +Given before and after schemas, you can mechanically determine: + +Tables added +Tables removed +Columns added (within a surviving table) +Columns removed (within a surviving table) +Type changes (same table, same column name, different type) +Constraint changes (nullability, uniqueness, checks, FKs) + +That's it. Everything else is interpretation. +The ambiguity space +When you see a removed column and an added column, the possible explanations are: + +Unrelated: old column was dropped, new column is genuinely new +Rename: same data, new name +Rename + transform: same data, new name, values changed (rename + map) +Split: old column became multiple new columns +Merge: multiple old columns became one new column + +When you see a removed table and an added table: + +Unrelated: old table dropped, new table is fresh +Rename: same table, new name +Split: one table became two +Merge: two tables became one +Normalization/extraction: data pulled out into new table with FK back + +And you literally cannot distinguish these from the diff alone. A rename looks exactly like a drop + add. +But we can narrow it down with heuristics +Here's what I think is actually tractable. 
You score candidate interpretations: +Column-level signals: + +Same name, different type → almost certainly a type change, ask about conversion strategy +Removed column + added column with same type in same table → likely rename, ask to confirm +Removed column + multiple added columns of compatible types → possible split +Multiple removed columns + one added column → possible merge +Added NOT NULL column with no default → definitely needs a backfill decision +New FK column pointing at a new table, old table had denormalized columns matching the new table's shape → almost certainly normalization/extraction + +Table-level signals: + +Removed table + new table with subset of same columns → likely rename or evolution +Removed table + new table + new FK from existing table to new table → extraction +Two removed tables + one new table with union of columns → merge +One removed table + two new tables that partition its columns → vertical split + +Name similarity: + +Levenshtein or token overlap between removed and added names (customer_name → name on a new customers table is a strong signal) + +The question tree +Rather than generating the migration, you generate a set of questions from the diff. Each question resolves one ambiguity. Something like: +Diff shows: orders.customer_name removed, customers.name added + +Possible interpretations: + a) orders.customer_name was extracted to customers.name (confidence: high) + → implies: deduplicate + copy + b) orders.customer_name was dropped, customers.name is unrelated (confidence: low) + → implies: backfill(customers, name, ???) + +Question: "Was customers.name extracted from orders.customer_name?" + If yes → follow-up: "How should duplicate customer names be resolved?" 
+ Options: [keep latest, keep first, manual review] +So the system would: + +Compute the structural diff +Generate candidate interpretations ranked by heuristic confidence +For each ambiguity, produce a question +User answers collapse the ambiguity into concrete operators +Any remaining "added NOT NULL column" without a source gets a backfill question + +What questions are actually needed +Let me enumerate the question types: +"Is this a rename?" — shown whenever a column/table disappears and a plausibly similar one appears. This is the most common question. If yes, no data migration needed (just structural). If no, you might need a backfill or a copy. +"What's the source?" — for any new NOT NULL column that doesn't match a removed column. "Where does this data come from?" Options: static default, expression over existing columns, external source (implies manual migration). +"How to convert?" — for type changes that aren't lossless. FLOAT → BIGINT: multiply by what? TIMESTAMP → TIMESTAMPTZ: assume what timezone? INTEGER → VARCHAR with a CHECK constraint: what's the mapping? +"How to deduplicate?" — whenever an extraction is detected (denormalized data → new table). "Multiple orders have different values for customer_name with the same email. Which wins?" +"What about violations?" — when a new constraint is added. "47 rows have NULL in a now-NOT-NULL column. What value should they get?" Or "12 rows violate the new UNIQUE constraint. How to resolve?" +"How to partition?" — if a table split is detected. "What determines which rows go to which table?" 
+The realistic workflow
+I think the honest design is:
+
+User defines before and after contracts (your content-addressable states)
+System computes structural diff
+System runs heuristics to generate a draft migration plan with high-confidence interpretations pre-filled and low-confidence ones as questions
+User answers questions and confirms/overrides the draft
+System emits a concrete sequence of your six operators
+That operator sequence is stored on the edge in the graph
+
+The recipe algebra isn't what the user interacts with directly — it's the compilation target. The user interacts with questions and answers. The system compiles those answers into operators. The operators are what get stored, compared, and reasoned about algebraically.
+This also means you can re-derive the questions from the diff at any time, and you can validate that a stored operator sequence is consistent with the diff it claims to implement.
+The hard part isn't really the algebra — it's making the heuristics good enough that users typically only need to answer 2-3 questions rather than 15. Does that feel right to you?
+
+I think the hard part is actually the algebra, to be honest. I'm not convinced it covers all scenarios.
+
+Mar 28
+
+Fair. Let me try to break it. Let me think about what scenarios might not decompose cleanly into the six operators.
+Trying to break the algebra
+Multi-row aggregation backfill
+You add orders.item_count and want it populated from COUNT(order_items WHERE order_items.order_id = orders.id). This isn't backfill because backfill is defined as an expression over the same row. It's not copy because copy moves a value from one place to another — there's no single source value, there's an aggregation.
+We need something like aggregate(source_table, target_table, target_column, join, agg_function). Or we stretch copy to include aggregation, but then copy is doing two things.
+Conditional transformation
+map(orders, status, lookup({1: "pending", ...})) works when the mapping is total.
But what if you need: "if created_at is before 2020, the status values meant something different"? The mapping depends on another column. map as defined takes one column's value as input. A conditional map is really backfill with a CASE expression, but then backfill and map blur together. +Actually — is map just a special case of backfill where the expression only references the column itself? If so maybe we should collapse them. +Row-level splitting +One row becomes multiple rows. A tags VARCHAR column containing "red,blue,green" gets normalized into a tags junction table with three rows. None of our operators create new rows from existing data. deduplicate reduces rows, filter removes rows, copy copies values but the row count in the target is determined by the join. Actually maybe copy with the right join does handle this — if order_tags is pre-populated with one row per tag value, then copy fills in the FK. But who creates those rows? The unpacking of "red,blue,green" into three rows is a new operation. +Call it expand(source_table, source_column, target_table, split_strategy) maybe. Or unnest. +Cross-row data dependency +"Set each employee's manager_name based on looking up employees.name where employees.id = employees.manager_id." This is a self-join copy. copy handles this if we allow source and target to be the same table, but it's worth noting as a case. +Ordered/positional operations +"Split the address text field into street (everything before the first newline) and city_state_zip (everything after)." This is positional parsing. It's a backfill with a complex expression, which is fine, but it means the expression language inside backfill needs to be pretty powerful — regex, string splitting, array indexing. +External data enrichment +"Look up country codes from an external geocoding API to populate addresses.country." No operator handles this because the data source is outside the database. 
This might be out of scope — you could say "custom migration" — but it does happen. +Revised operator set +I think the honest minimal set is: +derive(table, column, expression) — collapses backfill and map into one. The expression can reference any column in the same row, including the target column itself. map is just derive where the expression only reads the column being written. This is cleaner. +derive_across(target_table, target_column, source_table, join, expression) — the cross-table version. Expression can include aggregates. This collapses copy and handles the aggregation case. The join determines row matching, the expression determines the value. +deduplicate(table, match_columns, strategy) — unchanged. +expand(source_table, source_column, target_table, strategy) — one row becomes many. Strategy is something like split(delimiter), unnest_json_array, regex_matches. Produces rows in the target with an FK back to the source. +filter(table, predicate, action) — unchanged. +generate(table, column, strategy) — unchanged. +So: derive, derive_across, deduplicate, expand, filter, generate. +Still six, but different six. The big change is derive absorbing backfill and map, derive_across absorbing copy and adding aggregation, and expand being new. +What's still missing +The expression language inside derive and derive_across is doing a ton of work. derive(orders, total, self * 100) and derive(orders, status, CASE WHEN created_at < '2020' THEN ... END) are wildly different in complexity. If we leave the expression language as "arbitrary SQL" we get completeness but lose the ability to reason about commutativity at the expression level. +There might also be cases I haven't thought of. What does your intuition say — does anything come to mind that doesn't fit these six? 
diff --git a/projects/graph-based-migrations/specs/data-migration-scenarios.md b/projects/graph-based-migrations/specs/data-migration-scenarios.md new file mode 100644 index 0000000000..6a119e0e5e --- /dev/null +++ b/projects/graph-based-migrations/specs/data-migration-scenarios.md @@ -0,0 +1,257 @@ +# Data Migration Scenarios + +A near-exhaustive enumeration of schema evolutions that require data migrations. For each scenario: what the user is doing, why a purely structural migration is insufficient, what information gap the user must fill, and how it maps to our solution. + +**Authoring model**: Data migrations are authored using the ORM/query builder, serialized to JSON ASTs at verification time, and rendered to SQL by the target adapter at apply time. Scenarios that can't be expressed via the query builder are noted. + +--- + +## S1. Computed backfill (new NOT NULL column) + +**Schema change**: Add a new NOT NULL column to an existing table. + +**Example**: Add `display_name VARCHAR NOT NULL` to `users`. + +**Why data migration is needed**: The column must have a value for every existing row before the NOT NULL constraint can be applied. A static default may not be appropriate — the value might need to be derived from existing data (e.g., `display_name = first_name || ' ' || last_name`). + +**Information gap**: Where does the value come from? The system knows a value is needed but can't know the derivation logic. + +**Solution fit**: Planner auto-detects (NOT NULL without default). User authors an UPDATE via the query builder. Phasing ensures the nullable column exists (phase 1) before the data migration populates it (phase 2), then NOT NULL is applied (phase 3). Check query: `client.users.findFirst({ where: { displayName: null } })`. + +--- + +## S2. Lossy type change + +**Schema change**: Change a column's type where the conversion is ambiguous or lossy. + +**Example**: `price FLOAT` → `price_cents BIGINT`, or `status INTEGER` → `status VARCHAR`. 
+ +**Why data migration is needed**: The database can't infer the conversion. `FLOAT 29.99` → `BIGINT` could mean `30` (round), `29` (truncate), or `2999` (multiply by 100). An integer enum `{1,2,3}` → varchar requires a lookup table the system doesn't have. + +**Information gap**: What is the mapping from old values to new values? + +**Solution fit**: Planner auto-detects (non-widening type change). For same-column changes, uses temp column strategy — creates a temp column of the target type, user writes the conversion, planner drops original and renames temp. For column renames with type change, standard phasing handles it (add new column, data migration, drop old). Query builder expressiveness for conversion expressions (multiply, cast, lookup) needs validation (OQ-5). + +--- + +## S3. Column split + +**Schema change**: Remove one column, add two or more columns. + +**Example**: `name VARCHAR` → `first_name VARCHAR` + `last_name VARCHAR`. + +**Why data migration is needed**: The system sees a column removal and column additions — it doesn't know they're related. Even if it infers a split, the parsing logic is ambiguous (split on space? first space? last space? what about mononyms?). + +**Information gap**: How is the single value decomposed into multiple values? + +**Solution fit**: Planner detects the NOT NULL adds and scaffolds. User writes an UPDATE using the query builder with expression functions (e.g., `split_part`). Old `name` column available in phase 2 because drops are phase 3. Check query: `client.users.findFirst({ where: { firstName: null } })`. This is the April VP1 canonical example. + +--- + +## S4. Column merge + +**Schema change**: Remove two or more columns, add one column. + +**Example**: `first_name` + `last_name` → `full_name`. + +**Why data migration is needed**: The system can't know the combination logic — concatenation order, separator, null handling. + +**Information gap**: How are multiple values combined into one? 
+ +**Solution fit**: Same mechanics as S3 — planner detects NOT NULL add, user writes UPDATE via query builder, old columns available in phase 2. + +--- + +## S5. Table split (vertical) + +**Schema change**: One table becomes two, with a FK connecting them. + +**Example**: `users(id, name, email, bio, avatar_url)` → `users(id, name, email)` + `user_profiles(id, user_id FK, bio, avatar_url)`. + +**Why data migration is needed**: Existing rows in `users` need corresponding rows created in `user_profiles`, with data copied to the right columns and FKs set up correctly. + +**Information gap**: Which columns move to which table? (Often inferrable from the schema diff, but the actual data copying still requires execution.) + +**Solution fit**: Planner detects NOT NULL FK on new table. User writes INSERT...SELECT via the query builder to copy data from the old table to the new one. Old columns available in phase 2. FK constraint applied in phase 3. Requires the query builder to support INSERT...SELECT (OQ-5). + +--- + +## S6. Table split (horizontal) + +**Schema change**: One table becomes two with the same schema, rows partitioned between them. + +**Example**: `orders` → `active_orders` + `archived_orders`. + +**Why data migration is needed**: The system has no way to know the partitioning predicate — which rows go where. + +**Information gap**: What predicate determines which rows go to which table? + +**Solution fit**: **Detection gap** — may not trigger auto-detection if new tables don't have NOT NULL columns without defaults (OQ-4). User must manually create the data migration. The execution model works: user writes INSERT...SELECT with WHERE predicates via the query builder, old table available in phase 2. Requires INSERT...SELECT support (OQ-5). + +--- + +## S7. Table merge + +**Schema change**: Two tables become one. + +**Example**: `user_addresses` + `user_contacts` → `user_info`. + +**Why data migration is needed**: Key conflicts and deduplication. 
If both tables have a row for the same user, which values win? How are columns mapped? + +**Information gap**: Deduplication strategy and column mapping. + +**Solution fit**: Planner detects NOT NULL columns on new table. User writes INSERT...SELECT and UPDATE with joins via the query builder. Old tables available in phase 2. Complex deduplication logic may push query builder expressiveness (OQ-5). + +--- + +## S8. Semantic reinterpretation (no structural change) + +**Schema change**: None — the schema is identical, but the meaning of values changes. + +**Example**: `price` column was storing dollars, now represents cents. Or status integer codes are remapped. + +**Why data migration is needed**: The contract hash doesn't change (schema is identical), but existing data needs transformation. Without a data migration, existing rows silently have wrong semantics. + +**Information gap**: What is the old-to-new value mapping? (Note: this is also a case where the planner cannot auto-detect the need — the user must initiate it.) + +**Solution fit**: **Out of scope for v1.** No structural change means no graph edge to attach the data migration to. Related to "pure data migrations (A→A)" which are deferred. The user would need to pair this with a structural change (e.g., rename the column) or handle it outside the migration system. + +--- + +## S9. Denormalization / materialized column + +**Schema change**: Add a column whose value is derived from data in other tables. + +**Example**: Add `orders.item_count` backfilled from `COUNT(order_items WHERE order_id = orders.id)`. + +**Why data migration is needed**: The value is a cross-table aggregation — can't be expressed as a column default. Requires a query against related tables to populate. + +**Information gap**: What is the aggregation/derivation expression, and which tables/joins are involved? + +**Solution fit**: Planner detects NOT NULL add. User writes an UPDATE with subquery via the query builder. 
Requires subquery/aggregation support in UPDATE context (OQ-5). + +--- + +## S10. Normalization / extraction + +**Schema change**: Denormalized data in an existing table is extracted into a new table with a FK back. + +**Example**: `orders(customer_name, customer_email)` → new `customers(id, name, email)` table + `orders.customer_id FK`. + +**Why data migration is needed**: Rows in the new table must be created from distinct values in the existing table (deduplication), then FKs in the original table must be populated to point at the new rows. + +**Information gap**: What constitutes a "unique" entity for deduplication (match on email? name? both?)? When duplicates have conflicting values, which wins? + +**Solution fit**: Planner detects NOT NULL columns on new table and NOT NULL FK. User writes INSERT...SELECT DISTINCT and UPDATE with joins via the query builder. Complex scenario — deduplication, UUID generation, FK wiring. May push query builder expressiveness limits (OQ-5). FK constraint applied in phase 3 catches incomplete wiring. + +--- + +## S11. Key / identity change + +**Schema change**: Change the primary key column type or strategy. + +**Example**: `users.id SERIAL` → `users.id UUID`, with all FK references updated. + +**Why data migration is needed**: New identifiers must be generated for every existing row. Every table with a FK referencing the old key must be updated to use the new identifier. This cascades across the entire FK graph. + +**Information gap**: How are new identifiers generated (UUIDv4? UUIDv7? deterministic hash of old ID?)? The cascading FK updates are mechanical but must be orchestrated correctly. + +**Solution fit**: Planner detects type changes per-column, but **cross-table coordination is a gap** (OQ-3). The planner would need FK graph awareness to emit coordinated temp columns across all referencing tables. For v1, the user authors this manually via `migration new`. 
The execution model works — the query builder can express the UPDATE with joins — but scaffolding is manual. + +--- + +## S12. Encoding / format change + +**Schema change**: Column type may or may not change, but the encoding or format of stored values changes. + +**Example**: JSON blob restructured (keys renamed, nesting changed), date format changed (`YYYY-MM-DD` → ISO 8601 with timezone), timestamp becomes timezone-aware (`TIMESTAMP` → `TIMESTAMPTZ` — what timezone to assume for existing values?). + +**Why data migration is needed**: Existing values need transformation to match the new format. The system can't know the mapping without user input. + +**Information gap**: What is the transformation from old format to new? For timezone-naive → timezone-aware: what timezone should be assumed for existing data? + +**Solution fit**: Type changes (TIMESTAMP → TIMESTAMPTZ) are auto-detected. User writes conversion via query builder. Same-type format changes (JSON restructuring) are S8 territory — no structural diff, deferred. Query builder needs to support timezone and JSON functions (OQ-5). + +--- + +## S13. Data-dependent constraint enforcement + +**Schema change**: Add a constraint (UNIQUE, NOT NULL, CHECK, FK) to a column that may have existing violations. + +**Example**: Add `UNIQUE` to `users.email` when duplicates exist. Add `NOT NULL` to `users.phone` when nulls exist. Add `CHECK (age >= 0)` when negative values exist. + +**Why data migration is needed**: The constraint will be rejected by the database if existing data violates it. The user must decide how to resolve violations before the constraint can be applied. + +**Information gap**: How to handle violations — delete duplicates (which one to keep?), replace nulls (with what value?), fix invalid values (how?)? + +**Solution fit**: **Op partitioning gap** (OQ-1). Constraint additions (UNIQUE, CHECK) are classified as `additive` and would run in phase 1, before the data migration. 
They need to run in phase 3, after violations are fixed. Workaround: user can use `migration new` with manual op ordering. Proper fix requires the operation dependency model. + +--- + +## S14. Data seeding + +**Schema change**: Add a new table that must contain reference data before other tables can FK to it. + +**Example**: Add a `countries` lookup table. Existing `addresses.country_code` column becomes a FK to `countries.code`. The `countries` table must be populated with reference data first. + +**Why data migration is needed**: The new table is empty after creation. FK constraints on existing tables will fail unless the reference data exists. This data doesn't come from existing tables — it's external. + +**Information gap**: What is the reference data? (Often a static dataset, but the system can't generate it.) + +**Solution fit**: Planner detects NOT NULL FK. User writes INSERT statements via query builder to seed the reference data, then UPDATE to wire FKs. Check convention: `return false` (always run — seeding is idempotent via upsert or the check doesn't map cleanly to "find violations"). Table created in phase 1, FK applied in phase 3. + +--- + +## S15. Soft delete ↔ hard delete + +**Schema change**: Add or remove a `deleted_at TIMESTAMP` column, changing deletion semantics. + +**Example (soft → hard)**: Remove `deleted_at` column. Rows where `deleted_at IS NOT NULL` must be actually deleted first, or the "deleted" data persists in the table without any marker. + +**Example (hard → soft)**: Add `deleted_at` column. Existing rows should have `deleted_at = NULL` (trivial — nullable column add). But if there's historical data about deletions elsewhere (audit log), the user may want to backfill. + +**Why data migration is needed**: Removing `deleted_at` without deleting soft-deleted rows leaves ghost data. The system can detect the column removal but can't know that rows need filtering. 
+ +**Information gap**: Should soft-deleted rows be hard-deleted, archived to another table, or left as-is? + +**Solution fit**: **No auto-detection** — the planner can't infer semantic meaning of `deleted_at` from a column drop. User creates the data migration manually. The execution model works: user writes DELETE via query builder, old column available in phase 2 (drop is phase 3). + +--- + +## S16. Encryption / hashing — OUT OF SCOPE + +**Schema change**: Column type may change (e.g., `VARCHAR` → `BYTEA`), or stay the same but values must be transformed. + +**Example**: `users.password VARCHAR` (plaintext) → bcrypt hashed. `users.ssn VARCHAR` → encrypted with application-level key. + +**Why data migration is needed**: Values must be irreversibly transformed. The transformation requires external dependencies (bcrypt library, encryption keys) that are outside the database. + +**Out of scope**: This requires application-level code (Node.js crypto libraries, key management) that cannot be expressed as database queries. The query builder cannot produce bcrypt hashes or encrypt with application keys. Must be handled outside the migration system — e.g., a separate script run before or after the migration. + +--- + +## S17. Audit trail backfill — PARTIALLY SUPPORTED + +**Schema change**: Add `created_by`/`modified_by`/`created_at` columns that should be populated from historical data. + +**Example**: Add `users.created_by UUID REFERENCES admins(id) NOT NULL` to an existing table. + +**Why data migration is needed**: Historical values may exist in audit logs, another table, or an external system. A static default ("system") may be acceptable but loses provenance. + +**Information gap**: Where does the historical data come from? Is a static default acceptable, or must real values be sourced? + +**Solution fit**: Planner detects NOT NULL add. If the source is another database table (e.g., audit log table), the user can write an UPDATE with join via the query builder. 
If the source is an external system, this is out of scope (same as S16 — requires application-level code). A static default is trivially expressible. + +--- + +## S18. Multi-tenant isolation + +**Schema change**: Add a `tenant_id` column (NOT NULL, FK) to existing tables in a system being converted to multi-tenant. + +**Example**: Add `tenant_id UUID NOT NULL REFERENCES tenants(id)` to `users`, `orders`, and all other tables. + +**Why data migration is needed**: Every existing row needs a tenant assignment. If there's currently one implicit tenant, a default works. If data must be partitioned across tenants based on some logic, the user must specify the mapping. + +**Information gap**: How are existing rows assigned to tenants? Single default tenant, or a mapping based on existing data (e.g., domain in email, organization FK)? + +**Solution fit**: Planner detects NOT NULL adds on each table. Single data migration handles all tables — user writes multiple UPDATE statements via query builder. Simple case (single default tenant) is a literal value assignment. Complex case (mapping based on existing data) requires UPDATE with expressions or joins. Check query: `client.users.findFirst({ where: { tenantId: null } })`. diff --git a/projects/graph-based-migrations/specs/data-migrations-solutions.md b/projects/graph-based-migrations/specs/data-migrations-solutions.md new file mode 100644 index 0000000000..9a594a801d --- /dev/null +++ b/projects/graph-based-migrations/specs/data-migrations-solutions.md @@ -0,0 +1,196 @@ +# Data migrations (solutions): compatibility, routing, and UX + +## Table of contents + +- 1. Why this document exists +- 2. Compatibility: when is it safe to run a data migration? + - 2.1 Contract-based compatibility via schema-verify + - 2.2 Explicit schema requirements (preferred) + - 2.3 Typed query interfaces (derived from compatible schema) +- 3. 
Who owns required invariants (source of truth) + - 3.1 Recommended: store invariants alongside the environment ref head + - 3.2 Promotion workflow (how refs get updated) +- 4. Two integration models + - 4.1 Model A: co-located with schema migrations (packages contribute invariants) + - 4.2 Model B: independent data migrations (applied when schema allows) +- 5. How routing changes when invariants exist + - 5.1 Priority and equivalence when multiple routes exist + - 5.2 Fail-closed cases and diagnostics +- 6. Pure data migrations (no schema change) +- 7. UX: what plan/apply/status should explain +- 8. Optional follow-up: a ledger (optimization only) +- 9. Conclusion and open decisions + +--- + +## 1. Why this document exists + +[data-migrations.md](./data-migrations.md) describes the *goal*: model data transformations as guarded transitions and define desired end state as “contract hash + required invariants”. + +This document focuses on *how we could implement that goal* without muddying the theory: + +- how to decide whether a data migration can run (compatibility), +- how to integrate data migrations with structural routing, +- what the CLI must explain, +- and what we still need to decide as a team. + +## 2. Compatibility: when is it safe to run a data migration? + +Data migrations must be able to query/update safely. That means they need the schema they expect. + +### 2.1 Contract-based compatibility via schema-verify + +If a data migration has access to a full contract (not just a hash), we can use the existing schema-verify operation to ask: + +> “Is the current database schema compatible with this contract?” + +This turns “schema is a superset of what I need” into a concrete, checkable property. 
+ +### 2.2 Explicit schema requirements (preferred) + +Even if a contract is available, it is usually better practice for a data migration to declare explicit requirements: + +- required tables/columns/types/constraints +- required capabilities +- required indexes (for performance) + +This avoids relying on “match contract C” as an all-or-nothing proxy, and it keeps migrations auditable. + +### 2.3 Typed query interfaces (derived from compatible schema) + +Once we have a compatibility check, the runner can provide a typed query interface derived from the contract/schema used for compatibility. +This keeps authoring safe (you can’t write queries against columns that don’t exist). + +## 3. Who owns required invariants (source of truth) + +The theory assumes we can say: “to be done, invariants {I1, I2, …} must hold”. + +We need an explicit owner for that set. + +### 3.1 Recommended: store invariants alongside the environment ref head + +A clean model is to treat the environment’s ref head as the declaration of desired state: + +- the ref already answers “what contract state should production/staging be at?” +- and it can also answer “what invariants must hold in that environment?” + +This keeps invariants out of contract hashing and makes promotions explicit and reviewable. + +It also keeps ownership consistent: the same thing that says “prod should be at contract hash H” also says “prod requires invariants {I}”. + +### 3.2 Promotion workflow (how refs get updated) + +A concrete workflow that fits this model: + +1. On a topic branch, a developer changes the contract and plans migrations starting from the current environment ref (e.g. production head). +2. They update the environment ref to say: “move from X → Y” and (optionally) “require invariant Z”. +3. They run apply locally against a representative database to validate behavior. +4. They commit the planned migrations, updated contract artifacts, and the updated ref. +5. 
CI gates the ref update with the same tooling/safety checks we provide. +6. Promotion becomes an explicit second step: + - staging ref can move first, then production ref later, or + - an automated pipeline step can fast-forward production ref to whatever staging ref currently declares. + +Alternative ownership models (still possible, but less aligned with the ref workflow): + +- **Alongside the contract** (e.g. a `data-invariants.json` next to contract output) +- **Inside the contract** (more cohesive, but complicates hashing and contract evolution) +- **Environment policy** (prod vs staging vs dev can require different invariants) + +We don’t need to decide this to explore models A/B, but we do need it for CLI semantics (“up to date”). + +## 4. Two integration models + +### 4.1 Model A: co-located with schema migrations (packages contribute invariants) + +In this model, a single migration package can contain: + +- structural operations that move contract A → B +- data operations that establish invariant(s) I + +The effective destination becomes: “contract B with invariants I satisfied”. + +When multiple A→B packages exist, route choice can’t treat them as interchangeable; it must choose a package/path that establishes the required invariants. + +### 4.2 Model B: independent data migrations (applied when schema allows) + +In this model, data migrations are independent of structural transitions. + +The runner: + +1. routes schema markerHash → targetHash +2. applies data migrations as soon as their schema requirements are met +3. continues until: + - target hash is reached, and + - required invariants are satisfied + +This avoids baking “priority” into the structural routing layer; invariants are enforced by the invariant layer. + +## 5. How routing changes when invariants exist + +### 5.1 Priority and equivalence when multiple routes exist + +Once invariants matter, “shortest path” cannot mean only “fewest structural steps”. 
+ +A reasonable policy is: + +- **first**: choose a route that can satisfy required invariants +- **then**: minimize steps / risk / time +- **then**: deterministic tie-break + +This is also where teams feel the pull of golden history (“just pick the latest”), so the CLI needs to make the policy explicit. + +### 5.2 Fail-closed cases and diagnostics + +Apply should fail closed (with clear diagnostics) when: + +- a required invariant has no provider migration +- a provider exists but its schema requirements are not satisfiable on any reachable schema route +- there are multiple possible ways to satisfy invariants but they are not provably equivalent and no selection was provided + +## 6. Pure data migrations (no schema change) + +Pure data migrations are naturally expressed as “invariant enforcers”: + +- they don’t move the contract hash +- they become relevant when the desired state (or policy) requires their invariant + +In a hash-only router, A→A would never be selected; the invariant model makes them first-class. + +## 7. UX: what plan/apply/status should explain + +The CLI needs to communicate both dimensions of state: + +- **schema**: current marker hash, desired hash, chosen route of structural migrations +- **data**: required invariants, which currently hold, and which migrations will establish the rest + +The core reframing for teams used to golden history is: + +> We show “what must be true”, not “what number you are in a global sequence”. + +## 8. Optional follow-up: a ledger (optimization only) + +Repeatedly checking invariants can be expensive. + +We can add a minimal ledger to: + +- avoid repeated checks +- improve auditability and debugging + +But the semantic model should not depend on it: + +- correctness comes from pre/postcondition checks +- the ledger is an optimization and a diagnostic surface + +## 9. 
Conclusion and open decisions + +We can preserve routing-first semantics with data migrations if we treat “done” as: + +- target contract hash reached, and +- required invariants satisfied + +Open decisions: + +- What is the concrete format/location of environment refs, and who updates them (human vs CI/CD automation)? +- Do we want Model A, Model B, or both (with a clear preference)? +- What is the default routing policy when multiple invariant-satisfying routes exist? diff --git a/projects/graph-based-migrations/specs/data-migrations-spec.md b/projects/graph-based-migrations/specs/data-migrations-spec.md new file mode 100644 index 0000000000..ed0fd75cdf --- /dev/null +++ b/projects/graph-based-migrations/specs/data-migrations-spec.md @@ -0,0 +1,572 @@ +# Summary + +Data migrations are data transformation operations that execute as part of a migration edge in the graph. All migrations — structural and data — are authored in TypeScript as operation chains, lowered to SQL at verification time via the target adapter, and stored as `{ sql, params }` in `ops.json`. At apply time, the runner executes the SQL directly. Data transforms are first-class operations in the chain, positioned by the planner at the correct point between structural ops. The system tracks data migrations as named invariants on graph edges, enabling invariant-aware routing. + +# Description + +Prisma Next's graph-based migration system models schema evolution as a directed graph of contract-hash states connected by structural migration edges. This works well when migrations are purely structural (path-independent), but breaks down when data transformations are involved — two databases at the same contract hash can have meaningfully different data depending on which path was taken. + +Data migrations solve this by allowing data transform operations to be part of the operation chain on graph edges. 
The system doesn't reason about what the transforms do; it tracks that named data migrations were applied, and routes through paths that satisfy required invariants. This preserves the graph model's flexibility for structural routing while adding data-awareness without collapsing to linear history. + +The primary user is a backend developer who knows SQL but doesn't think about migration theory. They want to describe what should happen and have the system handle safety. The system should detect when data migrations are needed, scaffold the appropriate operations, and let the user fill in the data transformation logic. + +# Requirements + +These are the problems the system must solve. The Solution section describes how each is addressed. + +## R0. No arbitrary code execution at apply time + +Migrations must not involve executing arbitrary TypeScript at apply time. The authoring surface is TypeScript, but the output is lowered SQL stored in `ops.json` that can be inspected, audited, and shipped to a SaaS runner without trusting user code. This is critical because: (1) migrations will eventually be serialized and shipped to a hosted service, where executing arbitrary code is a non-starter, (2) even locally, importing a TypeScript module executes top-level code, which is a security risk in team settings, (3) lowered SQL enables plan-time visibility — reviewers see exactly what will execute. + +## R1. Users can express data transformations during schema migration + +Schema evolution often requires data transformations that the database cannot perform automatically: backfilling computed values, converting between types with ambiguous mappings, splitting/merging columns or tables, resolving constraint violations, seeding reference data. The system must provide a way for users to express data transformation queries as part of a migration. See [data-migration-scenarios.md](./data-migration-scenarios.md) for the full scenario enumeration. + +## R2. 
Data migrations cover a wide range of schema evolution scenarios + +The system must handle the common patterns — computed backfill, lossy type changes, column split/merge, table split/merge, normalization/extraction, key identity changes, constraint enforcement, data seeding. The query builder is the sole authoring surface for v1; if it can't express a scenario, that's either a gap to fill in the query builder or an out-of-scope limitation. Scenarios requiring application-level libraries (e.g., bcrypt hashing) or external data sources are out of scope and must be handled outside the migration system. + +### Scenario coverage summary + +See [data-migration-scenarios.md](./data-migration-scenarios.md) for full details per scenario. + +| Scenario | Auto-detect | Execution model | Known gaps | +|----------|------------|-----------------|------------| +| S1. Computed backfill | Yes (NOT NULL) | Full | — | +| S2. Lossy type change | Yes (type change) | Full (temp column for same-name) | QB expression support in SET (OQ-5) | +| S3. Column split | Yes (NOT NULL) | Full | QB expression support (OQ-5) | +| S4. Column merge | Yes (NOT NULL) | Full | QB expression support (OQ-5) | +| S5. Table split (vertical) | Yes (NOT NULL FK) | Full | QB INSERT...SELECT (OQ-5) | +| S6. Table split (horizontal) | No (detection gap, OQ-4) | Full (manual authoring) | QB INSERT...SELECT (OQ-5) | +| S7. Table merge | Yes (NOT NULL) | Full | QB INSERT...SELECT, joins (OQ-5) | +| S8. Semantic reinterpretation | No (no structural change) | **Out of scope** — deferred (pure data migration A→A) | — | +| S9. Denormalization | Yes (NOT NULL) | Full | QB subqueries in UPDATE (OQ-5) | +| S10. Normalization/extraction | Yes (NOT NULL + FK) | Full | QB INSERT...SELECT, joins (OQ-5) | +| S11. Key/identity change | Per-column only | Full (manual authoring) | Cross-table coordination (OQ-3) | +| S12. 
Encoding/format change | Type changes only | Full for type changes | Same-type format changes are S8 territory | +| S13. Constraint enforcement | Yes | Full | — | +| S14. Data seeding | Yes (NOT NULL FK) | Full | Check uses `return false` (always run) | +| S15. Soft↔hard delete | No (semantic) | Full (manual authoring) | — | +| S16. Encryption/hashing | — | **Out of scope** — requires app-level libraries | — | +| S17. Audit trail backfill | Yes (NOT NULL) if source is in DB | Partial — DB sources work, external sources out of scope | — | +| S18. Multi-tenant isolation | Yes (NOT NULL) | Full | — | + +## R3. Data migrations are safe to retry after partial failure + +If a migration fails midway (crash, timeout, constraint violation), re-running it must not corrupt data or produce duplicate effects. The system needs a mechanism to determine whether a data migration has already been applied, and skip it if so. + +## R4. Users don't accidentally skip required data transformations + +When the planner detects a structural change that implies data migration is needed (e.g., adding a NOT NULL column without a default), it must ensure the user addresses it before the migration can be applied. An unimplemented data migration keeps the package in draft state — `migration verify` cannot attest it, and `migration apply` rejects unattested packages. + +## R5. Data migration queries have access to both old and new schema state + +During a data migration, the user's queries need to read from old columns/tables (to get existing data) and write to new columns/tables (to populate transformed data). The old schema must not yet be torn down, and the new schema must already be partially set up. + +## R6. Data migrations work on tables of all sizes + +Small tables can be migrated within a single transaction for atomicity. Large tables may require batched updates outside of a transaction, or DDL that can't run in a transaction (e.g., `CREATE INDEX CONCURRENTLY`). 
The execution model must accommodate both extremes. + +## R7. Data migrations participate in the graph model + +The migration graph must be aware of data migrations. When multiple paths exist to the same contract hash, the system must be able to distinguish paths based on what data transformations they include, and select appropriately. + +## R8. Environments can declare which data migrations are required + +Different environments (production, staging, dev) may need different data migration guarantees. The system must allow environments to declare which named data migrations must have been applied, and route accordingly. + +## R9. Users can author migrations manually + +Users need to be able to write their own migration (structural DDL, data transformations, or both) without relying on the planner. This should use the same authoring surface as planner-generated migrations. + +## R10. Planning works offline (no database connection required) + +Per ADR 169, migration planning must not require a live database connection. Detection of data migration needs and scaffolding must work from contract diffs alone. + +## R11. Post-apply verification catches schema mismatches + +After a migration (including any data migration) completes, the system must verify that the database schema matches the destination contract. This is the hard safety net — if the migration didn't produce the expected schema state, apply fails. + +## R12. No special rollback mechanism + +Reverting a migration is just another migration in the opposite direction. The system should not introduce rollback-specific machinery for data migrations. A migration S2→S1 is an ordinary graph edge that can carry its own data migration. + +# Solution + +## Constraints + +These apply across the entire solution: + +- Only lowered SQL (`{ sql, params }`) is stored in `ops.json`. Lowering from query builder ASTs to SQL happens at verify time via the target adapter. No TypeScript is loaded or executed at apply time. 
+- User-authored data migration queries should be idempotent. The required `check` query provides the primary retry-safety mechanism, but truly idempotent `run` queries are the safest approach. + +## Unified TypeScript authoring model (R0, R1, R2, R9) + +All migrations — structural and data — are authored as TypeScript files that return a list of operations. The file is evaluated at verification time; data transform callbacks are called with a typed DSL client, the query builder produces `SqlQueryPlan` ASTs which are lowered to SQL via the postgres adapter, and the resulting `{ sql, params }` pairs are written to `ops.json`. At apply time, only the lowered SQL is loaded and executed — no TypeScript, no AST deserialization. + +The planner generates these TypeScript files. When the planner detects patterns (NOT NULL without default, type change, nullable tightening), it scaffolds `migration.ts` with the appropriate operation sequence and `TODO` placeholders for data transforms. The user fills in the callbacks using the typed DSL client. The user can also author migration files manually via `migration new`. + +`createBuilders()` returns all builder functions with `dataTransform` typed so callbacks receive `Db` — full autocomplete on table names, columns, and query methods. 
A data transform is just another operation in the chain: + +```typescript +// migrations/0003_split_name/migration.ts +import type { Contract } from "../../src/prisma/contract.d" +import { createBuilders } from "@prisma-next/target-postgres/migration-builders" + +const { addColumn, dropColumn, setNotNull, dataTransform } = createBuilders() + +export default () => [ + addColumn("user", "firstName", { nullable: true }), + addColumn("user", "lastName", { nullable: true }), + dataTransform("split-user-name", { + check: (db) => db.user.select('id').where((f, fns) => fns.eq(f.firstName, null)).limit(1), + run: (db) => [ + db.user.update({ firstName: 'unnamed' }).where((f, fns) => fns.eq(f.firstName, null)), + db.user.update({ lastName: 'unnamed' }).where((f, fns) => fns.eq(f.lastName, null)), + ], + }), + setNotNull("user", "firstName"), + setNotNull("user", "lastName"), + dropColumn("user", "name"), +] +``` + +The `check` and `run` callbacks receive a typed `Db` client and return `Buildable` (query chain) or `Buildable[]`. At verification time, the resolver creates the DSL client from the contract and framework components, calls the callbacks, calls `.build()` on the results to capture `SqlQueryPlan` ASTs, and lowers them to SQL via the postgres adapter. The lowered `{ sql, params }` pairs are stored in `ops.json`. At apply time, the runner executes the SQL directly — no AST deserialization or lowering needed. + +The DSL client supports extension query functions (e.g., pgvector's `cosineDistance`) via `queryOperations` exposed on control descriptors (see open question #11). 
+ +### Strategies (R2) + +Common patterns are encapsulated as strategies — functions that expand to correctly-ordered sequences of primitive operations: + +```typescript +import { columnSplit } from '@prisma-next/migration' + +export default () => +  columnSplit("users", "name", { +    columns: ["first_name", "last_name"], +    transform: (client) => client.users.update({ +      firstName: expr("split_part(name, ' ', 1)"), +      lastName: expr("split_part(name, ' ', 2)"), +    }).where({ firstName: null }), +    check: (client) => client.users.findFirst({ where: { firstName: null } }), +  }) +``` + +`columnSplit` internally produces: addColumn(first_name) → addColumn(last_name) → dataTransform → setNotNull(first_name) → setNotNull(last_name) → dropColumn(name). The ordering is correct by construction. + +The planner detects when a strategy applies and scaffolds the appropriate call. The user provides only the information gap — how to derive the new values from the old. Building a library of strategies is future DX work; for v1, the planner emits raw operation sequences with the data transform positioned correctly. + +### Serialization lifecycle + +The migration TS file integrates with the existing Draft → Attested → Applied lifecycle: + +1. **Scaffold (Draft)**: `migration plan` produces a `migration.ts` file. If a data migration is needed, the data transform's `check` and `run` are unimplemented. The package is in draft state (no `edgeId`). +2. **Author (Draft)**: User fills in the data transform logic using the query builder. Still draft — the TS hasn't been evaluated. +3. **Verify/Attest**: `migration verify` evaluates the TypeScript, captures all operation ASTs (structural and data), lowers them to SQL via the target adapter, and writes the resulting `{ sql, params }` pairs into `ops.json`. The `edgeId` is computed from the serialized content. The package is now attested. +4. **Apply**: `migration apply` reads the lowered `{ sql, params }` entries from `ops.json` and executes them sequentially. No TypeScript is loaded and no lowering happens at apply time.
+ +The `migration.ts` file remains in the package as source code for reference, but is not part of the `edgeId` computation. + +### Representation in ops.json + +All operations — structural and data — are entries in `ops.json`. A data transform entry has two states: + +**Draft** (before verification): +```json +{ +  "id": "data_migration.split-user-name", +  "operationClass": "data", +  "source": "migration.ts", +  "check": null, +  "run": null +} +``` + +**Attested** (after verification): +```json +{ +  "id": "data_migration.split-user-name", +  "operationClass": "data", +  "source": "migration.ts", +  "check": { /* lowered query plan: { "sql": "…", "params": […] } */ }, +  "run": [{ /* lowered query plan: { "sql": "…", "params": […] } */ }] +} +``` + +Structural operations are serialized from their operation builders (`addColumn`, `setNotNull`, etc.) at the same verification step. The runner processes all operations sequentially. + +### Manual authoring — `migration new` (R9) + +`migration new` scaffolds a `migration.ts` with an empty operation list. The user writes operations using the same builders and `dataTransform` calls. This is the escape hatch for when the user wants full control — structural ops, data transforms, or both. + +`migration new` derives `from` hash from the current migration graph state and `to` hash from the current emitted contract. Both can be overridden with `--from` and `--to` flags. + +## Retry safety — required `check` (R3) + +`check(client)` is **required** on every `dataTransform`. It returns one of: + +- **A query AST** (the common case): the query describes *violations* — rows that indicate the migration still needs to run. Empty result = already applied (skip `run`). Non-empty result = needs to run. This is efficient (`LIMIT 1` for early exit) and the violation rows are useful for diagnostics. +- **`false`**: always run. For seeding, idempotent-by-construction cases, or when a meaningful check isn't worth writing. +- **`true`**: always skip. Use with caution.
+ +The check executes in two roles: + +- **Before `run` (retry)**: determines whether to skip `run`. If the check returns no violations, the data migration is already complete. +- **After `run` (validation)**: confirms that `run` did its job. If violations remain, the migration fails *before* subsequent tightening operations — producing a meaningful diagnostic ("47 rows still have first_name IS NULL") instead of a cryptic database error from a later SET NOT NULL. + +The execution sequence for a data transform operation is: check → (skip or run) → check again → (fail or proceed). + +## Detection and scaffolding (R4, R10) + +The planner detects structural changes that imply a data migration is needed: + +- NOT NULL column added without a default +- Non-widening type change (e.g., FLOAT → INTEGER) +- Existing nullable column becoming NOT NULL + +Detection works offline (no database connection required). The planner scaffolds when the structural diff *could* need a data migration, even if affected tables might be empty at runtime. + +When detection triggers, the planner produces a `migration.ts` with the structural operations and a `dataTransform` with unimplemented `check` and `run`. The unimplemented callbacks prevent `migration verify` from attesting the package — it stays in draft state until the user fills them in. + +For non-widening type changes on the same column (e.g., `price FLOAT` → `price BIGINT`), the planner uses a **temp column strategy**: it emits addColumn(temp) → dataTransform → dropColumn(original) → renameColumn(temp → original) in the correct order. + +## Planner-managed operation ordering (R5) + +The planner emits operations in the correct order directly. 
There is no generic class-based partitioning framework — the planner knows the full contract and positions each operation (structural and data) where it belongs: + +- Additive ops (create tables, add nullable columns) come first +- Data transforms come after the schema state they need is set up +- Tightening ops (SET NOT NULL, UNIQUE, CHECK constraints) come after the data transforms that populate/fix the data +- Destructive ops (drop columns, drop tables) come last + +This ordering is the planner's responsibility because it sees the full contract diff and understands cross-table dependencies (FKs, referenced constraints). Strategies encapsulate common ordering patterns, but the planner makes the decisions. + +### Transaction modes (R6) + +Individual operations or groups of operations can carry transaction annotations. The runner respects these when executing: + +| Mode | Behavior | Use case | +|------|----------|----------| +| `inline` (default) | All operations run in a single transaction. Full atomicity. | Small/fast migrations. | +| `isolated` | Specific operations run in their own transaction. | Data transforms on medium tables. | +| `unmanaged` | Specific operations run without transaction wrapping. | DDL that can't run in a transaction, large batch operations. | + +The transaction model is composable — the user annotates individual operations or groups rather than declaring a single mode for the entire migration. + +## Graph integration (R7, R8) + +The invariant carried by the system is "named data migration X was applied." This is recorded in the ledger when the migration edge completes successfully. The name comes from the `dataTransform`'s first argument. + +The router finds candidate paths via DFS, collecting data migration names along each path. Path selection: + +1. Filter to paths that satisfy required invariants (from environment refs) +2. Prefer paths with more invariants (do the most complete migration) +3. 
Tie-break by shortest path / deterministic ordering + +Environment refs declare desired state as target contract hash + required data migration names. A ref update is explicit and reviewable. + +## Operation builder API + +The operation builders are the primitives that both the planner and manual authoring use to construct migration operation chains. Each builder produces one or more operation entries that serialize to `ops.json`. The builders map 1:1 to the operations the Postgres planner currently produces. + +### Table operations + +```typescript +createTable(tableName, { columns, primaryKey?, uniques?, indexes?, foreignKeys? }) +// → operationClass: 'additive' + +dropTable(tableName) +// → operationClass: 'destructive' +``` + +### Column operations + +```typescript +addColumn(tableName, columnName, { type, nullable?, default? }) +// → operationClass: 'additive' +// When NOT NULL without default on non-empty table: uses temporary identity default + +dropColumn(tableName, columnName) +// → operationClass: 'destructive' + +alterColumnType(tableName, columnName, { newType }) +// → operationClass: 'destructive' +// Warning: may cause table rewrite + +setNotNull(tableName, columnName) +// → operationClass: 'destructive' +// Precheck: no NULL values exist + +dropNotNull(tableName, columnName) +// → operationClass: 'widening' + +setDefault(tableName, columnName, defaultValue) +// → operationClass: 'additive' (new default) or 'widening' (change default) + +dropDefault(tableName, columnName) +// → operationClass: 'destructive' +``` + +### Constraint operations + +```typescript +addPrimaryKey(tableName, { columns, constraintName? }) +// → operationClass: 'additive' + +addUnique(tableName, { columns, constraintName? }) +// → operationClass: 'additive' + +addForeignKey(tableName, { columns, references: { table, columns }, onDelete?, onUpdate?, constraintName? 
}) +// → operationClass: 'additive' + +dropConstraint(tableName, constraintName) +// → operationClass: 'destructive' +``` + +### Index operations + +```typescript +createIndex(tableName, { columns, indexName?, unique? }) +// → operationClass: 'additive' + +dropIndex(tableName, indexName) +// → operationClass: 'destructive' +``` + +### Data transform operations + +```typescript +dataTransform(name, { + check: (client) => QueryAST | boolean, + run: (client) => QueryAST | QueryAST[], +}) +// → operationClass: 'data' +// name becomes the invariant identity recorded in the ledger +``` + +### Type operations + +```typescript +createType(typeName, definition) +// → operationClass: 'additive' +// For enums, domains, composite types +``` + +### Annotations + +```typescript +transaction([...ops]) +// Wraps a group of operations in a single transaction + +noTransaction(op) +// Marks an operation to run outside any transaction +``` + +### Design notes + +- Each builder produces an operation descriptor — a thin reference to contract elements by name. The resolver converts descriptors to `SqlMigrationPlanOperation` objects (with SQL, prechecks, postchecks) using the contract context and existing planner SQL generation helpers (`planner-ddl-builders.ts`, `planner-sql-checks.ts`). +- **Builders are target-specific** (Postgres builders produce Postgres SQL). This is an intentional decision: the alternative is a target-agnostic builder layer that each adapter must implement separately. Since `ops.json` already contains target-specific SQL, and the planner already produces target-specific operations, the builders should too. For a new target (MySQL, MongoDB), the builders would be reimplemented with the same ergonomic API but different internal SQL generation — the API surface is the shared contract, not the implementation. Shared logic can be factored out internally. 
+- Builders live alongside the planner in the target package (e.g., `packages/3-targets/3-targets/postgres/`), not in the framework tooling package. The planner reuses the same helpers. +- The `dataTransform` builder accepts typed callbacks `(db: Db) => Buildable | Buildable[]`. At resolve time, the resolver creates a DSL client from the contract, calls the callbacks, calls `.build()` on the results, and lowers the resulting `SqlQueryPlan` ASTs to `{ sql, params }` via the postgres adapter. The lowered SQL is stored in `ops.json`. +- The planner uses these same builders to construct its output. When the planner emits TS, it writes calls to these builders in the correct order. +- **Descriptor resolution goes through `TargetMigrationsCapability`**. The operation descriptors are target-agnostic thin data, but resolving them to `SqlMigrationPlanOperation` (with SQL, prechecks, postchecks) is target-specific work. The CLI is target-agnostic and cannot import directly from a target package like `@prisma-next/target-postgres`. Instead, the target exposes a `resolveDescriptors(descriptors, context)` method on `TargetMigrationsCapability` — the same interface that already provides `createPlanner` and `createRunner`. The CLI loads the config, gets the target, and calls `target.migrations.resolveDescriptors()`. This follows the same pattern as `migration plan` (which calls `target.migrations.createPlanner()`) and `migration apply` (which calls `target.migrations.createRunner()`). +- **Descriptors reference contract elements by name, not by value.** Descriptors are intentionally thin — `addColumn("users", "email")` carries the column name as a string, not a full `StorageColumn` definition. The resolver looks up the actual `StorageColumn` from the destination contract (which has `codecId`, `typeParams`, `default`, etc.) and passes it to the existing SQL generation helpers. 
This avoids duplicating contract information in the migration: the contract is the single source of truth for column types, constraint details, and FK definitions. The descriptors are a join key into the contract, not a copy of it. This means descriptors can only reference things that exist in the destination contract — which is correct, since the destination contract describes the schema state after the migration. +- **Descriptors support overrides for intermediate states.** A migration often needs to express intermediate schema states that differ from the destination contract — for example, adding a column as nullable first, backfilling it, then tightening to NOT NULL. The destination contract says NOT NULL, but the `addColumn` step needs to be nullable. Descriptors accept an optional `overrides` parameter for this: `addColumn("users", "foo", { nullable: true })` adds the column as nullable regardless of what the contract says. The resolver applies overrides on top of the contract-looked-up definition. This keeps the contract as the source of truth while allowing the migration to express the multi-step pattern. Currently only `addColumn` supports `nullable` override — this is the primary case where intermediate state diverges from the destination contract. Other overrides can be added as needed. + +## Post-apply verification (R11) + +The existing post-apply schema verification (introspect database, compare against destination contract) serves as the hard safety net. No additional verification mechanism is needed — the runner already does this for structural migrations, and it naturally extends to cover migrations with data transforms. + +## Rollback (R12) + +No special rollback mechanism. Reverting state S1→S2 is a new migration S2→S1 — an ordinary graph edge that can carry its own data transforms if needed. + +## Applicability to document databases + +The unified migration model is designed to work for document databases (MongoDB, etc.), not just SQL targets. 
+ +**Why this works**: The contract in prisma-next represents the *application's* data model, not the database's schema. A document database may be schemaless at the storage layer, but the application domain is never schemaless — there are always expected shapes, types, and relationships. The contract makes this explicit and manageable. When evolving from one contract to another, the operations are semantically equivalent regardless of target: backfill a new field, reshape a document, split a collection, deduplicate records. + +**What transfers without modification**: The serialization lifecycle (TS → JSON AST → adapter execution), the `check`/`run` contract, the graph model (edges, invariants, routing), the ledger, the Draft → Attested → Applied lifecycle, retry safety, and transaction modes. + +**The key difference**: For SQL targets, many operations are structural DDL. For document databases, schema evolution *is* data transformation. The data transform mechanism becomes the primary migration surface. The operation chain model handles both naturally — a MongoDB migration is just a chain of data transforms with no structural ops. + +**What requires target-specific work**: A MongoDB-flavored query builder client that produces MongoDB-shaped AST nodes. The JSON AST format has lower impedance mismatch for MongoDB than SQL, since MongoDB operations are natively JSON. + +# Key Decisions + +These document the major design choices, the alternatives considered, and why we chose this approach. + +## D1. TypeScript-authored, AST-serialized — unified for structural and data + +**Decision**: All migrations are authored as TypeScript operation chains, serialized to JSON ASTs at verification time, and rendered to target-specific queries at apply time. Data transforms are operations in the chain, not a separate mechanism. + +**Alternatives considered**: +- **Operator algebra**: SMO-style typed operators with commutativity analysis. 
Rejected: expression language does "enormous work," opaque escape hatch needed for anything it can't express. +- **Arbitrary code execution at apply time**: Rejected: security risk for SaaS, top-level code execution on import, no auditability. +- **Separate `data-migration.ts` file**: Data migration as a separate file alongside `ops.json`. Rejected in favor of unified operation chain: simpler model, no class-based partitioning needed, data transforms positioned naturally in the sequence. + +**Why unified chain**: One authoring surface for everything. The planner produces TS that includes both structural ops and data transforms in the correct order. Strategies encapsulate common patterns. Manual authoring (`migration new`) uses the same surface. No need for a separate partitioning framework. + +## D2. Name over semantic postconditions; honest about what invariants are + +**Decision**: The system tracks data migrations by **name** (identity, human-readable). The invariant is "named data migration X was applied." + +**What this actually is**: Functionally, this is the same as carrying around proof that specific migrations ran. For any path segment that has data migrations, the model degenerates to "you must take this specific path" — which is linear history for that segment. Data migrations are inherently path-dependent; we're not trying to make them path-independent. The graph's flexibility only helps for structural-only segments. + +**Why name**: The name is stable under code changes (fixing a bug in the migration doesn't change its identity), human-readable in CLI output and ref files, and serves as the primary key for invariant requirements. + +**Alternative considered — semantic postconditions**: Carry checkable predicates about data state ("all phone numbers match E.164"). Problem: we can't exhaustively cover all possible postcondition checks with a typed representation. 
The required `check` on `dataTransform` gives us user-authored postconditions for retry safety without pretending the system can reason about them. + +## D3. Required `check` postcondition + +**Decision**: Every `dataTransform` must include a `check` that returns a query AST (violations — empty = done), `false` (always run), or `true` (always skip). + +**Why required**: Solves three problems: (1) retry safety — check before run, skip if done, (2) post-run validation — check after run, fail before tightening ops if violations remain, (3) forces the user to think about "done." + +## D4. Single-edge, planner-managed ordering + +**Decision**: A migration is a single graph edge with an ordered operation chain. The planner positions operations (structural and data) in the correct order. + +**Alternatives considered**: +- Split into multiple edges (additive → data → destructive). Rejected: requires synthesizing intermediate contracts, creates graph noise. +- Generic class-based partitioning. Rejected: doesn't handle constraint ops correctly (classified as additive but semantically tightening), and the planner already knows the right order. + +**Why planner-managed**: The planner sees the full contract diff and understands cross-table dependencies. It positions each operation where it belongs. Strategies encapsulate common patterns but the planner makes the decisions. This eliminates the op partitioning edge case (OQ-1 from earlier versions). + +## D5. Co-located with edges, not independent + +**Decision**: Data transforms are operations within migration edges, not independent artifacts. + +**Why**: A data transform needs a specific schema to run against. It has a natural home in the edge that creates that schema. Co-location means the structural path determines which transforms run — no separate routing needed. + +## D6. 
Temp column strategy for same-column type changes + +**Decision**: When a column's type changes without a rename (e.g., `price FLOAT` → `price BIGINT`), the planner emits addColumn(temp) → dataTransform → dropColumn(original) → renameColumn(temp). + +**Why**: The only approach that gives the user a writable column of the correct target type while old data is still readable. + +**Future refinement — `USING` clause**: For simple conversions expressible as a single SQL expression, `ALTER COLUMN TYPE ... USING` is simpler. The planner could offer common patterns and fall back to temp column when the user needs complex logic. + +## D7. Planner detects, scaffolds with context, prevents accidental no-ops + +**Decision**: The planner auto-detects data migration needs and scaffolds a `migration.ts` with the correct operation sequence, including an unimplemented `dataTransform`. The package stays in draft state until the user fills in the transform logic and runs `migration verify`. + +# Acceptance Criteria + +## Authoring and serialization + +- [x] Migration TS files returning operation lists are recognized during verification +- [x] `check` and `run` receive a typed `Db` client via `createBuilders()` +- [x] `run` callback returns `Buildable | Buildable[]` — resolver calls `.build()` and lowers to SQL +- [x] `migration verify` evaluates the TypeScript, resolves callbacks, lowers to SQL, writes `{ sql, params }` to `ops.json` +- [x] No TypeScript is loaded at `migration apply` time — only lowered SQL from `ops.json` +- [x] The `migration.ts` source file is not part of the `edgeId` computation; only serialized ops are +- [x] A package with `TODO` sentinel in dataTransform prevents attestation (resolver throws) +- [x] `migration apply` rejects draft (unattested) packages + +## Detection and scaffolding + +- [x] `migration plan` scaffolds a `migration.ts` with `dataTransform` when it detects a NOT NULL column without default +- [x] `migration plan` scaffolds when it detects a 
non-widening type change +- [x] `migration plan` scaffolds when it detects a nullable → NOT NULL change +- [x] Scaffolded `dataTransform` includes `TODO` placeholder with comment +- [x] Scaffold generates `createBuilders()` with contract type import +- [x] An unimplemented `dataTransform` (TODO sentinel) prevents attestation + +## Execution + +- [x] Operations execute in the order they appear in the chain +- [x] Data transform check runs before and after the transform's run step +- [ ] `inline`: all operations in one transaction; failure rolls back everything +- [ ] `isolated`: annotated operations get their own transaction +- [ ] `unmanaged`: annotated operations run without transaction wrapping +- [x] On retry, check determines whether to skip the data transform's run step + +## Graph integration + +- [ ] Data migration name (from `dataTransform`) is recorded in ledger on edge completion +- [ ] Router selects path satisfying required invariants from environment ref +- [ ] When no invariants are required, router prefers path with more data migrations +- [ ] Environment ref can declare required data migration names alongside target contract hash + +## Rollback + +- [ ] A migration S2→S1 with data transforms works identically to S1→S2 + +# Non-goals + +- **Multiple data transforms per edge requiring dependency analysis between them**: For v1, data transforms in a single chain are ordered by the planner or manually by the user. Cross-transform dependency analysis is future work. +- **Pure data migrations (A→A)**: Data-only transformations with no schema change. ADR 039 currently rejects self-loops. +- **Strategy library**: Pre-built strategies (`columnSplit`, `nonNullBackfill`, `typeChange`, `tableExtraction`) are future DX work. For v1, the planner emits raw operation sequences. +- **Arbitrary code execution**: Scenarios requiring application-level libraries (e.g., bcrypt hashing, S16) or external data sources are out of scope. 
+
- **Raw SQL escape hatch**: The query builder is the sole authoring surface. SQL is lowered from query builder ASTs at verify time. A future raw SQL escape hatch could be added as a builder method (e.g., `db.raw("UPDATE ...")`) that produces a `SqlQueryPlan` with the SQL embedded.
- **Runtime no-op detection**: Mock-style verification that transforms actually modified data. Future safety layer.
- **Content hash drift detection**: Descoped — the `migration.ts` is not part of `edgeId`, serialized ASTs have integrity via `edgeId`, and cross-environment comparison requires shared state.
- **Question-tree UX**: Interactive diff-driven authoring. Future layer.
- **Invariant management CLI**: The ref format supports invariants but there's no CLI surface for managing them yet. For v1, edit ref JSON files directly.

# Open Questions

1. **Cross-table coordinated migrations (S11)**: PK type changes cascade across the FK graph. The planner needs FK graph awareness to emit coordinated ops across all referencing tables. For v1, user-authored manually.

2. **Environment ref format**: **Resolved.** Refs refactored to `migrations/refs/<env>.json` with `{ hash, invariants: string[] }`.

3. **Table drop detection gap (S6)**: Horizontal table splits may not trigger auto-detection. Known gap for v1.

4. **Query builder expressiveness**: UPDATE SET column = other_column (column-to-column references in SET values) is not supported by the query builder. INSERT...SELECT, subqueries with joins also not available. Users can express most backfills with literal values. Extension query functions (e.g., pgvector cosineDistance) are supported via `queryOperations` on control descriptors. The SQL builder AST is DML-focused (SELECT/INSERT/UPDATE/DELETE) and lacks DDL-oriented nodes like CASE expressions or type casts. Enum value mappings in `alterColumnType` USING clauses must be written as raw SQL strings in migration.ts. A future `CaseExpr` AST node would allow typed USING expressions. 
+ +5. **Operation builder API design**: **Resolved.** `createBuilders()` returns typed builders. `dataTransform` callbacks receive `Db` with full autocomplete. SQL lowered at verify time. + +6. **Planner TS output format**: **Resolved.** The planner scaffolds `migration.ts` with `createBuilders()` and builder calls. For data transforms, `TODO` placeholders are generated. The evaluate → resolve → lower → ops.json pipeline is complete. + +7. **Contract changes after `migration new` require recreating the migration**: If the user runs `migration new`, writes their migration.ts, then realizes they need to tweak the contract (e.g., forgot NOT NULL), the `to` hash and `toContract` in `migration.json` are stale. There's no way to refresh them without deleting the package and starting over, losing the migration.ts work. A `migration refresh` command (or having verify detect the stale contract) would solve this — re-read the emitted contract and update the manifest's `to` hash + `toContract` without touching migration.ts. + +8. **`OperationDescriptor` is a loose index-signature type**: The framework-level `OperationDescriptor` is `{ kind: string; [key: string]: unknown }` because the framework doesn't know target-specific descriptor shapes. This means scaffold code generation (`descriptorToBuilderCall`) uses untyped property access. The proper fix is a generic `TargetMigrationsCapability<..., TDescriptor>` parameter so the descriptor type flows through, but this requires reworking the capability interface. Fine for v1 since the scaffold serializer is the only consumer. + +9. **Codec-specific type descriptors are hardcoded**: The descriptor planner maps `type_missing` to `createEnumType` by checking `codecId.startsWith('pg/enum')`. Non-enum codec types return a conflict. The proper approach is a codec hook registry that maps codec IDs to descriptor emitters, so each codec can declare what descriptors it needs. 
This avoids hardcoding prefix checks in the planner and scales to future codec types. Fine for v1 since enums are the only codec type that creates Postgres types. + +10. **Draft edge anchoring with `--ref`**: When a draft migration exists and `--ref` targets a different node than the draft's source, `migration status` shows the contract node connected from the draft's source — not from the ref target. Handling this properly would require drawing dashed edges from both nodes to the contract, which could drastically change the graph shape and the dashed edge rendering doesn't support corners (only straight vertical/horizontal segments resolve to dashed characters; corners fall back to solid). Acceptable limitation for now. + +11. **`queryOperations` duplication across control and runtime descriptors**: Extension packages (e.g., pgvector) have two descriptor exports: `./control` (used by CLI for migration planning, schema verification, DDL generation) and `./runtime` (used by app code for query execution). These are different types (`SqlControlExtensionDescriptor` vs `SqlRuntimeExtensionDescriptor`) with different fields, but they share static metadata from `descriptor-meta.ts`. + + Query operation definitions (e.g., pgvector's `cosineDistance` with its args, return type, and SQL lowering template) are static data defined in `descriptor-meta.ts`. Previously only wired to the runtime descriptor (`queryOperations: () => pgvectorQueryOperations`), because the control plane never needed to build queries. + + Data transform callbacks in migration.ts need a typed DSL client (`db.user.update(...)`) at resolve time. The resolver lives in target-postgres's control export, which only has access to control descriptors via `frameworkComponents`. To build the DSL client, it needs the `queryOperationTypes` record (method names → lowering templates) that comes from iterating `queryOperations()` on contributors. 
+ + The flow: `frameworkComponents` (control descriptors) → iterate `queryOperations()` → build `BuilderContext.queryOperationTypes` → `sql()` creates DSL client → resolver passes client to callbacks → callbacks return `Buildable` → `.build()` captures `SqlQueryPlan`. + + Fix: add optional `queryOperations` to the control descriptor interface. Each extension adds one line to its control export pointing to the same static array from `descriptor-meta.ts`. This is duplication of wiring (one line per extension), not duplication of data. Long-term, the control/runtime descriptor split may benefit from a shared base that carries common static metadata like query operations, codec type metadata, and operation signatures — all of which are already shared via `descriptor-meta.ts` imports. + +12. **No static verification that ops transform fromContract to toContract**: Attestation hashes the manifest + ops for integrity but does not verify that the operations actually transform the source contract's schema into the destination contract's schema. For planner-generated migrations this is correct by construction (the planner derives ops from the contract diff). For user-authored migrations (`migration new`), the ops could be wrong — the only validation is post-apply (the runner introspects the live database and compares against the destination contract). A static check would require an `applyOpsToSchema(fromSchema, ops) → resultSchema` function that simulates the ops against the source schema and compares the result to the destination contract. This does not exist. The gap means user-authored migrations can be attested and committed even if they are structurally incorrect — the error surfaces only at apply time against a real database. + +# Other Considerations + +## Security + +- No TypeScript is executed at apply time. Only lowered SQL from ops.json is executed. +- Data migration SQL runs with the same database permissions as the migration runner. 
+- The `migration.ts` source is evaluated only at verification time on the author's machine. + +## Observability + +- The runner logs data migration start/completion/failure with the migration name and transaction mode. +- The ledger records which named data migrations have been applied to which database instance. + +# References + +- [data-migrations.md](./data-migrations.md) — Theory: invariants, guarded transitions, desired state model +- [data-migrations-solutions.md](./data-migrations-solutions.md) — Solution exploration: compatibility, routing, integration models +- [data-migration-scenarios.md](./data-migration-scenarios.md) — 18 schema evolution scenarios walked through against the design +- [data-migrations-response.md](./data-migrations-response.md) — Feedback on spec: unified TS authoring model, strategies, operation chains +- [april-milestone.md](./april-milestone.md) — VP1: prove data migrations work in the graph model +- [chat.md](./chat.md) — Design exploration: operator algebra, scenario enumeration, question-tree UX +- Planner implementation: `packages/3-targets/3-targets/postgres/src/core/migrations/planner.ts` +- Runner implementation: `packages/3-targets/3-targets/postgres/src/core/migrations/runner.ts` +- Operation types: `packages/1-framework/1-core/migration/control-plane/src/migrations.ts` +- ADR 037 — Transaction semantics and compensation +- ADR 038 — Operation idempotency classes +- ADR 039 — Graph integrity and validation +- ADR 044 — Pre/post check vocabulary +- ADR 169 — Offline planning and containerization diff --git a/projects/graph-based-migrations/specs/data-migrations.md b/projects/graph-based-migrations/specs/data-migrations.md new file mode 100644 index 0000000000..d6090c7ba3 --- /dev/null +++ b/projects/graph-based-migrations/specs/data-migrations.md @@ -0,0 +1,114 @@ +# Data migrations (theory): invariants and desired state + +## Table of contents + +- 1. Why this document exists +- 2. The problem (in plain language) +- 3. 
The core idea: data migrations are guarded transitions +- 4. What “invariant” means (and why it’s the right abstraction) +- 5. The desired end state: contract hash + required data invariants +- 6. Constraints a data migration must respect +- 7. Where to go next (solutions doc) +- 8. Conclusion + +## 1. Why this document exists + +Our migration history discussion intentionally focuses on structural state: “reach contract hash H”. + +Data migrations complicate that, because they introduce a second notion of state that is not captured by a contract hash: **the content of the database**. + +This document records the current design direction for data migrations in a way that: + +- keeps the structural routing model ergonomic in the simple case, and +- makes data correctness explicit and machine-checkable when it matters. + +## 2. The problem (in plain language) + +If migrations are purely structural, then “database state” is mostly “schema state”, and contract hashes do a good job describing it. + +Once you start performing non-trivial data transformations, two databases can have: + +- the same schema / contract hash, but +- meaningfully different content. + +In that world, “schema state == desired” does not imply “data state == desired”. + +This is exactly why teams slip back into “golden history” thinking: a single linear history is a crude way of saying “we know these data transformations happened”. + +We want a model that says that directly. + +## 3. The core idea: data migrations are guarded transitions + +We treat a data migration the same way we treat a structural migration: + +- it is a transition guarded by a **precondition** and a **postcondition**, +- and it is safe to retry because completion is observable. + +Concretely: + +- **Precondition**: the database is in a data state where applying the migration makes sense (often “needs change”). +- **Execution**: statements/steps that move data toward the desired shape. 
+- **Postcondition**: a check that proves “done”. + +If the postcondition already holds, the migration is a no-op — that’s the idempotence story. + +## 4. What “invariant” means (and why it’s the right abstraction) + +In this document, a **data invariant** is: + +- a named property we want to be true (e.g. “all user phone numbers normalized”), and +- a checkable predicate that can confirm whether it holds. + +This is a better abstraction than “did we run migration X?” because: + +- it is verifiable, +- it is composable, +- and it decouples correctness from a single canonical history. + +## 5. The desired end state: contract hash + required data invariants + +In a world with data migrations, “desired state” is not just a contract hash. + +It is: + +- **target contract hash** (structural state), plus +- **required data invariants** (data state). + +In practice, this “desired state” needs an owner. A clean model is to treat the environment’s **ref head** as the declaration of desired state: + +- the ref already answers “what contract state should production/staging be at?” +- and it can also answer “what data invariants must hold in that environment?” + +That makes promotions explicit and reviewable: a ref update can say “move production from X → Y, and require invariant Z to hold”. + +This solves a major failure mode of schema-only routing: + +- a database can be structurally “up to date” while still missing required data correctness properties. + +## 6. Constraints a data migration must respect + +The theory above is only useful if the system can apply data migrations safely. That implies a few constraints: + +- **Guarded**: the migration must have a “done-ness” check (postcondition) so retries and partial failure are safe. +- **Selective / idempotent**: running the migration twice should either do nothing the second time or only touch rows that still need it. 
+- **Schema-aware**: the migration must only run when the database schema supports the queries it needs (more on this in the solutions doc). + +## 7. Where to go next (solutions doc) + +This document intentionally stops at “what we’re trying to model”. + +For concrete solution options (compatibility checks, how to integrate with routing, UX implications, and an optional ledger optimization), see: + +- [data-migrations-solutions.md](./data-migrations-solutions.md) + +## 8. Conclusion + +Data migrations are easiest to reason about when we treat them as **guarded transitions** whose completion is defined by **data invariants**. + +That leads to a crisp statement of desired end state: + +- reach the target **contract hash**, and +- satisfy the required **data invariants**. + +A practical ownership model is that environment refs own this declaration of “done”: schema target + required invariants. + diff --git a/projects/graph-based-migrations/specs/planner-replacement-proposal.md b/projects/graph-based-migrations/specs/planner-replacement-proposal.md new file mode 100644 index 0000000000..dd9c1fe89c --- /dev/null +++ b/projects/graph-based-migrations/specs/planner-replacement-proposal.md @@ -0,0 +1,271 @@ +# Proposal: Descriptor-Based Planner Replacement + +## Context + +The current Postgres planner produces `SqlMigrationPlanOperation[]` directly — assembling SQL, prechecks, postchecks, and target details inline. It has significant complexity around the temporary default strategy (a workaround for adding NOT NULL columns without data migrations). With data migrations now supported, this workaround is unnecessary — the planner can emit `addColumn` (nullable) + `dataTransformDraft` + `setNotNull` instead. + +The operation resolver already exists and can convert descriptors into `SqlMigrationPlanOperation[]` using the same SQL helpers the planner uses. 
This proposal replaces the planner's SQL generation with descriptor emission, unifying the code path for planner-generated and user-authored migrations. + +## Prerequisites + +### Augment SchemaIssue with missing constraint kinds + +The verifier (`verifySqlSchema`) currently reports `missing_table` and `missing_column` but not missing constraints, indexes, or FKs. The planner's additive builders detect these by iterating the contract against the schema — duplicating the verifier's job. + +**Add these issue kinds to SchemaIssue:** + +| New issue kind | When it fires | Fields needed | +|---|---|---| +| `missing_primary_key` | Contract table has PK, schema table doesn't | `table`, `expected` (column list) | +| `missing_unique_constraint` | Contract has unique, schema doesn't (checked via column set matching, including unique indexes) | `table`, `indexOrConstraint` (name), `expected` (columns) | +| `missing_foreign_key` | Contract has FK with `constraint: true`, schema doesn't | `table`, `indexOrConstraint` (name), `expected` (columns + references) | +| `missing_index` | Contract has index, schema doesn't (checked via column set matching) | `table`, `indexOrConstraint` (name), `expected` (columns) | +| `missing_fk_backing_index` | FK has `index: true`, no matching user-declared or existing index | `table`, `indexOrConstraint` (generated name), `expected` (FK columns) | + +**Where to implement:** `packages/2-sql/3-tooling/family/src/core/schema-verify/verify-sql-schema.ts` + +**Impact:** The verifier is used by `db verify`, `migration plan`, and `db update`. Adding missing-constraint detection is additive — existing consumers see more issues but don't break (they already handle unknown issue kinds gracefully). + +## The replacement planner + +### Input +Same as today: destination contract + source schema IR + policy + framework components + +### Output +`MigrationOpDescriptor[]` instead of `SqlMigrationPlanOperation[]` + +### Flow + +``` +1. 
Extract codec hooks (once)
+2. Collect schema issues (verifySqlSchema — now with missing constraint kinds)
+3. Build pre-computed schema lookups (for dedup — same as today)
+4. Iterate issues in dependency order, emit descriptors:
+
+   For each issue:
+   missing_table → createTable(tableName)
+   missing_column → addColumn(tableName, columnName)
+
+     if NOT NULL without default:
+       addColumn(tableName, columnName, { nullable: true })
+       + dataTransformDraft(name, source)
+       + setNotNull(tableName, columnName)
+   missing_primary_key → addPrimaryKey(tableName)
+   missing_unique_constraint → addUnique(tableName, columns)
+   missing_index → createIndex(tableName, columns)
+   missing_fk_backing_index → createIndex(tableName, fkColumns)
+   missing_foreign_key → addForeignKey(tableName, columns)
+   extra_table → dropTable(tableName) [if destructive allowed]
+   extra_column → dropColumn(tableName, colName) [if destructive allowed]
+   extra_index → dropIndex(tableName, indexName) [if destructive allowed]
+   extra_unique → dropConstraint(tableName, name) [if destructive allowed]
+   extra_foreign_key → dropConstraint(tableName, name) [if destructive allowed]
+   extra_primary_key → dropConstraint(tableName, name) [if destructive allowed]
+   nullability_mismatch (→NOT NULL) → setNotNull(tableName, colName) [destructive]
+   nullability_mismatch (→nullable) → dropNotNull(tableName, colName) [widening]
+   type_mismatch → alterColumnType(tableName, colName) [destructive]
+   default_missing → setDefault(tableName, colName) [additive]
+   default_mismatch → setDefault(tableName, colName) [widening]
+   extra_default → dropDefault(tableName, colName) [destructive]
+
+5. Handle database dependencies (extensions) — emit descriptors or pass through as-is
+6. Handle storage types (codec hooks) — emit descriptors or pass through as-is
+7. Filter by policy, convert disallowed ops to conflicts
+8. 
Return descriptors (or resolve immediately if backward compat needed)
+```
+
+### Ordering
+
+The current planner uses strict ordering: deps → types → reconciliation → tables → columns → PK → unique → index → FK-index → FK. This is a dependency order.
+
+The descriptor-based planner preserves this by emitting descriptors in the same order. The issue iteration can be sorted into this order by kind:
+
+```
+1. dependency_missing
+2. type_missing / type_values_mismatch
+3. extra_* (reconciliation drops — before creates to avoid name conflicts)
+4. missing_table
+5. missing_column
+6. nullability_mismatch, type_mismatch, default_* (reconciliation alters)
+7. missing_primary_key
+8. missing_unique_constraint
+9. missing_index
+10. missing_fk_backing_index
+11. missing_foreign_key
+```
+
+This is the same ordering the current planner achieves through its multi-pass pipeline, just expressed as a sort on issue kinds.
+
+### What gets removed
+
+| Current planner code | Reason for removal |
+|---|---|
+| `buildTableOperations` | Replaced by `missing_table` issue → `createTable` descriptor |
+| `buildColumnOperations` | Replaced by `missing_column` issue → `addColumn` descriptor |
+| `buildAddColumnOperation` | Replaced by descriptor emission |
+| `buildPrimaryKeyOperations` | Replaced by `missing_primary_key` issue → `addPrimaryKey` descriptor |
+| `buildUniqueOperations` | Replaced by `missing_unique_constraint` issue → `addUnique` descriptor |
+| `buildIndexOperations` | Replaced by `missing_index` issue → `createIndex` descriptor |
+| `buildFkBackingIndexOperations` | Replaced by `missing_fk_backing_index` issue → `createIndex` descriptor |
+| `buildForeignKeyOperations` | Replaced by `missing_foreign_key` issue → `addForeignKey` descriptor |
+| `canUseSharedTemporaryDefaultStrategy` | Unnecessary — data migrations replace temp defaults |
+| `resolveIdentityValue` / identity value maps | Unnecessary — data migrations replace temp defaults |
+| 
`buildAddNotNullColumnWithTemporaryDefaultOperation` (recipe) | Unnecessary — replaced by addColumn(nullable) + dataTransform + setNotNull | +| All inline SQL generation in builder methods | Moved to resolver (already exists) | + +### What gets preserved + +| Code | Why preserved | +|---|---| +| Schema lookup pre-computation (`buildSchemaLookupMap`) | Still needed by verifier for constraint dedup | +| Policy enforcement / conflict generation | Same logic, just operates on descriptors instead of ops | +| Codec hook delegation (`buildStorageTypeOperations`) | Hooks already produce operations; can pass through or wrap | +| `buildDatabaseDependencyOperations` | Extensions produce their own ops; can pass through or wrap | +| Operation deduplication by ID | Same pattern, applied to descriptors | +| `sortedEntries` for deterministic ordering | Still needed for reproducible plans | + +### What the resolver needs to handle (new) + +The resolver already handles all the additive descriptors. For the reconciliation descriptors, it needs: + +| Descriptor | Resolver work | Already implemented? | +|---|---|---| +| `dropTable` | Generate DROP TABLE + checks | Yes | +| `dropColumn` | Generate ALTER TABLE DROP COLUMN + checks | Yes | +| `dropConstraint` | Generate ALTER TABLE DROP CONSTRAINT + checks | Yes | +| `dropIndex` | Generate DROP INDEX + checks | Yes | +| `alterColumnType` | Generate ALTER COLUMN TYPE + USING + warning | Yes | +| `setNotNull` | Generate ALTER COLUMN SET NOT NULL + NULL check | Yes | +| `dropNotNull` | Generate ALTER COLUMN DROP NOT NULL | Yes | +| `setDefault` | Generate ALTER COLUMN SET DEFAULT (from contract) | Yes | +| `dropDefault` | Generate ALTER COLUMN DROP DEFAULT | Yes | + +All reconciliation operations are already handled by the resolver. + +## Pattern matching on issues + +Before the planner does its default issue-to-descriptor mapping, it runs pattern matchers. 
Each matcher is a function that examines the issue list, optionally consumes issues it recognizes, and returns the ops to handle them: + +```typescript +interface PatternContext { + readonly toContract: SqlContract; + readonly fromContract: SqlContract | null; +} + +type PatternMatcher = ( + issues: readonly SchemaIssue[], + context: PatternContext, +) => + | { kind: 'match'; issues: readonly SchemaIssue[]; ops: readonly MigrationOpDescriptor[] } + | { kind: 'no_match' }; +``` + +The planner chains matchers, then handles whatever's left with default mapping: + +```typescript +let remaining = issues; +const ops: MigrationOpDescriptor[] = []; + +for (const matcher of matchers) { + const result = matcher(remaining, context); + if (result.kind === 'match') { + remaining = result.issues; // issues with matched ones removed + ops.push(...result.ops); + } +} + +// Default handling for remaining issues +for (const issue of remaining) { + ops.push(...defaultOpsForIssue(issue)); +} +``` + +Each matcher looks at the full issue list, pulls out what it handles, returns the rest. No framework, no registry — just functions with a type signature. 
+ +### v1 matcher: NOT NULL backfill + +```typescript +const notNullBackfillMatcher: PatternMatcher = (issues, ctx) => { + const matched: SchemaIssue[] = []; + const ops: MigrationOpDescriptor[] = []; + + for (const issue of issues) { + if (issue.kind !== 'missing_column' || !issue.table || !issue.column) continue; + const column = ctx.toContract.storage.tables[issue.table]?.columns[issue.column]; + if (!column || column.nullable !== false || column.default !== undefined) continue; + + matched.push(issue); + ops.push( + addColumn(issue.table, issue.column, { nullable: true }), + dataTransformDraft(`backfill-${issue.table}-${issue.column}`, 'migration.ts'), + setNotNull(issue.table, issue.column), + ); + } + + if (matched.length === 0) return { kind: 'no_match' }; + return { + kind: 'match', + issues: issues.filter(i => !matched.includes(i)), + ops, + }; +}; +``` + +### Future matchers (examples, not v1) + +**Column rename** — consumes `extra_column` + `missing_column` with same type on same table: +```typescript +const columnRenameMatcher: PatternMatcher = (issues, ctx) => { + // Match pairs of extra+missing columns with compatible types + // Consume both issues, emit renameColumn or add→copy→drop sequence +}; +``` + +**Column split** — consumes `extra_column` + multiple `missing_column` on same table: +```typescript +const columnSplitMatcher: PatternMatcher = (issues, ctx) => { + // Match one extra + N missing where types are compatible + // Consume all, emit addColumn(nullable) × N + dataTransform + setNotNull × N + dropColumn +}; +``` + +Matchers are ordered by specificity — more specific patterns (column split) should run before less specific ones (NOT NULL backfill) to avoid partial matches. A split's `missing_column` issues would be consumed before the backfill matcher sees them. + +## Migration output + +The planner currently writes `ops.json` directly (via `writeMigrationPackage`). 
With the descriptor-based planner: + +**Option A — Resolve immediately:** Planner emits descriptors → resolver converts to ops → write ops.json. Same output as today. Backward compatible. The planner is internally descriptor-based but externally produces ops. + +**Option B — Write descriptors + migration.ts:** Planner emits descriptors → write them as a `migration.ts` file (the descriptors ARE the builder calls) → ops.json stays empty (draft) → user runs verify to resolve. This is the full TS-authoring model. + +**Recommendation:** Option A for v1 (backward compat, no workflow change), with Option B as a flag or future default. The planner returns descriptors internally; the caller decides whether to resolve immediately or scaffold migration.ts. + +## Implementation plan + +1. **Augment SchemaIssue** with missing constraint kinds in verifier (~200 lines) +2. **Write the descriptor-based planner** as a new method alongside the existing one (~300 lines) +3. **Wire it in** behind a flag or as a replacement +4. **Add data migration detection** in the missing_column handler +5. **Remove temp default code** (canUseSharedTemporaryDefaultStrategy, resolveIdentityValue, recipe) +6. **Tests:** Run existing planner integration tests against the new planner, verify same ops output + +## Improvement: SchemaIssue should be a discriminated union + +`SchemaIssue` currently has all fields except `kind` and `message` as optional. This forces defensive null checks in the descriptor planner even though a `missing_column` issue will always have `table` and `column`. The type should be a discriminated union where each issue kind carries its fields as required: + +```typescript +type SchemaIssue = + | { kind: 'missing_table'; table: string; message: string } + | { kind: 'missing_column'; table: string; column: string; message: string } + | { kind: 'extra_index'; table: string; indexOrConstraint: string; message: string } + // etc. 
+``` + +This would eliminate all the defensive checks in `mapIssue` and let the switch narrow the type automatically. It's a framework-level type change (`@prisma-next/core-control-plane`) that affects multiple packages — worth doing but not a blocker for the descriptor planner. + +## Risks + +- **Verifier changes** affect `db verify` — must ensure new issue kinds don't cause false positives in existing verify workflows +- **Operation ordering** must be exactly preserved — any reordering could break FK dependencies +- **createTable** currently inlines all columns + PK in one DDL statement. If we emit per-column descriptors for new tables, the SQL output changes (CREATE TABLE with no columns, then ALTER TABLE ADD COLUMN for each). This may be undesirable — keeping `createTable` as a single descriptor that the resolver handles as one DDL statement is better. +- **Codec hook operations** currently produce `SqlMigrationPlanOperation` directly. The descriptor-based planner would need to either pass these through or define a descriptor for them. diff --git a/projects/graph-based-migrations/walkthrough.md b/projects/graph-based-migrations/walkthrough.md new file mode 100644 index 0000000000..44300da3b9 --- /dev/null +++ b/projects/graph-based-migrations/walkthrough.md @@ -0,0 +1,162 @@ +## Intent + +Replace the table-based `migration status` output with an ASCII graph renderer so users can see migration topology — branches, forks, diamonds, and rollback cycles — directly in their terminal. The default view answers "what would `migration apply` do?" by showing only the relevant subgraph (the apply path plus any forks that explain why action is needed). An opt-in `--graph` flag shows the full migration graph. This eliminates the `AMBIGUOUS_LEAF` crash on divergent graphs and introduces three-state edge status (applied/pending/unreachable) so users can see exactly which migrations are actionable. 
+ +## Change map + +- **Implementation**: + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts) — Dagre-based graph renderer (~1300 lines) + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts) — `RenderGraph`, `GraphNode`, `GraphEdge`, `GraphRenderOptions` + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts) — Maps `MigrationGraph` → generic `RenderGraph` with markers and status icons + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts) — Command logic: target resolution, edge status derivation, CLI handler rewrite + - [packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts (L163–L193)](packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts:163-193) — `--from` prefix matching +- **Tests (evidence)**: + - [packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts](packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts) — Renderer unit + snapshot tests (35+ topologies) + - [packages/1-framework/3-tooling/cli/test/utils/formatters/test-graphs.ts](packages/1-framework/3-tooling/cli/test/utils/formatters/test-graphs.ts) — Shared test graph definitions + - [packages/1-framework/3-tooling/cli/test/commands/derive-edge-statuses.test.ts](packages/1-framework/3-tooling/cli/test/commands/derive-edge-statuses.test.ts) — `deriveEdgeStatuses` unit tests + - [packages/1-framework/3-tooling/cli/test/commands/migration-plan.test.ts](packages/1-framework/3-tooling/cli/test/commands/migration-plan.test.ts) — `--from` prefix matching tests + - 
[packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap](packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap) — 939 lines of snapshot output + - [test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts](test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts) — 13 diagnostic journey tests (offline, online, divergent, JSON shape) + +## The story + +1. **Introduce a domain-agnostic graph renderer.** A new `graph-render.ts` module uses Dagre for Sugiyama-style layout (rank assignment + coordinate placement), then stamps the result onto a `CharGrid` — a sparse character canvas that resolves box-drawing junctions, color priority, and label placement. The renderer knows nothing about migrations; it accepts `RenderGraph` (nodes with markers, edges with optional labels and color hints) and produces ASCII output. This is the bulk of new code (~1300 lines). + +2. **Define generic graph types as a stable interface boundary.** `graph-types.ts` introduces `RenderGraph` (immutable directed graph with adjacency-list indexing), `GraphNode` (with typed markers: db, contract, ref, custom), `GraphEdge` (with `colorHint` for CVD-safe edge coloring), and `GraphRenderOptions` (spine target, truncation limit, dagre overrides). These types form the contract between the migration-specific mapper and the domain-agnostic renderer. + +3. **Build the migration-to-graph mapping layer.** `graph-migration-mapper.ts` translates `MigrationGraph` + CLI status info into `RenderGraph`. It computes relevant paths with continuity-aware routing (tries marker→contract and ref→contract independently, not BFS shortest path from root), bakes status icons (`✓`/`⧗`/`✗`) into edge labels, sets `colorHint` for CVD-safe coloring, and attaches typed markers (db, contract, ref) to nodes. + +4. 
**Rewrite target resolution to eliminate `findLeaf`.** The old code called `findLeaf(graph)` as a fallback, which threw `AMBIGUOUS_LEAF` on divergent graphs. The new resolution chain is: `--ref` hash → contract hash (if in graph) → single reachable leaf → diverged full-graph fallback with a `MIGRATION.DIVERGED` diagnostic. This means `migration status` never crashes on a valid graph shape. + +5. **Add three-state edge status derivation.** `deriveEdgeStatuses` uses path analysis across the full graph to assign per-edge status: applied (root→marker path), pending (marker→target path, plus target→contract when a ref is active), and unreachable (root→target edges that are neither applied nor pending — the DB is on a different branch). Empty DB treats root as the effective marker. + +6. **Replace the CLI output pipeline.** The old `formatMigrationStatusOutput` (table-based formatter) is removed. The CLI handler now: (a) maps the status result to `RenderGraph` via `migrationGraphToRenderInput`, (b) extracts the relevant subgraph for the default view or passes the full graph for `--graph`, (c) detects linear graphs to use compact layout (`ranksep: 1`), (d) renders via `graphRenderer.render()`, and (e) appends a legend and summary with diagnostics. `--limit`/`--all` flags control truncation (default 10). + +7. **Handle edge cases with early bail-outs and diagnostics.** Marker-not-in-graph (DB managed via `db update`) bails out early with recovery hints. Contract-ahead fires when no migration produces the current contract hash. Detached contract nodes render with dashed connectors when the contract is not in the graph. + +8. **Add prefix matching for `migration plan --from`.** As a cross-command improvement discovered during testing, `--from` now supports prefix matching (with or without `sha256:` scheme), with disambiguation when multiple candidates match. 
+ +## Behavior changes & evidence + +- **Adds Dagre-based ASCII graph renderer for migration topology visualization**: Introduces `GraphRenderer` interface with a single `render(graph, options)` method backed by Dagre layout. Supports linear chains, branches, diamonds, rollback cycles, detached contract nodes, and truncation with `┊` elided-history indicator. CVD-safe palette (cyan/yellow/magenta, no red/green). Box-drawing characters for connectors. Deterministic output. + - **Why**: The old table view could not represent branches, forks, or multi-leaf graphs. Users had no way to see the topology of their migration graph. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts) + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-types.ts) + - **Tests**: + - [packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts (L27–L45)](packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts:27-45) — Snapshot tests for all 35+ topologies, determinism, no-color output + - [packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap](packages/1-framework/3-tooling/cli/test/utils/formatters/__snapshots__/graph-render.test.ts.snap) — 939 lines of snapshot output + +- **Adds migration-to-graph mapping with status icons and continuity-aware path computation**: `migrationGraphToRenderInput` translates the migration domain into generic graph types, baking status icons into edge labels and computing relevant paths for the default subgraph view. Path computation tries both marker→contract and ref→contract independently (not root→contract BFS) so diamond topologies show both legs. + - **Why**: The renderer is domain-agnostic by design. 
The mapper is the single place where migration semantics (marker position, ref targets, edge status) are projected onto the generic graph types. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-migration-mapper.ts) + - **Tests**: + - [packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts (L99–L157)](packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts:99-157) — `extractRelevantSubgraph` multi-path union, deduplication, marker preservation + +- **Adds three-state edge status derivation (`deriveEdgeStatuses`)**: Computes applied/pending/unreachable status per edge using graph-wide path analysis. Applied = root→marker. Pending = marker→target (+ target→contract for ref scenarios). Unreachable = root→target edges that are neither — meaning the DB is on a different branch. Empty DB treats root as effective marker. + - **Why**: The old two-state model (applied/pending on a linear chain) couldn't represent the case where the DB marker is on a different branch from the target. "Unreachable" communicates that `apply` can't reach these edges without the DB first changing branches. 
+ - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L148–L220)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:148-220) + - **Tests**: + - [packages/1-framework/3-tooling/cli/test/commands/derive-edge-statuses.test.ts](packages/1-framework/3-tooling/cli/test/commands/derive-edge-statuses.test.ts) — Linear chain, empty DB, fully applied, branching (unreachable), diamond (no double-count), ref with contract beyond, no contract, off-graph contract + - [test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts](test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts) — End-to-end validation of applied/pending/unreachable in real scenarios + +- **Target resolution replaces `findLeaf` with contract-hash-first fallback chain**: `migration status` no longer calls `findLeaf(graph)`. Target resolution: `--ref` hash → contract hash (if in graph) → single reachable leaf → diverged (full-graph view with `MIGRATION.DIVERGED` diagnostic). Divergent graphs render successfully with all branches visible instead of crashing. + - **Why**: `findLeaf` throws `AMBIGUOUS_LEAF` on multi-leaf graphs. `migration status` is a read-only display command — it should never hard-fail on a valid graph shape. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L449–L471)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:449-471) + - **Tests**: + - Diverged graph handling is tested via the `MIGRATION.DIVERGED` diagnostic path. No dedicated unit test; behavior verified through the command flow. + +- **Adds truncation with marker-aware expansion (`--limit` / `--all`)**: Long graphs truncate to the last N edges (default 10) with a `┊ (M earlier migrations)` indicator. Truncation expands beyond the limit when needed to keep contract and DB markers visible. `--all` disables truncation. 
+ - **Why**: Real-world migration graphs can have hundreds of edges. Truncation keeps the default output focused on the recent, actionable portion while never hiding the user's current position. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts (L1246–L1300)](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts:1246-1300) — `truncateGraph`, `findSpinePath`, `render` with truncation + - **Tests**: + - [packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts (L181–L314)](packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts:181-314) — Truncation at various limits, marker-aware expansion, detached node preservation, elided indicator rendering + +- **Adds `isLinearGraph` for compact linear layout**: Detects single-chain graphs (no branching, ignoring detached nodes) and passes `ranksep: 1` so the output is vertically compact. + - **Why**: Linear chains are the common case. Without compact layout, Dagre inserts 4 rows between each rank, wasting vertical space for graphs that don't need multi-column layout. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts (L1311–L1317)](packages/1-framework/3-tooling/cli/src/utils/formatters/graph-render.ts:1311-1317) + - **Tests**: + - [packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts (L317–L357)](packages/1-framework/3-tooling/cli/test/utils/formatters/graph-render.test.ts:317-357) — Single node, linear chain, branching (false), detached nodes (ignored), empty graph + +- **Adds marker-not-in-graph early bail-out with recovery diagnostics**: When the DB marker is off-graph and differs from the contract, `migration status` bails out early — no graph is rendered, only a `MIGRATION.MARKER_NOT_IN_GRAPH` diagnostic with actionable hints (db sign, db update, contract infer, db verify). 
When marker equals contract (both off-graph), proceeds normally — the detached node shows both markers. + - **Why**: When the marker is off-graph, no edge can be reliably marked as applied. Rendering a full graph with no statuses and misleading diagnostics is worse than a clear error with recovery guidance. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L516–L555)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:516-555) + - **Tests**: + - [test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts](test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts) — "marker off-graph, mismatches contract" journey test + +- **Adds `MIGRATION.NO_MARKER` diagnostic for fresh databases**: When connected to a database that has no marker row (never initialized), emits a warning with a hint to run `migration apply`. Previously this state was silent — the user saw pending migrations but no explanation of *why* they were pending. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L562–L569)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:562-569) + - **Tests**: + - [test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts](test/integration/test/cli-journeys/migration-status-diagnostics.e2e.test.ts) — "fresh DB, migrations exist — MIGRATION.NO_MARKER" journey test + +- **Adds `--graph` flag for full migration graph**: The `--graph` flag renders the entire migration graph (all branches, diamonds, cycles) instead of just the relevant subgraph. When the graph is diverged (no single target), `--graph` is used automatically. + - **Why**: The default view is optimized for "what do I need to do?" The full graph view is for orientation — understanding the overall topology. 
+ - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L681–L683)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:681-683) — Flag definition + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L724–L725)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:724-725) — Routing: `--graph` or diverged → full graph, else → relevant subgraph + - **Tests**: + - Full-graph rendering is exercised by the snapshot tests for all topologies in `graph-render.test.ts`. + +- **Adds prefix matching for `migration plan --from`**: `--from` now supports prefix matching with or without the `sha256:` scheme. Ambiguous prefixes (matching multiple migrations) return a clear error. + - **Why**: Users copy short hashes from `migration status` output and should be able to use them directly. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts (L163–L193)](packages/1-framework/3-tooling/cli/src/commands/migration-plan.ts:163-193) + - **Tests**: + - [packages/1-framework/3-tooling/cli/test/commands/migration-plan.test.ts (L248–L388)](packages/1-framework/3-tooling/cli/test/commands/migration-plan.test.ts:248-388) — Prefix without scheme, prefix with scheme, ambiguous prefix rejection + +- **Removes `formatMigrationStatusOutput` and privatizes helpers**: `formatMigrationStatusOutput` (the old table formatter) is deleted from `migrations.ts`. `resolveDisplayChain` and `buildMigrationEntries` are made module-private (no longer exported). No behavior change — these were only consumed internally. 
+ - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/utils/formatters/migrations.ts](packages/1-framework/3-tooling/cli/src/utils/formatters/migrations.ts) + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts) — `buildMigrationEntries` and `resolveDisplayChain` now `function` (not `export function`) + +- **Replaces graph jargon in user-facing messages**: "edge(s)" → "migration(s)" in ref distance summaries. "marker" → "database marker" in no-path message. `MIGRATION.MARKER_DIVERGED` → `MIGRATION.MARKER_NOT_IN_GRAPH` with clearer messaging. No behavior change beyond wording. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/src/commands/migration-status.ts (L793–L808)](packages/1-framework/3-tooling/cli/src/commands/migration-status.ts:793-808) + +- **Adds `@dagrejs/dagre` dependency**: New production dependency for graph layout. + - **Implementation**: + - [packages/1-framework/3-tooling/cli/package.json](packages/1-framework/3-tooling/cli/package.json) — `"@dagrejs/dagre": "^3.0.0"` + +## Compatibility / migration / risk + +- **Breaking change to CLI output format**: `migration status` output changes from a table to an ASCII graph. Scripts parsing the old table format will break. The `--json` output is preserved and should be used for programmatic consumption. +- **JSON output shape expanded**: `MigrationStatusResult` gains internal fields (`graph`, `bundles`, `edgeStatuses`, `activeRefHash`, `activeRefName`, `diverged`) but they are stripped before JSON serialization. The `--json` output shape is backward-compatible except that `MigrationStatusEntry.status` now includes `'unreachable'` in addition to `'applied' | 'pending' | 'unknown'`. +- **Diagnostic code renamed**: `MIGRATION.MARKER_DIVERGED` → `MIGRATION.MARKER_NOT_IN_GRAPH`. Any automation matching on diagnostic codes will need updating. 
+- **`findLeaf` no longer called from `migration status`**: The `AMBIGUOUS_LEAF` error can no longer be triggered by `migration status`. This is intentional — divergent graphs now render successfully. +- **New dependency**: `@dagrejs/dagre ^3.0.0` added to the CLI package. Pure JS, no native modules. +- **Performance**: Dagre performance with 1000+ nodes is untested (spec targets <100ms). This is noted as an open item. + +## Known issues + +- **`deriveEdgeStatuses` uses graph path, not ledger, for applied status** (high): After `db update`, edges show as `✓ applied` even though no migrations were executed. Requires adding `readLedger()` to the control plane stack and threading `dirName`/`migrationId` through the runner. See [issue triage](projects/graph-based-migrations/issue-triage.md). +- **Cross-branch contract diagnostic gap** (medium): When `--ref staging` is used and the contract is reachable only from a different branch, no diagnostic fires. The detached contract node also renders on the wrong branch. +- **Unreachable vs. backward edge color overlap** (low): Both use magenta. These are different concepts and should be visually distinct. +- **`MigrationStatusResult` conflates internal and public shapes** (medium): The `--json` handler manually strips internal fields. A dedicated public shape type would provide compile-time safety. + +## Follow-ups + +- **User-facing language audit**: Deferred to a separate PR ([TML-2097](https://linear.app/prisma-company/issue/TML-2097)) — some internal terms ("spine", "leaf") may still appear in edge cases. +- **JSON output field names**: Exact shape agreed conceptually but not finalized. +- **`migration plan` needs optional `--db` support**: So `--from` defaults to the DB marker when online, preventing unusable migrations. +- **Integration/journey tests for `migration status`**: Added — 13 diagnostic journey tests in `migration-status-diagnostics.e2e.test.ts`. 
+ +## Non-goals / intentionally out of scope + +- **Interactive TUI**: No scrolling, selection, or keyboard navigation. +- **Graphviz/DOT/SVG export**: ASCII only; DOT renderer was deleted. +- **Graph editing/manipulation**: Read-only display. +- **Rewriting `findLeaf` / `findPath`**: These remain for `migration plan` and `migration apply`. + +## Sources + +- Linear: [TML-2100](https://linear.app/prisma-company/issue/TML-2100) +- Spec: [migration-status-graph-rendering.spec.md](projects/graph-based-migrations/specs/migration-status-graph-rendering.spec.md) +- Plan: [migration-status-graph-rendering-plan.md](projects/graph-based-migrations/plans/migration-status-graph-rendering-plan.md) +- Issue triage: [issue-triage.md](projects/graph-based-migrations/issue-triage.md) diff --git a/test/integration/test/cli-journeys/data-transform.e2e.test.ts b/test/integration/test/cli-journeys/data-transform.e2e.test.ts new file mode 100644 index 0000000000..53a28927ff --- /dev/null +++ b/test/integration/test/cli-journeys/data-transform.e2e.test.ts @@ -0,0 +1,163 @@ +/** + * Data Transform Authoring Surface (Journey: migration new → verify → ops) + * + * Tests the authoring pipeline end-to-end: + * 1. Set up a project with a base contract, plan + apply initial migration + * 2. Swap to additive contract (adds a nullable column) + * 3. Emit the new contract + * 4. migration new → scaffolds package with migration.ts + * 5. Fill in migration.ts with descriptors + dataTransform (raw_sql) + * 6. migration verify → evaluates TS, resolves descriptors, writes ops.json, attests + * 7. Inspect ops.json — verify the ops are correct + * 8. migration apply → executes ops including data transform + * 9. 
Verify data was transformed + */ + +import { readdirSync, readFileSync, writeFileSync } from 'node:fs'; +import { join } from 'pathe'; +import { describe, expect, it } from 'vitest'; +import { withTempDir } from '../utils/cli-test-helpers'; +import { + type JourneyContext, + runContractEmit, + runMigrationApply, + runMigrationNew, + runMigrationPlan, + runMigrationVerify, + setupJourney, + sql, + swapContract, + timeouts, + useDevDatabase, +} from '../utils/journey-test-helpers'; + +withTempDir(({ createTempDir }) => { + describe('Journey: Data Transform Authoring', () => { + const db = useDevDatabase(); + + it( + 'migration new → fill migration.ts → verify → apply → data correct', + async () => { + const ctx: JourneyContext = setupJourney({ + connectionString: db.connectionString, + createTempDir, + }); + + // Step 1: Emit base contract, plan, apply (creates user table with id + email) + const emit0 = await runContractEmit(ctx); + expect(emit0.exitCode, `emit base: ${emit0.stderr}`).toBe(0); + + const plan0 = await runMigrationPlan(ctx, ['--name', 'initial']); + expect(plan0.exitCode, `plan initial: ${plan0.stderr}`).toBe(0); + + const apply0 = await runMigrationApply(ctx); + expect(apply0.exitCode, `apply initial: ${apply0.stderr}`).toBe(0); + + // Step 2: Insert test data + await sql( + db.connectionString, + `INSERT INTO "public"."user" (id, email) VALUES (1, 'alice@example.com'), (2, 'bob@test.org')`, + ); + + // Step 3: Swap to additive contract (adds nullable 'name' column) + swapContract(ctx, 'contract-additive'); + const emit1 = await runContractEmit(ctx); + expect(emit1.exitCode, `emit additive: ${emit1.stderr}`).toBe(0); + + // Step 4: migration new → scaffolds package + const newResult = await runMigrationNew(ctx, ['--name', 'add-name']); + expect(newResult.exitCode, `migration new: ${newResult.stdout}\n${newResult.stderr}`).toBe( + 0, + ); + + // Find the scaffolded migration directory + const migrationsDir = join(ctx.testDir, 'migrations'); + const 
migrationDirs = readdirSync(migrationsDir) + .filter((d) => d.includes('add_name')) + .sort(); + expect(migrationDirs.length, 'scaffolded migration dir exists').toBe(1); + const migrationDir = join(migrationsDir, migrationDirs[0]!); + + // Verify migration.ts was scaffolded + const migrationTsPath = join(migrationDir, 'migration.ts'); + const scaffoldContent = readFileSync(migrationTsPath, 'utf-8'); + expect(scaffoldContent).toContain('export default'); + + // Verify ops.json is empty (draft) + const opsBeforeVerify = JSON.parse(readFileSync(join(migrationDir, 'ops.json'), 'utf-8')); + expect(opsBeforeVerify).toEqual([]); + + // Verify manifest is draft (migrationId: null) + const manifestBefore = JSON.parse( + readFileSync(join(migrationDir, 'migration.json'), 'utf-8'), + ); + expect(manifestBefore.migrationId).toBeNull(); + + // Step 5: Fill in migration.ts with descriptors using typed query builder + const migrationTs = ` +import { createBuilders } from "@prisma-next/target-postgres/migration-builders" + +const { addColumn, dataTransform } = createBuilders() + +export default () => [ + addColumn("user", "name"), + dataTransform("backfill-user-name", { + check: false, + run: (db) => db.user.update({ name: "unknown" }).where((f, fns) => fns.eq(f.name, null)), + }), +] +`; + writeFileSync(migrationTsPath, migrationTs); + + // Step 6: migration verify → evaluates TS, resolves, attests + const verify = await runMigrationVerify(ctx, [ + '--dir', + migrationDir, + '--config', + ctx.configPath, + ]); + expect(verify.exitCode, `verify: ${verify.stdout}\n${verify.stderr}`).toBe(0); + + // Step 7: Inspect ops.json after verify + const opsAfterVerify = JSON.parse(readFileSync(join(migrationDir, 'ops.json'), 'utf-8')); + expect(opsAfterVerify.length).toBeGreaterThan(0); + + // Should have an addColumn op + const addColumnOp = opsAfterVerify.find( + (op: { id: string }) => op.id === 'column.user.name', + ); + expect(addColumnOp, 'addColumn op exists').toBeDefined(); + 
expect(addColumnOp.operationClass).toBe('additive'); + + // Should have a data transform op + const dataTransformOp = opsAfterVerify.find( + (op: { id: string }) => op.id === 'data_migration.backfill-user-name', + ); + expect(dataTransformOp, 'dataTransform op exists').toBeDefined(); + expect(dataTransformOp.operationClass).toBe('data'); + + // Manifest should now be attested + const manifestAfter = JSON.parse( + readFileSync(join(migrationDir, 'migration.json'), 'utf-8'), + ); + expect(manifestAfter.migrationId).not.toBeNull(); + expect(manifestAfter.migrationId).toMatch(/^sha256:/); + + // Step 8: migration apply → executes ops + const apply1 = await runMigrationApply(ctx); + expect(apply1.exitCode, `apply: ${apply1.stdout}\n${apply1.stderr}`).toBe(0); + + // Step 9: Verify data was transformed + const result = await sql( + db.connectionString, + `SELECT id, email, "name" FROM "public"."user" ORDER BY id`, + ); + expect(result.rows).toEqual([ + { id: 1, email: 'alice@example.com', name: 'unknown' }, + { id: 2, email: 'bob@test.org', name: 'unknown' }, + ]); + }, + timeouts.spinUpPpgDev, + ); + }); +}); diff --git a/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts b/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts index 8128fbaf47..bd951eb825 100644 --- a/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts +++ b/test/integration/test/cli-journeys/migration-apply-edge-cases.e2e.test.ts @@ -72,14 +72,14 @@ withTempDir(({ createTempDir }) => { const db = useDevDatabase(); it( - 'resumes from last successful migration after empty-table precheck failure', + 'resumes from last successful migration after unique constraint violation', async () => { const ctx: JourneyContext = setupJourney({ connectionString: db.connectionString, createTempDir, }); - // Plan and apply initial migration (creates user table) + // Plan and apply initial migration (creates user table with id + email) const emit0 = await 
runContractEmit(ctx); expect(emit0.exitCode, 'emit base').toBe(0); const plan0 = await runMigrationPlan(ctx, ['--name', 'initial']); @@ -92,24 +92,22 @@ withTempDir(({ createTempDir }) => { ); expect(firstResult.migrationsApplied, 'applied 1').toBe(1); - // Insert data so a NOT NULL column addition will fail + // Insert rows with duplicate emails await sql( db.connectionString, - `INSERT INTO "user" (id, email) VALUES (1, 'user@example.com')`, + `INSERT INTO "user" (id, email) VALUES (1, 'dup@example.com'), (2, 'dup@example.com')`, ); - // Plan a migration that adds a non-nullable column (will fail on existing rows) - swapContract(ctx, 'contract-additive-required'); + // Plan migration that adds a unique constraint on email + swapContract(ctx, 'contract-unique-email'); const emit1 = await runContractEmit(ctx); - expect(emit1.exitCode, 'emit additive-required').toBe(0); - const plan1 = await runMigrationPlan(ctx, ['--name', 'add-required-name']); - expect(plan1.exitCode, 'plan add-required-name').toBe(0); + expect(emit1.exitCode, 'emit unique-email').toBe(0); + const plan1 = await runMigrationPlan(ctx, ['--name', 'add-unique-email']); + expect(plan1.exitCode, 'plan add-unique-email').toBe(0); - // Apply fails because the planner's empty-table precheck rejects adding - // a NOT NULL + UNIQUE column to a non-empty table (temporary default - // strategy is disabled when the column has a UNIQUE constraint). 
+ // Apply fails because duplicate emails violate the unique constraint const applyFail = await runMigrationApply(ctx, ['--json']); - expect(applyFail.exitCode, 'apply fails on non-empty table precheck').toBe(1); + expect(applyFail.exitCode, 'apply fails on duplicate key').toBe(1); // Marker stays at the first migration's target hash const marker = await sql( @@ -121,9 +119,13 @@ withTempDir(({ createTempDir }) => { firstResult.markerHash, ); - // Fix: remove conflicting data, then resume - await sql(db.connectionString, 'DELETE FROM "user"'); + // Fix: deduplicate emails + await sql( + db.connectionString, + `UPDATE "user" SET email = 'unique@example.com' WHERE id = 2`, + ); + // Resume: apply succeeds now that duplicates are resolved const applyResume = await runMigrationApply(ctx, ['--json']); expect(applyResume.exitCode, 'resume succeeds').toBe(0); diff --git a/test/integration/test/cli.migration-apply.e2e.test.ts b/test/integration/test/cli.migration-apply.e2e.test.ts index 96b8e65d3e..0dfc23bcf7 100644 --- a/test/integration/test/cli.migration-apply.e2e.test.ts +++ b/test/integration/test/cli.migration-apply.e2e.test.ts @@ -314,19 +314,18 @@ withTempDir(({ createTempDir }) => { const firstApply = JSON.parse(consoleOutput.join('\n').trim()) as MigrationApplyResult; expect(firstApply.migrationsApplied).toBe(1); - // Insert data so a later NOT NULL column addition will fail. + // Insert rows with duplicate emails so a unique constraint will fail. await withClient(connectionString, async (client) => { - await client.query(`INSERT INTO "user" (id, email) VALUES (1, 'user@example.com')`); + await client.query( + `INSERT INTO "user" (id, email) VALUES (1, 'dup@example.com'), (2, 'dup@example.com')`, + ); }); - // Plan second migration that adds a NOT NULL + UNIQUE column. 
- // The UNIQUE constraint prevents the planner's temporary-default - // strategy (a uniform default would violate uniqueness), so the - // planner falls back to an empty-table precheck that fails here. + // Plan second migration that adds a unique constraint on email. replaceInFileOrThrow( contractPath!, ' email: field.column(textColumn),\n', - ` email: field.column(textColumn),\n required_name: field.column(textColumn).unique({ name: 'user_required_name_key' }),\n`, + ` email: field.column(textColumn).unique({ name: 'user_email_key' }),\n`, ); await emitContract(testDir, configPath); @@ -334,12 +333,11 @@ withTempDir(({ createTempDir }) => { '--config', configPath, '--name', - 'add_required_name', + 'add_unique_email', '--no-color', ]); - // Apply fails: the empty-table precheck rejects adding a NOT NULL + UNIQUE - // column to a non-empty table. + // Apply fails: duplicate emails violate the unique constraint. consoleOutput.length = 0; let failed = false; try { @@ -349,9 +347,6 @@ withTempDir(({ createTempDir }) => { } expect(failed).toBe(true); expect(getExitCode()).toBe(1); - const errorOutput = stripAnsi(consoleOutput.join('\n')); - expect(errorOutput).toContain('failed during precheck'); - expect(errorOutput).toContain('is empty before adding NOT NULL column'); // Marker must remain at the first migration hash (resume point). const migrationsDir = join(testDir, 'migrations'); @@ -371,9 +366,9 @@ withTempDir(({ createTempDir }) => { expect(marker.rows[0]?.core_hash).toBe(firstMigration!.manifest.to); }); - // Make second migration runnable, then re-run apply; it should resume from marker. + // Fix: deduplicate emails, then re-run apply; it should resume from marker. 
await withClient(connectionString, async (client) => { - await client.query('DELETE FROM "user"'); + await client.query(`UPDATE "user" SET email = 'unique@example.com' WHERE id = 2`); }); consoleOutput.length = 0; diff --git a/test/integration/test/fixtures/cli/cli-e2e-test-app/fixtures/cli-journeys/contract-unique-email.ts b/test/integration/test/fixtures/cli/cli-e2e-test-app/fixtures/cli-journeys/contract-unique-email.ts new file mode 100644 index 0000000000..d08b567fa8 --- /dev/null +++ b/test/integration/test/fixtures/cli/cli-e2e-test-app/fixtures/cli-journeys/contract-unique-email.ts @@ -0,0 +1,17 @@ +import { int4Column, textColumn } from '@prisma-next/adapter-postgres/column-types'; +import sqlFamily from '@prisma-next/family-sql/pack'; +import { defineContract, field, model } from '@prisma-next/sql-contract-ts/contract-builder'; +import postgresPack from '@prisma-next/target-postgres/pack'; + +export const contract = defineContract({ + family: sqlFamily, + target: postgresPack, + models: { + User: model('User', { + fields: { + id: field.column(int4Column).id(), + email: field.column(textColumn).unique({ name: 'user_email_key' }), + }, + }).sql({ table: 'user' }), + }, +}); diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/distinct.test.ts b/test/integration/test/sql-builder/distinct.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/distinct.test.ts rename to test/integration/test/sql-builder/distinct.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/execution.test.ts b/test/integration/test/sql-builder/execution.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/execution.test.ts rename to test/integration/test/sql-builder/execution.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/extension-functions.test.ts b/test/integration/test/sql-builder/extension-functions.test.ts similarity index 100% rename from 
packages/2-sql/4-lanes/sql-builder/test/integration/extension-functions.test.ts rename to test/integration/test/sql-builder/extension-functions.test.ts diff --git a/test/integration/test/sql-builder/fixtures/contract.ts b/test/integration/test/sql-builder/fixtures/contract.ts new file mode 100644 index 0000000000..7172c2e6ea --- /dev/null +++ b/test/integration/test/sql-builder/fixtures/contract.ts @@ -0,0 +1,95 @@ +import { int4Column, textColumn } from '@prisma-next/adapter-postgres/column-types'; +import { vectorColumn } from '@prisma-next/extension-pgvector/column-types'; +import pgvector from '@prisma-next/extension-pgvector/pack'; +import sqlFamily from '@prisma-next/family-sql/pack'; +import { uuidv4 } from '@prisma-next/ids'; +import { defineContract, field, model, rel } from '@prisma-next/sql-contract-ts/contract-builder'; +import postgresPack from '@prisma-next/target-postgres/pack'; + +const UserBase = model('User', { + fields: { + id: field.column(int4Column).id(), + name: field.column(textColumn), + email: field.column(textColumn), + invitedById: field.column(int4Column).optional().column('invited_by_id'), + }, +}); + +const Post = model('Post', { + fields: { + id: field.column(int4Column).id(), + title: field.column(textColumn), + userId: field.column(int4Column).column('user_id'), + views: field.column(int4Column), + embedding: field.column(vectorColumn).optional(), + }, + relations: { + comments: rel.hasMany(() => Comment, { by: 'postId' }), + author: rel.belongsTo(UserBase, { from: 'userId', to: 'id' }), + }, +}).sql({ table: 'posts' }); + +const Comment = model('Comment', { + fields: { + id: field.column(int4Column).id(), + body: field.column(textColumn), + postId: field.column(int4Column).column('post_id'), + }, +}).sql({ table: 'comments' }); + +const Profile = model('Profile', { + fields: { + id: field.column(int4Column).id(), + userId: field.column(int4Column).column('user_id'), + bio: field.column(textColumn), + }, +}).sql({ table: 'profiles' 
}); + +const Article = model('Article', { + fields: { + id: field.generated(uuidv4()).id(), + title: field.column(textColumn), + }, +}).sql({ table: 'articles' }); + +const User = UserBase.relations({ + invitedUsers: rel.hasMany(() => UserBase, { by: 'invitedById' }), + invitedBy: rel.belongsTo(UserBase, { from: 'invitedById', to: 'id' }), + posts: rel.hasMany(() => Post, { by: 'userId' }), + profile: rel.hasOne(Profile, { by: 'userId' }), +}).sql({ table: 'users' }); + +export const contract = defineContract({ + family: sqlFamily, + target: postgresPack, + extensionPacks: { pgvector }, + capabilities: { + sql: { + lateral: true, + returning: true, + jsonAgg: true, + enums: true, + foreignKeys: true, + autoIndexesForeignKeys: false, + }, + postgres: { + partialIndex: true, + deferrableConstraints: true, + savepoints: true, + transactionalDDL: true, + distinctOn: true, + }, + pgvector: { + ivfflat: true, + hnsw: true, + vector: true, + }, + }, + models: { + User, + Post, + Comment, + Profile, + Article, + }, +}); diff --git a/test/integration/test/sql-builder/fixtures/generated/contract.d.ts b/test/integration/test/sql-builder/fixtures/generated/contract.d.ts new file mode 100644 index 0000000000..9b6f03223b --- /dev/null +++ b/test/integration/test/sql-builder/fixtures/generated/contract.d.ts @@ -0,0 +1,506 @@ +// ⚠️ GENERATED FILE - DO NOT EDIT +// This file is automatically generated by 'prisma-next contract emit'. 
+// To regenerate, run: prisma-next contract emit +import type { CodecTypes as PgTypes } from '@prisma-next/adapter-postgres/codec-types'; +import type { JsonValue } from '@prisma-next/adapter-postgres/codec-types'; +import type { Char } from '@prisma-next/adapter-postgres/codec-types'; +import type { Varchar } from '@prisma-next/adapter-postgres/codec-types'; +import type { Numeric } from '@prisma-next/adapter-postgres/codec-types'; +import type { Bit } from '@prisma-next/adapter-postgres/codec-types'; +import type { VarBit } from '@prisma-next/adapter-postgres/codec-types'; +import type { Timestamp } from '@prisma-next/adapter-postgres/codec-types'; +import type { Timestamptz } from '@prisma-next/adapter-postgres/codec-types'; +import type { Time } from '@prisma-next/adapter-postgres/codec-types'; +import type { Timetz } from '@prisma-next/adapter-postgres/codec-types'; +import type { Interval } from '@prisma-next/adapter-postgres/codec-types'; +import type { CodecTypes as PgVectorTypes } from '@prisma-next/extension-pgvector/codec-types'; +import type { Vector } from '@prisma-next/extension-pgvector/codec-types'; +import type { OperationTypes as PgVectorOperationTypes } from '@prisma-next/extension-pgvector/operation-types'; +import type { QueryOperationTypes as PgVectorQueryOperationTypes } from '@prisma-next/extension-pgvector/operation-types'; + +import type { + ContractWithTypeMaps, + TypeMaps as TypeMapsType, +} from '@prisma-next/sql-contract/types'; +import type { + Contract as ContractType, + ExecutionHashBase, + ProfileHashBase, + StorageHashBase, +} from '@prisma-next/contract/types'; + +export type StorageHash = + StorageHashBase<'sha256:16af8b9b7863e5312d0bea826c6e7391d5645a8a2c6a365aaf25f51fdcde7391'>; +export type ExecutionHash = + ExecutionHashBase<'sha256:4d09909b2e09a240919c201ce4a5e63c3a2ec70515932e145dccca82936d8be5'>; +export type ProfileHash = + ProfileHashBase<'sha256:03c90a412dcfe182a475d25eae8cbf5c56fba67009defe8713eaf0b7fbd66b13'>; + 
+export type CodecTypes = PgTypes & PgVectorTypes; +export type OperationTypes = PgVectorOperationTypes; +export type LaneCodecTypes = CodecTypes; +export type QueryOperationTypes = PgVectorQueryOperationTypes; +type DefaultLiteralValue = CodecId extends keyof CodecTypes + ? CodecTypes[CodecId]['output'] + : _Encoded; + +export type FieldOutputTypes = { + readonly Article: { readonly id: Char<36>; readonly title: CodecTypes['pg/text@1']['output'] }; + readonly Comment: { + readonly id: CodecTypes['pg/int4@1']['output']; + readonly body: CodecTypes['pg/text@1']['output']; + readonly postId: CodecTypes['pg/int4@1']['output']; + }; + readonly Post: { + readonly id: CodecTypes['pg/int4@1']['output']; + readonly title: CodecTypes['pg/text@1']['output']; + readonly userId: CodecTypes['pg/int4@1']['output']; + readonly views: CodecTypes['pg/int4@1']['output']; + readonly embedding: CodecTypes['pg/vector@1']['output'] | null; + }; + readonly Profile: { + readonly id: CodecTypes['pg/int4@1']['output']; + readonly userId: CodecTypes['pg/int4@1']['output']; + readonly bio: CodecTypes['pg/text@1']['output']; + }; + readonly User: { + readonly id: CodecTypes['pg/int4@1']['output']; + readonly name: CodecTypes['pg/text@1']['output']; + readonly email: CodecTypes['pg/text@1']['output']; + readonly invitedById: CodecTypes['pg/int4@1']['output'] | null; + }; +}; +export type TypeMaps = TypeMapsType< + CodecTypes, + OperationTypes, + QueryOperationTypes, + FieldOutputTypes +>; + +type ContractBase = ContractType< + { + readonly tables: { + readonly articles: { + columns: { + readonly id: { + readonly nativeType: 'character'; + readonly codecId: 'sql/char@1'; + readonly nullable: false; + readonly typeParams: { readonly length: 36 }; + }; + readonly title: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly 
[]; + }; + readonly comments: { + columns: { + readonly id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly body: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly post_id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + readonly posts: { + columns: { + readonly id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly title: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly user_id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly views: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly embedding: { + readonly nativeType: 'vector'; + readonly codecId: 'pg/vector@1'; + readonly nullable: true; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + readonly profiles: { + columns: { + readonly id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly user_id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly bio: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + readonly users: { + columns: { + readonly id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: false; + }; + readonly name: { + readonly 
nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly email: { + readonly nativeType: 'text'; + readonly codecId: 'pg/text@1'; + readonly nullable: false; + }; + readonly invited_by_id: { + readonly nativeType: 'int4'; + readonly codecId: 'pg/int4@1'; + readonly nullable: true; + }; + }; + primaryKey: { readonly columns: readonly ['id'] }; + uniques: readonly []; + indexes: readonly []; + foreignKeys: readonly []; + }; + }; + readonly types: Record; + readonly storageHash: StorageHash; + }, + { + readonly Article: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { + readonly kind: 'scalar'; + readonly codecId: 'sql/char@1'; + readonly typeParams: { readonly length: 36 }; + }; + }; + readonly title: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'articles'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly title: { readonly column: 'title' }; + }; + }; + }; + readonly Comment: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly body: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly postId: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'comments'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly body: { readonly column: 'body' }; + readonly postId: { readonly column: 'post_id' }; + }; + }; + }; + readonly Post: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly title: { + 
readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly userId: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly views: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly embedding: { + readonly nullable: true; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/vector@1' }; + }; + }; + readonly relations: { + readonly comments: { + readonly to: 'Comment'; + readonly cardinality: '1:N'; + readonly on: { + readonly localFields: readonly ['id']; + readonly targetFields: readonly ['postId']; + }; + }; + readonly author: { + readonly to: 'User'; + readonly cardinality: 'N:1'; + readonly on: { + readonly localFields: readonly ['userId']; + readonly targetFields: readonly ['id']; + }; + }; + }; + readonly storage: { + readonly table: 'posts'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly title: { readonly column: 'title' }; + readonly userId: { readonly column: 'user_id' }; + readonly views: { readonly column: 'views' }; + readonly embedding: { readonly column: 'embedding' }; + }; + }; + }; + readonly Profile: { + readonly fields: { + readonly id: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly userId: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly bio: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + }; + readonly relations: Record; + readonly storage: { + readonly table: 'profiles'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly userId: { readonly column: 'user_id' }; + readonly bio: { readonly column: 'bio' }; + }; + }; + }; + readonly User: { + readonly fields: { + readonly id: { + 
readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + readonly name: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly email: { + readonly nullable: false; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/text@1' }; + }; + readonly invitedById: { + readonly nullable: true; + readonly type: { readonly kind: 'scalar'; readonly codecId: 'pg/int4@1' }; + }; + }; + readonly relations: { + readonly invitedUsers: { + readonly to: 'User'; + readonly cardinality: '1:N'; + readonly on: { + readonly localFields: readonly ['id']; + readonly targetFields: readonly ['invitedById']; + }; + }; + readonly invitedBy: { + readonly to: 'User'; + readonly cardinality: 'N:1'; + readonly on: { + readonly localFields: readonly ['invitedById']; + readonly targetFields: readonly ['id']; + }; + }; + readonly posts: { + readonly to: 'Post'; + readonly cardinality: '1:N'; + readonly on: { + readonly localFields: readonly ['id']; + readonly targetFields: readonly ['userId']; + }; + }; + readonly profile: { + readonly to: 'Profile'; + readonly cardinality: '1:1'; + readonly on: { + readonly localFields: readonly ['id']; + readonly targetFields: readonly ['userId']; + }; + }; + }; + readonly storage: { + readonly table: 'users'; + readonly fields: { + readonly id: { readonly column: 'id' }; + readonly name: { readonly column: 'name' }; + readonly email: { readonly column: 'email' }; + readonly invitedById: { readonly column: 'invited_by_id' }; + }; + }; + }; + } +> & { + readonly target: 'postgres'; + readonly targetFamily: 'sql'; + readonly roots: { + readonly users: 'User'; + readonly posts: 'Post'; + readonly comments: 'Comment'; + readonly profiles: 'Profile'; + readonly articles: 'Article'; + }; + readonly capabilities: { + readonly pgvector: { readonly hnsw: true; readonly ivfflat: true; readonly vector: true }; + readonly postgres: { + readonly 
deferrableConstraints: true; + readonly distinctOn: true; + readonly jsonAgg: true; + readonly lateral: true; + readonly limit: true; + readonly orderBy: true; + readonly partialIndex: true; + readonly 'pgvector/cosine': true; + readonly returning: true; + readonly savepoints: true; + readonly transactionalDDL: true; + }; + readonly sql: { + readonly autoIndexesForeignKeys: false; + readonly defaultInInsert: true; + readonly enums: true; + readonly foreignKeys: true; + readonly jsonAgg: true; + readonly lateral: true; + readonly returning: true; + }; + }; + readonly extensionPacks: { + readonly pgvector: { + readonly capabilities: { readonly postgres: { readonly 'pgvector/cosine': true } }; + readonly familyId: 'sql'; + readonly id: 'pgvector'; + readonly kind: 'extension'; + readonly targetId: 'postgres'; + readonly types: { + readonly codecTypes: { + readonly import: { + readonly alias: 'PgVectorTypes'; + readonly named: 'CodecTypes'; + readonly package: '@prisma-next/extension-pgvector/codec-types'; + }; + readonly typeImports: readonly [ + { + readonly alias: 'Vector'; + readonly named: 'Vector'; + readonly package: '@prisma-next/extension-pgvector/codec-types'; + }, + ]; + }; + readonly operationTypes: { + readonly import: { + readonly alias: 'PgVectorOperationTypes'; + readonly named: 'OperationTypes'; + readonly package: '@prisma-next/extension-pgvector/operation-types'; + }; + }; + readonly queryOperationTypes: { + readonly import: { + readonly alias: 'PgVectorQueryOperationTypes'; + readonly named: 'QueryOperationTypes'; + readonly package: '@prisma-next/extension-pgvector/operation-types'; + }; + }; + readonly storage: readonly [ + { + readonly familyId: 'sql'; + readonly nativeType: 'vector'; + readonly targetId: 'postgres'; + readonly typeId: 'pg/vector@1'; + }, + ]; + }; + readonly version: '0.0.1'; + }; + }; + readonly execution: { + readonly executionHash: ExecutionHash; + readonly mutations: { + readonly defaults: readonly [ + { + readonly ref: { 
readonly table: 'articles'; readonly column: 'id' }; + readonly onCreate: { readonly kind: 'generator'; readonly id: 'uuidv4' }; + }, + ]; + }; + }; + readonly meta: {}; + + readonly profileHash: ProfileHash; +}; + +export type Contract = ContractWithTypeMaps; + +export type Tables = Contract['storage']['tables']; +export type Models = Contract['models']; diff --git a/test/integration/test/sql-builder/fixtures/generated/contract.json b/test/integration/test/sql-builder/fixtures/generated/contract.json new file mode 100644 index 0000000000..dea476fbec --- /dev/null +++ b/test/integration/test/sql-builder/fixtures/generated/contract.json @@ -0,0 +1,568 @@ +{ + "schemaVersion": "1", + "targetFamily": "sql", + "target": "postgres", + "profileHash": "sha256:03c90a412dcfe182a475d25eae8cbf5c56fba67009defe8713eaf0b7fbd66b13", + "roots": { + "articles": "Article", + "comments": "Comment", + "posts": "Post", + "profiles": "Profile", + "users": "User" + }, + "models": { + "Article": { + "fields": { + "id": { + "nullable": false, + "type": { + "codecId": "sql/char@1", + "kind": "scalar", + "typeParams": { + "length": 36 + } + } + }, + "title": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "id": { + "column": "id" + }, + "title": { + "column": "title" + } + }, + "table": "articles" + } + }, + "Comment": { + "fields": { + "body": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + }, + "postId": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "body": { + "column": "body" + }, + "id": { + "column": "id" + }, + "postId": { + "column": "post_id" + } + }, + "table": "comments" + } + }, + "Post": { + "fields": { + "embedding": { + "nullable": true, + "type": { + 
"codecId": "pg/vector@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + }, + "title": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "userId": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + }, + "views": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + } + }, + "relations": { + "author": { + "cardinality": "N:1", + "on": { + "localFields": [ + "userId" + ], + "targetFields": [ + "id" + ] + }, + "to": "User" + }, + "comments": { + "cardinality": "1:N", + "on": { + "localFields": [ + "id" + ], + "targetFields": [ + "postId" + ] + }, + "to": "Comment" + } + }, + "storage": { + "fields": { + "embedding": { + "column": "embedding" + }, + "id": { + "column": "id" + }, + "title": { + "column": "title" + }, + "userId": { + "column": "user_id" + }, + "views": { + "column": "views" + } + }, + "table": "posts" + } + }, + "Profile": { + "fields": { + "bio": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + }, + "userId": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + } + }, + "relations": {}, + "storage": { + "fields": { + "bio": { + "column": "bio" + }, + "id": { + "column": "id" + }, + "userId": { + "column": "user_id" + } + }, + "table": "profiles" + } + }, + "User": { + "fields": { + "email": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + }, + "id": { + "nullable": false, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + }, + "invitedById": { + "nullable": true, + "type": { + "codecId": "pg/int4@1", + "kind": "scalar" + } + }, + "name": { + "nullable": false, + "type": { + "codecId": "pg/text@1", + "kind": "scalar" + } + } + }, + "relations": { + "invitedBy": { 
+ "cardinality": "N:1", + "on": { + "localFields": [ + "invitedById" + ], + "targetFields": [ + "id" + ] + }, + "to": "User" + }, + "invitedUsers": { + "cardinality": "1:N", + "on": { + "localFields": [ + "id" + ], + "targetFields": [ + "invitedById" + ] + }, + "to": "User" + }, + "posts": { + "cardinality": "1:N", + "on": { + "localFields": [ + "id" + ], + "targetFields": [ + "userId" + ] + }, + "to": "Post" + }, + "profile": { + "cardinality": "1:1", + "on": { + "localFields": [ + "id" + ], + "targetFields": [ + "userId" + ] + }, + "to": "Profile" + } + }, + "storage": { + "fields": { + "email": { + "column": "email" + }, + "id": { + "column": "id" + }, + "invitedById": { + "column": "invited_by_id" + }, + "name": { + "column": "name" + } + }, + "table": "users" + } + } + }, + "storage": { + "storageHash": "sha256:16af8b9b7863e5312d0bea826c6e7391d5645a8a2c6a365aaf25f51fdcde7391", + "tables": { + "articles": { + "columns": { + "id": { + "codecId": "sql/char@1", + "nativeType": "character", + "nullable": false, + "typeParams": { + "length": 36 + } + }, + "title": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + }, + "comments": { + "columns": { + "body": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + }, + "post_id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + }, + "posts": { + "columns": { + "embedding": { + "codecId": "pg/vector@1", + "nativeType": "vector", + "nullable": true + }, + "id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + }, + "title": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "user_id": { + "codecId": 
"pg/int4@1", + "nativeType": "int4", + "nullable": false + }, + "views": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + }, + "profiles": { + "columns": { + "bio": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + }, + "user_id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + }, + "users": { + "columns": { + "email": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + }, + "id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": false + }, + "invited_by_id": { + "codecId": "pg/int4@1", + "nativeType": "int4", + "nullable": true + }, + "name": { + "codecId": "pg/text@1", + "nativeType": "text", + "nullable": false + } + }, + "foreignKeys": [], + "indexes": [], + "primaryKey": { + "columns": [ + "id" + ] + }, + "uniques": [] + } + } + }, + "execution": { + "executionHash": "sha256:4d09909b2e09a240919c201ce4a5e63c3a2ec70515932e145dccca82936d8be5", + "mutations": { + "defaults": [ + { + "onCreate": { + "id": "uuidv4", + "kind": "generator" + }, + "ref": { + "column": "id", + "table": "articles" + } + } + ] + } + }, + "capabilities": { + "pgvector": { + "hnsw": true, + "ivfflat": true, + "vector": true + }, + "postgres": { + "deferrableConstraints": true, + "distinctOn": true, + "jsonAgg": true, + "lateral": true, + "limit": true, + "orderBy": true, + "partialIndex": true, + "pgvector/cosine": true, + "returning": true, + "savepoints": true, + "transactionalDDL": true + }, + "sql": { + "defaultInInsert": true, + "enums": true, + "foreignKeys": true, + "jsonAgg": true, + "lateral": true, + "returning": true + } + }, + "extensionPacks": { + "pgvector": { + 
"capabilities": { + "postgres": { + "pgvector/cosine": true + } + }, + "familyId": "sql", + "id": "pgvector", + "kind": "extension", + "targetId": "postgres", + "types": { + "codecTypes": { + "import": { + "alias": "PgVectorTypes", + "named": "CodecTypes", + "package": "@prisma-next/extension-pgvector/codec-types" + }, + "typeImports": [ + { + "alias": "Vector", + "named": "Vector", + "package": "@prisma-next/extension-pgvector/codec-types" + } + ] + }, + "operationTypes": { + "import": { + "alias": "PgVectorOperationTypes", + "named": "OperationTypes", + "package": "@prisma-next/extension-pgvector/operation-types" + } + }, + "queryOperationTypes": { + "import": { + "alias": "PgVectorQueryOperationTypes", + "named": "QueryOperationTypes", + "package": "@prisma-next/extension-pgvector/operation-types" + } + }, + "storage": [ + { + "familyId": "sql", + "nativeType": "vector", + "targetId": "postgres", + "typeId": "pg/vector@1" + } + ] + }, + "version": "0.0.1" + } + }, + "meta": {}, + "_generated": { + "warning": "⚠️ GENERATED FILE - DO NOT EDIT", + "message": "This file is automatically generated by \"prisma-next contract emit\".", + "regenerate": "To regenerate, run: prisma-next contract emit" + } +} \ No newline at end of file diff --git a/packages/2-sql/4-lanes/sql-builder/test/fixtures/prisma-next.config.ts b/test/integration/test/sql-builder/fixtures/prisma-next.config.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/fixtures/prisma-next.config.ts rename to test/integration/test/sql-builder/fixtures/prisma-next.config.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/group-by.test.ts b/test/integration/test/sql-builder/group-by.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/group-by.test.ts rename to test/integration/test/sql-builder/group-by.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/join.test.ts 
b/test/integration/test/sql-builder/join.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/join.test.ts rename to test/integration/test/sql-builder/join.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/mutation-defaults.test.ts b/test/integration/test/sql-builder/mutation-defaults.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/mutation-defaults.test.ts rename to test/integration/test/sql-builder/mutation-defaults.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/mutation.test.ts b/test/integration/test/sql-builder/mutation.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/mutation.test.ts rename to test/integration/test/sql-builder/mutation.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/order-by.test.ts b/test/integration/test/sql-builder/order-by.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/order-by.test.ts rename to test/integration/test/sql-builder/order-by.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/pagination.test.ts b/test/integration/test/sql-builder/pagination.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/pagination.test.ts rename to test/integration/test/sql-builder/pagination.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/select.test.ts b/test/integration/test/sql-builder/select.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/select.test.ts rename to test/integration/test/sql-builder/select.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/setup.ts b/test/integration/test/sql-builder/setup.ts similarity index 96% rename from packages/2-sql/4-lanes/sql-builder/test/integration/setup.ts rename to 
test/integration/test/sql-builder/setup.ts index 1a5e3c7c43..2218456c0a 100644 --- a/packages/2-sql/4-lanes/sql-builder/test/integration/setup.ts +++ b/test/integration/test/sql-builder/setup.ts @@ -3,6 +3,7 @@ import postgresDriver from '@prisma-next/driver-postgres/runtime'; import pgvector from '@prisma-next/extension-pgvector/runtime'; import { emptyCodecLookup } from '@prisma-next/framework-components/codec'; import { instantiateExecutionStack } from '@prisma-next/framework-components/execution'; +import { sql } from '@prisma-next/sql-builder/runtime'; import { validateContract } from '@prisma-next/sql-contract/validate'; import type { ExecutionContext } from '@prisma-next/sql-relational-core/query-lane-context'; import { @@ -16,9 +17,8 @@ import postgresTarget from '@prisma-next/target-postgres/runtime'; import { createDevDatabase, timeouts } from '@prisma-next/test-utils'; import { Client } from 'pg'; import { afterAll, beforeAll } from 'vitest'; -import { sql } from '../../src/runtime/sql'; -import { contract } from '../fixtures/contract'; -import type { Contract } from '../fixtures/generated/contract'; +import { contract } from './fixtures/contract'; +import type { Contract } from './fixtures/generated/contract'; export { timeouts }; diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/subquery.test.ts b/test/integration/test/sql-builder/subquery.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/subquery.test.ts rename to test/integration/test/sql-builder/subquery.test.ts diff --git a/packages/2-sql/4-lanes/sql-builder/test/integration/where.test.ts b/test/integration/test/sql-builder/where.test.ts similarity index 100% rename from packages/2-sql/4-lanes/sql-builder/test/integration/where.test.ts rename to test/integration/test/sql-builder/where.test.ts diff --git a/test/integration/test/utils/journey-test-helpers.ts b/test/integration/test/utils/journey-test-helpers.ts index 35f3348827..72dcca5a8c 
100644 --- a/test/integration/test/utils/journey-test-helpers.ts +++ b/test/integration/test/utils/journey-test-helpers.ts @@ -15,6 +15,7 @@ import { createDbSignCommand } from '@prisma-next/cli/commands/db-sign'; import { createDbUpdateCommand } from '@prisma-next/cli/commands/db-update'; import { createDbVerifyCommand } from '@prisma-next/cli/commands/db-verify'; import { createMigrationApplyCommand } from '@prisma-next/cli/commands/migration-apply'; +import { createMigrationNewCommand } from '@prisma-next/cli/commands/migration-new'; import { createMigrationPlanCommand } from '@prisma-next/cli/commands/migration-plan'; import { createMigrationRefCommand } from '@prisma-next/cli/commands/migration-ref'; import { createMigrationShowCommand } from '@prisma-next/cli/commands/migration-show'; @@ -175,6 +176,7 @@ export const contractFixtures = { 'contract-phone-bio': join(JOURNEY_FIXTURES_DIR, 'contract-phone-bio.ts'), 'contract-avatar': join(JOURNEY_FIXTURES_DIR, 'contract-avatar.ts'), 'contract-all': join(JOURNEY_FIXTURES_DIR, 'contract-all.ts'), + 'contract-unique-email': join(JOURNEY_FIXTURES_DIR, 'contract-unique-email.ts'), } as const; export type ContractVariant = keyof typeof contractFixtures; @@ -337,6 +339,13 @@ export async function runMigrationPlan( return runCommand(createMigrationPlanCommand(), ctx, extraArgs); } +export async function runMigrationNew( + ctx: JourneyContext, + extraArgs: readonly string[] = [], +): Promise { + return runCommand(createMigrationNewCommand(), ctx, extraArgs); +} + export async function runMigrationApply( ctx: JourneyContext, extraArgs: readonly string[] = [], @@ -362,7 +371,6 @@ export async function runMigrationVerify( ctx: JourneyContext, extraArgs: readonly string[] = [], ): Promise { - // migration verify doesn't support --config, use runCommandRaw return runCommandRaw(createMigrationVerifyCommand(), ctx.testDir, extraArgs); } diff --git a/turbo.json b/turbo.json index 74e99e228e..704f3a058e 100644 --- a/turbo.json +++ 
b/turbo.json @@ -40,6 +40,9 @@ }, "clean": { "cache": false + }, + "@prisma-next/sql-builder#emit": { + "dependsOn": ["@prisma-next/cli#build"] } } }