Alembic regenerates public tables FK when using a multi-tenant configuration. #1799
-
|
We are running a multi-tenancy configuration as follows. We now have the need to introduce some public tables. With this setup, everything works as expected, but the FKs of any public table are constantly dropped and regenerated.
public_metadata = MetaData(schema="public")
company_metadata = MetaData()


def get_metadata(schema: str) -> MetaData:
    """Return the MetaData collection that owns tables in *schema*.

    The explicit "public" schema gets its own MetaData; every other
    (tenant/company) schema shares the schema-less company metadata.
    """
    is_public = schema == public_metadata.schema
    return public_metadata if is_public else company_metadata
class CompanyBase(Base, DeclarativeBase):
    """Declarative base for per-tenant (company) tables.

    Bound to the schema-less ``company_metadata`` so tables resolve
    against whichever tenant schema is active at runtime.
    """

    __abstract__ = True
    metadata = company_metadata
class PublicBase(Base, DeclarativeBase):
    """Declarative base for shared tables living in the ``public`` schema."""

    __abstract__ = True
    metadata = public_metadata
import asyncio
from collections.abc import Iterable
from typing import Any
from alembic import context
from alembic.operations import MigrationScript
from alembic.runtime.migration import MigrationContext
from sqlalchemy import text
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import AsyncEngine
# Schemas this migration run operates on; callers may override via
# config.attributes. Defaults assume a single tenant schema plus "public".
# NOTE(review): `config`, `COMPANY_SCHEMA` and `PUBLIC_SCHEMA` are defined
# elsewhere in env.py — confirm they are in scope at this point.
TARGET_SCHEMAS: set[str] = config.attributes.get("TARGET_SCHEMAS", {"company_1", "public"})
# Expose the target tokens to script.py.mako, which reads them back with
# config.get_main_option("targets") to generate per-target stubs.
config.set_main_option("targets", ", ".join([COMPANY_SCHEMA, PUBLIC_SCHEMA]))
def include_object(object: Any, name: Any, type_: str, reflected: bool, compare_to: Any) -> bool:
    """Autogenerate filter: skip tables that exist only in the database.

    A reflected table with no counterpart in the target metadata
    (``compare_to is None``) would otherwise be emitted as a drop.
    Everything else is kept.
    """
    db_only_table = type_ == "table" and reflected and compare_to is None
    return not db_only_table
def get_target(schema: str) -> str:
    """Map a schema name to its migration target token.

    The public schema is its own target; every tenant schema collapses
    onto the single shared company target.
    """
    return schema if schema == PUBLIC_SCHEMA else COMPANY_SCHEMA
def run_migrations(connection: Connection) -> None:
    """Run all migrations for a given metadata/schema object."""
    # One configure/run cycle per schema, following the Alembic
    # "schema-level multi-tenancy" cookbook recipe.
    for schema in TARGET_SCHEMAS:
        target = get_target(schema)
        metadata = get_metadata(schema)
        # extensions are installed on public, we need to add it to the path
        # NOTE(review): schema names are interpolated directly into SQL —
        # safe only while TARGET_SCHEMAS is a trusted, hard-coded set.
        connection.execute(text(f"SET SEARCH_PATH to {schema}, {PUBLIC_SCHEMA}"))
        # Make reflection treat `schema` as the default schema so
        # unqualified names resolve there during autogenerate.
        connection.dialect.default_schema_name = schema
        context.configure(
            connection=connection,
            target_metadata=metadata,
            # Tokens consumed by script.py.mako to route diffs into the
            # matching upgrade_<target>/downgrade_<target> stubs.
            upgrade_token=f"{target}_upgrades",
            downgrade_token=f"{target}_downgrades",
            transaction_per_migration=True,
            include_object=include_object,
            # This ensures one version_table per schema
            version_table_schema=schema,
        )
        with context.begin_transaction():
            context.run_migrations(target=target)
async def run_async_migrations(engine: AsyncEngine) -> None:
    """Open a transaction on the async engine and run the sync migration body."""
    async with engine.begin() as connection:
        await connection.run_sync(run_migrations)
def run_migrations_online() -> None:
if (connectable := config.attributes.get("connection", None)) is None:
raise ValueError("No connection available")
if isinstance(connectable, AsyncEngine):
asyncio.run(run_async_migrations(engine=connectable))
elif isinstance(connectable, Connection):
run_migrations(connection=connectable)script.py.mako (for completion) <%! import re %>
from collections.abc import Sequence
import sqlalchemy as sa
from alembic import op
${imports if imports else ""}
# revision identifiers, used by Alembic.
# These ${...} placeholders are Mako interpolations filled in when the
# migration script is generated.
revision: str = ${repr(up_revision)}
down_revision: str | Sequence[str] | None = ${repr(down_revision)}
branch_labels: str | Sequence[str] | None = ${repr(branch_labels)}
depends_on: str | Sequence[str] | None = ${repr(depends_on)}
def upgrade(target: str) -> None:
    """Dispatch to ``upgrade_<target>``; no-op if that stub does not exist."""
    handler = globals().get("upgrade_%s" % target, lambda: None)
    handler()
def downgrade(target: str) -> None:
    """Dispatch to ``downgrade_<target>``; no-op if that stub does not exist."""
    handler = globals().get("downgrade_%s" % target, lambda: None)
    handler()
<% targets = config.get_main_option("targets") %>
## Generate one upgrade/downgrade stub pair per target token. The token
## list was written into the config as a comma-separated string by env.py;
## the matching "<target>_upgrades"/"<target>_downgrades" render tokens
## are filled by context.configure(upgrade_token=..., downgrade_token=...).
% for target in re.split(r',\s*', targets):
def upgrade_${target}() -> None:
    ${context.get("%s_upgrades" % target, "pass")}


def downgrade_${target}() -> None:
    ${context.get("%s_downgrades" % target, "pass")}
% endfor

I can try to construct an MRE, but I would first like to know whether this is expected behaviour or simply not supported. |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 6 replies
-
|
Hi — if you are messing around with search_path and all that, you may need to take additional steps to ensure only the objects you expect are considered. As to the question "is this expected", in general the answer is "yes". The way that SQLAlchemy interprets PG foreign keys is described in depth at https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#postgresql-schema-reflection ; I would recommend stepping through that document and comparing it to what's happening in your code to get a full picture. It looks like you are using some variant of the recipe at https://alembic.sqlalchemy.org/en/latest/cookbook.html#rudimental-schema-level-multi-tenancy-for-postgresql-mysql-other-databases , which, it should be noted, is an interim recipe, as Alembic does not have out-of-the-box support for this use case. From there you can affect how autogenerate proceeds using three distinct hooks: the include_object() hook I see you're using; the include_name hook, which can exclude whole schemas; and the most fine-grained hook, the rewriter, where — if these diffs follow a predictable pattern — you can filter them all out at the end. |
Beta Was this translation helpful? Give feedback.
hi -
if you are messing around with search_path and all that you may need to take additional steps to ensure only the objects you expect are considered. as to the question "is this expected", in general the answer is "yes", the way that SQLAlchemy interprets PG foreign keys is described in depth at https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#postgresql-schema-reflection , I would recommend stepping through that document and comparing to what's happening in your code to get a full picture.
It looks like you are using some variant of the recipe at https://alembic.sqlalchemy.org/en/latest/cookbook.html#rudimental-schema-level-multi-tenancy-for-postgresql-mysql-other-databases …