Alembic autogenerate support for creation, alteration and deletion of enums
Alembic will now automatically:
- Create enums that are not yet present in the postgres schema
- Remove/add/alter enum values
- Reorder enum values
- Delete unused enums from the schema
If you are curious about alternatives to this library and the reasons it exists, see alternatives and motivation.
Install the library:

```
pip install alembic-postgresql-enum
```
Add the line:

```python
# env.py
import alembic_postgresql_enum

...
```

to the top of your migrations/env.py file. This import will affect newly generated migrations.
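For orientation, here is a sketch of what the top of a typical migrations/env.py looks like with the import added; the surrounding imports are the ones a stock `alembic init` produces and may differ in your project:

```python
# migrations/env.py
from logging.config import fileConfig

from alembic import context
from sqlalchemy import engine_from_config, pool

# Importing the package is enough: the import itself hooks the extension
# into Alembic's autogenerate, so newly generated migrations pick it up.
import alembic_postgresql_enum

# ... the rest of the generated env.py (run_migrations_offline / run_migrations_online) stays unchanged
```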
To try it out, edit some enums in your schema and then run `alembic revision --autogenerate`. The sections below cover:
- Creation of enums
- Deletion of unreferenced enums
- Detection of enum values changes
- Partial index preservation
- Omitting managing enums
Given a model that declares a new enum:

```python
class MyEnum(enum.Enum):
    one = 1
    two = 2
    three = 3


class ExampleTable(BaseModel):
    test_field = Column(Integer, primary_key=True, autoincrement=False)
    enum_field = Column(postgresql.ENUM(MyEnum))
```
This code will generate the migration given below:
```python
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # this line is generated by our library
    sa.Enum('one', 'two', 'three', name='myenum').create(op.get_bind())
    op.create_table('example_table',
        sa.Column('test_field', sa.Integer(), nullable=False),
        # create_type=False argument is now present on postgresql.ENUM as the library takes care of enum creation
        sa.Column('enum_field', postgresql.ENUM('one', 'two', 'three', name='myenum', create_type=False), nullable=True),
        sa.PrimaryKeyConstraint('test_field')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # drop_table does not drop the enum by itself
    op.drop_table('example_table')
    # so it is dropped explicitly by the library
    sa.Enum('one', 'two', 'three', name='myenum').drop(op.get_bind())
    # ### end Alembic commands ###
```
The same applies when an enum column is added to an existing table:

```python
class MyEnum(enum.Enum):
    one = 1
    two = 2
    three = 3


class ExampleTable(BaseModel):
    test_field = Column(Integer, primary_key=True, autoincrement=False)
    # this column has just been added
    enum_field = Column(postgresql.ENUM(MyEnum))
```
This code will generate the migration given below:
```python
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # this line is generated by our library
    sa.Enum('one', 'two', 'three', name='myenum').create(op.get_bind())
    # create_type=False argument is now present on postgresql.ENUM as the library takes care of enum creation
    op.add_column('example_table',
                  sa.Column('enum_field', postgresql.ENUM('one', 'two', 'three', name='myenum', create_type=False), nullable=False))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('example_table', 'enum_field')
    # the enum is explicitly dropped as it is no longer used
    sa.Enum('one', 'two', 'three', name='myenum').drop(op.get_bind())
    # ### end Alembic commands ###
```
If an enum is defined in the postgres schema but all references to it are removed from the code, it will be removed automatically:
```python
class ExampleTable(BaseModel):
    test_field = Column(Integer, primary_key=True, autoincrement=False)
    # enum_field is removed from table
```
```python
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('example_table', 'enum_field')
    sa.Enum('one', 'two', 'four', name='myenum').drop(op.get_bind())
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    sa.Enum('one', 'two', 'four', name='myenum').create(op.get_bind())
    op.add_column('example_table',
                  sa.Column('enum_field', postgresql.ENUM('one', 'two', 'four', name='myenum', create_type=False),
                            autoincrement=False, nullable=True))
    # ### end Alembic commands ###
```
Detection of enum value changes, described below, can be disabled by turning off the `detect_enum_values_changes` configuration flag.
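As a minimal sketch, disabling it looks like this, using the `set_configuration` API described in the configuration section further down:

```python
import alembic_postgresql_enum

# Stop generating op.sync_enum_values calls; enum value changes will then
# have to be written into migrations by hand.
alembic_postgresql_enum.set_configuration(
    alembic_postgresql_enum.Config(
        detect_enum_values_changes=False,
    )
)
```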
If a new enum value is defined, a `sync_enum_values` function call will be added to the migration to account for it:
```python
class MyEnum(enum.Enum):
    one = 1
    two = 2
    three = 3
    four = 4  # New enum value
```
```python
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'three', 'four'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[],
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'three'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[],
    )
    # ### end Alembic commands ###
```
If an enum value is removed, that will also be detected:
```python
class MyEnum(enum.Enum):
    one = 1
    two = 2
    # three = 3 removed
```
```python
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[],
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'three'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[],
    )
    # ### end Alembic commands ###
```
When an enum value is renamed, you must edit the migration manually. Consider this rename:
```python
class MyEnum(enum.Enum):
    one = 1
    two = 2
    three = 3  # renamed from `tree`
```
This code will generate the following migration:
```python
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'three'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[],
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'tree'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[],
    )
    # ### end Alembic commands ###
```
This migration will cause problems for existing rows that reference `MyEnum`: rows still holding the old `tree` value have no counterpart in the new type. Adjust the migration as follows:
```python
def upgrade():
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'three'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[('tree', 'three')],
    )


def downgrade():
    op.sync_enum_values(
        enum_schema='public',
        enum_name='myenum',
        new_values=['one', 'two', 'tree'],
        affected_columns=[TableReference(table_schema='public', table_name='example_table', column_name='enum_field')],
        enum_values_to_rename=[('three', 'tree')],
    )
```
Do not forget to swap the old and new values for the downgrade.
All defaults in postgres that use the renamed value will be updated automatically as well.
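For illustration only (a hypothetical column, not part of the examples above): if a column declares a server-side default that uses the old value, the default stored in postgres is rewritten to the new value by the same migration.

```python
# Hypothetical model illustrating the statement above.
class ExampleTable(BaseModel):
    test_field = Column(Integer, primary_key=True, autoincrement=False)
    # Postgres stores this default as 'tree'::myenum; after the rename
    # migration above it is rewritten to 'three'::myenum automatically.
    enum_field = Column(postgresql.ENUM(MyEnum), server_default='tree')
```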
When enum values are modified, partial indexes that reference the enum type are preserved by dropping and recreating them. This is particularly important for indexes with WHERE clauses that use enum comparisons. Depending on the size and complexity of the index, this may affect the speed and locking behaviour of the schema migration.
Note: to support alembic's offline mode, partial index detection happens at migration generation time. The detected indexes are then passed to the migration as a parameter, so offline migrations can execute without database access.
Consider a table with a partial unique index:
```python
class UserStatus(enum.Enum):
    active = "active"
    deleted = "deleted"


class User(Base):
    __tablename__ = "user"

    id = Column(Integer, primary_key=True)
    username = Column(String, nullable=False)
    status = Column(postgresql.ENUM(UserStatus))

    __table_args__ = (
        Index(
            "uq_user_username",
            "username",
            unique=True,
            postgresql_where=(text("status != 'deleted'")),
        ),
    )
```
When you add a new enum value (e.g., `pending`), the library will:
- Detect any indexes that reference the enum type in their WHERE clauses (during migration generation)
- Temporarily drop these indexes before modifying the enum
- Recreate the indexes with their original definitions after the enum modification is complete
In the generated migration, indexes are included as a parameter:
```python
from alembic_postgresql_enum.sql_commands.indexes import TableIndex

op.sync_enum_values(
    enum_schema='public',
    enum_name='userstatus',
    new_values=['active', 'pending', 'deleted'],
    affected_columns=[TableReference(...)],
    enum_values_to_rename=[],
    indexes_to_recreate=[
        TableIndex(
            name='uq_user_username',
            definition="CREATE UNIQUE INDEX uq_user_username ON users USING btree (username) WHERE (status <> 'deleted'::userstatus)",
        ),
    ],
)
```
This ensures that partial indexes like `WHERE status != 'deleted'` continue to work correctly after enum modifications, without manual intervention.
This applies to:
- Partial indexes with WHERE clauses referencing the enum
- Unique constraints with partial conditions
- Any index using enum comparisons (`=`, `!=`, `IN`, etc.)

When enum values are renamed (not dropped), the index definitions are updated to use the new value names.
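For example, an index whose WHERE clause compares the enum column with IN would be handled the same way. This is a hypothetical index built on the User model above, not one from the library's documentation:

```python
from sqlalchemy import Index, text

# Hypothetical partial index on the User model above. Its WHERE clause
# compares the enum column, so it is dropped and recreated around enum changes.
active_username_idx = Index(
    "ix_user_active_username",
    User.username,
    postgresql_where=text("status IN ('active')"),
)
```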
When an enum value referenced in a partial index is being dropped, the library will detect this and provide a clear error message:
```
ERROR: Cannot drop enum value(s) 'deleted' because they are referenced in partial index 'idx_users'
Index definition: CREATE INDEX idx_users ON users WHERE (status != 'deleted'::user_status)
```
To resolve this issue, do one of the following:
1. Use `enum_values_to_rename` to rename 'deleted' to another value instead of dropping it (see the sketch after this list)
2. Manually drop the index 'idx_users' before running this migration
3. Update your code so that these enum values are not dropped
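A sketch of option 1, sticking to the 'user_status' enum, 'users' table, and 'status' column from the error message above and renaming 'deleted' to an illustrative 'archived' value; the value names and affected columns are assumptions for this example, and `TableReference` is the same helper already used in the generated migration:

```python
# Instead of dropping 'deleted', rename it; rows and the partial index
# definition are then rewritten to use the new value.
op.sync_enum_values(
    enum_schema='public',
    enum_name='user_status',
    new_values=['active', 'archived'],
    affected_columns=[TableReference(table_schema='public', table_name='users', column_name='status')],
    enum_values_to_rename=[('deleted', 'archived')],
)
```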
If the configured `include_name` function returns `False` for a given enum, that enum will not be managed.
```python
import alembic_postgresql_enum


def include_name(name: str) -> bool:
    return name not in ['enum-to-ignore', 'some-internal-enum']


alembic_postgresql_enum.set_configuration(
    alembic_postgresql_enum.Config(
        include_name=include_name,
    )
)
```
This feature is similar to the `include_name` hook that alembic provides for tables.
You can configure this extension to disable parts of it or to enable some feature flags. To do so, call the `set_configuration` function after the import:
```python
import alembic_postgresql_enum

alembic_postgresql_enum.set_configuration(
    alembic_postgresql_enum.Config(
        add_type_ignore=True,
    )
)
```
Available options:
- `add_type_ignore` (`False` by default) - flag that can be turned on to add `# type: ignore[attr-defined]` at the end of generated `op.sync_enum_values` calls. This is helpful if you are using a type checker such as mypy. The `type: ignore` is needed because there is no way to add a new function to alembic's existing `op` namespace.
- `include_name` (`lambda _: True` by default) - adds the ability to ignore enums by name, in a similar way to how alembic allows defining an `include_name` function. This property accepts a function that takes an enum name and returns whether that enum should be processed.
- `drop_unused_enums` (`True` by default) - feature flag that can be turned off to disable clean-up of undeclared enums.
- `detect_enum_values_changes` (`True` by default) - feature flag that can be turned off to disable generation of `op.sync_enum_values`.
- `force_dialect_support` (`False` by default) - if you are using one of the postgresql dialects, you can activate the library with this flag. WARNING: we do not guarantee the performance of our extension with this flag enabled.
- `ignore_enum_values_order` (`False` by default) - flag that can be turned on to ignore changes in enum value order, because, by default, value order matters in postgresql.
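The options can be combined in a single `set_configuration` call; a sketch using only the options documented above (the chosen values are purely illustrative):

```python
import alembic_postgresql_enum

alembic_postgresql_enum.set_configuration(
    alembic_postgresql_enum.Config(
        add_type_ignore=True,                                # append "# type: ignore[attr-defined]" to op.sync_enum_values calls
        include_name=lambda name: name != 'enum-to-ignore',  # skip this enum entirely
        drop_unused_enums=False,                             # keep enums that are no longer referenced in code
        detect_enum_values_changes=True,                     # default: still generate op.sync_enum_values
        ignore_enum_values_order=True,                       # do not generate migrations for reordered values
    )
)
```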