alembic/__init__.py

from os import path

__version__ = '0.6.9'

package_dir = path.abspath(path.dirname(__file__))

from . import op
from . import context


alembic/autogenerate.bak.py

"""Provide the 'autogenerate' feature which can produce migration operations
automatically."""

from alembic import util
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.util import OrderedSet
from sqlalchemy import schema as sa_schema, types as sqltypes
import re
from coloredlog import ColoredStreamHandler
import logging

log = logging.getLogger(__name__)

###################################################
# public

def compare_metadata(context, metadata):
    """Compare a database schema to that given in a
    :class:`~sqlalchemy.schema.MetaData` instance.

    The database connection is presented in the context of a
    :class:`.MigrationContext` object, which provides database
    connectivity as well as optional comparison functions to use for
    datatypes and server defaults - see the "autogenerate" arguments
    at :meth:`.EnvironmentContext.configure` for details on these.

    The return format is a list of "diff" directives, each representing
    individual differences::

        from alembic.migration import MigrationContext
        from alembic.autogenerate import compare_metadata
        from sqlalchemy.schema import SchemaItem
        from sqlalchemy.types import TypeEngine
        from sqlalchemy import (create_engine, MetaData, Column,
                Integer, String, Table)
        import pprint

        engine = create_engine("sqlite://")

        engine.execute('''
            create table foo (
                id integer not null primary key,
                old_data varchar,
                x integer
            )''')

        engine.execute('''
            create table bar (
                data varchar
            )''')

        metadata = MetaData()
        Table('foo', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', Integer),
            Column('x', Integer, nullable=False)
        )
        Table('bat', metadata,
            Column('info', String)
        )

        mc = MigrationContext.configure(engine.connect())

        diff = compare_metadata(mc, metadata)
        pprint.pprint(diff, indent=2, width=20)

    Output::

        [ ( 'add_table',
            Table('bat', MetaData(bind=None),
                Column('info', String(), table=<bat>), schema=None)),
          ( 'remove_table',
            Table(u'bar', MetaData(bind=None),
                Column(u'data', VARCHAR(), table=<bar>), schema=None)),
          ( 'add_column',
            None,
            'foo',
            Column('data', Integer(), table=<foo>)),
          ( 'remove_column',
            None,
            'foo',
            Column(u'old_data', VARCHAR(), table=None)),
          [ ( 'modify_nullable',
              None,
              'foo',
              u'x',
              { 'existing_server_default': None,
                'existing_type': INTEGER()},
              True,
              False)]]

    :param context: a :class:`.MigrationContext` instance.
    :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance.

    """
    autogen_context, connection = _autogen_context(context, None)
    diffs = []
    _produce_net_changes(connection, metadata, diffs, autogen_context)
    return diffs

###################################################
# top level

def _produce_migration_diffs(context, template_args, imports,
                             include_symbol=None,
                             include_schemas=False):
    opts = context.opts
    metadata = opts['target_metadata']
    include_symbol = opts.get('include_symbol', include_symbol)
    include_schemas = opts.get('include_schemas', include_schemas)
    if metadata is None:
        raise util.CommandError(
            "Can't proceed with --autogenerate option; environment "
            "script %s does not provide "
            "a MetaData object to the context."
            % (context.script.env_py_location))
    autogen_context, connection = _autogen_context(context, imports)

    diffs = []
    remove_tables = template_args['config'].get_main_option("remove_tables")
    # if remove_tables is '1', drop table statements will be generated
    _produce_net_changes(connection, metadata, diffs,
                         autogen_context, include_symbol,
                         include_schemas, remove_tables == '1')
    template_args[opts['upgrade_token']] = \
        _indent(_produce_upgrade_commands(diffs, autogen_context))
    template_args[opts['downgrade_token']] = \
        _indent(_produce_downgrade_commands(diffs, autogen_context))
    template_args['imports'] = "\n".join(sorted(imports))

def _autogen_context(context, imports):
    opts = context.opts
    connection = context.bind
    return {
        'imports': imports,
        'connection': connection,
        'dialect': connection.dialect,
        'context': context,
        'opts': opts
    }, connection

def _indent(text):
    text = "### commands auto generated by Alembic - "\
        "please adjust! ###\n" + text
    text += "\n### end Alembic commands ###"
    text = re.compile(r'^', re.M).sub("    ", text).strip()
    return text

###################################################
# walk structures

def _produce_net_changes(connection, metadata, diffs, autogen_context,
                         include_symbol=None,
                         include_schemas=False,
                         remove_tables=False):
    inspector = Inspector.from_engine(connection)
    # TODO: not hardcode alembic_version here ?
    conn_table_names = set()
    if include_schemas:
        schemas = set(inspector.get_schema_names())
        # replace default schema name with None
        schemas.discard("information_schema")
        # replace the "default" schema with None
        schemas.add(None)
        schemas.discard(connection.dialect.default_schema_name)
    else:
        schemas = [None]

    for s in schemas:
        tables = set(inspector.get_table_names(schema=s)).\
            difference(['alembic_version'])
        conn_table_names.update(zip([s] * len(tables), tables))

    metadata_table_names = OrderedSet([(table.schema, table.name)
                                       for table in metadata.sorted_tables])

    if include_symbol:
        conn_table_names = set((s, name)
                               for s, name in conn_table_names
                               if include_symbol(name, s))
        metadata_table_names = OrderedSet((s, name)
                                          for s, name in metadata_table_names
                                          if include_symbol(name, s))

    _compare_tables(conn_table_names, metadata_table_names,
                    inspector, metadata, diffs, autogen_context,
                    remove_tables)

def _compare_tables(conn_table_names, metadata_table_names,
                    inspector, metadata, diffs, autogen_context,
                    remove_tables):
    for s, tname in metadata_table_names.difference(conn_table_names):
        name = '%s.%s' % (s, tname) if s else tname
        if metadata.tables[tname].__mapping_only__:
            log.info("{{white|red:Skipped}} added table %r", name)
        else:
            diffs.append(("add_table", metadata.tables[name]))
            log.info("{{white|green:Detected}} added table %r", name)

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = '%s.%s' % (s, tname) if s else tname
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)
        if not exists:
            inspector.reflecttable(t, None)
        if remove_tables:
            diffs.append(("remove_table", t))
            log.info("{{white|green:Detected}} removed table %r", name)
        # else:
        #     log.info("{{white|red:Skipped}} removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    conn_column_info = dict(
        ((s, tname),
            dict(
                (rec["name"], rec)
                for rec in inspector.get_columns(tname, schema=s)
            )
        )
        for s, tname in existing_tables
    )

    for s, tname in sorted(existing_tables):
        name = '%s.%s' % (s, tname) if s else tname
        _compare_columns(s, tname,
            conn_column_info[(s, tname)],
metadata.tables[name], diffs, autogen_context, inspector) # TODO: # index add/drop # table constraints # sequences ################################################### # element comparison def _compare_columns(schema, tname, conn_table, metadata_table, diffs, autogen_context, inspector): name = '%s.%s' % (schema, tname) if schema else tname metadata_cols_by_name = dict((c.name, c) for c in metadata_table.c) conn_col_names = set(conn_table) metadata_col_names = set(metadata_cols_by_name) for cname in metadata_col_names.difference(conn_col_names): if not metadata_table.__mapping_only__: diffs.append( ("add_column", schema, tname, metadata_cols_by_name[cname]) ) log.info("{{white|green:Detected}} added column '%s.%s'", name, cname) else: log.info("{{white|red:Skipped}} added column '%s.%s'", name, cname) for cname in conn_col_names.difference(metadata_col_names): if not metadata_table.__mapping_only__: diffs.append( ("remove_column", schema, tname, sa_schema.Column( cname, conn_table[cname]['type'], nullable=conn_table[cname]['nullable'], server_default=conn_table[cname]['default'] )) ) log.info("{{white|green:Detected}} removed column '%s.%s'", name, cname) else: log.info("{{white|red:Skipped}} removed column '%s.%s'", name, cname) for colname in metadata_col_names.intersection(conn_col_names): metadata_col = metadata_cols_by_name[colname] conn_col = conn_table[colname] col_diff = [] _compare_type(schema, tname, colname, conn_col, metadata_col, col_diff, autogen_context ) _compare_nullable(schema, tname, colname, conn_col, metadata_col, col_diff, autogen_context ) _compare_server_default(schema, tname, colname, conn_col, metadata_col, col_diff, autogen_context ) if col_diff: diffs.append(col_diff) #compare index conn_indexes = inspector.get_indexes(tname) m_indexes = {} m_keys = set() c_indexes = {} c_keys = set() for i in metadata_table.indexes: m_indexes[i.name] = {'name':i.name, 'table':tname, 'unique':i.unique, 'column_names': [y.name for y in i.columns]} m_keys.add(i.name) for i in conn_indexes: c_indexes[i['name']] = {'name':i['name'], 'table':tname, 'unique':i['unique'], 'column_names': i['column_names']} c_keys.add(i['name']) diff_add = m_keys - c_keys if diff_add: for x in diff_add: if not metadata_table.__mapping_only__: diffs.append(("add_index", m_indexes[x])) log.info("{{white|green:Detected}} add index '%s on %s(%s)'" % (x, tname, ','.join(["%r" % y for y in m_indexes[x]['column_names']]))) else: log.info("{{white|red:Skipped}} add index '%s on %s(%s)'" % (x, tname, ','.join(["%r" % y for y in m_indexes[x]['column_names']]))) diff_del = c_keys - m_keys if diff_del: for x in diff_del: if not metadata_table.__mapping_only__: diffs.append(("remove_index", c_indexes[x])) log.info("{{white|green:Detected}} remove index '%s on %s'" % (x, tname)) else: log.info("{{white|red:Skipped}} remove index '%s on %s'" % (x, tname)) diff_change = m_keys & c_keys if diff_change: for x in diff_change: a = m_indexes[x] b = c_indexes[x] if a != b: if not metadata_table.__mapping_only__: diffs.append(("remove_index", b)) diffs.append(("add_index", a)) d = '' if a['unique'] != b['unique']: d += (' unique=%r' % b['unique']) + ' to ' + ('unique=%r' % a['unique']) if a['column_names'] != b['column_names']: d += ' columns %r to %r' % (b['column_names'], a['column_names']) if not metadata_table.__mapping_only__: log.info("{{white|green:Detected}} change index '%s on %s changes as: %s'" % (x, tname, d)) else: log.info("{{white|red:Skipped}} change index '%s on %s changes as: %s'" % (x, tname, d)) def 
_compare_nullable(schema, tname, cname, conn_col, metadata_col, diffs, autogen_context): conn_col_nullable = conn_col['nullable'] if conn_col_nullable is not metadata_col.nullable: if not metadata_col.table.__mapping_only__: diffs.append( ("modify_nullable", schema, tname, cname, { "existing_type": conn_col['type'], "existing_server_default": conn_col['default'], }, conn_col_nullable, metadata_col.nullable), ) log.info("{{white|green:Detected}} %s on column '%s.%s'", "NULL" if metadata_col.nullable else "NOT NULL", tname, cname ) else: log.info("{{white|red:Skipped}} %s on column '%s.%s'", "NULL" if metadata_col.nullable else "NOT NULL", tname, cname ) def _get_type(t): name = t.__class__.__name__ r = repr(t) if name.upper() == 'VARCHAR': r = '%s(length=%d)' % (name, t.length) elif name.upper() == 'CHAR': r = '%s(length=%d)' % (name, t.length) elif name.upper() == 'DECIMAL': r = '%s(precision=%d, scale=%d)' % ('Numeric', t.precision, t.scale) elif name.upper() == 'PICKLETYPE': r = '%s()' % 'BLOB' elif name.upper() == 'INTEGER': r = '%s()' % 'INTEGER' return r def _compare(c1, c2): r1 = _get_type(c1) r2 = _get_type(c2) if r1.upper() == 'BOOLEAN()' or r2.upper() == 'BOOLEAN()': return False if r1.upper() == 'MEDIUMTEXT()': return False else: return r1.upper() != r2.upper() def _compare_type(schema, tname, cname, conn_col, metadata_col, diffs, autogen_context): conn_type = conn_col['type'] metadata_type = metadata_col.type if conn_type._type_affinity is sqltypes.NullType: log.info("Couldn't determine database type " "for column '%s.%s'" % (tname, cname)) return if metadata_type._type_affinity is sqltypes.NullType: log.info("Column '%s.%s' has no type within " "the model; can't compare" % (tname, cname)) return #isdiff = autogen_context['context']._compare_type(conn_col, metadata_col) isdiff = _compare(conn_col['type'], metadata_col.type) if isdiff: if not metadata_col.table.__mapping_only__: diffs.append( ("modify_type", schema, tname, cname, { "existing_nullable": conn_col['nullable'], "existing_server_default": conn_col['default'], }, conn_type, metadata_type), ) log.info("{{white|green:Detected}} type change from %r to %r on '%s.%s'", conn_type, metadata_type, tname, cname ) else: log.info("{{white|red:Skipped}} type change from %r to %r on '%s.%s'", conn_type, metadata_type, tname, cname ) def _compare_server_default(schema, tname, cname, conn_col, metadata_col, diffs, autogen_context): metadata_default = metadata_col.server_default conn_col_default = conn_col['default'] if conn_col_default is None and metadata_default is None: return False rendered_metadata_default = _render_server_default( metadata_default, autogen_context) isdiff = autogen_context['context']._compare_server_default( conn_col, metadata_col, rendered_metadata_default ) if isdiff: if not metadata_col.table.__mapping_only__: conn_col_default = conn_col['default'] diffs.append( ("modify_default", schema, tname, cname, { "existing_nullable": conn_col['nullable'], "existing_type": conn_col['type'], }, conn_col_default, metadata_default), ) log.info("{{white|green:Detected}} server default %s changed on column '%s.%s'", rendered_metadata_default, tname, cname ) else: log.info("{{white|red:Skipped}} server default %s changed on column '%s.%s'", rendered_metadata_default, tname, cname ) ################################################### # produce command structure def _produce_upgrade_commands(diffs, autogen_context): buf = [] for diff in diffs: buf.append(_invoke_command("upgrade", diff, autogen_context)) if not buf: buf = 
["pass"] return "\n".join(buf) def _produce_downgrade_commands(diffs, autogen_context): buf = [] for diff in reversed(diffs): buf.append(_invoke_command("downgrade", diff, autogen_context)) if not buf: buf = ["pass"] return "\n".join(buf) def _invoke_command(updown, args, autogen_context): if isinstance(args, tuple): return _invoke_adddrop_command(updown, args, autogen_context) else: return _invoke_modify_command(updown, args, autogen_context) def _invoke_adddrop_command(updown, args, autogen_context): cmd_type = args[0] adddrop, cmd_type = cmd_type.split("_") cmd_args = args[1:] + (autogen_context,) _commands = { "table": (_drop_table, _add_table), "column": (_drop_column, _add_column), "index": (_drop_index, _add_index), } cmd_callables = _commands[cmd_type] if ( updown == "upgrade" and adddrop == "add" ) or ( updown == "downgrade" and adddrop == "remove" ): return cmd_callables[1](*cmd_args) else: return cmd_callables[0](*cmd_args) def _invoke_modify_command(updown, args, autogen_context): sname, tname, cname = args[0][1:4] kw = {} _arg_struct = { "modify_type": ("existing_type", "type_"), "modify_nullable": ("existing_nullable", "nullable"), "modify_default": ("existing_server_default", "server_default"), } for diff in args: diff_kw = diff[4] for arg in ("existing_type", \ "existing_nullable", \ "existing_server_default"): if arg in diff_kw: kw.setdefault(arg, diff_kw[arg]) old_kw, new_kw = _arg_struct[diff[0]] if updown == "upgrade": kw[new_kw] = diff[-1] kw[old_kw] = diff[-2] else: kw[new_kw] = diff[-2] kw[old_kw] = diff[-1] if "nullable" in kw: kw.pop("existing_nullable", None) if "server_default" in kw: kw.pop("existing_server_default", None) return _modify_col(tname, cname, autogen_context, schema=sname, **kw) ################################################### # render python def _add_table(table, autogen_context): text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % { 'tablename': table.name, 'prefix': _alembic_autogenerate_prefix(autogen_context), 'args': ',\n'.join( [col for col in [_render_column(col, autogen_context) for col in table.c] if col] + sorted([rcons for rcons in [_render_constraint(cons, autogen_context) for cons in table.constraints] if rcons is not None ]) ) } if table.schema: text += ",\nschema=%r" % table.schema for k in sorted(table.kwargs): text += ",\n%s=%r" % (k.replace(" ", "_"), table.kwargs[k]) text += "\n)" # print text # raise Exception return text def _drop_table(table, autogen_context): text = "%(prefix)sdrop_table(%(tname)r" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": table.name } if table.schema: text += ", schema=%r" % table.schema text += ")" return text def _add_index(index, autogen_context): #process indexes by limodou 2013/05/09 text = "op.create_index('%(name)s', '%(table)s', %(columns)s, unique=%(unique)r)" % { 'name':index['name'], 'table':index['table'], 'columns':[str(x) for x in index['column_names']], 'unique': index['unique'] } return text def _drop_index(index, autogen_context): text = "op.drop_index('%s', '%s')" % (index['name'], index['table']) return text def _add_column(schema, tname, column, autogen_context): text = "%(prefix)sadd_column(%(tname)r, %(column)s" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": tname, "column": _render_column(column, autogen_context) } if schema: text += ", schema=%r" % schema text += ")" return text def _drop_column(schema, tname, column, autogen_context): text = "%(prefix)sdrop_column(%(tname)r, %(cname)r" % { "prefix": 
_alembic_autogenerate_prefix(autogen_context), "tname": tname, "cname": column.name } if schema: text += ", schema=%r" % schema text += ")" return text def _modify_col(tname, cname, autogen_context, server_default=False, type_=None, nullable=None, existing_type=None, existing_nullable=None, existing_server_default=False, schema=None): sqla_prefix = _sqlalchemy_autogenerate_prefix(autogen_context) indent = " " * 11 text = "%(prefix)salter_column(%(tname)r, %(cname)r" % { 'prefix': _alembic_autogenerate_prefix( autogen_context), 'tname': tname, 'cname': cname} text += ",\n%sexisting_type=%s" % (indent, _repr_type(sqla_prefix, existing_type, autogen_context)) if server_default is not False: rendered = _render_server_default( server_default, autogen_context) text += ",\n%sserver_default=%s" % (indent, rendered) if type_ is not None: text += ",\n%stype_=%s" % (indent, _repr_type(sqla_prefix, type_, autogen_context)) if nullable is not None: text += ",\n%snullable=%r" % ( indent, nullable,) if existing_nullable is not None: text += ",\n%sexisting_nullable=%r" % ( indent, existing_nullable) if existing_server_default: rendered = _render_server_default( existing_server_default, autogen_context) text += ",\n%sexisting_server_default=%s" % ( indent, rendered) if schema: text += ",\n%sschema=%r" % (indent, schema) text += ")" return text def _sqlalchemy_autogenerate_prefix(autogen_context): return autogen_context['opts']['sqlalchemy_module_prefix'] or '' def _alembic_autogenerate_prefix(autogen_context): return autogen_context['opts']['alembic_module_prefix'] or '' def _user_defined_render(type_, object_, autogen_context): if 'opts' in autogen_context and \ 'render_item' in autogen_context['opts']: render = autogen_context['opts']['render_item'] if render: rendered = render(type_, object_, autogen_context) if rendered is not False: return rendered return False def _render_column(column, autogen_context): rendered = _user_defined_render("column", column, autogen_context) if rendered is not False: return rendered opts = [] if column.server_default: rendered = _render_server_default( column.server_default, autogen_context ) if rendered: opts.append(("server_default", rendered)) if not column.autoincrement: opts.append(("autoincrement", column.autoincrement)) if column.nullable is not None: opts.append(("nullable", column.nullable)) # TODO: for non-ascii colname, assign a "key" return "%(prefix)sColumn(%(name)r, %(type)s, %(kw)s)" % { 'prefix': _sqlalchemy_autogenerate_prefix(autogen_context), 'name': column.name, 'type': _repr_type(_sqlalchemy_autogenerate_prefix(autogen_context), column.type, autogen_context), 'kw': ", ".join(["%s=%s" % (kwname, val) for kwname, val in opts]) } def _render_server_default(default, autogen_context): rendered = _user_defined_render("server_default", default, autogen_context) if rendered is not False: return rendered if isinstance(default, sa_schema.DefaultClause): if isinstance(default.arg, basestring): default = default.arg else: default = str(default.arg.compile( dialect=autogen_context['dialect'])) if isinstance(default, basestring): # TODO: this is just a hack to get # tests to pass until we figure out # WTF sqlite is doing default = re.sub(r"^'|'$", "", default) return repr(default) else: return None def _repr_type(prefix, type_, autogen_context): from sqlalchemy.types import PickleType def _repr(t): if isinstance(t, PickleType): return 'PickleType()' else: return repr(t) mod = type(type_).__module__ imports = autogen_context.get('imports', None) if 
mod.startswith("sqlalchemy.dialects"): dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) if imports is not None: imports.add("from sqlalchemy.dialects import %s" % dname) return "%s.%s" % (dname, _repr(type_)) else: return "%s%s" % (prefix, _repr(type_)) def _render_constraint(constraint, autogen_context): renderer = _constraint_renderers.get(type(constraint), None) if renderer: return renderer(constraint, autogen_context) else: return None def _render_primary_key(constraint, autogen_context): rendered = _user_defined_render("primary_key", constraint, autogen_context) if rendered is not False: return rendered opts = [] if constraint.name: opts.append(("name", repr(constraint.name))) return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "args": ", ".join( [repr(c.key) for c in constraint.columns] + ["%s=%s" % (kwname, val) for kwname, val in opts] ), } def _fk_colspec(fk, metadata_schema): """Implement a 'safe' version of ForeignKey._get_colspec() that never tries to resolve the remote table. """ if metadata_schema is None: return fk._get_colspec() else: # need to render schema breaking up tokens by hand, since the # ForeignKeyConstraint here may not actually have a remote # Table present tokens = fk._colspec.split(".") # no schema in the colspec, render it if len(tokens) == 2: return "%s.%s" % (metadata_schema, fk._colspec) else: return fk._colspec def _render_foreign_key(constraint, autogen_context): rendered = _user_defined_render("foreign_key", constraint, autogen_context) if rendered is not False: return rendered opts = [] if constraint.name: opts.append(("name", repr(constraint.name))) if constraint.onupdate: opts.append(("onupdate", repr(constraint.onupdate))) if constraint.ondelete: opts.append(("ondelete", repr(constraint.ondelete))) if constraint.initially: opts.append(("initially", repr(constraint.initially))) if constraint.deferrable: opts.append(("deferrable", repr(constraint.deferrable))) apply_metadata_schema = constraint.parent.metadata.schema return "%(prefix)sForeignKeyConstraint([%(cols)s], "\ "[%(refcols)s], %(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "cols": ", ".join("'%s'" % f.parent.key for f in constraint.elements), "refcols": ", ".join(repr(_fk_colspec(f, apply_metadata_schema)) for f in constraint.elements), "args": ", ".join( ["%s=%s" % (kwname, val) for kwname, val in opts] ), } def _render_check_constraint(constraint, autogen_context): rendered = _user_defined_render("check", constraint, autogen_context) if rendered is not False: return rendered # detect the constraint being part of # a parent type which is probably in the Table already. # ideally SQLAlchemy would give us more of a first class # way to detect this. 
if constraint._create_rule and \ hasattr(constraint._create_rule, 'target') and \ isinstance(constraint._create_rule.target, sqltypes.TypeEngine): return None opts = [] if constraint.name: opts.append(("name", repr(constraint.name))) return "%(prefix)sCheckConstraint(%(sqltext)r)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "sqltext": str( constraint.sqltext.compile( dialect=autogen_context['dialect'] ) ) } def _render_unique_constraint(constraint, autogen_context): rendered = _user_defined_render("unique", constraint, autogen_context) if rendered is not False: return rendered opts = [] if constraint.name: opts.append(("name", "'%s'" % constraint.name)) return "%(prefix)sUniqueConstraint(%(cols)s%(opts)s)" % { 'opts': ", " + (", ".join("%s=%s" % (k, v) for k, v in opts)) if opts else "", 'cols': ",".join(["'%s'" % c.name for c in constraint.columns]), "prefix": _sqlalchemy_autogenerate_prefix(autogen_context) } _constraint_renderers = { sa_schema.PrimaryKeyConstraint: _render_primary_key, sa_schema.ForeignKeyConstraint: _render_foreign_key, sa_schema.UniqueConstraint: _render_unique_constraint, sa_schema.CheckConstraint: _render_check_constraint } PK D=lx..alembic/coloredlog.py#coding=utf-8 import logging import re try: import colorama colorama.init() except: colorama = None _r_color_delimeter = re.compile(r'\{\{.*?\}\}') #Available formatting constants are: #Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. #Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. #Style: DIM, NORMAL, BRIGHT, RESET_ALL class ColoredStream(object): def __init__(self, stream, color_delimeter=('{{', '}}')): self.stream = stream self.color_delimeter = color_delimeter def write(self, buf): def m(match): c, text = match.group()[2:-2].split(':', 1) v = list(c.split('|')) v.extend(['', '']) fore, back, style = v[:3] msg = self.colored(text, fore, back, style) return msg b = _r_color_delimeter.sub(m, buf) self.stream.write(b) def colored(self, text, fore=None, back=None, style=None): if colorama: part = [] if fore: part.append(getattr(colorama.Fore, fore.upper(), None)) if back: part.append(getattr(colorama.Back, back.upper(), None)) if style: part.append(getattr(colorama.Style, style.upper(), None)) part.append(text) part = filter(None, part) part.append(colorama.Fore.RESET + colorama.Back.RESET + colorama.Style.RESET_ALL) return ''.join(part) else: return text class ColoredStreamHandler(logging.StreamHandler): def __init__(self, stream=None, color_delimeter=('{{', '}}')): logging.StreamHandler.__init__(self, stream) self.color_delimeter = color_delimeter self.stream = ColoredStream(self.stream, color_delimeter) if __name__ == '__main__': log = logging.getLogger('test') log.addHandler(ColoredStreamHandler()) log.setLevel(logging.DEBUG) log.info("Test {{white|red:Red text}} {{green:Green Text}} {{yellow|white|BRIGHT:bright}}")PK@8Eڥ#""alembic/command.pyimport os from .script import ScriptDirectory from .environment import EnvironmentContext from . 
import util, autogenerate as autogen def list_templates(config): """List available templates""" config.print_stdout("Available templates:\n") for tempname in os.listdir(config.get_template_directory()): with open(os.path.join( config.get_template_directory(), tempname, 'README')) as readme: synopsis = next(readme) config.print_stdout("%s - %s", tempname, synopsis) config.print_stdout("\nTemplates are used via the 'init' command, e.g.:") config.print_stdout("\n alembic init --template pylons ./scripts") def init(config, directory, template='generic'): """Initialize a new scripts directory.""" if os.access(directory, os.F_OK): raise util.CommandError("Directory %s already exists" % directory) template_dir = os.path.join(config.get_template_directory(), template) if not os.access(template_dir, os.F_OK): raise util.CommandError("No such template %r" % template) util.status("Creating directory %s" % os.path.abspath(directory), os.makedirs, directory) versions = os.path.join(directory, 'versions') util.status("Creating directory %s" % os.path.abspath(versions), os.makedirs, versions) script = ScriptDirectory(directory) for file_ in os.listdir(template_dir): file_path = os.path.join(template_dir, file_) if file_ == 'alembic.ini.mako': config_file = os.path.abspath(config.config_file_name) if os.access(config_file, os.F_OK): util.msg("File %s already exists, skipping" % config_file) else: script._generate_template( file_path, config_file, script_location=directory ) elif os.path.isfile(file_path): output_file = os.path.join(directory, file_) script._copy_file( file_path, output_file ) util.msg("Please edit configuration/connection/logging " "settings in %r before proceeding." % config_file) def revision(config, message=None, autogenerate=False, sql=False, skip=False): """Create a new revision file.""" script = ScriptDirectory.from_config(config) template_args = { 'config': config # Let templates use config for # e.g. 
multiple databases } imports = set() environment = util.asbool( config.get_main_option("revision_environment") ) if autogenerate: environment = True def retrieve_migrations(rev, context): if script.get_revision(rev) is not script.get_revision("head"): if not skip: raise util.CommandError("Target database is not up to date.") else: head = script.get_revision("head") if head is not None: head = head.revision context._update_current_rev(rev, head) autogen._produce_migration_diffs(context, template_args, imports) return [] elif environment: def retrieve_migrations(rev, context): return [] if environment: with EnvironmentContext( config, script, fn=retrieve_migrations, as_sql=sql, template_args=template_args, ): script.run_env() return script.generate_revision(util.rev_id(), message, refresh=True, **template_args) def upgrade(config, revision, sql=False, tag=None): """Upgrade to a later version.""" script = ScriptDirectory.from_config(config) starting_rev = None if ":" in revision: if not sql: raise util.CommandError("Range revision not allowed") starting_rev, revision = revision.split(':', 2) def upgrade(rev, context): return script._upgrade_revs(revision, rev) with EnvironmentContext( config, script, fn=upgrade, as_sql=sql, starting_rev=starting_rev, destination_rev=revision, tag=tag ): script.run_env() def downgrade(config, revision, sql=False, tag=None): """Revert to a previous version.""" script = ScriptDirectory.from_config(config) starting_rev = None if ":" in revision: if not sql: raise util.CommandError("Range revision not allowed") starting_rev, revision = revision.split(':', 2) elif sql: raise util.CommandError( "downgrade with --sql requires :") def downgrade(rev, context): return script._downgrade_revs(revision, rev) with EnvironmentContext( config, script, fn=downgrade, as_sql=sql, starting_rev=starting_rev, destination_rev=revision, tag=tag ): script.run_env() def history(config, rev_range=None): """List changeset scripts in chronological order.""" script = ScriptDirectory.from_config(config) if rev_range is not None: if ":" not in rev_range: raise util.CommandError( "History range requires [start]:[end], " "[start]:, or :[end]") base, head = rev_range.strip().split(":") else: base = head = None def _display_history(config, script, base, head): for sc in script.walk_revisions( base=base or "base", head=head or "head"): if sc.is_head: config.print_stdout("") config.print_stdout(sc.log_entry) def _display_history_w_current(config, script, base=None, head=None): def _display_current_history(rev, context): if head is None: _display_history(config, script, base, rev) elif base is None: _display_history(config, script, rev, head) return [] with EnvironmentContext( config, script, fn=_display_current_history ): script.run_env() if base == "current": _display_history_w_current(config, script, head=head) elif head == "current": _display_history_w_current(config, script, base=base) else: _display_history(config, script, base, head) def branches(config): """Show current un-spliced branch points""" script = ScriptDirectory.from_config(config) for sc in script.walk_revisions(): if sc.is_branch_point: config.print_stdout(sc) for rev in sc.nextrev: config.print_stdout("%s -> %s", " " * len(str(sc.down_revision)), script.get_revision(rev) ) def current(config, head_only=False): """Display the current revision for each database.""" script = ScriptDirectory.from_config(config) def display_version(rev, context): rev = script.get_revision(rev) if head_only: config.print_stdout("%s%s" % ( 
rev.revision if rev else None, " (head)" if rev and rev.is_head else "")) else: config.print_stdout("Current revision for %s: %s", util.obfuscate_url_pw( context.connection.engine.url), rev) return [] with EnvironmentContext( config, script, fn=display_version ): script.run_env() def stamp(config, revision, sql=False, tag=None): """'stamp' the revision table with the given revision; don't run any migrations.""" script = ScriptDirectory.from_config(config) def do_stamp(rev, context): if sql: current = False else: current = context._current_rev() dest = script.get_revision(revision) if dest is not None: dest = dest.revision context._update_current_rev(current, dest) return [] with EnvironmentContext( config, script, fn=do_stamp, as_sql=sql, destination_rev=revision, tag=tag ): script.run_env() def splice(config, parent, child): """'splice' two branches, creating a new revision file. this command isn't implemented right now. """ raise NotImplementedError() PK )E9)  alembic/compat.pyimport io import sys from sqlalchemy import __version__ as sa_version if sys.version_info < (2, 6): raise NotImplementedError("Python 2.6 or greater is required.") sqla_08 = sa_version >= '0.8.0' sqla_09 = sa_version >= '0.9.0' py2k = sys.version_info < (3, 0) py3k = sys.version_info >= (3, 0) py33 = sys.version_info >= (3, 3) if py3k: import builtins as compat_builtins string_types = str, binary_type = bytes text_type = str def callable(fn): return hasattr(fn, '__call__') def u(s): return s else: import __builtin__ as compat_builtins string_types = basestring, binary_type = str text_type = unicode callable = callable def u(s): return unicode(s, "utf-8") if py3k: from configparser import ConfigParser as SafeConfigParser import configparser else: from ConfigParser import SafeConfigParser import ConfigParser as configparser if py2k: from mako.util import parse_encoding if py33: from importlib import machinery def load_module_py(module_id, path): return machinery.SourceFileLoader( module_id, path).load_module(module_id) def load_module_pyc(module_id, path): return machinery.SourcelessFileLoader( module_id, path).load_module(module_id) else: import imp def load_module_py(module_id, path): with open(path, 'rb') as fp: mod = imp.load_source(module_id, path, fp) if py2k: source_encoding = parse_encoding(fp) if source_encoding: mod._alembic_source_encoding = source_encoding return mod def load_module_pyc(module_id, path): with open(path, 'rb') as fp: mod = imp.load_compiled(module_id, path, fp) # no source encoding here return mod try: exec_ = getattr(compat_builtins, 'exec') except AttributeError: # Python 2 def exec_(func_text, globals_, lcl): exec('exec func_text in globals_, lcl') ################################################ # cross-compatible metaclass implementation # Copyright (c) 2010-2012 Benjamin Peterson def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("%sBase" % meta.__name__, (base,), {}) ################################################ # produce a wrapper that allows encoded text to stream # into a given buffer, but doesn't close it. # not sure of a more idiomatic approach to this. class EncodedIO(io.TextIOWrapper): def close(self): pass if py2k: # in Py2K, the io.* package is awkward because it does not # easily wrap the file type (e.g. sys.stdout) and I can't # figure out at all how to wrap StringIO.StringIO (used by nosetests) # and also might be user specified too. So create a full # adapter. 
class ActLikePy3kIO(object): """Produce an object capable of wrapping either sys.stdout (e.g. file) *or* StringIO.StringIO(). """ def _false(self): return False def _true(self): return True readable = seekable = _false writable = _true closed = False def __init__(self, file_): self.file_ = file_ def write(self, text): return self.file_.write(text) def flush(self): return self.file_.flush() class EncodedIO(EncodedIO): def __init__(self, file_, encoding): super(EncodedIO, self).__init__( ActLikePy3kIO(file_), encoding=encoding) PK )ES!**alembic/config.pyfrom argparse import ArgumentParser from .compat import SafeConfigParser import inspect import os import sys from . import command, util, package_dir, compat class Config(object): """Represent an Alembic configuration. Within an ``env.py`` script, this is available via the :attr:`.EnvironmentContext.config` attribute, which in turn is available at ``alembic.context``:: from alembic import context some_param = context.config.get_main_option("my option") When invoking Alembic programatically, a new :class:`.Config` can be created by passing the name of an .ini file to the constructor:: from alembic.config import Config alembic_cfg = Config("/path/to/yourapp/alembic.ini") With a :class:`.Config` object, you can then run Alembic commands programmatically using the directives in :mod:`alembic.command`. The :class:`.Config` object can also be constructed without a filename. Values can be set programmatically, and new sections will be created as needed:: from alembic.config import Config alembic_cfg = Config() alembic_cfg.set_main_option("script_location", "myapp:migrations") alembic_cfg.set_main_option("url", "postgresql://foo/bar") alembic_cfg.set_section_option("mysection", "foo", "bar") :param file_: name of the .ini file to open. :param ini_section: name of the main Alembic section within the .ini file :param output_buffer: optional file-like input buffer which will be passed to the :class:`.MigrationContext` - used to redirect the output of "offline generation" when using Alembic programmatically. :param stdout: buffer where the "print" output of commands will be sent. Defaults to ``sys.stdout``. ..versionadded:: 0.4 """ def __init__(self, file_=None, ini_section='alembic', output_buffer=None, stdout=sys.stdout, cmd_opts=None): """Construct a new :class:`.Config` """ self.config_file_name = file_ self.config_ini_section = ini_section self.output_buffer = output_buffer self.stdout = stdout self.cmd_opts = cmd_opts cmd_opts = None """The command-line options passed to the ``alembic`` script. Within an ``env.py`` script this can be accessed via the :attr:`.EnvironmentContext.config` attribute. .. versionadded:: 0.6.0 .. seealso:: :meth:`.EnvironmentContext.get_x_argument` """ config_file_name = None """Filesystem path to the .ini file in use.""" config_ini_section = None """Name of the config file section to read basic configuration from. Defaults to ``alembic``, that is the ``[alembic]`` section of the .ini file. This value is modified using the ``-n/--name`` option to the Alembic runnier. """ def print_stdout(self, text, *arg): """Render a message to standard out.""" util.write_outstream( self.stdout, (compat.text_type(text) % arg), "\n" ) @util.memoized_property def file_config(self): """Return the underlying :class:`ConfigParser` object. Direct access to the .ini file is available here, though the :meth:`.Config.get_section` and :meth:`.Config.get_main_option` methods provide a possibly simpler interface. 
""" if self.config_file_name: here = os.path.abspath(os.path.dirname(self.config_file_name)) else: here = "" file_config = SafeConfigParser({'here': here}) if self.config_file_name: file_config.read([self.config_file_name]) else: file_config.add_section(self.config_ini_section) return file_config def get_template_directory(self): """Return the directory where Alembic setup templates are found. This method is used by the alembic ``init`` and ``list_templates`` commands. """ return os.path.join(package_dir, 'templates') def get_section(self, name): """Return all the configuration options from a given .ini file section as a dictionary. """ return dict(self.file_config.items(name)) def set_main_option(self, name, value): """Set an option programmatically within the 'main' section. This overrides whatever was in the .ini file. """ self.file_config.set(self.config_ini_section, name, value) def remove_main_option(self, name): self.file_config.remove_option(self.config_ini_section, name) def set_section_option(self, section, name, value): """Set an option programmatically within the given section. The section is created if it doesn't exist already. The value here will override whatever was in the .ini file. """ if not self.file_config.has_section(section): self.file_config.add_section(section) self.file_config.set(section, name, value) def get_section_option(self, section, name, default=None): """Return an option from the given section of the .ini file. """ if not self.file_config.has_section(section): raise util.CommandError("No config file %r found, or file has no " "'[%s]' section" % (self.config_file_name, section)) if self.file_config.has_option(section, name): return self.file_config.get(section, name) else: return default def get_main_option(self, name, default=None): """Return an option from the 'main' section of the .ini file. This defaults to being a key from the ``[alembic]`` section, unless the ``-n/--name`` flag were used to indicate a different section. 
""" return self.get_section_option(self.config_ini_section, name, default) class CommandLine(object): def __init__(self, prog=None): self._generate_args(prog) def _generate_args(self, prog): def add_options(parser, positional, kwargs): if 'template' in kwargs: parser.add_argument("-t", "--template", default='generic', type=str, help="Setup template for use with 'init'") if 'message' in kwargs: parser.add_argument( "-m", "--message", type=str, help="Message string to use with 'revision'") if 'sql' in kwargs: parser.add_argument( "--sql", action="store_true", help="Don't emit SQL to database - dump to " "standard output/file instead") if 'tag' in kwargs: parser.add_argument( "--tag", type=str, help="Arbitrary 'tag' name - can be used by " "custom env.py scripts.") if 'autogenerate' in kwargs: parser.add_argument( "--autogenerate", action="store_true", help="Populate revision script with candidate " "migration operations, based on comparison " "of database to model.") # "current" command if 'head_only' in kwargs: parser.add_argument( "--head-only", action="store_true", help="Only show current version and " "whether or not this is the head revision.") if 'rev_range' in kwargs: parser.add_argument("-r", "--rev-range", action="store", help="Specify a revision range; " "format is [start]:[end]") positional_help = { 'directory': "location of scripts directory", 'revision': "revision identifier" } for arg in positional: subparser.add_argument(arg, help=positional_help.get(arg)) parser = ArgumentParser(prog=prog) parser.add_argument("-c", "--config", type=str, default="alembic.ini", help="Alternate config file") parser.add_argument("-n", "--name", type=str, default="alembic", help="Name of section in .ini file to " "use for Alembic config") parser.add_argument("-x", action="append", help="Additional arguments consumed by " "custom env.py scripts, e.g. -x " "setting1=somesetting -x setting2=somesetting") subparsers = parser.add_subparsers() for fn in [getattr(command, n) for n in dir(command)]: if inspect.isfunction(fn) and \ fn.__name__[0] != '_' and \ fn.__module__ == 'alembic.command': spec = inspect.getargspec(fn) if spec[3]: positional = spec[0][1:-len(spec[3])] kwarg = spec[0][-len(spec[3]):] else: positional = spec[0][1:] kwarg = [] subparser = subparsers.add_parser( fn.__name__, help=fn.__doc__) add_options(subparser, positional, kwarg) subparser.set_defaults(cmd=(fn, positional, kwarg)) self.parser = parser def run_cmd(self, config, options): fn, positional, kwarg = options.cmd try: fn(config, *[getattr(options, k) for k in positional], **dict((k, getattr(options, k)) for k in kwarg) ) except util.CommandError as e: util.err(str(e)) def main(self, argv=None): options = self.parser.parse_args(argv) if not hasattr(options, "cmd"): # see http://bugs.python.org/issue9253, argparse # behavior changed incompatibly in py3.3 self.parser.error("too few arguments") else: cfg = Config(file_=options.config, ini_section=options.name, cmd_opts=options) self.run_cmd(cfg, options) def main(argv=None, prog=None, **kwargs): """The console runner function for Alembic.""" CommandLine(prog=prog).main(argv=argv) if __name__ == '__main__': main() PKBEECBalembic/context.pyfrom .environment import EnvironmentContext from . import util # create proxy functions for # each method on the EnvironmentContext class. util.create_module_class_proxy(EnvironmentContext, globals(), locals()) PK )Exxalembic/environment.pyfrom .operations import Operations from .migration import MigrationContext from . 
import util class EnvironmentContext(object): """Represent the state made available to an ``env.py`` script. :class:`.EnvironmentContext` is normally instantiated by the commands present in the :mod:`alembic.command` module. From within an ``env.py`` script, the current :class:`.EnvironmentContext` is available via the ``alembic.context`` datamember. :class:`.EnvironmentContext` is also a Python context manager, that is, is intended to be used using the ``with:`` statement. A typical use of :class:`.EnvironmentContext`:: from alembic.config import Config from alembic.script import ScriptDirectory config = Config() config.set_main_option("script_location", "myapp:migrations") script = ScriptDirectory.from_config(config) def my_function(rev, context): '''do something with revision "rev", which will be the current database revision, and "context", which is the MigrationContext that the env.py will create''' with EnvironmentContext( config, script, fn = my_function, as_sql = False, starting_rev = 'base', destination_rev = 'head', tag = "sometag" ): script.run_env() The above script will invoke the ``env.py`` script within the migration environment. If and when ``env.py`` calls :meth:`.MigrationContext.run_migrations`, the ``my_function()`` function above will be called by the :class:`.MigrationContext`, given the context itself as well as the current revision in the database. .. note:: For most API usages other than full blown invocation of migration scripts, the :class:`.MigrationContext` and :class:`.ScriptDirectory` objects can be created and used directly. The :class:`.EnvironmentContext` object is *only* needed when you need to actually invoke the ``env.py`` module present in the migration environment. """ _migration_context = None config = None """An instance of :class:`.Config` representing the configuration file contents as well as other variables set programmatically within it.""" script = None """An instance of :class:`.ScriptDirectory` which provides programmatic access to version files within the ``versions/`` directory. """ def __init__(self, config, script, **kw): """Construct a new :class:`.EnvironmentContext`. :param config: a :class:`.Config` instance. :param script: a :class:`.ScriptDirectory` instance. :param \**kw: keyword options that will be ultimately passed along to the :class:`.MigrationContext` when :meth:`.EnvironmentContext.configure` is called. """ self.config = config self.script = script self.context_opts = kw def __enter__(self): """Establish a context which provides a :class:`.EnvironmentContext` object to env.py scripts. The :class:`.EnvironmentContext` will be made available as ``from alembic import context``. """ from .context import _install_proxy _install_proxy(self) return self def __exit__(self, *arg, **kw): from . import context, op context._remove_proxy() op._remove_proxy() def is_offline_mode(self): """Return True if the current migrations environment is running in "offline mode". This is ``True`` or ``False`` depending on the the ``--sql`` flag passed. This function does not require that the :class:`.MigrationContext` has been configured. """ return self.context_opts.get('as_sql', False) def is_transactional_ddl(self): """Return True if the context is configured to expect a transactional DDL capable backend. This defaults to the type of database in use, and can be overridden by the ``transactional_ddl`` argument to :meth:`.configure` This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. 
""" return self.get_context().impl.transactional_ddl def requires_connection(self): return not self.is_offline_mode() def get_head_revision(self): """Return the hex identifier of the 'head' revision. This function does not require that the :class:`.MigrationContext` has been configured. """ return self.script._as_rev_number("head") def get_starting_revision_argument(self): """Return the 'starting revision' argument, if the revision was passed using ``start:end``. This is only meaningful in "offline" mode. Returns ``None`` if no value is available or was configured. This function does not require that the :class:`.MigrationContext` has been configured. """ if self._migration_context is not None: return self.script._as_rev_number( self.get_context()._start_from_rev) elif 'starting_rev' in self.context_opts: return self.script._as_rev_number( self.context_opts['starting_rev']) else: raise util.CommandError( "No starting revision argument is available.") def get_revision_argument(self): """Get the 'destination' revision argument. This is typically the argument passed to the ``upgrade`` or ``downgrade`` command. If it was specified as ``head``, the actual version number is returned; if specified as ``base``, ``None`` is returned. This function does not require that the :class:`.MigrationContext` has been configured. """ return self.script._as_rev_number( self.context_opts['destination_rev']) def get_tag_argument(self): """Return the value passed for the ``--tag`` argument, if any. The ``--tag`` argument is not used directly by Alembic, but is available for custom ``env.py`` configurations that wish to use it; particularly for offline generation scripts that wish to generate tagged filenames. This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_x_argument` - a newer and more open ended system of extending ``env.py`` scripts via the command line. """ return self.context_opts.get('tag', None) def get_x_argument(self, as_dictionary=False): """Return the value(s) passed for the ``-x`` argument, if any. The ``-x`` argument is an open ended flag that allows any user-defined value or values to be passed on the command line, then available here for consumption by a custom ``env.py`` script. The return value is a list, returned directly from the ``argparse`` structure. If ``as_dictionary=True`` is passed, the ``x`` arguments are parsed using ``key=value`` format into a dictionary that is then returned. For example, to support passing a database URL on the command line, the standard ``env.py`` script can be modified like this:: cmd_line_url = context.get_x_argument( as_dictionary=True).get('dbname') if cmd_line_url: engine = create_engine(cmd_line_url) else: engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) This then takes effect by running the ``alembic`` script as:: alembic -x dbname=postgresql://user:pass@host/dbname upgrade head This function does not require that the :class:`.MigrationContext` has been configured. .. versionadded:: 0.6.0 .. 
seealso:: :meth:`.EnvironmentContext.get_tag_argument` :attr:`.Config.cmd_opts` """ if self.config.cmd_opts is not None: value = self.config.cmd_opts.x or [] else: value = [] if as_dictionary: value = dict( arg.split('=', 1) for arg in value ) return value def configure(self, connection=None, url=None, dialect_name=None, transactional_ddl=None, transaction_per_migration=False, output_buffer=None, starting_rev=None, tag=None, template_args=None, target_metadata=None, include_symbol=None, include_object=None, include_schemas=False, compare_type=False, compare_server_default=False, render_item=None, upgrade_token="upgrades", downgrade_token="downgrades", alembic_module_prefix="op.", sqlalchemy_module_prefix="sa.", user_module_prefix=None, **kw ): """Configure a :class:`.MigrationContext` within this :class:`.EnvironmentContext` which will provide database connectivity and other configuration to a series of migration scripts. Many methods on :class:`.EnvironmentContext` require that this method has been called in order to function, as they ultimately need to have database access or at least access to the dialect in use. Those which do are documented as such. The important thing needed by :meth:`.configure` is a means to determine what kind of database dialect is in use. An actual connection to that database is needed only if the :class:`.MigrationContext` is to be used in "online" mode. If the :meth:`.is_offline_mode` function returns ``True``, then no connection is needed here. Otherwise, the ``connection`` parameter should be present as an instance of :class:`sqlalchemy.engine.Connection`. This function is typically called from the ``env.py`` script within a migration environment. It can be called multiple times for an invocation. The most recent :class:`~sqlalchemy.engine.Connection` for which it was called is the one that will be operated upon by the next call to :meth:`.run_migrations`. General parameters: :param connection: a :class:`~sqlalchemy.engine.Connection` to use for SQL execution in "online" mode. When present, is also used to determine the type of dialect in use. :param url: a string database url, or a :class:`sqlalchemy.engine.url.URL` object. The type of dialect to be used will be derived from this if ``connection`` is not passed. :param dialect_name: string name of a dialect, such as "postgresql", "mssql", etc. The type of dialect to be used will be derived from this if ``connection`` and ``url`` are not passed. :param transactional_ddl: Force the usage of "transactional" DDL on or off; this otherwise defaults to whether or not the dialect in use supports it. :param transaction_per_migration: if True, nest each migration script in a transaction rather than the full series of migrations to run. .. versionadded:: 0.6.5 :param output_buffer: a file-like object that will be used for textual output when the ``--sql`` option is used to generate SQL scripts. Defaults to ``sys.stdout`` if not passed here and also not present on the :class:`.Config` object. The value here overrides that of the :class:`.Config` object. :param output_encoding: when using ``--sql`` to generate SQL scripts, apply this encoding to the string output. .. versionadded:: 0.5.0 :param starting_rev: Override the "starting revision" argument when using ``--sql`` mode. :param tag: a string tag for usage by custom ``env.py`` scripts. Set via the ``--tag`` option, can be overridden here. 
:param template_args: dictionary of template arguments which will be added to the template argument environment when running the "revision" command. Note that the script environment is only run within the "revision" command if the --autogenerate option is used, or if the option "revision_environment=true" is present in the alembic.ini file. .. versionadded:: 0.3.3 :param version_table: The name of the Alembic version table. The default is ``'alembic_version'``. :param version_table_schema: Optional schema to place version table within. .. versionadded:: 0.5.0 Parameters specific to the autogenerate feature, when ``alembic revision`` is run with the ``--autogenerate`` feature: :param target_metadata: a :class:`sqlalchemy.schema.MetaData` object that will be consulted during autogeneration. The tables present will be compared against what is locally available on the target :class:`~sqlalchemy.engine.Connection` to produce candidate upgrade/downgrade operations. :param compare_type: Indicates type comparison behavior during an autogenerate operation. Defaults to ``False`` which disables type comparison. Set to ``True`` to turn on default type comparison, which has varied accuracy depending on backend. To customize type comparison behavior, a callable may be specified which can filter type comparisons during an autogenerate operation. The format of this callable is:: def my_compare_type(context, inspected_column, metadata_column, inspected_type, metadata_type): # return True if the types are different, # False if not, or None to allow the default implementation # to compare these types return None context.configure( # ... compare_type = my_compare_type ) ``inspected_column`` is a :class:`sqlalchemy.schema.Column` as returned by :meth:`sqlalchemy.engine.reflection.Inspector.reflecttable`, whereas ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from the local model environment. A return value of ``None`` indicates to allow default type comparison to proceed. .. seealso:: :paramref:`.EnvironmentContext.configure.compare_server_default` :param compare_server_default: Indicates server default comparison behavior during an autogenerate operation. Defaults to ``False`` which disables server default comparison. Set to ``True`` to turn on server default comparison, which has varied accuracy depending on backend. To customize server default comparison behavior, a callable may be specified which can filter server default comparisons during an autogenerate operation. defaults during an autogenerate operation. The format of this callable is:: def my_compare_server_default(context, inspected_column, metadata_column, inspected_default, metadata_default, rendered_metadata_default): # return True if the defaults are different, # False if not, or None to allow the default implementation # to compare these defaults return None context.configure( # ... compare_server_default = my_compare_server_default ) ``inspected_column`` is a dictionary structure as returned by :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from the local model environment. A return value of ``None`` indicates to allow default server default comparison to proceed. Note that some backends such as Postgresql actually execute the two defaults on the database side to compare for equivalence. .. 
seealso:: :paramref:`.EnvironmentContext.configure.compare_type` :param include_object: A callable function which is given the chance to return ``True`` or ``False`` for any object, indicating if the given object should be considered in the autogenerate sweep. The function accepts the following positional arguments: * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such as a :class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.Column` object * ``name``: the name of the object. This is typically available via ``object.name``. * ``type``: a string describing the type of object; currently ``"table"`` or ``"column"`` * ``reflected``: ``True`` if the given object was produced based on table reflection, ``False`` if it's from a local :class:`.MetaData` object. * ``compare_to``: the object being compared against, if available, else ``None``. E.g.:: def include_object(object, name, type_, reflected, compare_to): if (type_ == "column" and not reflected and object.info.get("skip_autogenerate", False)): return False else: return True context.configure( # ... include_object = include_object ) :paramref:`.EnvironmentContext.configure.include_object` can also be used to filter on specific schemas to include or omit, when the :paramref:`.EnvironmentContext.configure.include_schemas` flag is set to ``True``. The :attr:`.Table.schema` attribute on each :class:`.Table` object reflected will indicate the name of the schema from which the :class:`.Table` originates. .. versionadded:: 0.6.0 .. seealso:: :paramref:`.EnvironmentContext.configure.include_schemas` :param include_symbol: A callable function which, given a table name and schema name (may be ``None``), returns ``True`` or ``False``, indicating if the given table should be considered in the autogenerate sweep. .. deprecated:: 0.6.0 :paramref:`.EnvironmentContext.configure.include_symbol` is superceded by the more generic :paramref:`.EnvironmentContext.configure.include_object` parameter. E.g.:: def include_symbol(tablename, schema): return tablename not in ("skip_table_one", "skip_table_two") context.configure( # ... include_symbol = include_symbol ) .. seealso:: :paramref:`.EnvironmentContext.configure.include_schemas` :paramref:`.EnvironmentContext.configure.include_object` :param include_schemas: If True, autogenerate will scan across all schemas located by the SQLAlchemy :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` method, and include all differences in tables found across all those schemas. When using this option, you may want to also use the :paramref:`.EnvironmentContext.configure.include_object` option to specify a callable which can filter the tables/schemas that get included. .. versionadded :: 0.4.0 .. seealso:: :paramref:`.EnvironmentContext.configure.include_object` :param render_item: Callable that can be used to override how any schema item, i.e. column, constraint, type, etc., is rendered for autogenerate. The callable receives a string describing the type of object, the object, and the autogen context. If it returns False, the default rendering method will be used. If it returns None, the item will not be rendered in the context of a Table construct, that is, can be used to skip columns or constraints within op.create_table():: def my_render_column(type_, col, autogen_context): if type_ == "column" and isinstance(col, MySpecialCol): return repr(col) else: return False context.configure( # ... 
render_item = my_render_column ) Available values for the type string include: ``"column"``, ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, ``"type"``, ``"server_default"``. .. versionadded:: 0.5.0 .. seealso:: :ref:`autogen_render_types` :param upgrade_token: When autogenerate completes, the text of the candidate upgrade operations will be present in this template variable when ``script.py.mako`` is rendered. Defaults to ``upgrades``. :param downgrade_token: When autogenerate completes, the text of the candidate downgrade operations will be present in this template variable when ``script.py.mako`` is rendered. Defaults to ``downgrades``. :param alembic_module_prefix: When autogenerate refers to Alembic :mod:`alembic.operations` constructs, this prefix will be used (i.e. ``op.create_table``) Defaults to "``op.``". Can be ``None`` to indicate no prefix. :param sqlalchemy_module_prefix: When autogenerate refers to SQLAlchemy :class:`~sqlalchemy.schema.Column` or type classes, this prefix will be used (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". Can be ``None`` to indicate no prefix. Note that when dialect-specific types are rendered, autogenerate will render them using the dialect module name, i.e. ``mssql.BIT()``, ``postgresql.UUID()``. :param user_module_prefix: When autogenerate refers to a SQLAlchemy type (e.g. :class:`.TypeEngine`) where the module name is not under the ``sqlalchemy`` namespace, this prefix will be used within autogenerate, if non-``None``; if left at its default of ``None``, the :paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix` is used instead. .. versionadded:: 0.6.3 added :paramref:`.EnvironmentContext.configure.user_module_prefix` .. seealso:: :ref:`autogen_module_prefix` Parameters specific to individual backends: :param mssql_batch_separator: The "batch separator" which will be placed between each statement when generating offline SQL Server migrations. Defaults to ``GO``. Note this is in addition to the customary semicolon ``;`` at the end of each statement; SQL Server considers the "batch separator" to denote the end of an individual statement execution, and cannot group certain dependent operations in one step. :param oracle_batch_separator: The "batch separator" which will be placed between each statement when generating offline Oracle migrations. Defaults to ``/``. Oracle doesn't add a semicolon between statements like most other backends. 
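As a rough end-to-end sketch (the database URL, the ``myapp.models`` import and the ``include_object`` callable below are placeholders rather than part of this API), an online-mode ``env.py`` might combine several of these options as follows::

    from sqlalchemy import create_engine
    from alembic import context

    # placeholder: the application's MetaData collection
    from myapp.models import metadata as target_metadata

    def include_object(object, name, type_, reflected, compare_to):
        # skip any object explicitly flagged in its .info dictionary
        return not object.info.get("skip_autogenerate", False)

    engine = create_engine("postgresql://localhost/mydatabase")
    connection = engine.connect()
    try:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_object=include_object,
            compare_type=True,
            transaction_per_migration=True,
        )
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
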
""" opts = self.context_opts if transactional_ddl is not None: opts["transactional_ddl"] = transactional_ddl if output_buffer is not None: opts["output_buffer"] = output_buffer elif self.config.output_buffer is not None: opts["output_buffer"] = self.config.output_buffer if starting_rev: opts['starting_rev'] = starting_rev if tag: opts['tag'] = tag if template_args and 'template_args' in opts: opts['template_args'].update(template_args) opts["transaction_per_migration"] = transaction_per_migration opts['target_metadata'] = target_metadata opts['include_symbol'] = include_symbol opts['include_object'] = include_object opts['include_schemas'] = include_schemas opts['upgrade_token'] = upgrade_token opts['downgrade_token'] = downgrade_token opts['sqlalchemy_module_prefix'] = sqlalchemy_module_prefix opts['alembic_module_prefix'] = alembic_module_prefix opts['user_module_prefix'] = user_module_prefix if render_item is not None: opts['render_item'] = render_item if compare_type is not None: opts['compare_type'] = compare_type if compare_server_default is not None: opts['compare_server_default'] = compare_server_default opts['script'] = self.script opts.update(kw) self._migration_context = MigrationContext.configure( connection=connection, url=url, dialect_name=dialect_name, environment_context=self, opts=opts ) def run_migrations(self, **kw): """Run migrations as determined by the current command line configuration as well as versioning information present (or not) in the current database connection (if one is present). The function accepts optional ``**kw`` arguments. If these are passed, they are sent directly to the ``upgrade()`` and ``downgrade()`` functions within each target revision file. By modifying the ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` functions accept arguments, parameters can be passed here so that contextual information, usually information to identify a particular database in use, can be passed from a custom ``env.py`` script to the migration functions. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ with Operations.context(self._migration_context): self.get_context().run_migrations(**kw) def execute(self, sql, execution_options=None): """Execute the given SQL using the current change context. The behavior of :meth:`.execute` is the same as that of :meth:`.Operations.execute`. Please see that function's documentation for full detail including caveats and limitations. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ self.get_context().execute(sql, execution_options=execution_options) def static_output(self, text): """Emit text directly to the "offline" SQL stream. Typically this is for emitting comments that start with --. The statement is not treated as a SQL execution, no ; or batch separator is added, etc. """ self.get_context().impl.static_output(text) def begin_transaction(self): """Return a context manager that will enclose an operation within a "transaction", as defined by the environment's offline and transactional DDL settings. e.g.:: with context.begin_transaction(): context.run_migrations() :meth:`.begin_transaction` is intended to "do the right thing" regardless of calling context: * If :meth:`.is_transactional_ddl` is ``False``, returns a "do nothing" context manager which otherwise produces no transactional state or directives. 
* If :meth:`.is_offline_mode` is ``True``, returns a context manager that will invoke the :meth:`.DefaultImpl.emit_begin` and :meth:`.DefaultImpl.emit_commit` methods, which will produce the string directives ``BEGIN`` and ``COMMIT`` on the output stream, as rendered by the target backend (e.g. SQL Server would emit ``BEGIN TRANSACTION``). * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` on the current online connection, which returns a :class:`sqlalchemy.engine.Transaction` object. This object demarcates a real transaction and is itself a context manager, which will roll back if an exception is raised. Note that a custom ``env.py`` script which has more specific transactional needs can of course manipulate the :class:`~sqlalchemy.engine.Connection` directly to produce transactional state in "online" mode. """ return self.get_context().begin_transaction() def get_context(self): """Return the current :class:`.MigrationContext` object. If :meth:`.EnvironmentContext.configure` has not been called yet, raises an exception. """ if self._migration_context is None: raise Exception("No context has been configured yet.") return self._migration_context def get_bind(self): """Return the current 'bind'. In "online" mode, this is the :class:`sqlalchemy.engine.Connection` currently being used to emit SQL to the database. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ return self.get_context().bind def get_impl(self): return self.get_context().impl PK )E8585alembic/migration.pyimport logging import sys from contextlib import contextmanager from sqlalchemy import MetaData, Table, Column, String, literal_column from sqlalchemy import create_engine from sqlalchemy.engine import url as sqla_url from .compat import callable, EncodedIO from . import ddl, util log = logging.getLogger(__name__) class MigrationContext(object): """Represent the database state made available to a migration script. :class:`.MigrationContext` is the front end to an actual database connection, or alternatively a string output stream given a particular database dialect, from an Alembic perspective. When inside the ``env.py`` script, the :class:`.MigrationContext` is available via the :meth:`.EnvironmentContext.get_context` method, which is available at ``alembic.context``:: # from within env.py script from alembic import context migration_context = context.get_context() For usage outside of an ``env.py`` script, such as for utility routines that want to check the current version in the database, the :meth:`.MigrationContext.configure` method to create new :class:`.MigrationContext` objects. 
For example, to get at the current revision in the database using :meth:`.MigrationContext.get_current_revision`:: # in any application, outside of an env.py script from alembic.migration import MigrationContext from sqlalchemy import create_engine engine = create_engine("postgresql://mydatabase") conn = engine.connect() context = MigrationContext.configure(conn) current_rev = context.get_current_revision() The above context can also be used to produce Alembic migration operations with an :class:`.Operations` instance:: # in any application, outside of the normal Alembic environment from alembic.operations import Operations op = Operations(context) op.alter_column("mytable", "somecolumn", nullable=True) """ def __init__(self, dialect, connection, opts, environment_context=None): self.environment_context = environment_context self.opts = opts self.dialect = dialect self.script = opts.get('script') as_sql = opts.get('as_sql', False) transactional_ddl = opts.get("transactional_ddl") self._transaction_per_migration = opts.get( "transaction_per_migration", False) if as_sql: self.connection = self._stdout_connection(connection) assert self.connection is not None else: self.connection = connection self._migrations_fn = opts.get('fn') self.as_sql = as_sql if "output_encoding" in opts: self.output_buffer = EncodedIO( opts.get("output_buffer") or sys.stdout, opts['output_encoding'] ) else: self.output_buffer = opts.get("output_buffer", sys.stdout) self._user_compare_type = opts.get('compare_type', False) self._user_compare_server_default = opts.get( 'compare_server_default', False) version_table = opts.get('version_table', 'alembic_version') version_table_schema = opts.get('version_table_schema', None) self._version = Table( version_table, MetaData(), Column('version_num', String(32), nullable=False), schema=version_table_schema) self._start_from_rev = opts.get("starting_rev") self.impl = ddl.DefaultImpl.get_by_dialect(dialect)( dialect, self.connection, self.as_sql, transactional_ddl, self.output_buffer, opts ) log.info("Context impl %s.", self.impl.__class__.__name__) if self.as_sql: log.info("Generating static SQL") log.info("Will assume %s DDL.", "transactional" if self.impl.transactional_ddl else "non-transactional") @classmethod def configure(cls, connection=None, url=None, dialect_name=None, environment_context=None, opts=None, ): """Create a new :class:`.MigrationContext`. This is a factory method usually called by :meth:`.EnvironmentContext.configure`. :param connection: a :class:`~sqlalchemy.engine.Connection` to use for SQL execution in "online" mode. When present, is also used to determine the type of dialect in use. :param url: a string database url, or a :class:`sqlalchemy.engine.url.URL` object. The type of dialect to be used will be derived from this if ``connection`` is not passed. :param dialect_name: string name of a dialect, such as "postgresql", "mssql", etc. The type of dialect to be used will be derived from this if ``connection`` and ``url`` are not passed. :param opts: dictionary of options. Most other options accepted by :meth:`.EnvironmentContext.configure` are passed via this dictionary. 
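As an additional sketch (the option values shown are illustrative), a :class:`.MigrationContext` can also be produced with no connection at all, using only a dialect name together with the ``as_sql`` option; DDL emitted through an :class:`.Operations` instance is then written to the output buffer (``sys.stdout`` by default) rather than executed::

    from alembic.migration import MigrationContext
    from alembic.operations import Operations
    from sqlalchemy import Column, Integer, String

    ctx = MigrationContext.configure(
        dialect_name="postgresql",
        opts={"as_sql": True},
    )
    op = Operations(ctx)

    # rendered as a CREATE TABLE statement on the output stream
    # instead of being executed against a database
    op.create_table(
        "account",
        Column("id", Integer, primary_key=True),
        Column("name", String(50), nullable=False),
    )
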
""" if opts is None: opts = {} if connection: dialect = connection.dialect elif url: url = sqla_url.make_url(url) dialect = url.get_dialect()() elif dialect_name: url = sqla_url.make_url("%s://" % dialect_name) dialect = url.get_dialect()() else: raise Exception("Connection, url, or dialect_name is required.") return MigrationContext(dialect, connection, opts, environment_context) def begin_transaction(self, _per_migration=False): transaction_now = _per_migration == self._transaction_per_migration if not transaction_now: @contextmanager def do_nothing(): yield return do_nothing() elif not self.impl.transactional_ddl: @contextmanager def do_nothing(): yield return do_nothing() elif self.as_sql: @contextmanager def begin_commit(): self.impl.emit_begin() yield self.impl.emit_commit() return begin_commit() else: return self.bind.begin() def get_current_revision(self): """Return the current revision, usually that which is present in the ``alembic_version`` table in the database. If this :class:`.MigrationContext` was configured in "offline" mode, that is with ``as_sql=True``, the ``starting_rev`` parameter is returned instead, if any. """ if self.as_sql: return self._start_from_rev else: if self._start_from_rev: raise util.CommandError( "Can't specify current_rev to context " "when using a database connection") self._version.create(self.connection, checkfirst=True) return self.connection.scalar(self._version.select()) _current_rev = get_current_revision """The 0.2 method name, for backwards compat.""" def _update_current_rev(self, old, new): if old == new: return if new is None: self.impl._exec(self._version.delete()) elif old is None: self.impl._exec(self._version.insert(). values(version_num=literal_column("'%s'" % new)) ) else: self.impl._exec(self._version.update(). values(version_num=literal_column("'%s'" % new)) ) def run_migrations(self, **kw): """Run the migration scripts established for this :class:`.MigrationContext`, if any. The commands in :mod:`alembic.command` will set up a function that is ultimately passed to the :class:`.MigrationContext` as the ``fn`` argument. This function represents the "work" that will be done when :meth:`.MigrationContext.run_migrations` is called, typically from within the ``env.py`` script of the migration environment. The "work function" then provides an iterable of version callables and other version information which in the case of the ``upgrade`` or ``downgrade`` commands are the list of version scripts to invoke. Other commands yield nothing, in the case that a command wants to run some other operation against the database such as the ``current`` or ``stamp`` commands. :param \**kw: keyword arguments here will be passed to each migration callable, that is the ``upgrade()`` or ``downgrade()`` method within revision scripts. 
""" current_rev = rev = False stamp_per_migration = not self.impl.transactional_ddl or \ self._transaction_per_migration self.impl.start_migrations() for change, prev_rev, rev, doc in self._migrations_fn( self.get_current_revision(), self): with self.begin_transaction(_per_migration=True): if current_rev is False: current_rev = prev_rev if self.as_sql and not current_rev: self._version.create(self.connection) if doc: log.info( "Running %s %s -> %s, %s", change.__name__, prev_rev, rev, doc) else: log.info( "Running %s %s -> %s", change.__name__, prev_rev, rev) if self.as_sql: self.impl.static_output( "-- Running %s %s -> %s" % (change.__name__, prev_rev, rev) ) change(**kw) if stamp_per_migration: self._update_current_rev(prev_rev, rev) prev_rev = rev if rev is not False: if not stamp_per_migration: self._update_current_rev(current_rev, rev) if self.as_sql and not rev: self._version.drop(self.connection) def execute(self, sql, execution_options=None): """Execute a SQL construct or string statement. The underlying execution mechanics are used, that is if this is "offline mode" the SQL is written to the output buffer, otherwise the SQL is emitted on the current SQLAlchemy connection. """ self.impl._exec(sql, execution_options) def _stdout_connection(self, connection): def dump(construct, *multiparams, **params): self.impl._exec(construct) return create_engine("%s://" % self.dialect.name, strategy="mock", executor=dump) @property def bind(self): """Return the current "bind". In online mode, this is an instance of :class:`sqlalchemy.engine.Connection`, and is suitable for ad-hoc execution of any kind of usage described in :ref:`sqlexpression_toplevel` as well as for usage with the :meth:`sqlalchemy.schema.Table.create` and :meth:`sqlalchemy.schema.MetaData.create_all` methods of :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.MetaData`. Note that when "standard output" mode is enabled, this bind will be a "mock" connection handler that cannot return results and is only appropriate for a very limited subset of commands. """ return self.connection @property def config(self): """Return the :class:`.Config` used by the current environment, if any. .. versionadded:: 0.6.6 """ if self.environment_context: return self.environment_context.config else: return None def _compare_type(self, inspector_column, metadata_column): if self._user_compare_type is False: return False if callable(self._user_compare_type): user_value = self._user_compare_type( self, inspector_column, metadata_column, inspector_column.type, metadata_column.type ) if user_value is not None: return user_value return self.impl.compare_type( inspector_column, metadata_column) def _compare_server_default(self, inspector_column, metadata_column, rendered_metadata_default, rendered_column_default): if self._user_compare_server_default is False: return False if callable(self._user_compare_server_default): user_value = self._user_compare_server_default( self, inspector_column, metadata_column, rendered_column_default, metadata_column.server_default, rendered_metadata_default ) if user_value is not None: return user_value return self.impl.compare_server_default( inspector_column, metadata_column, rendered_metadata_default, rendered_column_default) PKEECA<] alembic/op.pyfrom .operations import Operations from . import util # create proxy functions for # each method on the Operations class. 
util.create_module_class_proxy(Operations, globals(), locals()) PK )ET;Calembic/operations.pyfrom contextlib import contextmanager from sqlalchemy.types import NULLTYPE, Integer from sqlalchemy import schema as sa_schema from . import util from .compat import string_types from .ddl import impl __all__ = ('Operations',) try: from sqlalchemy.sql.naming import conv except: conv = None class Operations(object): """Define high level migration operations. Each operation corresponds to some schema migration operation, executed against a particular :class:`.MigrationContext` which in turn represents connectivity to a database, or a file output stream. While :class:`.Operations` is normally configured as part of the :meth:`.EnvironmentContext.run_migrations` method called from an ``env.py`` script, a standalone :class:`.Operations` instance can be made for use cases external to regular Alembic migrations by passing in a :class:`.MigrationContext`:: from alembic.migration import MigrationContext from alembic.operations import Operations conn = myengine.connect() ctx = MigrationContext.configure(conn) op = Operations(ctx) op.alter_column("t", "c", nullable=True) """ def __init__(self, migration_context): """Construct a new :class:`.Operations` :param migration_context: a :class:`.MigrationContext` instance. """ self.migration_context = migration_context self.impl = migration_context.impl @classmethod @contextmanager def context(cls, migration_context): from .op import _install_proxy, _remove_proxy op = Operations(migration_context) _install_proxy(op) yield op _remove_proxy() def _primary_key_constraint(self, name, table_name, cols, schema=None): m = self._metadata() columns = [sa_schema.Column(n, NULLTYPE) for n in cols] t1 = sa_schema.Table(table_name, m, *columns, schema=schema) p = sa_schema.PrimaryKeyConstraint(*columns, name=name) t1.append_constraint(p) return p def _foreign_key_constraint(self, name, source, referent, local_cols, remote_cols, onupdate=None, ondelete=None, deferrable=None, source_schema=None, referent_schema=None, initially=None, match=None, **dialect_kw): m = self._metadata() if source == referent: t1_cols = local_cols + remote_cols else: t1_cols = local_cols sa_schema.Table( referent, m, *[sa_schema.Column(n, NULLTYPE) for n in remote_cols], schema=referent_schema) t1 = sa_schema.Table( source, m, *[sa_schema.Column(n, NULLTYPE) for n in t1_cols], schema=source_schema) tname = "%s.%s" % (referent_schema, referent) if referent_schema \ else referent f = sa_schema.ForeignKeyConstraint(local_cols, ["%s.%s" % (tname, n) for n in remote_cols], name=name, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, initially=initially, match=match, **dialect_kw ) t1.append_constraint(f) return f def _unique_constraint(self, name, source, local_cols, schema=None, **kw): t = sa_schema.Table( source, self._metadata(), *[sa_schema.Column(n, NULLTYPE) for n in local_cols], schema=schema) kw['name'] = name uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw) # TODO: need event tests to ensure the event # is fired off here t.append_constraint(uq) return uq def _check_constraint(self, name, source, condition, schema=None, **kw): t = sa_schema.Table(source, self._metadata(), sa_schema.Column('x', Integer), schema=schema) ck = sa_schema.CheckConstraint(condition, name=name, **kw) t.append_constraint(ck) return ck def _metadata(self): kw = {} if 'target_metadata' in self.migration_context.opts: mt = self.migration_context.opts['target_metadata'] if hasattr(mt, 
'naming_convention'): kw['naming_convention'] = mt.naming_convention return sa_schema.MetaData(**kw) def _table(self, name, *columns, **kw): m = self._metadata() t = sa_schema.Table(name, m, *columns, **kw) for f in t.foreign_keys: self._ensure_table_for_fk(m, f) return t def _column(self, name, type_, **kw): return sa_schema.Column(name, type_, **kw) def _index(self, name, tablename, columns, schema=None, **kw): t = sa_schema.Table( tablename or 'no_table', self._metadata(), schema=schema ) idx = sa_schema.Index( name, *[impl._textual_index_column(t, n) for n in columns], **kw) return idx def _parse_table_key(self, table_key): if '.' in table_key: tokens = table_key.split('.') sname = ".".join(tokens[0:-1]) tname = tokens[-1] else: tname = table_key sname = None return (sname, tname) def _ensure_table_for_fk(self, metadata, fk): """create a placeholder Table object for the referent of a ForeignKey. """ if isinstance(fk._colspec, string_types): table_key, cname = fk._colspec.rsplit('.', 1) sname, tname = self._parse_table_key(table_key) if table_key not in metadata.tables: rel_t = sa_schema.Table(tname, metadata, schema=sname) else: rel_t = metadata.tables[table_key] if cname not in rel_t.c: rel_t.append_column(sa_schema.Column(cname, NULLTYPE)) def get_context(self): """Return the :class:`.MigrationContext` object that's currently in use. """ return self.migration_context def rename_table(self, old_table_name, new_table_name, schema=None): """Emit an ALTER TABLE to rename a table. :param old_table_name: old name. :param new_table_name: new name. :param schema: Optional schema name to operate within. """ self.impl.rename_table( old_table_name, new_table_name, schema=schema ) @util._with_legacy_names([('name', 'new_column_name')]) def alter_column(self, table_name, column_name, nullable=None, server_default=False, new_column_name=None, type_=None, autoincrement=None, existing_type=None, existing_server_default=False, existing_nullable=None, existing_autoincrement=None, schema=None ): """Issue an "alter column" instruction using the current migration context. Generally, only that aspect of the column which is being changed, i.e. name, type, nullability, default, needs to be specified. Multiple changes can also be specified at once and the backend should "do the right thing", emitting each change either separately or together as the backend allows. MySQL has special requirements here, since MySQL cannot ALTER a column without a full specification. When producing MySQL-compatible migration files, it is recommended that the ``existing_type``, ``existing_server_default``, and ``existing_nullable`` parameters be present, if not being altered. Type changes which are against the SQLAlchemy "schema" types :class:`~sqlalchemy.types.Boolean` and :class:`~sqlalchemy.types.Enum` may also add or drop constraints which accompany those types on backends that don't support them natively. The ``existing_server_default`` argument is used in this case as well to remove a previous constraint. :param table_name: string name of the target table. :param column_name: string name of the target column, as it exists before the operation begins. :param nullable: Optional; specify ``True`` or ``False`` to alter the column's nullability. :param server_default: Optional; specify a string SQL expression, :func:`~sqlalchemy.sql.expression.text`, or :class:`~sqlalchemy.schema.DefaultClause` to indicate an alteration to the column's default value. Set to ``None`` to have the default removed. 
:param new_column_name: Optional; specify a string name here to indicate the new name within a column rename operation. .. versionchanged:: 0.5.0 The ``name`` parameter is now named ``new_column_name``. The old name will continue to function for backwards compatibility. :param ``type_``: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify a change to the column's type. For SQLAlchemy types that also indicate a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), the constraint is also generated. :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; currently understood by the MySQL dialect. :param existing_type: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify the previous type. This is required for all MySQL column alter operations that don't otherwise specify a new type, as well as for when nullability is being changed on a SQL Server column. It is also used if the type is a so-called SQLlchemy "schema" type which may define a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), so that the constraint can be dropped. :param existing_server_default: Optional; The existing default value of the column. Required on MySQL if an existing default is not being changed; else MySQL removes the default. :param existing_nullable: Optional; the existing nullability of the column. Required on MySQL if the existing nullability is not being changed; else MySQL sets this to NULL. :param existing_autoincrement: Optional; the existing autoincrement of the column. Used for MySQL's system of altering a column that specifies ``AUTO_INCREMENT``. :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 """ compiler = self.impl.dialect.statement_compiler( self.impl.dialect, None ) def _count_constraint(constraint): return not isinstance( constraint, sa_schema.PrimaryKeyConstraint) and \ (not constraint._create_rule or constraint._create_rule(compiler)) if existing_type and type_: t = self._table(table_name, sa_schema.Column(column_name, existing_type), schema=schema ) for constraint in t.constraints: if _count_constraint(constraint): self.impl.drop_constraint(constraint) self.impl.alter_column(table_name, column_name, nullable=nullable, server_default=server_default, name=new_column_name, type_=type_, schema=schema, autoincrement=autoincrement, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_autoincrement=existing_autoincrement ) if type_: t = self._table(table_name, sa_schema.Column(column_name, type_), schema=schema ) for constraint in t.constraints: if _count_constraint(constraint): self.impl.add_constraint(constraint) def f(self, name): """Indicate a string name that has already had a naming convention applied to it. This feature combines with the SQLAlchemy ``naming_convention`` feature to disambiguate constraint names that have already had naming conventions applied to them, versus those that have not. This is necessary in the case that the ``"%(constraint_name)s"`` token is used within a naming convention, so that it can be identified that this particular name should remain fixed. If the :meth:`.Operations.f` is used on a constraint, the naming convention will not take effect:: op.add_column('t', 'x', Boolean(name=op.f('ck_bool_t_x'))) Above, the CHECK constraint generated will have the name ``ck_bool_t_x`` regardless of whether or not a naming convention is in use. 
Alternatively, if a naming convention is in use, and 'f' is not used, names will be converted along conventions. If the ``target_metadata`` contains the naming convention ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the output of the following: op.add_column('t', 'x', Boolean(name='x')) will be:: CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) The function is rendered in the output of autogenerate when a particular constraint name is already converted, for SQLAlchemy version **0.9.4 and greater only**. Even though ``naming_convention`` was introduced in 0.9.2, the string disambiguation service is new as of 0.9.4. .. versionadded:: 0.6.4 """ if conv: return conv(name) else: raise NotImplementedError( "op.f() feature requires SQLAlchemy 0.9.4 or greater.") def add_column(self, table_name, column, schema=None): """Issue an "add column" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy import Column, String op.add_column('organization', Column('name', String()) ) The provided :class:`~sqlalchemy.schema.Column` object can also specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing a remote table name. Alembic will automatically generate a stub "referenced" table and emit a second ALTER statement in order to add the constraint separately:: from alembic import op from sqlalchemy import Column, INTEGER, ForeignKey op.add_column('organization', Column('account_id', INTEGER, ForeignKey('accounts.id')) ) Note that this statement uses the :class:`~sqlalchemy.schema.Column` construct as is from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the column add op.add_column('account', Column('timestamp', TIMESTAMP, server_default=func.now()) ) :param table_name: String name of the parent table. :param column: a :class:`sqlalchemy.schema.Column` object representing the new column. :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 """ t = self._table(table_name, column, schema=schema) self.impl.add_column( table_name, column, schema=schema ) for constraint in t.constraints: if not isinstance(constraint, sa_schema.PrimaryKeyConstraint): self.impl.add_constraint(constraint) def drop_column(self, table_name, column_name, **kw): """Issue a "drop column" instruction using the current migration context. e.g.:: drop_column('organization', 'account_id') :param table_name: name of table :param column_name: name of column :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 :param mssql_drop_check: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the CHECK constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.check_constraints, then exec's a separate DROP CONSTRAINT for that constraint. :param mssql_drop_default: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the DEFAULT constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.default_constraints, then exec's a separate DROP CONSTRAINT for that default. :param mssql_drop_foreign_key: Optional boolean. 
When ``True``, on Microsoft SQL Server only, first drop a single FOREIGN KEY constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.foreign_keys/sys.foreign_key_columns, then exec's a separate DROP CONSTRAINT for that default. Only works if the column has exactly one FK constraint which refers to it, at the moment. .. versionadded:: 0.6.2 """ self.impl.drop_column( table_name, self._column(column_name, NULLTYPE), **kw ) def create_primary_key(self, name, table_name, cols, schema=None): """Issue a "create primary key" instruction using the current migration context. e.g.:: from alembic import op op.create_primary_key( "pk_my_table", "my_table", ["id", "version"] ) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.PrimaryKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. .. versionadded:: 0.5.0 :param name: Name of the primary key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions` ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the target table. :param cols: a list of string column names to be applied to the primary key constraint. :param schema: Optional schema name of the table. """ self.impl.add_constraint( self._primary_key_constraint(name, table_name, cols, schema) ) def create_foreign_key(self, name, source, referent, local_cols, remote_cols, onupdate=None, ondelete=None, deferrable=None, initially=None, match=None, source_schema=None, referent_schema=None, **dialect_kw): """Issue a "create foreign key" instruction using the current migration context. e.g.:: from alembic import op op.create_foreign_key( "fk_user_address", "address", "user", ["user_id"], ["id"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.ForeignKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the foreign key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param source: String name of the source table. :param referent: String name of the destination table. :param local_cols: a list of string column names in the source table. :param remote_cols: a list of string column names in the remote table. :param onupdate: Optional string. If set, emit ON UPDATE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param ondelete: Optional string. If set, emit ON DELETE when issuing DDL for this constraint. 
Typical values include CASCADE, DELETE and RESTRICT. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param source_schema: Optional schema name of the source table. :param referent_schema: Optional schema name of the destination table. """ self.impl.add_constraint( self._foreign_key_constraint(name, source, referent, local_cols, remote_cols, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, source_schema=source_schema, referent_schema=referent_schema, initially=initially, match=match, **dialect_kw) ) def create_unique_constraint(self, name, source, local_cols, schema=None, **kw): """Issue a "create unique constraint" instruction using the current migration context. e.g.:: from alembic import op op.create_unique_constraint("uq_user_name", "user", ["name"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.UniqueConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the unique constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param source: String name of the source table. Dotted schema names are supported. :param local_cols: a list of string column names in the source table. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 """ self.impl.add_constraint( self._unique_constraint(name, source, local_cols, schema=schema, **kw) ) def create_check_constraint(self, name, source, condition, schema=None, **kw): """Issue a "create check constraint" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy.sql import column, func op.create_check_constraint( "ck_user_name_len", "user", func.len(column('name')) > 5 ) CHECK constraints are usually against a SQL expression, so ad-hoc table metadata is usually needed. The function will convert the given arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound to an anonymous table in order to emit the CREATE statement. :param name: Name of the check constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param source: String name of the source table. :param condition: SQL expression that's the condition of the constraint. Can be a string or SQLAlchemy expression language structure. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. 
:param schema: Optional schema name to operate within. ..versionadded:: 0.4.0 """ self.impl.add_constraint( self._check_constraint( name, source, condition, schema=schema, **kw) ) def create_table(self, name, *columns, **kw): """Issue a "create table" instruction using the current migration context. This directive receives an argument list similar to that of the traditional :class:`sqlalchemy.schema.Table` construct, but without the metadata:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)) Column('timestamp', TIMESTAMP, server_default=func.now()) ) Note that :meth:`.create_table` accepts :class:`~sqlalchemy.schema.Column` constructs directly from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the "timestamp" column op.create_table('account', Column('id', INTEGER, primary_key=True), Column('timestamp', TIMESTAMP, server_default=func.now()) ) :param name: Name of the table :param \*columns: collection of :class:`~sqlalchemy.schema.Column` objects within the table, as well as optional :class:`~sqlalchemy.schema.Constraint` objects and :class:`~.sqlalchemy.schema.Index` objects. :param schema: Optional schema name to operate within. :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. """ self.impl.create_table( self._table(name, *columns, **kw) ) def drop_table(self, name, **kw): """Issue a "drop table" instruction using the current migration context. e.g.:: drop_table("accounts") :param name: Name of the table :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. """ self.impl.drop_table( self._table(name, **kw) ) def create_index(self, name, table_name, columns, schema=None, **kw): """Issue a "create index" instruction using the current migration context. e.g.:: from alembic import op op.create_index('ik_test', 't1', ['foo', 'bar']) Functional indexes can be produced by using the :func:`sqlalchemy.sql.expression.text` construct:: from alembic import op from sqlalchemy import text op.create_index('ik_test', 't1', [text('lower(foo)')]) .. versionadded:: 0.6.7 support for making use of the :func:`~sqlalchemy.sql.expression.text` construct in conjunction with :meth:`.Operations.create_index` in order to produce functional expressions within CREATE INDEX. :param name: name of the index. :param table_name: name of the owning table. .. versionchanged:: 0.5.0 The ``tablename`` parameter is now named ``table_name``. As this is a positional argument, the old name is no longer present. :param columns: a list consisting of string column names and/or :func:`~sqlalchemy.sql.expression.text` constructs. :param schema: Optional schema name to operate within. .. 
versionadded:: 0.4.0 """ self.impl.create_index( self._index(name, table_name, columns, schema=schema, **kw) ) @util._with_legacy_names([('tablename', 'table_name')]) def drop_index(self, name, table_name=None, schema=None): """Issue a "drop index" instruction using the current migration context. e.g.:: drop_index("accounts") :param name: name of the index. :param table_name: name of the owning table. Some backends such as Microsoft SQL Server require this. .. versionchanged:: 0.5.0 The ``tablename`` parameter is now named ``table_name``. The old name will continue to function for backwards compatibility. :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 """ # need a dummy column name here since SQLAlchemy # 0.7.6 and further raises on Index with no columns self.impl.drop_index( self._index(name, table_name, ['x'], schema=schema) ) @util._with_legacy_names([("type", "type_")]) def drop_constraint(self, name, table_name, type_=None, schema=None): """Drop a constraint of the given name, typically via DROP CONSTRAINT. :param name: name of the constraint. :param table_name: table name. .. versionchanged:: 0.5.0 The ``tablename`` parameter is now named ``table_name``. As this is a positional argument, the old name is no longer present. :param ``type_``: optional, required on MySQL. can be 'foreignkey', 'primary', 'unique', or 'check'. .. versionchanged:: 0.5.0 The ``type`` parameter is now named ``type_``. The old name ``type`` will remain for backwards compatibility. .. versionadded:: 0.3.6 'primary' qualfier to enable dropping of MySQL primary key constraints. :param schema: Optional schema name to operate within. .. versionadded:: 0.4.0 """ t = self._table(table_name, schema=schema) types = { 'foreignkey': lambda name: sa_schema.ForeignKeyConstraint( [], [], name=name), 'primary': sa_schema.PrimaryKeyConstraint, 'unique': sa_schema.UniqueConstraint, 'check': lambda name: sa_schema.CheckConstraint("", name=name), None: sa_schema.Constraint } try: const = types[type_] except KeyError: raise TypeError("'type' can be one of %s" % ", ".join(sorted(repr(x) for x in types))) const = const(name=name) t.append_constraint(const) self.impl.drop_constraint(const) def bulk_insert(self, table, rows, multiinsert=True): """Issue a "bulk insert" operation using the current migration context. This provides a means of representing an INSERT of multiple rows which works equally well in the context of executing on a live connection as well as that of generating a SQL script. In the case of a SQL script, the values are rendered inline into the statement. e.g.:: from alembic import op from datetime import date from sqlalchemy.sql import table, column from sqlalchemy import String, Integer, Date # Create an ad-hoc table to use for the insert statement. accounts_table = table('account', column('id', Integer), column('name', String), column('create_date', Date) ) op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':date(2010, 10, 5)}, {'id':2, 'name':'Ed Williams', 'create_date':date(2007, 5, 27)}, {'id':3, 'name':'Wendy Jones', 'create_date':date(2008, 8, 15)}, ] ) When using --sql mode, some datatypes may not render inline automatically, such as dates and other special types. 
When this issue is present, :meth:`.Operations.inline_literal` may be used:: op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':op.inline_literal("2010-10-05")}, {'id':2, 'name':'Ed Williams', 'create_date':op.inline_literal("2007-05-27")}, {'id':3, 'name':'Wendy Jones', 'create_date':op.inline_literal("2008-08-15")}, ], multiinsert=False ) When using :meth:`.Operations.inline_literal` in conjunction with :meth:`.Operations.bulk_insert`, in order for the statement to work in "online" (e.g. non --sql) mode, the :paramref:`~.Operations.bulk_insert.multiinsert` flag should be set to ``False``, which will have the effect of individual INSERT statements being emitted to the database, each with a distinct VALUES clause, so that the "inline" values can still be rendered, rather than attempting to pass the values as bound parameters. .. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now be used with :meth:`.Operations.bulk_insert`, and the :paramref:`~.Operations.bulk_insert.multiinsert` flag has been added to assist in this usage when running in "online" mode. :param table: a table object which represents the target of the INSERT. :param rows: a list of dictionaries indicating rows. :param multiinsert: when at its default of True and --sql mode is not enabled, the INSERT statement will be executed using "executemany()" style, where all elements in the list of dictionaries are passed as bound parameters in a single list. Setting this to False results in individual INSERT statements being emitted per parameter set, and is needed in those cases where non-literal values are present in the parameter sets. .. versionadded:: 0.6.4 """ self.impl.bulk_insert(table, rows, multiinsert=multiinsert) def inline_literal(self, value, type_=None): """Produce an 'inline literal' expression, suitable for using in an INSERT, UPDATE, or DELETE statement. When using Alembic in "offline" mode, CRUD operations aren't compatible with SQLAlchemy's default behavior surrounding literal values, which is that they are converted into bound values and passed separately into the ``execute()`` method of the DBAPI cursor. An offline SQL script needs to have these rendered inline. While it should always be noted that inline literal values are an **enormous** security hole in an application that handles untrusted input, a schema migration is not run in this context, so literals are safe to render inline, with the caveat that advanced types like dates may not be supported directly by SQLAlchemy. See :meth:`.execute` for an example usage of :meth:`.inline_literal`. :param value: The value to render. Strings, integers, and simple numerics should be supported. Other types like boolean, dates, etc. may or may not be supported yet by various backends. :param ``type_``: optional - a :class:`sqlalchemy.types.TypeEngine` subclass stating the type of this value. In SQLAlchemy expressions, this is usually derived automatically from the Python type of the value itself, as well as based on the context in which the value is used. """ return impl._literal_bindparam(None, value, type_=type_) def execute(self, sql, execution_options=None): """Execute the given SQL using the current migration context. In a SQL script context, the statement is emitted directly to the output stream. There is *no* return result, however, as this function is oriented towards generating a change script that can run in "offline" mode. 
For full interaction with a connected database, use the "bind" available from the context:: from alembic import op connection = op.get_bind() Also note that any parameterized statement here *will not work* in offline mode - INSERT, UPDATE and DELETE statements which refer to literal values would need to render inline expressions. For simple use cases, the :meth:`.inline_literal` function can be used for **rudimentary** quoting of string values. For "bulk" inserts, consider using :meth:`.bulk_insert`. For example, to emit an UPDATE statement which is equally compatible with both online and offline mode:: from sqlalchemy.sql import table, column from sqlalchemy import String from alembic import op account = table('account', column('name', String) ) op.execute( account.update().\\ where(account.c.name==op.inline_literal('account 1')).\\ values({'name':op.inline_literal('account 2')}) ) Note above we also used the SQLAlchemy :func:`sqlalchemy.sql.expression.table` and :func:`sqlalchemy.sql.expression.column` constructs to make a brief, ad-hoc table construct just for our UPDATE statement. A full :class:`~sqlalchemy.schema.Table` construct of course works perfectly fine as well, though note it's a recommended practice to at least ensure the definition of a table is self-contained within the migration script, rather than imported from a module that may break compatibility with older migrations. :param sql: Any legal SQLAlchemy expression, including: * a string * a :func:`sqlalchemy.sql.expression.text` construct. * a :func:`sqlalchemy.sql.expression.insert` construct. * a :func:`sqlalchemy.sql.expression.update`, :func:`sqlalchemy.sql.expression.insert`, or :func:`sqlalchemy.sql.expression.delete` construct. * Pretty much anything that's "executable" as described in :ref:`sqlexpression_toplevel`. :param execution_options: Optional dictionary of execution options, will be passed to :meth:`sqlalchemy.engine.Connection.execution_options`. """ self.migration_context.impl.execute( sql, execution_options=execution_options) def get_bind(self): """Return the current 'bind'. Under normal circumstances, this is the :class:`~sqlalchemy.engine.Connection` currently being used to emit SQL to the database. In a SQL script context, this value is ``None``. [TODO: verify this] """ return self.migration_context.impl.bind PK )E"NFNFalembic/script.pyimport datetime import os import re import shutil from . import util _sourceless_rev_file = re.compile(r'(?!__init__)(.*\.py)(c|o)?$') _only_source_rev_file = re.compile(r'(?!__init__)(.*\.py)$') _legacy_rev = re.compile(r'([a-f0-9]+)\.py$') _mod_def_re = re.compile(r'(upgrade|downgrade)_([a-z0-9]+)') _slug_re = re.compile(r'\w+') _default_file_template = "%(rev)s_%(slug)s" _relative_destination = re.compile(r'(?:\+|-)\d+') class ScriptDirectory(object): """Provides operations upon an Alembic script directory. 
This object is useful to get information as to current revisions, most notably being able to get at the "head" revision, for schemes that want to test if the current revision in the database is the most recent:: from alembic.script import ScriptDirectory from alembic.config import Config config = Config() config.set_main_option("script_location", "myapp:migrations") script = ScriptDirectory.from_config(config) head_revision = script.get_current_head() """ def __init__(self, dir, file_template=_default_file_template, truncate_slug_length=40, sourceless=False): self.dir = dir self.versions = os.path.join(self.dir, 'versions') self.file_template = file_template self.truncate_slug_length = truncate_slug_length or 40 self.sourceless = sourceless if not os.access(dir, os.F_OK): raise util.CommandError("Path doesn't exist: %r. Please use " "the 'init' command to create a new " "scripts folder." % dir) @classmethod def from_config(cls, config): """Produce a new :class:`.ScriptDirectory` given a :class:`.Config` instance. The :class:`.Config` need only have the ``script_location`` key present. """ script_location = config.get_main_option('script_location') if script_location is None: raise util.CommandError("No 'script_location' key " "found in configuration.") truncate_slug_length = config.get_main_option("truncate_slug_length") if truncate_slug_length is not None: truncate_slug_length = int(truncate_slug_length) return ScriptDirectory( util.coerce_resource_to_filename(script_location), file_template=config.get_main_option( 'file_template', _default_file_template), truncate_slug_length=truncate_slug_length, sourceless=config.get_main_option("sourceless") == "true" ) def walk_revisions(self, base="base", head="head"): """Iterate through all revisions. This is actually a breadth-first tree traversal, with leaf nodes being heads. """ if head == "head": heads = set(self.get_heads()) else: heads = set([head]) while heads: todo = set(heads) heads = set() for head in todo: if head in heads: break for sc in self.iterate_revisions(head, base): if sc.is_branch_point and sc.revision not in todo: heads.add(sc.revision) break else: yield sc def get_revision(self, id_): """Return the :class:`.Script` instance with the given rev id.""" id_ = self.as_revision_number(id_) try: return self._revision_map[id_] except KeyError: # do a partial lookup revs = [x for x in self._revision_map if x is not None and x.startswith(id_)] if not revs: raise util.CommandError("No such revision '%s'" % id_) elif len(revs) > 1: raise util.CommandError( "Multiple revisions start " "with '%s', %s..." % ( id_, ", ".join("'%s'" % r for r in revs[0:3]) )) else: return self._revision_map[revs[0]] _get_rev = get_revision def as_revision_number(self, id_): """Convert a symbolic revision, i.e. 'head' or 'base', into an actual revision number.""" if id_ == 'head': id_ = self.get_current_head() elif id_ == 'base': id_ = None return id_ _as_rev_number = as_revision_number def iterate_revisions(self, upper, lower): """Iterate through script revisions, starting at the given upper revision identifier and ending at the lower. The traversal uses strictly the `down_revision` marker inside each migration script, so it is a requirement that upper >= lower, else you'll get nothing back. The iterator yields :class:`.Script` objects. 
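For example, a sketch assuming a script directory configured as in the :class:`.ScriptDirectory` docstring above, with at least one revision present::

    from alembic.script import ScriptDirectory
    from alembic.config import Config

    config = Config()
    config.set_main_option("script_location", "myapp:migrations")
    script = ScriptDirectory.from_config(config)

    # newest first, from the current head down to the base revision
    for sc in script.iterate_revisions(script.get_current_head(), "base"):
        print(sc.revision, sc.down_revision, sc.doc)
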
""" if upper is not None and _relative_destination.match(upper): relative = int(upper) revs = list(self._iterate_revisions("head", lower)) revs = revs[-relative:] if len(revs) != abs(relative): raise util.CommandError( "Relative revision %s didn't " "produce %d migrations" % (upper, abs(relative))) return iter(revs) elif lower is not None and _relative_destination.match(lower): relative = int(lower) revs = list(self._iterate_revisions(upper, "base")) revs = revs[0:-relative] if len(revs) != abs(relative): raise util.CommandError( "Relative revision %s didn't " "produce %d migrations" % (lower, abs(relative))) return iter(revs) else: return self._iterate_revisions(upper, lower) def _iterate_revisions(self, upper, lower): lower = self.get_revision(lower) upper = self.get_revision(upper) orig = lower.revision if lower else 'base', \ upper.revision if upper else 'base' script = upper while script != lower: if script is None and lower is not None: raise util.CommandError( "Revision %s is not an ancestor of %s" % orig) yield script downrev = script.down_revision script = self._revision_map[downrev] def _upgrade_revs(self, destination, current_rev): revs = self.iterate_revisions(destination, current_rev) return [ (script.module.upgrade, script.down_revision, script.revision, script.doc) for script in reversed(list(revs)) ] def _downgrade_revs(self, destination, current_rev): revs = self.iterate_revisions(current_rev, destination) return [ (script.module.downgrade, script.revision, script.down_revision, script.doc) for script in revs ] def run_env(self): """Run the script environment. This basically runs the ``env.py`` script present in the migration environment. It is called exclusively by the command functions in :mod:`alembic.command`. """ util.load_python_file(self.dir, 'env.py') @property def env_py_location(self): return os.path.abspath(os.path.join(self.dir, "env.py")) @util.memoized_property def _revision_map(self): map_ = {} for file_ in os.listdir(self.versions): script = Script._from_filename(self, self.versions, file_) if script is None: continue if script.revision in map_: util.warn("Revision %s is present more than once" % script.revision) map_[script.revision] = script for rev in map_.values(): if rev.down_revision is None: continue if rev.down_revision not in map_: util.warn("Revision %s referenced from %s is not present" % (rev.down_revision, rev)) rev.down_revision = None else: map_[rev.down_revision].add_nextrev(rev.revision) map_[None] = None return map_ def _rev_path(self, rev_id, message, create_date): slug = "_".join(_slug_re.findall(message or "")).lower() if len(slug) > self.truncate_slug_length: slug = slug[:self.truncate_slug_length].rsplit('_', 1)[0] + '_' filename = "%s.py" % ( self.file_template % { 'rev': rev_id, 'slug': slug, 'year': create_date.year, 'month': create_date.month, 'day': create_date.day, 'hour': create_date.hour, 'minute': create_date.minute, 'second': create_date.second } ) return os.path.join(self.versions, filename) def get_current_head(self): """Return the current head revision. If the script directory has multiple heads due to branching, an error is raised. Returns a string revision number. """ current_heads = self.get_heads() if len(current_heads) > 1: raise util.CommandError( 'Only a single head is supported. The ' 'script directory has multiple heads (due to branching), ' 'which must be resolved by manually editing the revision ' 'files to form a linear sequence. 
Run `alembic branches` to ' 'see the divergence(s).') if current_heads: return current_heads[0] else: return None _current_head = get_current_head """the 0.2 name, for backwards compat.""" def get_heads(self): """Return all "head" revisions as strings. Returns a list of string revision numbers. This is normally a list of length one, unless branches are present. The :meth:`.ScriptDirectory.get_current_head()` method can be used normally when a script directory has only one head. """ heads = [] for script in self._revision_map.values(): if script and script.is_head: heads.append(script.revision) return heads def get_base(self): """Return the "base" revision as a string. This is the revision number of the script that has a ``down_revision`` of None. Behavior is not defined if more than one script has a ``down_revision`` of None. """ for script in self._revision_map.values(): if script and script.down_revision is None \ and script.revision in self._revision_map: return script.revision else: return None def _generate_template(self, src, dest, **kw): util.status("Generating %s" % os.path.abspath(dest), util.template_to_file, src, dest, **kw ) def _copy_file(self, src, dest): util.status("Generating %s" % os.path.abspath(dest), shutil.copy, src, dest) def generate_revision(self, revid, message, refresh=False, **kw): """Generate a new revision file. This runs the ``script.py.mako`` template, given template arguments, and creates a new file. :param revid: String revision id. Typically this comes from ``alembic.util.rev_id()``. :param message: the revision message, the one passed by the -m argument to the ``revision`` command. :param refresh: when True, the in-memory state of this :class:`.ScriptDirectory` will be updated with a new :class:`.Script` instance representing the new revision; the :class:`.Script` instance is returned. If False, the file is created but the state of the :class:`.ScriptDirectory` is unmodified; ``None`` is returned. """ current_head = self.get_current_head() create_date = datetime.datetime.now() path = self._rev_path(revid, message, create_date) self._generate_template( os.path.join(self.dir, "script.py.mako"), path, up_revision=str(revid), down_revision=current_head, create_date=create_date, message=message if message is not None else ("empty message"), **kw ) if refresh: script = Script._from_path(self, path) self._revision_map[script.revision] = script if script.down_revision: self._revision_map[script.down_revision].\ add_nextrev(script.revision) return script else: return None class Script(object): """Represent a single revision file in a ``versions/`` directory. The :class:`.Script` instance is returned by methods such as :meth:`.ScriptDirectory.iterate_revisions`. 
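    A typical use is to inspect the revision identifiers and the docstring of
    each file (a minimal sketch, assuming ``script`` is a
    :class:`.ScriptDirectory` instance)::

        sc = script.get_revision("head")
        print(sc.revision)       # revision id string, e.g. '1975ea83b712'
        print(sc.down_revision)  # parent revision id, or None for the base
        print(sc.doc)            # first paragraph of the script's docstring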
""" nextrev = frozenset() def __init__(self, module, rev_id, path): self.module = module self.revision = rev_id self.path = path self.down_revision = getattr(module, 'down_revision', None) revision = None """The string revision number for this :class:`.Script` instance.""" module = None """The Python module representing the actual script itself.""" path = None """Filesystem path of the script.""" down_revision = None """The ``down_revision`` identifier within the migration script.""" @property def doc(self): """Return the docstring given in the script.""" return re.split("\n\n", self.longdoc)[0] @property def longdoc(self): """Return the docstring given in the script.""" doc = self.module.__doc__ if doc: if hasattr(self.module, "_alembic_source_encoding"): doc = doc.decode(self.module._alembic_source_encoding) return doc.strip() else: return "" def add_nextrev(self, rev): self.nextrev = self.nextrev.union([rev]) @property def is_head(self): """Return True if this :class:`.Script` is a 'head' revision. This is determined based on whether any other :class:`.Script` within the :class:`.ScriptDirectory` refers to this :class:`.Script`. Multiple heads can be present. """ return not bool(self.nextrev) @property def is_branch_point(self): """Return True if this :class:`.Script` is a branch point. A branchpoint is defined as a :class:`.Script` which is referred to by more than one succeeding :class:`.Script`, that is more than one :class:`.Script` has a `down_revision` identifier pointing here. """ return len(self.nextrev) > 1 @property def log_entry(self): return \ "Rev: %s%s%s\n" \ "Parent: %s\n" \ "Path: %s\n" \ "\n%s\n" % ( self.revision, " (head)" if self.is_head else "", " (branchpoint)" if self.is_branch_point else "", self.down_revision, self.path, "\n".join( " %s" % para for para in self.longdoc.splitlines() ) ) def __str__(self): return "%s -> %s%s%s, %s" % ( self.down_revision, self.revision, " (head)" if self.is_head else "", " (branchpoint)" if self.is_branch_point else "", self.doc) @classmethod def _from_path(cls, scriptdir, path): dir_, filename = os.path.split(path) return cls._from_filename(scriptdir, dir_, filename) @classmethod def _from_filename(cls, scriptdir, dir_, filename): if scriptdir.sourceless: py_match = _sourceless_rev_file.match(filename) else: py_match = _only_source_rev_file.match(filename) if not py_match: return None py_filename = py_match.group(1) if scriptdir.sourceless: is_c = py_match.group(2) == 'c' is_o = py_match.group(2) == 'o' else: is_c = is_o = False if is_o or is_c: py_exists = os.path.exists(os.path.join(dir_, py_filename)) pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c")) # prefer .py over .pyc because we'd like to get the # source encoding; prefer .pyc over .pyo because we'd like to # have the docstrings which a -OO file would not have if py_exists or is_o and pyc_exists: return None module = util.load_python_file(dir_, filename) if not hasattr(module, "revision"): # attempt to get the revision id from the script name, # this for legacy only m = _legacy_rev.match(filename) if not m: raise util.CommandError( "Could not determine revision id from filename %s. " "Be sure the 'revision' variable is " "declared inside the script (please see 'Upgrading " "from Alembic 0.1 to 0.2' in the documentation)." 
% filename) else: revision = m.group(1) else: revision = module.revision return Script(module, revision, os.path.join(dir_, filename)) PK<8E'`H)H)alembic/util.pyimport sys import os import textwrap import warnings import re import inspect import uuid from mako.template import Template from sqlalchemy.engine import url from sqlalchemy import __version__ from .compat import callable, exec_, load_module_py, load_module_pyc, \ binary_type class CommandError(Exception): pass def _safe_int(value): try: return int(value) except: return value _vers = tuple( [_safe_int(x) for x in re.findall(r'(\d+|[abc]\d)', __version__)]) sqla_07 = _vers > (0, 7, 2) sqla_08 = _vers >= (0, 8, 0, 'b2') sqla_09 = _vers >= (0, 9, 0) sqla_092 = _vers >= (0, 9, 2) sqla_094 = _vers >= (0, 9, 4) if not sqla_07: raise CommandError( "SQLAlchemy 0.7.3 or greater is required. ") from sqlalchemy.util import format_argspec_plus, update_wrapper from sqlalchemy.util.compat import inspect_getfullargspec import logging log = logging.getLogger(__name__) try: import fcntl import termios import struct ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)) _h, TERMWIDTH, _hp, _wp = struct.unpack('HHHH', ioctl) if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty TERMWIDTH = None except (ImportError, IOError): TERMWIDTH = None def template_to_file(template_file, dest, **kw): with open(dest, 'w') as f: f.write( Template(filename=template_file).render(**kw) ) def create_module_class_proxy(cls, globals_, locals_): """Create module level proxy functions for the methods on a given class. The functions will have a compatible signature as the methods. A proxy is established using the ``_install_proxy(obj)`` function, and removed using ``_remove_proxy()``, both installed by calling this function. """ attr_names = set() def _install_proxy(obj): globals_['_proxy'] = obj for name in attr_names: globals_[name] = getattr(obj, name) def _remove_proxy(): globals_['_proxy'] = None for name in attr_names: del globals_[name] globals_['_install_proxy'] = _install_proxy globals_['_remove_proxy'] = _remove_proxy def _create_op_proxy(name): fn = getattr(cls, name) spec = inspect.getargspec(fn) if spec[0] and spec[0][0] == 'self': spec[0].pop(0) args = inspect.formatargspec(*spec) num_defaults = 0 if spec[3]: num_defaults += len(spec[3]) name_args = spec[0] if num_defaults: defaulted_vals = name_args[0 - num_defaults:] else: defaulted_vals = () apply_kw = inspect.formatargspec( name_args, spec[1], spec[2], defaulted_vals, formatvalue=lambda x: '=' + x) def _name_error(name): raise NameError( "Can't invoke function '%s', as the proxy object has " "not yet been " "established for the Alembic '%s' class. " "Try placing this code inside a callable." 
% ( name, cls.__name__ )) globals_['_name_error'] = _name_error func_text = textwrap.dedent("""\ def %(name)s(%(args)s): %(doc)r try: p = _proxy except NameError: _name_error('%(name)s') return _proxy.%(name)s(%(apply_kw)s) e """ % { 'name': name, 'args': args[1:-1], 'apply_kw': apply_kw[1:-1], 'doc': fn.__doc__, }) lcl = {} exec_(func_text, globals_, lcl) return lcl[name] for methname in dir(cls): if not methname.startswith('_'): if callable(getattr(cls, methname)): locals_[methname] = _create_op_proxy(methname) else: attr_names.add(methname) def write_outstream(stream, *text): encoding = getattr(stream, 'encoding', 'ascii') or 'ascii' for t in text: if not isinstance(t, binary_type): t = t.encode(encoding, 'replace') t = t.decode(encoding) try: stream.write(t) except IOError: # suppress "broken pipe" errors. # no known way to handle this on Python 3 however # as the exception is "ignored" (noisily) in TextIOWrapper. break def coerce_resource_to_filename(fname): """Interpret a filename as either a filesystem location or as a package resource. Names that are non absolute paths and contain a colon are interpreted as resources and coerced to a file location. """ if not os.path.isabs(fname) and ":" in fname: import pkg_resources fname = pkg_resources.resource_filename(*fname.split(':')) return fname def status(_statmsg, fn, *arg, **kw): msg(_statmsg + " ...", False) try: ret = fn(*arg, **kw) write_outstream(sys.stdout, " done\n") return ret except: write_outstream(sys.stdout, " FAILED\n") raise def err(message): log.error(message) msg("FAILED: %s" % message) sys.exit(-1) def obfuscate_url_pw(u): u = url.make_url(u) if u.password: u.password = 'XXXXX' return str(u) def asbool(value): return value is not None and \ value.lower() == 'true' def warn(msg): warnings.warn(msg) def msg(msg, newline=True): if TERMWIDTH is None: write_outstream(sys.stdout, msg) if newline: write_outstream(sys.stdout, "\n") else: # left indent output lines lines = textwrap.wrap(msg, TERMWIDTH) if len(lines) > 1: for line in lines[0:-1]: write_outstream(sys.stdout, " ", line, "\n") write_outstream(sys.stdout, " ", lines[-1], ("\n" if newline else "")) def load_python_file(dir_, filename): """Load a file from the given path as a Python module.""" module_id = re.sub(r'\W', "_", filename) path = os.path.join(dir_, filename) _, ext = os.path.splitext(filename) if ext == ".py": if os.path.exists(path): module = load_module_py(module_id, path) elif os.path.exists(simple_pyc_file_from_path(path)): # look for sourceless load module = load_module_pyc( module_id, simple_pyc_file_from_path(path)) else: raise ImportError("Can't find Python file %s" % path) elif ext in (".pyc", ".pyo"): module = load_module_pyc(module_id, path) del sys.modules[module_id] return module def simple_pyc_file_from_path(path): """Given a python source path, return the so-called "sourceless" .pyc or .pyo path. This just a .pyc or .pyo file where the .py file would be. Even with PEP-3147, which normally puts .pyc/.pyo files in __pycache__, this use case remains supported as a so-called "sourceless module import". """ if sys.flags.optimize: return path + "o" # e.g. .pyo else: return path + "c" # e.g. .pyc def pyc_file_from_path(path): """Given a python source path, locate the .pyc. 
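    On interpreters that implement PEP 3147 this delegates to
    ``imp.cache_from_source()``, so a path such as ``versions/ae1027a6acf.py``
    maps to something like ``versions/__pycache__/ae1027a6acf.cpython-34.pyc``
    (illustrative names; the exact tag depends on the interpreter).  Otherwise
    the plain ``.pyc``/``.pyo`` sibling path is returned.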
See http://www.python.org/dev/peps/pep-3147/ #detecting-pep-3147-availability http://www.python.org/dev/peps/pep-3147/#file-extension-checks """ import imp has3147 = hasattr(imp, 'get_tag') if has3147: return imp.cache_from_source(path) else: return simple_pyc_file_from_path(path) def rev_id(): val = int(uuid.uuid4()) % 100000000000000 return hex(val)[2:-1] class memoized_property(object): """A read-only @property that is only evaluated once.""" def __init__(self, fget, doc=None): self.fget = fget self.__doc__ = doc or fget.__doc__ self.__name__ = fget.__name__ def __get__(self, obj, cls): if obj is None: return None obj.__dict__[self.__name__] = result = self.fget(obj) return result class immutabledict(dict): def _immutable(self, *arg, **kw): raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = \ clear = pop = popitem = setdefault = \ update = _immutable def __new__(cls, *args): new = dict.__new__(cls) dict.__init__(new, *args) return new def __init__(self, *args): pass def __reduce__(self): return immutabledict, (dict(self), ) def union(self, d): if not self: return immutabledict(d) else: d2 = immutabledict(self) dict.update(d2, d) return d2 def __repr__(self): return "immutabledict(%s)" % dict.__repr__(self) def _with_legacy_names(translations): def decorate(fn): spec = inspect_getfullargspec(fn) metadata = dict(target='target', fn='fn') metadata.update(format_argspec_plus(spec, grouped=False)) has_keywords = bool(spec[2]) if not has_keywords: metadata['args'] += ", **kw" metadata['apply_kw'] += ", **kw" def go(*arg, **kw): names = set(kw).difference(spec[0]) for oldname, newname in translations: if oldname in kw: kw[newname] = kw.pop(oldname) names.discard(oldname) warnings.warn( "Argument '%s' is now named '%s' for function '%s'" % (oldname, newname, fn.__name__)) if not has_keywords and names: raise TypeError("Unknown arguments: %s" % ", ".join(names)) return fn(*arg, **kw) code = 'lambda %(args)s: %(target)s(%(apply_kw)s)' % ( metadata) decorated = eval(code, {"target": go}) decorated.__defaults__ = getattr(fn, '__func__', fn).__defaults__ update_wrapper(decorated, fn) if hasattr(decorated, '__wrapped__'): # update_wrapper in py3k applies __wrapped__, which causes # inspect.getargspec() to ignore the extra arguments on our # wrapper as of Python 3.4. We need this for the # "module class proxy" thing though, so just del the __wrapped__ # for now. See #175 as well as bugs.python.org/issue17482 del decorated.__wrapped__ return decorated return decorate PK];8EqA[XX alembic/autogenerate/__init__.pyfrom .api import compare_metadata, _produce_migration_diffs, \ _produce_net_changes PKerE^B9&9&alembic/autogenerate/api.py"""Provide the 'autogenerate' feature which can produce migration operations automatically.""" import logging import re from sqlalchemy.engine.reflection import Inspector from sqlalchemy.util import OrderedSet from .compare import _compare_tables from .render import _drop_table, _drop_column, _drop_index, _drop_constraint, \ _add_table, _add_column, _add_index, _add_constraint, _modify_col from .. import util log = logging.getLogger(__name__) ################################################### # public def compare_metadata(context, metadata): """Compare a database schema to that given in a :class:`~sqlalchemy.schema.MetaData` instance. 
The database connection is presented in the context of a :class:`.MigrationContext` object, which provides database connectivity as well as optional comparison functions to use for datatypes and server defaults - see the "autogenerate" arguments at :meth:`.EnvironmentContext.configure` for details on these. The return format is a list of "diff" directives, each representing individual differences:: from alembic.migration import MigrationContext from alembic.autogenerate import compare_metadata from sqlalchemy.schema import SchemaItem from sqlalchemy.types import TypeEngine from sqlalchemy import (create_engine, MetaData, Column, Integer, String, Table) import pprint engine = create_engine("sqlite://") engine.execute(''' create table foo ( id integer not null primary key, old_data varchar, x integer )''') engine.execute(''' create table bar ( data varchar )''') metadata = MetaData() Table('foo', metadata, Column('id', Integer, primary_key=True), Column('data', Integer), Column('x', Integer, nullable=False) ) Table('bat', metadata, Column('info', String) ) mc = MigrationContext.configure(engine.connect()) diff = compare_metadata(mc, metadata) pprint.pprint(diff, indent=2, width=20) Output:: [ ( 'add_table', Table('bat', MetaData(bind=None), Column('info', String(), table=), schema=None)), ( 'remove_table', Table(u'bar', MetaData(bind=None), Column(u'data', VARCHAR(), table=), schema=None)), ( 'add_column', None, 'foo', Column('data', Integer(), table=)), ( 'remove_column', None, 'foo', Column(u'old_data', VARCHAR(), table=None)), [ ( 'modify_nullable', None, 'foo', u'x', { 'existing_server_default': None, 'existing_type': INTEGER()}, True, False)]] :param context: a :class:`.MigrationContext` instance. :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance. """ autogen_context, connection = _autogen_context(context, None) diffs = [] object_filters = _get_object_filters(context.opts) include_schemas = context.opts.get('include_schemas', False) _produce_net_changes(connection, metadata, diffs, autogen_context, object_filters, include_schemas) return diffs ################################################### # top level def _produce_migration_diffs(context, template_args, imports, include_symbol=None, include_object=None, include_schemas=False): opts = context.opts metadata = opts['target_metadata'] include_schemas = opts.get('include_schemas', include_schemas) object_filters = _get_object_filters(opts, include_symbol, include_object) if metadata is None: raise util.CommandError( "Can't proceed with --autogenerate option; environment " "script %s does not provide " "a MetaData object to the context." 
% ( context.script.env_py_location )) autogen_context, connection = _autogen_context(context, imports) diffs = [] remove_tables = template_args['config'].get_main_option("remove_tables") #if removate_tables is '1', then will generate drop table statement _produce_net_changes(connection, metadata, diffs, autogen_context, object_filters, include_schemas, remove_tables=='1') template_args[opts['upgrade_token']] = \ _indent(_produce_upgrade_commands(diffs, autogen_context)) template_args[opts['downgrade_token']] = \ _indent(_produce_downgrade_commands(diffs, autogen_context)) template_args['imports'] = "\n".join(sorted(imports)) def _get_object_filters( context_opts, include_symbol=None, include_object=None): include_symbol = context_opts.get('include_symbol', include_symbol) include_object = context_opts.get('include_object', include_object) object_filters = [] if include_symbol: def include_symbol_filter(object, name, type_, reflected, compare_to): if type_ == "table": return include_symbol(name, object.schema) else: return True object_filters.append(include_symbol_filter) if include_object: object_filters.append(include_object) return object_filters def _autogen_context(context, imports): opts = context.opts connection = context.bind return { 'imports': imports, 'connection': connection, 'dialect': connection.dialect, 'context': context, 'opts': opts }, connection def _indent(text): text = "### commands auto generated by Alembic - "\ "please adjust! ###\n" + text text += "\n### end Alembic commands ###" text = re.compile(r'^', re.M).sub(" ", text).strip() return text ################################################### # walk structures def _produce_net_changes(connection, metadata, diffs, autogen_context, object_filters=(), include_schemas=False, remove_tables=False): inspector = Inspector.from_engine(connection) # TODO: not hardcode alembic_version here ? 
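    # Build up the set of (schema, tablename) tuples present in the target
    # database.  The 'alembic_version' bookkeeping table is always excluded,
    # and unless include_schemas is set only the default schema (represented
    # as None) is inspected.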
conn_table_names = set() default_schema = connection.dialect.default_schema_name if include_schemas: schemas = set(inspector.get_schema_names()) # replace default schema name with None schemas.discard("information_schema") # replace the "default" schema with None schemas.add(None) schemas.discard(default_schema) else: schemas = [None] for s in schemas: tables = set(inspector.get_table_names(schema=s)).\ difference(['alembic_version']) conn_table_names.update(zip([s] * len(tables), tables)) metadata_table_names = OrderedSet([(table.schema, table.name) for table in metadata.sorted_tables]) _compare_tables(conn_table_names, metadata_table_names, object_filters, inspector, metadata, diffs, autogen_context, remove_tables) def _produce_upgrade_commands(diffs, autogen_context): buf = [] for diff in diffs: buf.append(_invoke_command("upgrade", diff, autogen_context)) if not buf: buf = ["pass"] return "\n".join(buf) def _produce_downgrade_commands(diffs, autogen_context): buf = [] for diff in reversed(diffs): buf.append(_invoke_command("downgrade", diff, autogen_context)) if not buf: buf = ["pass"] return "\n".join(buf) def _invoke_command(updown, args, autogen_context): if isinstance(args, tuple): return _invoke_adddrop_command(updown, args, autogen_context) else: return _invoke_modify_command(updown, args, autogen_context) def _invoke_adddrop_command(updown, args, autogen_context): cmd_type = args[0] adddrop, cmd_type = cmd_type.split("_") cmd_args = args[1:] + (autogen_context,) _commands = { "table": (_drop_table, _add_table), "column": (_drop_column, _add_column), "index": (_drop_index, _add_index), "constraint": (_drop_constraint, _add_constraint), } cmd_callables = _commands[cmd_type] if ( updown == "upgrade" and adddrop == "add" ) or ( updown == "downgrade" and adddrop == "remove" ): return cmd_callables[1](*cmd_args) else: return cmd_callables[0](*cmd_args) def _invoke_modify_command(updown, args, autogen_context): sname, tname, cname = args[0][1:4] kw = {} _arg_struct = { "modify_type": ("existing_type", "type_"), "modify_nullable": ("existing_nullable", "nullable"), "modify_default": ("existing_server_default", "server_default"), } for diff in args: diff_kw = diff[4] for arg in ("existing_type", "existing_nullable", "existing_server_default"): if arg in diff_kw: kw.setdefault(arg, diff_kw[arg]) old_kw, new_kw = _arg_struct[diff[0]] if updown == "upgrade": kw[new_kw] = diff[-1] kw[old_kw] = diff[-2] else: kw[new_kw] = diff[-2] kw[old_kw] = diff[-1] if "nullable" in kw: kw.pop("existing_nullable", None) if "server_default" in kw: kw.pop("existing_server_default", None) return _modify_col(tname, cname, autogen_context, schema=sname, **kw) PK.BHPY[[alembic/autogenerate/compare.pyfrom sqlalchemy import schema as sa_schema, types as sqltypes import logging from .. import compat from .render import _render_server_default from sqlalchemy.util import OrderedSet log = logging.getLogger(__name__) def _run_filters(object_, name, type_, reflected, compare_to, object_filters): for fn in object_filters: if not fn(object_, name, type_, reflected, compare_to): return False else: return True def _compare_tables(conn_table_names, metadata_table_names, object_filters, inspector, metadata, diffs, autogen_context, remove_tables): default_schema = inspector.bind.dialect.default_schema_name # tables coming from the connection will not have "schema" # set if it matches default_schema_name; so we need a list # of table names from local metadata that also have "None" if schema # == default_schema_name. 
Most setups will be like this anyway but # some are not (see #170) metadata_table_names_no_dflt_schema = OrderedSet([ (schema if schema != default_schema else None, tname) for schema, tname in metadata_table_names ]) # to adjust for the MetaData collection storing the tables either # as "schemaname.tablename" or just "tablename", create a new lookup # which will match the "non-default-schema" keys to the Table object. tname_to_table = dict( ( no_dflt_schema, metadata.tables[sa_schema._get_table_key(tname, schema)] ) for no_dflt_schema, (schema, tname) in zip( metadata_table_names_no_dflt_schema, metadata_table_names) ) metadata_table_names = metadata_table_names_no_dflt_schema for s, tname in metadata_table_names.difference(conn_table_names): name = '%s.%s' % (s, tname) if s else tname metadata_table = tname_to_table[(s, tname)] if _run_filters( metadata_table, tname, "table", False, None, object_filters): diffs.append(("add_table", metadata_table)) log.info("{{white|green:Detected}} added table %r", name) _compare_indexes_and_uniques(s, tname, object_filters, None, metadata_table, diffs, autogen_context, inspector, []) removal_metadata = sa_schema.MetaData() for s, tname in conn_table_names.difference(metadata_table_names): name = sa_schema._get_table_key(tname, s) exists = name in removal_metadata.tables t = sa_schema.Table(tname, removal_metadata, schema=s) if not exists: inspector.reflecttable(t, None) if remove_tables: if _run_filters(t, tname, "table", True, None, object_filters): diffs.append(("remove_table", t)) log.info("{{white|green:Detected}} removed table %r", name) existing_tables = conn_table_names.intersection(metadata_table_names) existing_metadata = sa_schema.MetaData() conn_column_info = {} for s, tname in existing_tables: name = sa_schema._get_table_key(tname, s) exists = name in existing_metadata.tables t = sa_schema.Table(tname, existing_metadata, schema=s) if not exists: inspector.reflecttable(t, None) conn_column_info[(s, tname)] = t for s, tname in sorted(existing_tables, key=lambda x: (x[0] or '', x[1])): s = s or None name = '%s.%s' % (s, tname) if s else tname metadata_table = tname_to_table[(s, tname)] conn_table = existing_metadata.tables[name] if _run_filters( metadata_table, tname, "table", False, conn_table, object_filters): removed_columns = _compare_columns(s, tname, object_filters, conn_table, metadata_table, diffs, autogen_context, inspector) _compare_indexes_and_uniques(s, tname, object_filters, conn_table, metadata_table, diffs, autogen_context, inspector, removed_columns) # TODO: # table constraints # sequences def _make_index(params, conn_table): return sa_schema.Index( params['name'], *[conn_table.c[cname] for cname in params['column_names']], unique=params['unique'] ) def _make_unique_constraint(params, conn_table): return sa_schema.UniqueConstraint( *[conn_table.c[cname] for cname in params['column_names']], name=params['name'] ) def _compare_columns(schema, tname, object_filters, conn_table, metadata_table, diffs, autogen_context, inspector): name = '%s.%s' % (schema, tname) if schema else tname metadata_cols_by_name = dict((c.name, c) for c in metadata_table.c) conn_col_names = dict((c.name, c) for c in conn_table.c) metadata_col_names = OrderedSet(sorted(metadata_cols_by_name)) removed_columns = [] for cname in metadata_col_names.difference(conn_col_names): if _run_filters(metadata_cols_by_name[cname], cname, "column", False, None, object_filters): if not metadata_table.__mapping_only__: diffs.append( ("add_column", schema, tname, 
metadata_cols_by_name[cname]) ) log.info("{{white|green:Detected}} added column '%s.%s'", name, cname) else: log.info("{{white|red:Skipped}} added column '%s.%s'", name, cname) for cname in set(conn_col_names).difference(metadata_col_names): if _run_filters(conn_table.c[cname], cname, "column", True, None, object_filters): if not metadata_table.__mapping_only__: diffs.append( ("remove_column", schema, tname, conn_table.c[cname]) ) removed_columns.append(cname) log.info("{{white|green:Detected}} removed column '%s.%s'", name, cname) else: log.info("{{white|red:Skipped}} removed column '%s.%s'", name, cname) for colname in metadata_col_names.intersection(conn_col_names): metadata_col = metadata_cols_by_name[colname] conn_col = conn_table.c[colname] if not _run_filters( metadata_col, colname, "column", False, conn_col, object_filters): continue col_diff = [] _compare_type(schema, tname, colname, conn_col, metadata_col, col_diff, autogen_context ) _compare_nullable(schema, tname, colname, conn_col, metadata_col, col_diff, autogen_context ) _compare_server_default(schema, tname, colname, conn_col, metadata_col, col_diff, autogen_context ) if col_diff: diffs.append(col_diff) return removed_columns class _constraint_sig(object): def __eq__(self, other): return self.const == other.const def __ne__(self, other): return self.const != other.const def __hash__(self): return hash(self.const) class _uq_constraint_sig(_constraint_sig): is_index = False is_unique = True def __init__(self, const): self.const = const self.name = const.name self.sig = tuple(sorted([col.name for col in const.columns])) @property def column_names(self): return [col.name for col in self.const.columns] class _ix_constraint_sig(_constraint_sig): is_index = True def __init__(self, const): self.const = const self.name = const.name self.sig = tuple(sorted([col.name for col in const.columns])) self.is_unique = bool(const.unique) @property def column_names(self): return _get_index_column_names(self.const) def _get_index_column_names(idx): if compat.sqla_08: return [getattr(exp, "name", None) for exp in idx.expressions] else: return [getattr(col, "name", None) for col in idx.columns] def _compare_indexes_and_uniques(schema, tname, object_filters, conn_table, metadata_table, diffs, autogen_context, inspector, removed_columns): is_create_table = conn_table is None # 1a. get raw indexes and unique constraints from metadata ... metadata_unique_constraints = set( uq for uq in metadata_table.constraints if isinstance(uq, sa_schema.UniqueConstraint) ) metadata_indexes = set(metadata_table.indexes) conn_uniques = conn_indexes = frozenset() supports_unique_constraints = False if conn_table is not None: # 1b. ... and from connection, if the table exists if hasattr(inspector, "get_unique_constraints"): try: conn_uniques = inspector.get_unique_constraints( tname, schema=schema) supports_unique_constraints = True except NotImplementedError: pass try: conn_indexes = inspector.get_indexes(tname, schema=schema) except NotImplementedError: pass # 2. convert conn-level objects from raw inspector records # into schema objects conn_uniques = set(_make_unique_constraint(uq_def, conn_table) for uq_def in conn_uniques) conn_indexes = set(_make_index(ix, conn_table) for ix in conn_indexes) # 3. 
give the dialect a chance to omit indexes and constraints that # we know are either added implicitly by the DB or that the DB # can't accurately report on autogen_context['context'].impl.\ correct_for_autogen_constraints( conn_uniques, conn_indexes, metadata_unique_constraints, metadata_indexes) # 4. organize the constraints into "signature" collections, the # _constraint_sig() objects provide a consistent facade over both # Index and UniqueConstraint so we can easily work with them # interchangeably metadata_unique_constraints = set(_uq_constraint_sig(uq) for uq in metadata_unique_constraints ) metadata_indexes = set(_ix_constraint_sig(ix) for ix in metadata_indexes) conn_unique_constraints = set( _uq_constraint_sig(uq) for uq in conn_uniques) conn_indexes = set(_ix_constraint_sig(ix) for ix in conn_indexes) # 5. index things by name, for those objects that have names metadata_names = dict( (c.name, c) for c in metadata_unique_constraints.union(metadata_indexes) if c.name is not None) conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints) conn_indexes_by_name = dict((c.name, c) for c in conn_indexes) conn_names = dict((c.name, c) for c in conn_unique_constraints.union(conn_indexes) if c.name is not None) doubled_constraints = dict( (name, (conn_uniques_by_name[name], conn_indexes_by_name[name])) for name in set( conn_uniques_by_name).intersection(conn_indexes_by_name) ) # 6. index things by "column signature", to help with unnamed unique # constraints. conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints) metadata_uniques_by_sig = dict( (uq.sig, uq) for uq in metadata_unique_constraints) metadata_indexes_by_sig = dict( (ix.sig, ix) for ix in metadata_indexes) unnamed_metadata_uniques = dict( (uq.sig, uq) for uq in metadata_unique_constraints if uq.name is None) # assumptions: # 1. a unique constraint or an index from the connection *always* # has a name. # 2. an index on the metadata side *always* has a name. # 3. a unique constraint on the metadata side *might* have a name. # 4. The backend may double up indexes as unique constraints and # vice versa (e.g. MySQL, Postgresql) def obj_added(obj): if obj.is_index: diffs.append(("add_index", obj.const)) #log.info("{{white|green:Detected}} added index '%s' on '%s(%s)'" % (key, tname, ','.join([exp.name for exp in m_objs[key].columns]))) log.info("{{white|green:Detected}} added index '%s' on %s(%s)", obj.name, tname, ', '.join([ "'%s'" % obj.column_names ]) ) else: if not supports_unique_constraints: # can't report unique indexes as added if we don't # detect them return if is_create_table: # unique constraints are created inline with table defs return diffs.append(("add_constraint", obj.const)) log.info("{{white|green:Detected}} added unique constraint %s on %s(%s)", obj.name, tname, ', '.join([ "'%s'" % obj.column_names ]) ) def obj_removed(obj): if obj.is_index: if obj.is_unique and not supports_unique_constraints: # many databases double up unique constraints # as unique indexes. without that list we can't # be sure what we're doing here return diffs.append(("remove_index", obj.const)) log.info("{{white|green:Detected}} removed index '%s' on '%s'", obj.name, tname) else: diffs.append(("remove_constraint", obj.const)) log.info("{{white|green:Detected}} removed unique constraint '%s' on '%s'", obj.name, tname ) def obj_changed(old, new, msg): if old.is_index: # convert between both Nones (SQLA ticket #2825) on the metadata # side and zeroes on the reflection side. 
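            # __mapping_only__ appears to be a fork-specific Table flag (it is
            # not part of stock Alembic): when it is set, the detected index
            # change is only logged as skipped rather than emitted as a diff.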
if not metadata_table.__mapping_only__: diffs.append(("remove_index", old.const)) diffs.append(("add_index", new.const)) msg = [] if new.is_unique is not old.is_unique: msg.append(' unique from %r to %r' % ( old.is_unique, new.is_unique )) if new.column_names != old.column_names: msg.append(' columns from (%s) to (%s)' % ( ', '.join(old.column_names), ', '.join(new.column_names) )) if not metadata_table.__mapping_only__: log.info("{{white|green:Detected}} changed index '%s' on '%s' changes as: %s" % (new.name, tname, ', '.join(msg))) else: log.info("{{white|red:Skipped}} changed index '%s' on '%s' changes as: %s" % (new.name, tname, ', '.join(msg))) #log.info("Detected changed index '%s' on '%s':%s", # old.name, tname, ', '.join(msg) # ) # diffs.append(("remove_index", old.const)) # diffs.append(("add_index", new.const)) else: log.info("Detected changed unique constraint '%s' on '%s':%s", old.name, tname, ', '.join(msg) ) diffs.append(("remove_constraint", old.const)) diffs.append(("add_constraint", new.const)) for added_name in sorted(set(metadata_names).difference(conn_names)): obj = metadata_names[added_name] obj_added(obj) for existing_name in sorted(set(metadata_names).intersection(conn_names)): metadata_obj = metadata_names[existing_name] if existing_name in doubled_constraints: conn_uq, conn_idx = doubled_constraints[existing_name] if metadata_obj.is_index: conn_obj = conn_idx else: conn_obj = conn_uq else: conn_obj = conn_names[existing_name] if conn_obj.is_index != metadata_obj.is_index: obj_removed(conn_obj) obj_added(metadata_obj) else: msg = [] if conn_obj.is_unique != metadata_obj.is_unique: msg.append(' unique=%r to unique=%r' % ( conn_obj.is_unique, metadata_obj.is_unique )) if conn_obj.sig != metadata_obj.sig: msg.append(' columns %r to %r' % ( conn_obj.sig, metadata_obj.sig )) if msg: obj_changed(conn_obj, metadata_obj, msg) for removed_name in sorted(set(conn_names).difference(metadata_names)): conn_obj = conn_names[removed_name] if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques: continue elif removed_name in doubled_constraints: #add if index is only one column and it's in removed columns, it'll #be skipped if len(conn_obj.sig) == 1 and conn_obj.sig[0] in removed_columns: continue if conn_obj.sig not in metadata_indexes_by_sig and \ conn_obj.sig not in metadata_uniques_by_sig: conn_uq, conn_idx = doubled_constraints[removed_name] #drop will failed in mysql add by limodou if inspector.dialect.name != 'mysql': obj_removed(conn_uq) obj_removed(conn_idx) else: obj_removed(conn_obj) for uq_sig in unnamed_metadata_uniques: if uq_sig not in conn_uniques_by_sig: obj_added(unnamed_metadata_uniques[uq_sig]) def _compare_nullable(schema, tname, cname, conn_col, metadata_col, diffs, autogen_context): conn_col_nullable = conn_col.nullable if not metadata_col.table.__mapping_only__: if conn_col_nullable is not metadata_col.nullable: diffs.append( ("modify_nullable", schema, tname, cname, { "existing_type": conn_col.type, "existing_server_default": conn_col.server_default, }, conn_col_nullable, metadata_col.nullable), ) log.info("{{white|green:Detected}} %s on column '%s.%s'", "NULL" if metadata_col.nullable else "NOT NULL", tname, cname ) else: log.info("{{white|red:Skipped}} %s on column '%s.%s'", "NULL" if metadata_col.nullable else "NOT NULL", tname, cname ) def _get_type(t): name = t.__class__.__name__ r = repr(t) if name.upper() == 'VARCHAR': r = '%s(length=%d)' % (name, t.length) elif name.upper() == 'CHAR': r = '%s(length=%d)' % (name, t.length) elif 
name.upper() == 'DECIMAL': r = '%s(precision=%d, scale=%d)' % ('Numeric', t.precision, t.scale) elif name.upper() == 'PICKLETYPE': r = '%s()' % 'BLOB' elif name.upper() == 'INTEGER': r = '%s()' % 'INTEGER' return r def _compare(c1, c2): r1 = _get_type(c1) r2 = _get_type(c2) if r1.upper() == 'BOOLEAN()' or r2.upper() == 'BOOLEAN()': return False if r1.upper() == 'MEDIUMTEXT()': return False else: return r1.upper() != r2.upper() def _compare_type(schema, tname, cname, conn_col, metadata_col, diffs, autogen_context): conn_type = conn_col.type metadata_type = metadata_col.type if conn_type._type_affinity is sqltypes.NullType: log.info("Couldn't determine database type " "for column '%s.%s'", tname, cname) return if metadata_type._type_affinity is sqltypes.NullType: log.info("Column '%s.%s' has no type within " "the model; can't compare", tname, cname) return #isdiff = autogen_context['context']._compare_type(conn_col, metadata_col) isdiff = _compare(conn_type, metadata_type) if isdiff: if not metadata_col.table.__mapping_only__: diffs.append( ("modify_type", schema, tname, cname, { "existing_nullable": conn_col.nullable, "existing_server_default": conn_col.server_default, }, conn_type, metadata_type), ) log.info("{{white|green:Detected}} type change from %r to %r on '%s.%s'", conn_type, metadata_type, tname, cname ) else: log.info("{{white|red:Skipped}} type change from %r to %r on '%s.%s'", conn_type, metadata_type, tname, cname ) def _render_server_default_for_compare(metadata_default, metadata_col, autogen_context): return _render_server_default( metadata_default, autogen_context, repr_=metadata_col.type._type_affinity is sqltypes.String) def _compare_server_default(schema, tname, cname, conn_col, metadata_col, diffs, autogen_context): metadata_default = metadata_col.server_default conn_col_default = conn_col.server_default if conn_col_default is None and metadata_default is None: return False rendered_metadata_default = _render_server_default( metadata_default, autogen_context) # rendered_conn_default = conn_col.server_default.arg.text \ # if conn_col.server_default else None rendered_conn_default = _render_server_default( conn_col_default, autogen_context) isdiff = autogen_context['context']._compare_server_default( conn_col, metadata_col, rendered_metadata_default, rendered_conn_default ) if isdiff: if not metadata_col.table.__mapping_only__: conn_col_default = rendered_conn_default diffs.append( ("modify_default", schema, tname, cname, { "existing_nullable": conn_col.nullable, "existing_type": conn_col.type, }, conn_col_default, metadata_default), ) log.info("{{white|green:Detected}} server default changed from %s to %s on column '%s.%s'", rendered_conn_default, rendered_metadata_default, tname, cname ) else: log.info("{{white|red:Skipped}} server default %s changed on column '%s.%s'", rendered_conn_default, rendered_metadata_default, tname, cname ) PKH+Ey'BBalembic/autogenerate/render.pyfrom sqlalchemy import schema as sa_schema, types as sqltypes, sql import logging from .. 
import compat import re from ..compat import string_types log = logging.getLogger(__name__) MAX_PYTHON_ARGS = 255 try: from sqlalchemy.sql.naming import conv def _render_gen_name(autogen_context, name): if isinstance(name, conv): return _f_name(_alembic_autogenerate_prefix(autogen_context), name) else: return name except ImportError: def _render_gen_name(autogen_context, name): return name class _f_name(object): def __init__(self, prefix, name): self.prefix = prefix self.name = name def __repr__(self): return "%sf(%r)" % (self.prefix, self.name) def _render_potential_expr(value, autogen_context): if isinstance(value, sql.ClauseElement): if compat.sqla_08: compile_kw = dict(compile_kwargs={'literal_binds': True}) else: compile_kw = {} return "%(prefix)stext(%(sql)r)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "sql": str( value.compile(dialect=autogen_context['dialect'], **compile_kw) ) } else: return repr(value) def _add_table(table, autogen_context): args = [col for col in [_render_column(col, autogen_context) for col in table.c] if col] + \ sorted([rcons for rcons in [_render_constraint(cons, autogen_context) for cons in table.constraints] if rcons is not None ]) if len(args) > MAX_PYTHON_ARGS: args = '*[' + ',\n'.join(args) + ']' else: args = ',\n'.join(args) text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % { 'tablename': table.name, 'prefix': _alembic_autogenerate_prefix(autogen_context), 'args': args, } if table.schema: text += ",\nschema=%r" % table.schema for k in sorted(table.kwargs): text += ",\n%s=%r" % (k.replace(" ", "_"), table.kwargs[k]) text += "\n)" return text def _drop_table(table, autogen_context): text = "%(prefix)sdrop_table(%(tname)r" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": table.name } if table.schema: text += ", schema=%r" % table.schema text += ")" return text def _add_index(index, autogen_context): """ Generate Alembic operations for the CREATE INDEX of an :class:`~sqlalchemy.schema.Index` instance. """ from .compare import _get_index_column_names text = "%(prefix)screate_index(%(name)r, '%(table)s', %(columns)s, "\ "unique=%(unique)r%(schema)s%(kwargs)s)" % { 'prefix': _alembic_autogenerate_prefix(autogen_context), 'name': _render_gen_name(autogen_context, index.name), 'table': index.table.name, 'columns': _get_index_column_names(index), 'unique': index.unique or False, 'schema': (", schema='%s'" % index.table.schema) if index.table.schema else '', 'kwargs': ( ', ' + ', '.join( ["%s=%s" % (key, _render_potential_expr(val, autogen_context)) for key, val in index.kwargs.items()])) if len(index.kwargs) else '' } return text def _drop_index(index, autogen_context): """ Generate Alembic operations for the DROP INDEX of an :class:`~sqlalchemy.schema.Index` instance. """ text = "%(prefix)sdrop_index(%(name)r, "\ "table_name='%(table_name)s'%(schema)s)" % { 'prefix': _alembic_autogenerate_prefix(autogen_context), 'name': _render_gen_name(autogen_context, index.name), 'table_name': index.table.name, 'schema': ((", schema='%s'" % index.table.schema) if index.table.schema else '') } return text def _render_unique_constraint(constraint, autogen_context): rendered = _user_defined_render("unique", constraint, autogen_context) if rendered is not False: return rendered return _uq_constraint(constraint, autogen_context, False) def _add_unique_constraint(constraint, autogen_context): """ Generate Alembic operations for the ALTER TABLE .. ADD CONSTRAINT ... UNIQUE of a :class:`~sqlalchemy.schema.UniqueConstraint` instance. 
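    The rendered text is a call to ``create_unique_constraint()``; for a
    constraint named ``uq_user_name`` on ``user.name`` the output is roughly
    (illustrative, assuming the default ``op.`` prefix)::

        op.create_unique_constraint('uq_user_name', 'user', ['name'])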
""" return _uq_constraint(constraint, autogen_context, True) def _uq_constraint(constraint, autogen_context, alter): opts = [] if constraint.deferrable: opts.append(("deferrable", str(constraint.deferrable))) if constraint.initially: opts.append(("initially", str(constraint.initially))) if alter and constraint.table.schema: opts.append(("schema", str(constraint.table.schema))) if not alter and constraint.name: opts.append( ("name", _render_gen_name(autogen_context, constraint.name))) if alter: args = [repr(_render_gen_name(autogen_context, constraint.name)), repr(constraint.table.name)] args.append(repr([col.name for col in constraint.columns])) args.extend(["%s=%r" % (k, v) for k, v in opts]) return "%(prefix)screate_unique_constraint(%(args)s)" % { 'prefix': _alembic_autogenerate_prefix(autogen_context), 'args': ", ".join(args) } else: args = [repr(col.name) for col in constraint.columns] args.extend(["%s=%r" % (k, v) for k, v in opts]) return "%(prefix)sUniqueConstraint(%(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "args": ", ".join(args) } def _add_fk_constraint(constraint, autogen_context): raise NotImplementedError() def _add_pk_constraint(constraint, autogen_context): raise NotImplementedError() def _add_check_constraint(constraint, autogen_context): raise NotImplementedError() def _add_constraint(constraint, autogen_context): """ Dispatcher for the different types of constraints. """ funcs = { "unique_constraint": _add_unique_constraint, "foreign_key_constraint": _add_fk_constraint, "primary_key_constraint": _add_pk_constraint, "check_constraint": _add_check_constraint, "column_check_constraint": _add_check_constraint, } return funcs[constraint.__visit_name__](constraint, autogen_context) def _drop_constraint(constraint, autogen_context): """ Generate Alembic operations for the ALTER TABLE ... DROP CONSTRAINT of a :class:`~sqlalchemy.schema.UniqueConstraint` instance. 
""" text = "%(prefix)sdrop_constraint"\ "(%(name)r, '%(table_name)s'%(schema)s)" % { 'prefix': _alembic_autogenerate_prefix(autogen_context), 'name': _render_gen_name(autogen_context, constraint.name), 'table_name': constraint.table.name, 'schema': (", schema='%s'" % constraint.table.schema) if constraint.table.schema else '', } return text def _add_column(schema, tname, column, autogen_context): text = "%(prefix)sadd_column(%(tname)r, %(column)s" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": tname, "column": _render_column(column, autogen_context) } if schema: text += ", schema=%r" % schema text += ")" return text def _drop_column(schema, tname, column, autogen_context): text = "%(prefix)sdrop_column(%(tname)r, %(cname)r" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": tname, "cname": column.name } if schema: text += ", schema=%r" % schema text += ")" return text def _modify_col(tname, cname, autogen_context, server_default=False, type_=None, nullable=None, existing_type=None, existing_nullable=None, existing_server_default=False, schema=None): indent = " " * 11 text = "%(prefix)salter_column(%(tname)r, %(cname)r" % { 'prefix': _alembic_autogenerate_prefix( autogen_context), 'tname': tname, 'cname': cname} text += ",\n%sexisting_type=%s" % ( indent, _repr_type(existing_type, autogen_context)) if server_default is not False: rendered = _render_server_default( server_default, autogen_context) text += ",\n%sserver_default=%s" % (indent, rendered) if type_ is not None: text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context)) if nullable is not None: text += ",\n%snullable=%r" % ( indent, nullable,) if existing_nullable is not None: text += ",\n%sexisting_nullable=%r" % ( indent, existing_nullable) if existing_server_default: rendered = _render_server_default( existing_server_default, autogen_context) text += ",\n%sexisting_server_default=%s" % ( indent, rendered) if schema: text += ",\n%sschema=%r" % (indent, schema) text += ")" return text def _user_autogenerate_prefix(autogen_context): prefix = autogen_context['opts']['user_module_prefix'] if prefix is None: return _sqlalchemy_autogenerate_prefix(autogen_context) else: return prefix def _sqlalchemy_autogenerate_prefix(autogen_context): return autogen_context['opts']['sqlalchemy_module_prefix'] or '' def _alembic_autogenerate_prefix(autogen_context): return autogen_context['opts']['alembic_module_prefix'] or '' def _user_defined_render(type_, object_, autogen_context): if 'opts' in autogen_context and \ 'render_item' in autogen_context['opts']: render = autogen_context['opts']['render_item'] if render: rendered = render(type_, object_, autogen_context) if rendered is not False: return rendered return False def _render_column(column, autogen_context): rendered = _user_defined_render("column", column, autogen_context) if rendered is not False: return rendered opts = [] if column.server_default: rendered = _render_server_default( column.server_default, autogen_context ) if rendered: opts.append(("server_default", rendered)) if not column.autoincrement: opts.append(("autoincrement", column.autoincrement)) if column.nullable is not None: opts.append(("nullable", column.nullable)) # TODO: for non-ascii colname, assign a "key" return "%(prefix)sColumn(%(name)r, %(type)s, %(kw)s)" % { 'prefix': _sqlalchemy_autogenerate_prefix(autogen_context), 'name': column.name, 'type': _repr_type(column.type, autogen_context), 'kw': ", ".join(["%s=%s" % (kwname, val) for kwname, val in opts]) } def 
_render_server_default(default, autogen_context, repr_=True): rendered = _user_defined_render("server_default", default, autogen_context) if rendered is not False: return rendered if isinstance(default, sa_schema.DefaultClause): if isinstance(default.arg, string_types): default = default.arg else: default = str(default.arg.compile( dialect=autogen_context['dialect'])) if isinstance(default, string_types): if repr_: default = re.sub(r"^'|'$", "", default) return repr(default) else: return default else: return None def _repr_type(type_, autogen_context): rendered = _user_defined_render("type", type_, autogen_context) if rendered is not False: return rendered mod = type(type_).__module__ imports = autogen_context.get('imports', None) if mod.startswith("sqlalchemy.dialects"): dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) if imports is not None: imports.add("from sqlalchemy.dialects import %s" % dname) return "%s.%r" % (dname, type_) elif mod.startswith("sqlalchemy"): prefix = _sqlalchemy_autogenerate_prefix(autogen_context) return "%s%r" % (prefix, type_) else: prefix = _user_autogenerate_prefix(autogen_context) return "%s%r" % (prefix, type_) def _render_constraint(constraint, autogen_context): renderer = _constraint_renderers.get(type(constraint), None) if renderer: return renderer(constraint, autogen_context) else: return None def _render_primary_key(constraint, autogen_context): rendered = _user_defined_render("primary_key", constraint, autogen_context) if rendered is not False: return rendered if not constraint.columns: return None opts = [] if constraint.name: opts.append(("name", repr( _render_gen_name(autogen_context, constraint.name)))) return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "args": ", ".join( [repr(c.key) for c in constraint.columns] + ["%s=%s" % (kwname, val) for kwname, val in opts] ), } def _fk_colspec(fk, metadata_schema): """Implement a 'safe' version of ForeignKey._get_colspec() that never tries to resolve the remote table. 
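    When the owning :class:`~sqlalchemy.schema.MetaData` specifies a schema and
    the colspec carries none of its own (i.e. it contains a single dot, such as
    ``'user.id'``), the metadata schema is prepended, producing e.g.
    ``'myschema.user.id'`` (illustrative names).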
""" colspec = fk._get_colspec() if metadata_schema is not None and colspec.count(".") == 1: # need to render schema breaking up tokens by hand, since the # ForeignKeyConstraint here may not actually have a remote # Table present # no schema in the colspec, render it colspec = "%s.%s" % (metadata_schema, colspec) return colspec def _render_foreign_key(constraint, autogen_context): rendered = _user_defined_render("foreign_key", constraint, autogen_context) if rendered is not False: return rendered opts = [] if constraint.name: opts.append(("name", repr( _render_gen_name(autogen_context, constraint.name)))) if constraint.onupdate: opts.append(("onupdate", repr(constraint.onupdate))) if constraint.ondelete: opts.append(("ondelete", repr(constraint.ondelete))) if constraint.initially: opts.append(("initially", repr(constraint.initially))) if constraint.deferrable: opts.append(("deferrable", repr(constraint.deferrable))) if constraint.use_alter: opts.append(("use_alter", repr(constraint.use_alter))) apply_metadata_schema = constraint.parent.metadata.schema return "%(prefix)sForeignKeyConstraint([%(cols)s], "\ "[%(refcols)s], %(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "cols": ", ".join( "'%s'" % f.parent.key for f in constraint.elements), "refcols": ", ".join(repr(_fk_colspec(f, apply_metadata_schema)) for f in constraint.elements), "args": ", ".join( ["%s=%s" % (kwname, val) for kwname, val in opts] ), } def _render_check_constraint(constraint, autogen_context): rendered = _user_defined_render("check", constraint, autogen_context) if rendered is not False: return rendered # detect the constraint being part of # a parent type which is probably in the Table already. # ideally SQLAlchemy would give us more of a first class # way to detect this. if constraint._create_rule and \ hasattr(constraint._create_rule, 'target') and \ isinstance(constraint._create_rule.target, sqltypes.TypeEngine): return None opts = [] if constraint.name: opts.append( ( "name", repr(_render_gen_name(autogen_context, constraint.name)) ) ) return "%(prefix)sCheckConstraint(%(sqltext)r%(opts)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "opts": ", " + (", ".join("%s=%s" % (k, v) for k, v in opts)) if opts else "", "sqltext": str( constraint.sqltext.compile( dialect=autogen_context['dialect'] ) ) } _constraint_renderers = { sa_schema.PrimaryKeyConstraint: _render_primary_key, sa_schema.ForeignKeyConstraint: _render_foreign_key, sa_schema.UniqueConstraint: _render_unique_constraint, sa_schema.CheckConstraint: _render_check_constraint } PK D4:UUalembic/ddl/__init__.pyfrom . import postgresql, mysql, sqlite, mssql, oracle from .impl import DefaultImpl PK )ED=#alembic/ddl/base.pyimport functools from sqlalchemy.ext.compiler import compiles from sqlalchemy.schema import DDLElement, Column from sqlalchemy import Integer from sqlalchemy import types as sqltypes class AlterTable(DDLElement): """Represent an ALTER TABLE statement. Only the string name and optional schema name of the table is required, not a full Table object. 
""" def __init__(self, table_name, schema=None): self.table_name = table_name self.schema = schema class RenameTable(AlterTable): def __init__(self, old_table_name, new_table_name, schema=None): super(RenameTable, self).__init__(old_table_name, schema=schema) self.new_table_name = new_table_name class AlterColumn(AlterTable): def __init__(self, name, column_name, schema=None, existing_type=None, existing_nullable=None, existing_server_default=None): super(AlterColumn, self).__init__(name, schema=schema) self.column_name = column_name self.existing_type = sqltypes.to_instance(existing_type) \ if existing_type is not None else None self.existing_nullable = existing_nullable self.existing_server_default = existing_server_default class ColumnNullable(AlterColumn): def __init__(self, name, column_name, nullable, **kw): super(ColumnNullable, self).__init__(name, column_name, **kw) self.nullable = nullable class ColumnType(AlterColumn): def __init__(self, name, column_name, type_, **kw): super(ColumnType, self).__init__(name, column_name, **kw) self.type_ = sqltypes.to_instance(type_) class ColumnName(AlterColumn): def __init__(self, name, column_name, newname, **kw): super(ColumnName, self).__init__(name, column_name, **kw) self.newname = newname class ColumnDefault(AlterColumn): def __init__(self, name, column_name, default, **kw): super(ColumnDefault, self).__init__(name, column_name, **kw) self.default = default class AddColumn(AlterTable): def __init__(self, name, column, schema=None): super(AddColumn, self).__init__(name, schema=schema) self.column = column class DropColumn(AlterTable): def __init__(self, name, column, schema=None): super(DropColumn, self).__init__(name, schema=schema) self.column = column @compiles(RenameTable) def visit_rename_table(element, compiler, **kw): return "%s RENAME TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, element.schema) ) @compiles(AddColumn) def visit_add_column(element, compiler, **kw): return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), add_column(compiler, element.column, **kw) ) @compiles(DropColumn) def visit_drop_column(element, compiler, **kw): return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), drop_column(compiler, element.column.name, **kw) ) @compiles(ColumnNullable) def visit_column_nullable(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "DROP NOT NULL" if element.nullable else "SET NOT NULL" ) @compiles(ColumnType) def visit_column_type(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "TYPE %s" % format_type(compiler, element.type_) ) @compiles(ColumnName) def visit_column_name(element, compiler, **kw): return "%s RENAME %s TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname) ) @compiles(ColumnDefault) def visit_column_default(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "SET DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DROP DEFAULT" ) def quote_dotted(name, quote): """quote the elements of a dotted name""" result = '.'.join([quote(x) for x 
in name.split('.')]) return result def format_table_name(compiler, name, schema): quote = functools.partial(compiler.preparer.quote, force=None) if schema: return quote_dotted(schema, quote) + "." + quote(name) else: return quote(name) def format_column_name(compiler, name): return compiler.preparer.quote(name, None) def format_server_default(compiler, default): return compiler.get_column_default_string( Column("x", Integer, server_default=default) ) def format_type(compiler, type_): return compiler.dialect.type_compiler.process(type_) def alter_table(compiler, name, schema): return "ALTER TABLE %s" % format_table_name(compiler, name, schema) def drop_column(compiler, name): return 'DROP COLUMN %s' % format_column_name(compiler, name) def alter_column(compiler, name): return 'ALTER COLUMN %s' % format_column_name(compiler, name) def add_column(compiler, column, **kw): return "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) PK )Eو#--alembic/ddl/impl.pyfrom sqlalchemy.sql.expression import _BindParamClause from sqlalchemy.ext.compiler import compiles from sqlalchemy import schema, text, sql from sqlalchemy.sql import expression from sqlalchemy import types as sqltypes from ..compat import string_types, text_type, with_metaclass from .. import util from . import base class ImplMeta(type): def __init__(cls, classname, bases, dict_): newtype = type.__init__(cls, classname, bases, dict_) if '__dialect__' in dict_: _impls[dict_['__dialect__']] = cls return newtype _impls = {} class DefaultImpl(with_metaclass(ImplMeta)): """Provide the entrypoint for major migration operations, including database-specific behavioral variances. While individual SQL/DDL constructs already provide for database-specific implementations, variances here allow for entirely different sequences of operations to take place for a particular migration, such as SQL Server's special 'IDENTITY INSERT' step for bulk inserts. 
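# Standalone sketch of the registration pattern ImplMeta implements: a subclass
# that declares __dialect__ is recorded in a module-level registry keyed by the
# dialect name, so the right implementation can be looked up later (as
# get_by_dialect() does below).  The real module uses with_metaclass() for
# Python 2/3 compatibility; this sketch uses Python 3 syntax only.
_impls_demo = {}

class DemoImplMeta(type):
    def __init__(cls, classname, bases, dict_):
        if '__dialect__' in dict_:
            _impls_demo[dict_['__dialect__']] = cls
        type.__init__(cls, classname, bases, dict_)

class DemoDefaultImpl(object, metaclass=DemoImplMeta):
    __dialect__ = 'default'

class DemoSQLiteImpl(DemoDefaultImpl):
    __dialect__ = 'sqlite'

print(_impls_demo['sqlite'] is DemoSQLiteImpl)   # True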
""" __dialect__ = 'default' transactional_ddl = False command_terminator = ";" def __init__(self, dialect, connection, as_sql, transactional_ddl, output_buffer, context_opts): self.dialect = dialect self.connection = connection self.as_sql = as_sql self.output_buffer = output_buffer self.memo = {} self.context_opts = context_opts if transactional_ddl is not None: self.transactional_ddl = transactional_ddl @classmethod def get_by_dialect(cls, dialect): return _impls[dialect.name] def static_output(self, text): self.output_buffer.write(text_type(text + "\n\n")) self.output_buffer.flush() @property def bind(self): return self.connection def _exec(self, construct, execution_options=None, multiparams=(), params=util.immutabledict()): if isinstance(construct, string_types): construct = text(construct) if self.as_sql: if multiparams or params: # TODO: coverage raise Exception("Execution arguments not allowed with as_sql") self.static_output(text_type( construct.compile(dialect=self.dialect) ).replace("\t", " ").strip() + self.command_terminator) else: conn = self.connection if execution_options: conn = conn.execution_options(**execution_options) conn.execute(construct, *multiparams, **params) def execute(self, sql, execution_options=None): self._exec(sql, execution_options) def alter_column(self, table_name, column_name, nullable=None, server_default=False, name=None, type_=None, schema=None, autoincrement=None, existing_type=None, existing_server_default=None, existing_nullable=None, existing_autoincrement=None ): if autoincrement is not None or existing_autoincrement is not None: util.warn( "autoincrement and existing_autoincrement " "only make sense for MySQL") if nullable is not None: self._exec(base.ColumnNullable( table_name, column_name, nullable, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, )) if server_default is not False: self._exec(base.ColumnDefault( table_name, column_name, server_default, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, )) if type_ is not None: self._exec(base.ColumnType( table_name, column_name, type_, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, )) # do the new name last ;) if name is not None: self._exec(base.ColumnName( table_name, column_name, name, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, )) def add_column(self, table_name, column, schema=None): self._exec(base.AddColumn(table_name, column, schema=schema)) def drop_column(self, table_name, column, schema=None, **kw): self._exec(base.DropColumn(table_name, column, schema=schema)) def add_constraint(self, const): if const._create_rule is None or \ const._create_rule(self): self._exec(schema.AddConstraint(const)) def drop_constraint(self, const): self._exec(schema.DropConstraint(const)) def rename_table(self, old_table_name, new_table_name, schema=None): self._exec(base.RenameTable(old_table_name, new_table_name, schema=schema)) def create_table(self, table): if util.sqla_07: table.dispatch.before_create(table, self.connection, checkfirst=False, _ddl_runner=self) self._exec(schema.CreateTable(table)) if util.sqla_07: table.dispatch.after_create(table, self.connection, checkfirst=False, _ddl_runner=self) for index in table.indexes: self._exec(schema.CreateIndex(index)) def 
drop_table(self, table): self._exec(schema.DropTable(table)) def create_index(self, index): self._exec(schema.CreateIndex(index)) def drop_index(self, index): self._exec(schema.DropIndex(index)) def bulk_insert(self, table, rows, multiinsert=True): if not isinstance(rows, list): raise TypeError("List expected") elif rows and not isinstance(rows[0], dict): raise TypeError("List of dictionaries expected") if self.as_sql: for row in rows: self._exec(table.insert(inline=True).values(**dict( (k, _literal_bindparam(k, v, type_=table.c[k].type) if not isinstance(v, _literal_bindparam) else v) for k, v in row.items() ))) else: # work around http://www.sqlalchemy.org/trac/ticket/2461 if not hasattr(table, '_autoincrement_column'): table._autoincrement_column = None if rows: if multiinsert: self._exec(table.insert(inline=True), multiparams=rows) else: for row in rows: self._exec(table.insert(inline=True).values(**row)) def compare_type(self, inspector_column, metadata_column): conn_type = inspector_column.type metadata_type = metadata_column.type metadata_impl = metadata_type.dialect_impl(self.dialect) # work around SQLAlchemy bug "stale value for type affinity" # fixed in 0.7.4 metadata_impl.__dict__.pop('_type_affinity', None) if conn_type._compare_type_affinity( metadata_impl ): comparator = _type_comparators.get(conn_type._type_affinity, None) return comparator and comparator(metadata_type, conn_type) else: return True def compare_server_default(self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default): return rendered_inspector_default != rendered_metadata_default def correct_for_autogen_constraints(self, conn_uniques, conn_indexes, metadata_unique_constraints, metadata_indexes): pass def start_migrations(self): """A hook called when :meth:`.EnvironmentContext.run_migrations` is called. Implementations can set up per-migration-run state here. """ def emit_begin(self): """Emit the string ``BEGIN``, or the backend-specific equivalent, on the current connection context. This is used in offline mode and typically via :meth:`.EnvironmentContext.begin_transaction`. """ self.static_output("BEGIN" + self.command_terminator) def emit_commit(self): """Emit the string ``COMMIT``, or the backend-specific equivalent, on the current connection context. This is used in offline mode and typically via :meth:`.EnvironmentContext.begin_transaction`. """ self.static_output("COMMIT" + self.command_terminator) class _literal_bindparam(_BindParamClause): pass @compiles(_literal_bindparam) def _render_literal_bindparam(element, compiler, **kw): return compiler.render_literal_bindparam(element, **kw) def _textual_index_column(table, text_): """a workaround for the Index construct's severe lack of flexibility""" if isinstance(text_, string_types): c = schema.Column(text_, sqltypes.NULLTYPE) table.append_column(c) return c elif isinstance(text_, expression.TextClause): return _textual_index_element(table, text_) else: raise ValueError("String or text() construct expected") class _textual_index_element(sql.ColumnElement): """Wrap around a sqlalchemy text() construct in such a way that we appear like a column-oriented SQL expression to an Index construct. The issue here is that currently the Postgresql dialect, the biggest recipient of functional indexes, keys all the index expressions to the corresponding column expressions when rendering CREATE INDEX, so the Index we create here needs to have a .columns collection that is the same length as the .expressions collection. 
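# Illustrative migration-script usage of the bulk_insert() implementation above,
# through the public op.bulk_insert() wrapper; the table and rows are hypothetical.
from sqlalchemy.sql import table, column
from sqlalchemy import Integer, String
from alembic import op

account = table(
    'account',
    column('id', Integer),
    column('name', String),
)

def upgrade():
    op.bulk_insert(account, [
        {'id': 1, 'name': 'alpha'},
        {'id': 2, 'name': 'beta'},
    ])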
Ultimately SQLAlchemy should support text() expressions in indexes. See https://bitbucket.org/zzzeek/sqlalchemy/issue/3174/\ support-text-sent-to-indexes """ __visit_name__ = '_textual_idx_element' def __init__(self, table, text): self.table = table self.text = text self.key = text.text self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE) table.append_column(self.fake_column) def get_children(self): return [self.fake_column] @compiles(_textual_index_element) def _render_textual_index_column(element, compiler, **kw): return compiler.process(element.text, **kw) def _string_compare(t1, t2): return \ t1.length is not None and \ t1.length != t2.length def _numeric_compare(t1, t2): return \ ( t1.precision is not None and t1.precision != t2.precision ) or \ ( t1.scale is not None and t1.scale != t2.scale ) _type_comparators = { sqltypes.String: _string_compare, sqltypes.Numeric: _numeric_compare } PK )E1ü0_ _ alembic/ddl/mssql.pyfrom sqlalchemy.ext.compiler import compiles from .. import util from .impl import DefaultImpl from .base import alter_table, AddColumn, ColumnName, RenameTable,\ format_table_name, format_column_name, ColumnNullable, alter_column,\ format_server_default, ColumnDefault, format_type, ColumnType from sqlalchemy.sql.expression import ClauseElement, Executable class MSSQLImpl(DefaultImpl): __dialect__ = 'mssql' transactional_ddl = True batch_separator = "GO" def __init__(self, *arg, **kw): super(MSSQLImpl, self).__init__(*arg, **kw) self.batch_separator = self.context_opts.get( "mssql_batch_separator", self.batch_separator) def _exec(self, construct, *args, **kw): super(MSSQLImpl, self)._exec(construct, *args, **kw) if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) def emit_begin(self): self.static_output("BEGIN TRANSACTION" + self.command_terminator) def emit_commit(self): super(MSSQLImpl, self).emit_commit() if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) def alter_column(self, table_name, column_name, nullable=None, server_default=False, name=None, type_=None, schema=None, autoincrement=None, existing_type=None, existing_server_default=None, existing_nullable=None, existing_autoincrement=None ): if nullable is not None and existing_type is None: if type_ is not None: existing_type = type_ # the NULL/NOT NULL alter will handle # the type alteration type_ = None else: raise util.CommandError( "MS-SQL ALTER COLUMN operations " "with NULL or NOT NULL require the " "existing_type or a new type_ be passed.") super(MSSQLImpl, self).alter_column( table_name, column_name, nullable=nullable, type_=type_, schema=schema, autoincrement=autoincrement, existing_type=existing_type, existing_nullable=existing_nullable, existing_autoincrement=existing_autoincrement ) if server_default is not False: if existing_server_default is not False or \ server_default is None: self._exec( _ExecDropConstraint( table_name, column_name, 'sys.default_constraints') ) if server_default is not None: super(MSSQLImpl, self).alter_column( table_name, column_name, schema=schema, server_default=server_default) if name is not None: super(MSSQLImpl, self).alter_column( table_name, column_name, schema=schema, name=name) def bulk_insert(self, table, rows, **kw): if self.as_sql: self._exec( "SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(table) ) super(MSSQLImpl, self).bulk_insert(table, rows, **kw) self._exec( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.format_table(table) ) else: 
super(MSSQLImpl, self).bulk_insert(table, rows, **kw) def drop_column(self, table_name, column, **kw): drop_default = kw.pop('mssql_drop_default', False) if drop_default: self._exec( _ExecDropConstraint( table_name, column, 'sys.default_constraints') ) drop_check = kw.pop('mssql_drop_check', False) if drop_check: self._exec( _ExecDropConstraint( table_name, column, 'sys.check_constraints') ) drop_fks = kw.pop('mssql_drop_foreign_key', False) if drop_fks: self._exec( _ExecDropFKConstraint(table_name, column) ) super(MSSQLImpl, self).drop_column(table_name, column) class _ExecDropConstraint(Executable, ClauseElement): def __init__(self, tname, colname, type_): self.tname = tname self.colname = colname self.type_ = type_ class _ExecDropFKConstraint(Executable, ClauseElement): def __init__(self, tname, colname): self.tname = tname self.colname = colname @compiles(_ExecDropConstraint, 'mssql') def _exec_drop_col_constraint(element, compiler, **kw): tname, colname, type_ = element.tname, element.colname, element.type_ # from http://www.mssqltips.com/sqlservertip/1425/\ # working-with-default-constraints-in-sql-server/ # TODO: needs table formatting, etc. return """declare @const_name varchar(256) select @const_name = [name] from %(type)s where parent_object_id = object_id('%(tname)s') and col_name(parent_object_id, parent_column_id) = '%(colname)s' exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { 'type': type_, 'tname': tname, 'colname': colname, 'tname_quoted': format_table_name(compiler, tname, None), } @compiles(_ExecDropFKConstraint, 'mssql') def _exec_drop_col_fk_constraint(element, compiler, **kw): tname, colname = element.tname, element.colname return """declare @const_name varchar(256) select @const_name = [name] from sys.foreign_keys fk join sys.foreign_key_columns fkc on fk.object_id=fkc.constraint_object_id where fkc.parent_object_id = object_id('%(tname)s') and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s' exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { 'tname': tname, 'colname': colname, 'tname_quoted': format_table_name(compiler, tname, None), } @compiles(AddColumn, 'mssql') def visit_add_column(element, compiler, **kw): return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), mssql_add_column(compiler, element.column, **kw) ) def mssql_add_column(compiler, column, **kw): return "ADD %s" % compiler.get_column_specification(column, **kw) @compiles(ColumnNullable, 'mssql') def visit_column_nullable(element, compiler, **kw): return "%s %s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), format_type(compiler, element.existing_type), "NULL" if element.nullable else "NOT NULL" ) @compiles(ColumnDefault, 'mssql') def visit_column_default(element, compiler, **kw): # TODO: there can also be a named constraint # with ADD CONSTRAINT here return "%s ADD DEFAULT %s FOR %s" % ( alter_table(compiler, element.table_name, element.schema), format_server_default(compiler, element.default), format_column_name(compiler, element.column_name) ) @compiles(ColumnName, 'mssql') def visit_rename_column(element, compiler, **kw): return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % ( format_table_name(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname) ) @compiles(ColumnType, 'mssql') def visit_column_type(element, compiler, **kw): return "%s %s %s" % ( 
alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), format_type(compiler, element.type_) ) @compiles(RenameTable, 'mssql') def visit_rename_table(element, compiler, **kw): return "EXEC sp_rename '%s', %s" % ( format_table_name(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, element.schema) ) PK )Ealembic/ddl/mysql.pyfrom sqlalchemy.ext.compiler import compiles from sqlalchemy import types as sqltypes from sqlalchemy import schema from ..compat import string_types from .. import util from .impl import DefaultImpl from .base import ColumnNullable, ColumnName, ColumnDefault, \ ColumnType, AlterColumn, format_column_name, \ format_server_default from .base import alter_table class MySQLImpl(DefaultImpl): __dialect__ = 'mysql' transactional_ddl = False def alter_column(self, table_name, column_name, nullable=None, server_default=False, name=None, type_=None, schema=None, autoincrement=None, existing_type=None, existing_server_default=None, existing_nullable=None, existing_autoincrement=None ): if name is not None: self._exec( MySQLChangeColumn( table_name, column_name, schema=schema, newname=name, nullable=nullable if nullable is not None else existing_nullable if existing_nullable is not None else True, type_=type_ if type_ is not None else existing_type, default=server_default if server_default is not False else existing_server_default, autoincrement=autoincrement if autoincrement is not None else existing_autoincrement ) ) elif nullable is not None or \ type_ is not None or \ autoincrement is not None: self._exec( MySQLModifyColumn( table_name, column_name, schema=schema, newname=name if name is not None else column_name, nullable=nullable if nullable is not None else existing_nullable if existing_nullable is not None else True, type_=type_ if type_ is not None else existing_type, default=server_default if server_default is not False else existing_server_default, autoincrement=autoincrement if autoincrement is not None else existing_autoincrement ) ) elif server_default is not False: self._exec( MySQLAlterDefault( table_name, column_name, server_default, schema=schema, ) ) def correct_for_autogen_constraints(self, conn_unique_constraints, conn_indexes, metadata_unique_constraints, metadata_indexes): removed = set() for idx in list(conn_indexes): # MySQL puts implicit indexes on FK columns, even if # composite and even if MyISAM, so can't check this too easily. # the name of the index may be the column name or it may # be the name of the FK constraint. 
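# As the CHANGE/MODIFY handling above shows, MySQL always needs the full column
# specification; from a migration script that means passing existing_type even when
# only nullability (or the name) changes.  Table and column names are hypothetical.
import sqlalchemy as sa
from alembic import op

def upgrade():
    op.alter_column(
        'account', 'name',
        existing_type=sa.String(length=50),
        nullable=False,
    )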
for col in idx.columns: if idx.name == col.name: conn_indexes.remove(idx) removed.add(idx.name) break for fk in col.foreign_keys: if fk.name == idx.name: conn_indexes.remove(idx) removed.add(idx.name) break # then remove indexes from the "metadata_indexes" # that we've removed from reflected, otherwise they come out # as adds (see #202) for idx in list(metadata_indexes): if idx.name in removed: metadata_indexes.remove(idx) class MySQLAlterDefault(AlterColumn): def __init__(self, name, column_name, default, schema=None): super(AlterColumn, self).__init__(name, schema=schema) self.column_name = column_name self.default = default class MySQLChangeColumn(AlterColumn): def __init__(self, name, column_name, schema=None, newname=None, type_=None, nullable=None, default=False, autoincrement=None): super(AlterColumn, self).__init__(name, schema=schema) self.column_name = column_name self.nullable = nullable self.newname = newname self.default = default self.autoincrement = autoincrement if type_ is None: raise util.CommandError( "All MySQL CHANGE/MODIFY COLUMN operations " "require the existing type." ) self.type_ = sqltypes.to_instance(type_) class MySQLModifyColumn(MySQLChangeColumn): pass @compiles(ColumnNullable, 'mysql') @compiles(ColumnName, 'mysql') @compiles(ColumnDefault, 'mysql') @compiles(ColumnType, 'mysql') def _mysql_doesnt_support_individual(element, compiler, **kw): raise NotImplementedError( "Individual alter column constructs not supported by MySQL" ) @compiles(MySQLAlterDefault, "mysql") def _mysql_alter_default(element, compiler, **kw): return "%s ALTER COLUMN %s %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), "SET DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DROP DEFAULT" ) @compiles(MySQLModifyColumn, "mysql") def _mysql_modify_column(element, compiler, **kw): return "%s MODIFY %s %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), _mysql_colspec( compiler, nullable=element.nullable, server_default=element.default, type_=element.type_, autoincrement=element.autoincrement ), ) @compiles(MySQLChangeColumn, "mysql") def _mysql_change_column(element, compiler, **kw): return "%s CHANGE %s %s %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname), _mysql_colspec( compiler, nullable=element.nullable, server_default=element.default, type_=element.type_, autoincrement=element.autoincrement ), ) def _render_value(compiler, expr): if isinstance(expr, string_types): return "'%s'" % expr else: return compiler.sql_compiler.process(expr) def _mysql_colspec(compiler, nullable, server_default, type_, autoincrement): spec = "%s %s" % ( compiler.dialect.type_compiler.process(type_), "NULL" if nullable else "NOT NULL" ) if autoincrement: spec += " AUTO_INCREMENT" if server_default is not False and server_default is not None: spec += " DEFAULT %s" % _render_value(compiler, server_default) return spec @compiles(schema.DropConstraint, "mysql") def _mysql_drop_constraint(element, compiler, **kw): """Redefine SQLAlchemy's drop constraint to raise errors for invalid constraint type.""" constraint = element.element if isinstance(constraint, (schema.ForeignKeyConstraint, schema.PrimaryKeyConstraint, schema.UniqueConstraint) ): return compiler.visit_drop_constraint(element, **kw) elif isinstance(constraint, 
schema.CheckConstraint): raise NotImplementedError( "MySQL does not support CHECK constraints.") else: raise NotImplementedError( "No generic 'DROP CONSTRAINT' in MySQL - " "please specify constraint type") PK )E$p_ _ alembic/ddl/oracle.pyfrom sqlalchemy.ext.compiler import compiles from .impl import DefaultImpl from .base import alter_table, AddColumn, ColumnName, \ format_column_name, ColumnNullable, \ format_server_default, ColumnDefault, format_type, ColumnType class OracleImpl(DefaultImpl): __dialect__ = 'oracle' transactional_ddl = True batch_separator = "/" command_terminator = "" def __init__(self, *arg, **kw): super(OracleImpl, self).__init__(*arg, **kw) self.batch_separator = self.context_opts.get( "oracle_batch_separator", self.batch_separator) def _exec(self, construct, *args, **kw): super(OracleImpl, self)._exec(construct, *args, **kw) if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) def emit_begin(self): self._exec("SET TRANSACTION READ WRITE") def emit_commit(self): self._exec("COMMIT") @compiles(AddColumn, 'oracle') def visit_add_column(element, compiler, **kw): return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), add_column(compiler, element.column, **kw), ) @compiles(ColumnNullable, 'oracle') def visit_column_nullable(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "NULL" if element.nullable else "NOT NULL" ) @compiles(ColumnType, 'oracle') def visit_column_type(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "%s" % format_type(compiler, element.type_) ) @compiles(ColumnName, 'oracle') def visit_column_name(element, compiler, **kw): return "%s RENAME COLUMN %s TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname) ) @compiles(ColumnDefault, 'oracle') def visit_column_default(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DEFAULT NULL" ) def alter_column(compiler, name): return 'MODIFY %s' % format_column_name(compiler, name) def add_column(compiler, column, **kw): return "ADD %s" % compiler.get_column_specification(column, **kw) PK )E̼55alembic/ddl/postgresql.pyimport re from .. 
import compat from .base import compiles, alter_table, format_table_name, RenameTable from .impl import DefaultImpl class PostgresqlImpl(DefaultImpl): __dialect__ = 'postgresql' transactional_ddl = True def compare_server_default(self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default): # don't do defaults for SERIAL columns if metadata_column.primary_key and \ metadata_column is metadata_column.table._autoincrement_column: return False conn_col_default = rendered_inspector_default if None in (conn_col_default, rendered_metadata_default): return conn_col_default != rendered_metadata_default if metadata_column.server_default is not None and \ isinstance(metadata_column.server_default.arg, compat.string_types) and \ not re.match(r"^'.+'$", rendered_metadata_default): rendered_metadata_default = "'%s'" % rendered_metadata_default return not self.connection.scalar( "SELECT %s = %s" % ( conn_col_default, rendered_metadata_default ) ) @compiles(RenameTable, "postgresql") def visit_rename_table(element, compiler, **kw): return "%s RENAME TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, None) ) PK )E"| alembic/ddl/sqlite.pyfrom .. import util from .impl import DefaultImpl import re class SQLiteImpl(DefaultImpl): __dialect__ = 'sqlite' transactional_ddl = False """SQLite supports transactional DDL, but pysqlite does not: see: http://bugs.python.org/issue10740 """ def add_constraint(self, const): # attempt to distinguish between an # auto-gen constraint and an explicit one if const._create_rule is None: raise NotImplementedError( "No support for ALTER of constraints in SQLite dialect") elif const._create_rule(self): util.warn("Skipping unsupported ALTER for " "creation of implicit constraint") def drop_constraint(self, const): if const._create_rule is None: raise NotImplementedError( "No support for ALTER of constraints in SQLite dialect") def compare_server_default(self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default): rendered_metadata_default = re.sub( r"^'|'$", "", rendered_metadata_default) return rendered_inspector_default != repr(rendered_metadata_default) def correct_for_autogen_constraints( self, conn_unique_constraints, conn_indexes, metadata_unique_constraints, metadata_indexes): def uq_sig(uq): return tuple(sorted(uq.columns.keys())) conn_unique_sigs = set( uq_sig(uq) for uq in conn_unique_constraints ) for idx in list(metadata_unique_constraints): # SQLite backend can't report on unnamed UNIQUE constraints, # so remove these, unless we see an exact signature match if idx.name is None and uq_sig(idx) not in conn_unique_sigs: metadata_unique_constraints.remove(idx) for idx in list(conn_unique_constraints): # just in case we fix the backend such that it does report # on them, blow them out of the reflected collection too otherwise # they will come up as removed. if the backend supports this now, # add a version check here for the dialect. 
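# Standalone sketch of the signature matching used just above for SQLite: an
# unnamed UNIQUE constraint from the model survives the comparison only when the
# reflected database shows a constraint over exactly the same column set (the
# values here are hypothetical).
def uq_sig(cols):
    return tuple(sorted(cols))

conn_unique_sigs = set([uq_sig(['email']), uq_sig(['org_id', 'name'])])

print(uq_sig(['name', 'org_id']) in conn_unique_sigs)   # True  -> kept, compared normally
print(uq_sig(['phone']) in conn_unique_sigs)            # False -> removed, no diff emitted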
if idx.name is None: conn_unique_constraints.remove(idx) # @compiles(AddColumn, 'sqlite') # def visit_add_column(element, compiler, **kw): # return "%s %s" % ( # alter_table(compiler, element.table_name, element.schema), # add_column(compiler, element.column, **kw) # ) # def add_column(compiler, column, **kw): # text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) # need to modify SQLAlchemy so that the CHECK associated with a Boolean # or Enum gets placed as part of the column constraints, not the Table # see ticket 98 # for const in column.constraints: # text += compiler.process(AddConstraint(const)) # return text PKjuD>Zflhh*alembic/templates/generic/alembic.ini.mako# A generic, single database configuration. [alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S PK<8E=Q alembic/templates/generic/env.pyfrom __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure(url=url, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
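# For illustration, both code paths in this env.py can also be driven through the
# public command API instead of the "alembic" console script; the ini path below is
# a placeholder for the project's own configuration file.
from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")
command.upgrade(cfg, "head")             # online mode -> run_migrations_online()
command.upgrade(cfg, "head", sql=True)   # offline ("--sql") mode -> run_migrations_offline()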
""" engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() PKBAݤ&& alembic/templates/generic/READMEGeneric single-database configuration.PKBAn(alembic/templates/generic/script.py.mako"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} PKjuDx*alembic/templates/multidb/alembic.ini.mako# a multi-database configuration. [alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false databases = engine1, engine2 [engine1] sqlalchemy.url = driver://user:pass@localhost/dbname [engine2] sqlalchemy.url = driver://user:pass@localhost/dbname2 # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S PK<8E|̓ alembic/templates/multidb/env.pyfrom __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig import logging import re USE_TWOPHASE = False # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) logger = logging.getLogger('alembic.env') # gather section names referring to different # databases. These are named "engine1", "engine2" # in the sample .ini file. db_names = config.get_main_option('databases') # add your model's MetaData objects here # for 'autogenerate' support. These must be set # up to hold just those tables targeting a # particular database. table.tometadata() may be # helpful here in case a "copy" of # a MetaData is needed. # from myapp import mymodel # target_metadata = { # 'engine1':mymodel.metadata1, # 'engine2':mymodel.metadata2 #} target_metadata = {} # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. 
This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ # for the --sql use case, run migrations for each URL into # individual files. engines = {} for name in re.split(r',\s*', db_names): engines[name] = rec = {} rec['url'] = context.config.get_section_option(name, "sqlalchemy.url") for name, rec in engines.items(): logger.info("Migrating database %s" % name) file_ = "%s.sql" % name logger.info("Writing output to %s" % file_) with open(file_, 'w') as buffer: context.configure(url=rec['url'], output_buffer=buffer, target_metadata=target_metadata.get(name)) with context.begin_transaction(): context.run_migrations(engine_name=name) def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # for the direct-to-DB use case, start a transaction on all # engines, then run all migrations, then commit all transactions. engines = {} for name in re.split(r',\s*', db_names): engines[name] = rec = {} rec['engine'] = engine_from_config( context.config.get_section(name), prefix='sqlalchemy.', poolclass=pool.NullPool) for name, rec in engines.items(): engine = rec['engine'] rec['connection'] = conn = engine.connect() if USE_TWOPHASE: rec['transaction'] = conn.begin_twophase() else: rec['transaction'] = conn.begin() try: for name, rec in engines.items(): logger.info("Migrating database %s" % name) context.configure( connection=rec['connection'], upgrade_token="%s_upgrades" % name, downgrade_token="%s_downgrades" % name, target_metadata=target_metadata.get(name) ) context.run_migrations(engine_name=name) if USE_TWOPHASE: for rec in engines.values(): rec['transaction'].prepare() for rec in engines.values(): rec['transaction'].commit() except: for rec in engines.values(): rec['transaction'].rollback() raise finally: for rec in engines.values(): rec['connection'].close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() PKBA*)) alembic/templates/multidb/READMERudimentary multi-database configuration.PK<8EGII(alembic/templates/multidb/script.py.mako<%! import re %>"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() <% db_names = config.get_main_option("databases") %> ## generate an "upgrade_() / downgrade_()" function ## for each database name in the ini file. % for db_name in re.split(r',\s*', db_names): def upgrade_${db_name}(): ${context.get("%s_upgrades" % db_name, "pass")} def downgrade_${db_name}(): ${context.get("%s_downgrades" % db_name, "pass")} % endfor PKkuD __)alembic/templates/pylons/alembic.ini.mako# a Pylons configuration. 
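# Small sketch of how the multidb template above derives per-engine names: the
# "databases" option is split on commas, and each name gets its own upgrade/downgrade
# token, matched by the upgrade_<name>()/downgrade_<name>() functions that
# script.py.mako generates.
import re

db_names = "engine1, engine2"   # as in the multidb alembic.ini.mako above
for name in re.split(r',\s*', db_names):
    print("%s_upgrades" % name, "%s_downgrades" % name)
# engine1_upgrades engine1_downgrades
# engine2_upgrades engine2_downgrades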
[alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false pylons_config_file = ./development.ini # that's it !PK )Ek>QE E alembic/templates/pylons/env.py"""Pylons bootstrap environment. Place 'pylons_config_file' into alembic.ini, and the application will be loaded from there. """ from alembic import context from paste.deploy import loadapp from logging.config import fileConfig from sqlalchemy.engine.base import Engine try: # if pylons app already in, don't create a new app from pylons import config as pylons_config pylons_config['__file__'] except: config = context.config # can use config['__file__'] here, i.e. the Pylons # ini file, instead of alembic.ini config_file = config.get_main_option('pylons_config_file') fileConfig(config_file) wsgi_app = loadapp('config:%s' % config_file, relative_to='.') # customize this section for non-standard engine configurations. meta = __import__("%s.model.meta" % wsgi_app.config['pylons.package']).model.meta # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure( url=meta.engine.url, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # specify here how the engine is acquired # engine = meta.engine raise NotImplementedError("Please specify engine connectivity here") if isinstance(engine, Engine): connection = engine.connect() else: raise Exception( 'Expected engine instance got %s instead' % type(engine) ) context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() PKBAW;;alembic/templates/pylons/READMEConfiguration that reads from a Pylons project environment.PKBAn'alembic/templates/pylons/script.py.mako"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} PKdFHk5.uliweb_alembic-0.6.9.dist-info/DESCRIPTION.rstAlembic is a new database migrations tool, written by the author of `SQLAlchemy `_. 
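For illustration, a generated migration script typically looks like the following sketch (the revision identifiers, table and column names here are hypothetical); the functionality summarized below is what fills in and executes its ``upgrade()`` and ``downgrade()`` functions::

    from alembic import op
    import sqlalchemy as sa

    # revision identifiers, used by Alembic.
    revision = 'ae1027a6acf'
    down_revision = None

    def upgrade():
        op.add_column('account',
            sa.Column('last_seen', sa.DateTime(), nullable=True))

    def downgrade():
        op.drop_column('account', 'last_seen')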
A migrations tool offers the following functionality: * Can emit ALTER statements to a database in order to change the structure of tables and other constructs * Provides a system whereby "migration scripts" may be constructed; each script indicates a particular series of steps that can "upgrade" a target database to a new version, and optionally a series of steps that can "downgrade" similarly, doing the same steps in reverse. * Allows the scripts to execute in some sequential manner. The goals of Alembic are: * Very open ended and transparent configuration and operation. A new Alembic environment is generated from a set of templates which is selected among a set of options when setup first occurs. The templates then deposit a series of scripts that define fully how database connectivity is established and how migration scripts are invoked; the migration scripts themselves are generated from a template within that series of scripts. The scripts can then be further customized to define exactly how databases will be interacted with and what structure new migration files should take. * Full support for transactional DDL. The default scripts ensure that all migrations occur within a transaction - for those databases which support this (Postgresql, Microsoft SQL Server), migrations can be tested with no need to manually undo changes upon failure. * Minimalist script construction. Basic operations like renaming tables/columns, adding/removing columns, changing column attributes can be performed through one line commands like alter_column(), rename_table(), add_constraint(). There is no need to recreate full SQLAlchemy Table structures for simple operations like these - the functions themselves generate minimalist schema structures behind the scenes to achieve the given DDL sequence. * "auto generation" of migrations. While real world migrations are far more complex than what can be automatically determined, Alembic can still eliminate the initial grunt work in generating new migration directives from an altered schema. The ``--autogenerate`` feature will inspect the current status of a database using SQLAlchemy's schema inspection capabilities, compare it to the current state of the database model as specified in Python, and generate a series of "candidate" migrations, rendering them into a new migration script as Python directives. The developer then edits the new file, adding additional directives and data migrations as needed, to produce a finished migration. Table and column level changes can be detected, with constraints and indexes to follow as well. * Full support for migrations generated as SQL scripts. Those of us who work in corporate environments know that direct access to DDL commands on a production database is a rare privilege, and DBAs want textual SQL scripts. Alembic's usage model and commands are oriented towards being able to run a series of migrations into a textual output file as easily as it runs them directly to a database. Care must be taken in this mode to not invoke other operations that rely upon in-memory SELECTs of rows - Alembic tries to provide helper constructs like bulk_insert() to help with data-oriented operations that are compatible with script-based DDL. * Non-linear versioning. Scripts are given UUID identifiers similarly to a DVCS, and the linkage of one script to the next is achieved via markers within the scripts themselves. 
Through this open-ended mechanism, branches containing other migration scripts can be merged - the linkages can be manually edited within the script files to create the new sequence. * Provide a library of ALTER constructs that can be used by any SQLAlchemy application. The DDL constructs build upon SQLAlchemy's own DDLElement base and can be used standalone by any application or script. * Don't break our necks over SQLite's inability to ALTER things. SQLite has almost no support for table or column alteration, and this is likely intentional. Alembic's design is kept simple by not contorting its core API around these limitations, understanding that SQLite is simply not intended to support schema changes. While Alembic's architecture can support SQLite's workarounds, and we will support these features provided someone takes the initiative to implement and test, until the SQLite developers decide to provide a fully working version of ALTER, it's still vastly preferable to use Alembic, or any migrations tool, with databases that are designed to work under the assumption of in-place schema migrations taking place. Documentation and status of Alembic is at http://readthedocs.org/docs/alembic/. PKdFH$ 11/uliweb_alembic-0.6.9.dist-info/entry_points.txt[console_scripts] alembic = alembic.config:main PKdFHKK,uliweb_alembic-0.6.9.dist-info/metadata.json{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends"], "extensions": {"python.commands": {"wrap_console": {"alembic": "alembic.config:main"}}, "python.details": {"contacts": [{"email": "limodou@gmail.com", "name": "limodou", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/limodou/uliweb-alembic"}}, "python.exports": {"console_scripts": {"alembic": "alembic.config:main"}}}, "extras": [], "generator": "bdist_wheel (0.28.0)", "keywords": ["SQLAlchemy", "migrations"], "license": "MIT", "metadata_version": "2.0", "name": "uliweb-alembic", "run_requires": [{"requires": ["Mako", "SQLAlchemy (>=0.7.3)"]}], "summary": "A port of alembic for uliweb framework.", "test_requires": [{"requires": ["mock", "nose (>=0.11)"]}], "version": "0.6.9"}PKdFHjt,uliweb_alembic-0.6.9.dist-info/top_level.txtalembic PKdFH>nn$uliweb_alembic-0.6.9.dist-info/WHEELWheel-Version: 1.0 Generator: bdist_wheel (0.28.0) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any PKdFHWcX'uliweb_alembic-0.6.9.dist-info/METADATAMetadata-Version: 2.0 Name: uliweb-alembic Version: 0.6.9 Summary: A port of alembic for uliweb framework. Home-page: https://github.com/limodou/uliweb-alembic Author: limodou Author-email: limodou@gmail.com License: MIT Keywords: SQLAlchemy migrations Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Database :: Front-Ends Requires-Dist: Mako Requires-Dist: SQLAlchemy (>=0.7.3) Alembic is a new database migrations tool, written by the author of `SQLAlchemy `_. 