Python sqlalchemy 模块,BIGINT 实例源码
我们从Python开源项目中,提取了以下9个代码示例,用于说明如何使用sqlalchemy.BIGINT。
def downgrade():
    """Revert a callset/sample refactor: recreate the db_row table, point
    callset back at individual_id/dbrow_id, and drop the
    callset_to_db_array_association and sample tables.

    NOTE(review): this snippet was scraped from a web page; several
    sa.Column(...) calls below are truncated mid-argument (types and closing
    parentheses are missing), so the block is not valid Python as written.
    Recover the original migration before running it.
    """
    # Drop trigger
    # Remove the row-count trigger before dropping the num_rows column it maintains.
    op.execute(
        'DROP TRIGGER increment_num_rows_in_db_array ON callset_to_db_array_association CASCADE')
    op.drop_column(u'db_array', 'num_rows')
    op.create_table(
        'db_row',
        sa.Column('id', sa.BIGINT(), nullable=False),
        # NOTE(review): the next two Column definitions were truncated by the
        # extraction (types and trailing arguments lost) -- TODO restore.
        sa.Column('db_array_id', autoincrement=False,
        sa.Column('tile_row_id',
        sa.ForeignKeyConstraint(['db_array_id'],[u'db_array.id'], name=u'db_row_db_array_id_fkey'),
        sa.PrimaryKeyConstraint('id', name=u'db_row_pkey'))
    op.add_column(
        u'callset',
        # NOTE(review): column type lost in extraction -- TODO restore.
        sa.Column('individual_id', nullable=False))
    op.add_column(u'callset', sa.Column('dbrow_id', nullable=False))
    op.drop_constraint('callset_source_sample_id_fkey','callset', type_='foreignkey')
    # NOTE(review): the table name ('callset', to match the call above)
    # appears to be missing here -- alembic drop_constraint needs it.
    op.drop_constraint('callset_target_sample_id_fkey', type_='foreignkey')
    # NOTE(review): alembic create_foreign_key takes (name, source_table,
    # referent_table, local_cols, remote_cols); the source table (presumably
    # 'callset', whose columns are referenced) looks lost in extraction.
    op.create_foreign_key(u'callset_individual_id_fkey', 'individual', ['individual_id'], ['id'])
    op.create_foreign_key(u'callset_dbrow_id_fkey', 'db_row', ['dbrow_id'], ['id'])
    op.drop_column(u'callset', 'target_sample_id')
    op.drop_column(u'callset', 'source_sample_id')
    op.drop_index('db_array_id_tile_row_id_idx',table_name='callset_to_db_array_association')
    op.drop_table('callset_to_db_array_association')
    op.drop_table('sample')
def upgrade():
    """Create the 'run' sequencing-metrics table (keyed to dataset) and add a
    unique constraint on dataset.db_source_uid.

    NOTE(review): this snippet was scraped from a web page; most
    sa.Column(...) calls below were truncated mid-argument (types,
    nullability and closing parentheses are missing), so the block is not
    valid Python as written. Recover the original migration before use.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('run',
        # NOTE(review): truncated Column definitions follow -- TODO restore.
        sa.Column('id', sa.Integer(),
        sa.Column('run_id', sa.String(length=30), nullable=True),
        sa.Column('library_reads_sequenced',
        sa.Column('total_num_bases',
        sa.Column('download_size',
        sa.Column('avg_read_length', sa.Float(),
        sa.Column('baseA_count',
        sa.Column('baseC_count',
        sa.Column('baseG_count',
        sa.Column('baseT_count',
        sa.Column('baseN_count',
        sa.Column('gc_percent',
        sa.Column('run_quality_counts', sa.Text(),
        sa.Column('dataset_id',
        sa.ForeignKeyConstraint(['dataset_id'], ['dataset.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_unique_constraint(None, 'dataset', ['db_source_uid'])
    # ### end Alembic commands ###
def upgrade():
    """Add TileDB column-offset bookkeeping to reference_set/reference.

    Adds reference_set.next_tiledb_column_offset and
    reference.tiledb_column_offset, tightens NOT NULL on reference.length
    and reference.name, replaces the old reference.offset column, and
    installs a trigger that assigns offsets on reference insertion.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        'reference_set',
        sa.Column('next_tiledb_column_offset', sa.BigInteger(), nullable=False, default=0))
    op.add_column(
        'reference',
        # Type was lost in the scraped source; BigInteger matches
        # reference_set.next_tiledb_column_offset, which the trigger below
        # copies into this column -- TODO confirm against the original.
        sa.Column('tiledb_column_offset', sa.BigInteger(), nullable=True))
    op.alter_column('reference', 'length', existing_type=sa.BIGINT(), nullable=False)
    op.alter_column('reference', 'name', existing_type=sa.TEXT(), nullable=False)
    op.create_unique_constraint('unique_name_per_reference_set_constraint', 'reference',
                                ['reference_set_id', 'name'])
    # Reconstructed from a garbled source line: the index name and the
    # matching drop_index('...', table_name='reference') in downgrade() imply
    # a unique index on reference(reference_set_id, tiledb_column_offset).
    op.create_index('unique_reference_set_id_offset_idx', 'reference',
                    ['reference_set_id', 'tiledb_column_offset'],
                    unique=True)
    op.drop_column('reference', 'offset')
    # Trigger on reference insertion: assign the reference its set's next
    # free TileDB column offset, then advance the set's counter by length.
    op.execute('''\
CREATE OR REPLACE FUNCTION increment_next_column_in_reference_set_pgsql()
RETURNS trigger AS $increment_next_column_in_reference_set_pgsql$
BEGIN
UPDATE reference SET tiledb_column_offset=(select next_tiledb_column_offset from reference_set where id=NEW.reference_set_id) where NEW.tiledb_column_offset IS NULL and id=NEW.id;
UPDATE reference_set SET next_tiledb_column_offset=next_tiledb_column_offset+NEW.length WHERE id = NEW.reference_set_id;
RETURN NEW;
END;
$increment_next_column_in_reference_set_pgsql$ LANGUAGE plpgsql;
CREATE TRIGGER increment_next_column_in_reference_set AFTER INSERT ON reference
FOR EACH ROW EXECUTE PROCEDURE increment_next_column_in_reference_set_pgsql();
''')
    ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the offset-assignment trigger and the TileDB
    offset columns, restore reference.offset, and relax the NOT NULL
    constraints on reference.length and reference.name."""
    ### commands auto generated by Alembic - please adjust! ###
    # Drop trigger
    op.execute(
        'DROP TRIGGER increment_next_column_in_reference_set ON reference CASCADE')
    op.add_column('reference',
                  # Column type was lost in the scraped source; BigInteger
                  # assumed to mirror tiledb_column_offset -- TODO confirm.
                  sa.Column('offset', sa.BigInteger(), nullable=True))
    op.drop_index('unique_reference_set_id_offset_idx', table_name='reference')
    # The scraped source omitted the table name; the constraint is created
    # on 'reference' in upgrade().
    op.drop_constraint('unique_name_per_reference_set_constraint', 'reference',
                       type_='unique')
    # The scraped source dropped the column names; upgrade() makes
    # reference.length and reference.name NOT NULL, so relax both here.
    op.alter_column('reference', 'length', nullable=True)
    op.alter_column('reference', 'name', nullable=True)
    op.drop_column('reference', 'tiledb_column_offset')
    op.drop_column('reference_set', 'next_tiledb_column_offset')
    ### end Alembic commands ###
def upgrade():
    """Create the 'file' (hash/metadata record) and 'lookup_request' tables.

    NOTE(review): this snippet was scraped from a web page; most
    sa.Column(...) calls below were truncated mid-argument (types and
    closing parentheses are missing), so the block is not valid Python as
    written. The lookup_request foreign key references file.file_id, but its
    local column definition was lost in extraction.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('file',
        # NOTE(review): truncated Column definitions follow -- TODO restore.
        sa.Column('file_id',
        sa.Column('sha512_hash', sa.TEXT(),
        sa.Column('sha256_hash',
        sa.Column('sha1_hash',
        sa.Column('md5_hash',
        sa.Column('size', sa.FLOAT(),
        sa.Column('mime_type', sa.VARCHAR(length=120),
                  nullable=True),
        sa.Column('submitted_by',
                  nullable=False),
        sa.Column('status', sa.VARCHAR(length=20),
        sa.Column('last_updated', sa.DATETIME(),
        sa.Column('first_seen',
        sa.PrimaryKeyConstraint('file_id')
    )
    op.create_table('lookup_request',
        # NOTE(review): truncated Column definitions follow -- TODO restore.
        sa.Column('request_id',
        sa.Column('requested_at',
        sa.Column('requestor',
        sa.Column('lookup_hash',
        sa.Column('result',
        sa.ForeignKeyConstraint(['file_id'], ['file.file_id'],
        sa.PrimaryKeyConstraint('request_id')
    )
    # ### end Alembic commands ###
def upgrade():
    """Relax the NOT NULL constraint on file.obsid so rows may omit it."""
    op.alter_column(table_name='file', column_name='obsid', nullable=True)
def downgrade():
    """Re-impose NOT NULL on file.obsid, reverting upgrade().

    Fix: the scraped source called op.alter_column('file', nullable=False)
    without the column name; 'obsid' restored to mirror upgrade().
    """
    # This probably won't work in practice since the rows with null obsids will need
    # to be deleted.
    op.alter_column('file', 'obsid', nullable=False)
def upgrade():
    """Create the 'queue' job table and an index for scheduling lookups.

    NOTE(review): this snippet was scraped from a web page; most
    sa.Column(...) calls below were truncated mid-argument, so the block is
    not valid Python as written. The PrimaryKeyConstraint references an 'id'
    column whose definition was lost in extraction.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('queue',
        # enqueued_at defaults to the database clock via Now().
        # NOTE(review): truncated Column definitions follow -- TODO restore.
        sa.Column('enqueued_at', postgresql.TIMESTAMP(timezone=True), server_default=sa.text('Now()'),
        sa.Column('dequeued_at',
        sa.Column('expected_at',
        sa.Column('schedule_at',
        sa.Column('q_name',
        sa.Column('data', postgresql.JSON(astext_type=sa.Text()),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('priority_idx', 'queue', ['schedule_at', 'expected_at'], unique=False)
    # ### end Alembic commands ###
def upgrade_rdr():
    """Create the 'measurement' table (self-referencing via parent_id and
    qualifier_id, keyed to physical_measurements) and the
    'measurement_to_qualifier' association table.

    NOTE(review): this snippet was scraped from a web page; most
    sa.Column(...) calls and several constraints below were truncated
    mid-argument, so the block is not valid Python as written. Recover the
    original migration before use.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('measurement',
        # NOTE(review): truncated Column definitions follow -- TODO restore.
        sa.Column('measurement_id',
        sa.Column('physical_measurements_id',
        sa.Column('code_system', sa.String(length=255),
        sa.Column('code_value',
        sa.Column('measurement_time', model.utils.UTCDateTime(),
        sa.Column('body_site_code_system',
        sa.Column('body_site_code_value',
        sa.Column('value_string', sa.String(length=1024),
        sa.Column('value_decimal',
        sa.Column('value_unit',
        sa.Column('value_code_system',
        sa.Column('value_code_value',
        sa.Column('value_datetime',
        sa.Column('parent_id',
        sa.Column('qualifier_id',
        sa.ForeignKeyConstraint(['parent_id'], ['measurement.measurement_id'],
        sa.ForeignKeyConstraint(['physical_measurements_id'], ['physical_measurements.physical_measurements_id'],
        sa.ForeignKeyConstraint(['qualifier_id'],
        sa.PrimaryKeyConstraint('measurement_id')
    )
    op.create_table('measurement_to_qualifier',
        # NOTE(review): the association table's Column definitions and the
        # referent of this foreign key were lost in extraction.
        sa.ForeignKeyConstraint(['measurement_id'],
        sa.PrimaryKeyConstraint('measurement_id', 'qualifier_id')
    )
    # ### end Alembic commands ###
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 dio@foxmail.com 举报,一经查实,本站将立刻删除。