Commit 6c1ef089 authored by Mario de la Ossa, committed by Adam Hegyi

Set all NULL `lock_version` values to 0 PART 2

Rails silently casts NULL `lock_version` values to 0 while doing
optimistic locking, which causes false stale object exceptions. We had a
monkey patch that changed the check to accept [NULL, 0], but we want to
avoid monkey-patching where possible, which means we need to clean up
the database values instead.

This commit covers the CI objects (CI stages, CI builds, CI pipelines).
parent b85f9cc8
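A minimal sketch of the failure mode the commit message describes, using an illustrative model (none of these names come from this changeset):

class SomeJob < ActiveRecord::Base
  # Having an integer `lock_version` column enables optimistic locking.
end

job = SomeJob.find(1) # DB row has lock_version = NULL
job.lock_version      # => 0; the locking type deserializes NULL as 0

# On save, Rails issues roughly:
#   UPDATE some_jobs SET ..., lock_version = 1 WHERE id = 1 AND lock_version = 0
# The WHERE clause matches nothing because the stored value is NULL, so Rails
# raises ActiveRecord::StaleObjectError even though nobody else touched the
# row. Setting the stored NULLs to 0 removes the mismatch.
job.update!(status: 'retried') # hypothetical column; any attribute change triggers the locked UPDATE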
---
title: Set NULL `lock_version` values to 0 for CI objects
merge_request: 30305
author:
type: fixed
@@ -31,7 +31,7 @@ class CleanupOptimisticLockingNulls < ActiveRecord::Migration[5.2]
'CleanupOptimisticLockingNulls',
2.minutes,
batch_size: BATCH_SIZE,
other_arguments: [table]
other_job_arguments: [table]
)
end
end
......
# frozen_string_literal: true

class CleanupOptimisticLockingNullsPt2 < ActiveRecord::Migration[5.2]
  def change
    # no-op: the MR that contained this migration was reverted
  end
end
# frozen_string_literal: true

class CleanupOptimisticLockingNullsPt2Fixed < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  disable_ddl_transaction!

  TABLES = %w(ci_stages ci_builds ci_pipelines).freeze
  BATCH_SIZE = 10_000

  # Builds a minimal, isolated model for the given table so the migration
  # does not depend on application models.
  def declare_class(table)
    Class.new(ActiveRecord::Base) do
      include EachBatch

      self.table_name = table
      self.inheritance_column = :_type_disabled # Disable STI
    end
  end

  def up
    last_table_final_delay = 0

    TABLES.each do |table|
      # Clean up the wrong index created by the previous migration; it might still be present on staging
      remove_concurrent_index table.to_sym, :lock_version, where: "lock_version IS NULL"

      add_concurrent_index table.to_sym, :id, where: "lock_version IS NULL", name: "tmp_index_#{table}_lock_version"

      # Chain the tables: each table's jobs start after the previous table's
      # final job, using the delay returned by the helper.
      last_table_final_delay = queue_background_migration_jobs_by_range_at_intervals(
        declare_class(table).where(lock_version: nil),
        'CleanupOptimisticLockingNulls',
        2.minutes,
        batch_size: BATCH_SIZE,
        other_job_arguments: [table],
        initial_delay: last_table_final_delay
      )
    end
  end

  def down
    TABLES.each do |table|
      remove_concurrent_index table.to_sym, :id, where: "lock_version IS NULL", name: "tmp_index_#{table}_lock_version"
    end
  end
end
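The `CleanupOptimisticLockingNulls` background migration class that these jobs invoke lives elsewhere and is not part of this diff. A minimal sketch of what such a job plausibly looks like; the sub-batch size and the anonymous-model setup are assumptions:

# frozen_string_literal: true

module Gitlab
  module BackgroundMigration
    # Assumed shape of the job enqueued above: it receives an ID range and a
    # table name, and sets the remaining NULL lock_version values to 0.
    class CleanupOptimisticLockingNulls
      SUB_BATCH_SIZE = 1_000 # hypothetical sub-batch size

      def perform(start_id, end_id, table)
        model = Class.new(ActiveRecord::Base) do
          include EachBatch

          self.table_name = table
          self.inheritance_column = :_type_disabled # Disable STI
        end

        model
          .where(id: start_id..end_id)
          .where(lock_version: nil)
          .each_batch(of: SUB_BATCH_SIZE) do |batch|
            batch.update_all(lock_version: 0)
          end
      end
    end
  end
end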
@@ -10723,6 +10723,12 @@ CREATE INDEX tmp_build_stage_position_index ON public.ci_builds USING btree (sta
CREATE INDEX tmp_idx_on_user_id_where_bio_is_filled ON public.users USING btree (id) WHERE ((COALESCE(bio, ''::character varying))::text IS DISTINCT FROM ''::text);
CREATE INDEX tmp_index_ci_builds_lock_version ON public.ci_builds USING btree (id) WHERE (lock_version IS NULL);
CREATE INDEX tmp_index_ci_pipelines_lock_version ON public.ci_pipelines USING btree (id) WHERE (lock_version IS NULL);
CREATE INDEX tmp_index_ci_stages_lock_version ON public.ci_stages USING btree (id) WHERE (lock_version IS NULL);
CREATE UNIQUE INDEX users_security_dashboard_projects_unique_index ON public.users_security_dashboard_projects USING btree (project_id, user_id);
CREATE UNIQUE INDEX vulnerability_feedback_unique_idx ON public.vulnerability_feedback USING btree (project_id, category, feedback_type, project_fingerprint);
@@ -13257,6 +13263,7 @@ COPY "schema_migrations" (version) FROM STDIN;
20200214214934
20200215222507
20200215225103
20200217210353
20200217223651
20200217225719
20200218113721
@@ -13484,5 +13491,6 @@ COPY "schema_migrations" (version) FROM STDIN;
20200423080607
20200423081409
20200424050250
20200427064130
\.
@@ -1063,6 +1063,8 @@ into similar problems in the future (e.g. when new tables are created).
# batch_size - The maximum number of rows per job
# other_arguments - Other arguments to send to the job
#
# *Returns the final migration delay*
#
# Example:
#
# class Route < ActiveRecord::Base
@@ -1079,7 +1081,7 @@
# # do something
# end
# end
def queue_background_migration_jobs_by_range_at_intervals(model_class, job_class_name, delay_interval, batch_size: BACKGROUND_MIGRATION_BATCH_SIZE, other_arguments: [])
def queue_background_migration_jobs_by_range_at_intervals(model_class, job_class_name, delay_interval, batch_size: BACKGROUND_MIGRATION_BATCH_SIZE, other_job_arguments: [], initial_delay: 0)
raise "#{model_class} does not have an ID to use for batch ranges" unless model_class.column_names.include?('id')
# To not overload the worker too much we enforce a minimum interval both
@@ -1088,14 +1090,19 @@
delay_interval = BackgroundMigrationWorker.minimum_interval
end
final_delay = nil
model_class.each_batch(of: batch_size) do |relation, index|
start_id, end_id = relation.pluck(Arel.sql('MIN(id), MAX(id)')).first
# `BackgroundMigrationWorker.bulk_perform_in` schedules all jobs for
# the same time, which is not helpful in most cases where we wish to
# spread the work over time.
migrate_in(delay_interval * index, job_class_name, [start_id, end_id] + other_arguments)
final_delay = initial_delay + delay_interval * index
migrate_in(final_delay, job_class_name, [start_id, end_id] + other_job_arguments)
end
final_delay
end
# Fetches indexes on a column by name for postgres.
......
@@ -1365,6 +1365,14 @@ describe Gitlab::Database::MigrationHelpers do
end
end
it 'returns the final expected delay' do
Sidekiq::Testing.fake! do
final_delay = model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, batch_size: 2)
expect(final_delay.to_f).to eq(20.minutes.to_f)
end
end
context 'with batch_size option' do
it 'queues jobs correctly' do
Sidekiq::Testing.fake! do
@@ -1389,12 +1397,25 @@ describe Gitlab::Database::MigrationHelpers do
end
end
context 'with other_arguments option' do
context 'with other_job_arguments option' do
it 'queues jobs correctly' do
Sidekiq::Testing.fake! do
model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_arguments: [1, 2])
model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_job_arguments: [1, 2])
expect(BackgroundMigrationWorker.jobs[0]['args']).to eq(['FooJob', [id1, id3, 1, 2]])
expect(BackgroundMigrationWorker.jobs[0]['at']).to eq(10.minutes.from_now.to_f)
end
end
end
context 'with initial_delay option' do
it 'queues jobs correctly' do
Sidekiq::Testing.fake! do
model.queue_background_migration_jobs_by_range_at_intervals(User, 'FooJob', 10.minutes, other_job_arguments: [1, 2], initial_delay: 10.minutes)
expect(BackgroundMigrationWorker.jobs[0]['args']).to eq(['FooJob', [id1, id3, 1, 2]])
expect(BackgroundMigrationWorker.jobs[0]['at']).to eq(20.minutes.from_now.to_f)
end
end
end
end
......
# frozen_string_literal: true

require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20200427064130_cleanup_optimistic_locking_nulls_pt2_fixed.rb')

describe CleanupOptimisticLockingNullsPt2Fixed, :migration do
  TABLES = %w(ci_stages ci_builds ci_pipelines).freeze

  TABLES.each do |table|
    let(table.to_sym) { table(table.to_sym) }
  end

  let(:tables) { TABLES.map { |t| method(t.to_sym).call } }

  before do
    # Create necessary rows
    ci_stages.create!
    ci_builds.create!
    ci_pipelines.create!

    # Nullify `lock_version` column for all rows
    # Needs to be done with a SQL fragment, otherwise Rails will coerce it to 0
    tables.each do |table|
      table.update_all('lock_version = NULL')
    end
  end

  it 'correctly migrates nullified lock_version column', :sidekiq_might_not_need_inline do
    tables.each do |table|
      expect(table.where(lock_version: nil).count).to eq(1)
    end

    tables.each do |table|
      expect(table.where(lock_version: 0).count).to eq(0)
    end

    migrate!

    tables.each do |table|
      expect(table.where(lock_version: nil).count).to eq(0)
    end

    tables.each do |table|
      expect(table.where(lock_version: 0).count).to eq(1)
    end
  end
end
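A side note on the SQL fragment in the `before` block above: `update_all` runs hash values through the column's attribute type, and the optimistic-locking type serializes nil as 0, so only the raw fragment actually stores NULL. A minimal illustration using the same table helpers:

ci_builds.update_all(lock_version: nil)     # value is type-cast by the locking type and written as 0
ci_builds.update_all('lock_version = NULL') # raw SQL bypasses the cast and stores NULL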