Commit 32fe0463 authored by pbair's avatar pbair

WithLockRetries can error if all attempts fail

Modify WithLockRetries to accept an optional flag so that, if all attempts
to obtain the lock fail, it raises an error rather than executing
without a lock_timeout.
parent 6d59e61c
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
module Gitlab module Gitlab
module Database module Database
class WithLockRetries class WithLockRetries
AttemptsExhaustedError = Class.new(StandardError)
NULL_LOGGER = Gitlab::JsonLogger.new('/dev/null') NULL_LOGGER = Gitlab::JsonLogger.new('/dev/null')
# Each element of the array represents a retry iteration. # Each element of the array represents a retry iteration.
...@@ -63,7 +65,7 @@ module Gitlab ...@@ -63,7 +65,7 @@ module Gitlab
@log_params = { method: 'with_lock_retries', class: klass.to_s } @log_params = { method: 'with_lock_retries', class: klass.to_s }
end end
def run(&block) def run(raise_on_exhaustion: false, &block)
raise 'no block given' unless block_given? raise 'no block given' unless block_given?
@block = block @block = block
...@@ -85,6 +87,9 @@ module Gitlab ...@@ -85,6 +87,9 @@ module Gitlab
retry retry
else else
reset_db_settings reset_db_settings
raise AttemptsExhaustedError, 'configured attempts to obtain locks are exhausted' if raise_on_exhaustion
run_block_without_lock_timeout run_block_without_lock_timeout
end end
......
...@@ -72,9 +72,14 @@ RSpec.describe Gitlab::Database::WithLockRetries do ...@@ -72,9 +72,14 @@ RSpec.describe Gitlab::Database::WithLockRetries do
lock_attempts = 0 lock_attempts = 0
lock_acquired = false lock_acquired = false
expect_any_instance_of(Gitlab::Database::WithLockRetries).to receive(:sleep).exactly(retry_count - 1).times # we don't sleep in the last iteration # the actual number of attempts to run_block_with_transaction can never exceed the number of
# timings_configurations, so here we limit the retry_count if it exceeds that value
allow_any_instance_of(Gitlab::Database::WithLockRetries).to receive(:run_block_with_transaction).and_wrap_original do |method| #
# also, there is no call to sleep after the final attempt, which is why it will always be one less
expected_runs_with_timeout = [retry_count, timing_configuration.size].min
expect(subject).to receive(:sleep).exactly(expected_runs_with_timeout - 1).times
expect(subject).to receive(:run_block_with_transaction).exactly(expected_runs_with_timeout).times.and_wrap_original do |method|
lock_fiber.resume if lock_attempts == retry_count lock_fiber.resume if lock_attempts == retry_count
method.call method.call
...@@ -114,6 +119,33 @@ RSpec.describe Gitlab::Database::WithLockRetries do ...@@ -114,6 +119,33 @@ RSpec.describe Gitlab::Database::WithLockRetries do
end end
end end
context 'after the retries, when requested to raise an error' do
let(:expected_attempts_with_timeout) { timing_configuration.size }
let(:retry_count) { timing_configuration.size + 1 }
it 'raises an error instead of waiting indefinitely for the lock' do
lock_attempts = 0
lock_acquired = false
expect(subject).to receive(:sleep).exactly(expected_attempts_with_timeout - 1).times
expect(subject).to receive(:run_block_with_transaction).exactly(expected_attempts_with_timeout).times.and_call_original
expect do
subject.run(raise_on_exhaustion: true) do
lock_attempts += 1
ActiveRecord::Base.transaction do
ActiveRecord::Base.connection.execute("LOCK TABLE #{Project.table_name} in exclusive mode")
lock_acquired = true
end
end
end.to raise_error(described_class::AttemptsExhaustedError)
expect(lock_attempts).to eq(retry_count - 1)
expect(lock_acquired).to eq(false)
end
end
context 'when statement timeout is reached' do context 'when statement timeout is reached' do
it 'raises QueryCanceled error' do it 'raises QueryCanceled error' do
lock_acquired = false lock_acquired = false
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment