Commit 3320965b authored by Anastasia McDonald

Merge branch 'jmd-add-praefect-dataloss-accept-e2e' into 'master'

Add praefect accept-dataloss e2e

See merge request gitlab-org/gitlab!80574
parents 5ddd8885 4763ef88
......@@ -327,6 +327,22 @@ module QA
end
end
# Runs `praefect accept-dataloss` inside the Praefect container, declaring the
# copy of the project's repository on +authoritative_storage+ to be the source
# of truth. Replicas on the other storages will be overwritten to match it on
# the next replication run.
#
# @param project_id [Integer, String] ID of the project whose repository has diverged
# @param authoritative_storage [String] Gitaly storage name to treat as authoritative
# @return the result of +shell+ for the executed docker command
def accept_dataloss_for_project(project_id, authoritative_storage)
  # GitLab hashed storage: the on-disk path is derived from the SHA256 of the
  # project ID, sharded by the first two byte-pairs of the digest.
  repository_hash = Digest::SHA256.hexdigest(project_id.to_s)
  repository = "@hashed/#{repository_hash[0, 2]}/#{repository_hash[2, 2]}/#{repository_hash}.git"
  cmd = %{
    docker exec #{@praefect} \
    praefect \
    -config /var/opt/gitlab/praefect/config.toml \
    accept-dataloss \
    --virtual-storage=default \
    --repository=#{repository} \
    --authoritative-storage=#{authoritative_storage}
  }
  shell(cmd)
end
def wait_for_health_check_all_nodes
wait_for_gitaly_health_check(@primary_node)
wait_for_gitaly_health_check(@secondary_node)
......@@ -415,6 +431,27 @@ module QA
Support::Waiter.wait_until(sleep_interval: 1) { replication_queue_incomplete_count == 0 && replicated?(project_id) }
end
# Blocks until Praefect's replication queue contains no outstanding 'ready'
# update jobs targeting +node+ for the given project's repository. Polls the
# replication_queue table (psql inside the docker container) every second.
#
# NOTE(review): the relative_path filter here is '<sha256>.git' with no
# '@hashed/xx/yy/' prefix, while accept_dataloss_for_project builds paths as
# '@hashed/xx/yy/<sha256>.git'. If queued jobs store the prefixed path, this
# query always returns 0 rows and the wait succeeds immediately — TODO confirm
# against the replication_queue schema.
def wait_for_replication_to_node(project_id, node)
Support::Waiter.wait_until(sleep_interval: 1) do
result = []
# Capture each line of psql output and echo it to the debug log.
shell sql_to_docker_exec_cmd(%{
select * from replication_queue
where state = 'ready'
and job ->> 'change' = 'update'
and job ->> 'target_node_storage' = '#{node}'
and job ->> 'relative_path' = '#{Digest::SHA256.hexdigest(project_id.to_s)}.git';
}) do |line|
result << line.strip
QA::Runtime::Logger.debug(line.strip)
end
# The result should look like this when all items are replicated
# id | state | created_at | updated_at | attempt | lock_id | job | meta
# ----+-------+------------+------------+---------+---------+-----+------
# (0 rows)
result[2] == '(0 rows)'
end
end
def replication_pending?
result = []
shell sql_to_docker_exec_cmd(
......
......@@ -52,6 +52,53 @@ module QA
expect(project_data_loss).to include('gitaly3, assigned host, unhealthy')
end
end
# Simulates unrecoverable data loss: commits land on progressively fewer nodes
# as nodes are stopped, then the admin accepts dataloss using the secondary
# node (gitaly2) as the authoritative copy. Commits present on gitaly2 must
# survive; the commit that only ever reached gitaly3 must be discarded.
it 'allows admin resolve scenario where data cannot be recovered', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/352708' do
# Ensure everything is in sync before beginning the test
praefect_manager.wait_for_project_synced_across_all_storages(project.id)

# All nodes up: this commit replicates to gitaly1, gitaly2 and gitaly3.
Resource::Repository::Commit.fabricate_via_api! do |commit|
commit.project = project
commit.commit_message = 'accept-dataloss-1'
commit.add_files([
{ file_path: "new_file-#{SecureRandom.hex(8)}.txt", content: 'Add a commit to gitaly1,gitaly2,gitaly3' }
])
end
praefect_manager.wait_for_replication_to_node(project.id, praefect_manager.primary_node)
praefect_manager.stop_primary_node

# Primary down: this commit only reaches gitaly2 and gitaly3.
Resource::Repository::Commit.fabricate_via_api! do |commit|
commit.project = project
commit.commit_message = 'accept-dataloss-2'
commit.add_files([
{ file_path: "new_file-#{SecureRandom.hex(8)}.txt", content: 'Add a commit to gitaly2,gitaly3' }
])
end
praefect_manager.wait_for_replication_to_node(project.id, praefect_manager.secondary_node)
praefect_manager.stop_secondary_node

# Primary and secondary down: this commit only reaches gitaly3.
Resource::Repository::Commit.fabricate_via_api! do |commit|
commit.project = project
commit.commit_message = 'accept-dataloss-3'
commit.add_files([
{ file_path: "new_file-#{SecureRandom.hex(8)}.txt", content: 'Add a commit to gitaly3' }
])
end

# Confirms that they want to accept dataloss, using gitaly2 as authoritative storage to use as a base
praefect_manager.accept_dataloss_for_project(project.id, praefect_manager.secondary_node)

# Restart nodes, and allow replication to apply dataloss changes
praefect_manager.start_all_nodes
praefect_manager.wait_for_project_synced_across_all_storages(project.id)

# Validate that gitaly2 was accepted as the authoritative storage.
# Fetch the commit list once rather than issuing the same API call per expectation.
commit_messages = project.commits.map { |commit| commit[:message].chomp }
aggregate_failures "validate correct set of commits available" do
expect(commit_messages).to include('accept-dataloss-1')
expect(commit_messages).to include('accept-dataloss-2')
expect(commit_messages).not_to include('accept-dataloss-3')
end
end
end
end
end
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment