nexedi / gitlab-ce

Commit 69381291, authored Feb 17, 2022 by John McDonnell, committed by Ramya Authappan on Feb 17, 2022
Add a praefect dataloss information e2e
parent cd6fb32e
Showing 2 changed files with 105 additions and 2 deletions.
qa/qa/service/praefect_manager.rb (+48, -2)
qa/qa/specs/features/api/3_create/gitaly/praefect_dataloss_spec.rb (+57, -0)
qa/qa/service/praefect_manager.rb (+48, -2)

 # frozen_string_literal: true
 
+require 'digest'
+
 module QA
   module Service
     class PraefectManager
...
@@ -68,7 +70,7 @@ module QA
       def stop_secondary_node
         stop_node(@secondary_node)
-        wait_until_node_is_removed_from_healthy_storages(@stop_secondary_node)
+        wait_until_node_is_removed_from_healthy_storages(@secondary_node)
       end
 
       def start_secondary_node
...
@@ -106,6 +108,8 @@ module QA
       end
 
       def stop_node(name)
+        return if node_state(name) == 'paused'
+
         shell "docker pause #{name}"
       end
...
@@ -200,7 +204,7 @@ module QA
         start_node(@primary_node)
         start_node(@secondary_node)
         start_node(@tertiary_node)
-        start_node(@praefect)
+        start_praefect
 
         wait_for_health_check_all_nodes
       end
...
@@ -281,6 +285,48 @@ module QA
          end
        end
 
+      def praefect_dataloss_information(project_id)
+        dataloss_info = []
+        cmd = "docker exec #{@praefect} praefect -config /var/opt/gitlab/praefect/config.toml dataloss --partially-unavailable=true"
+        shell(cmd) { |line| dataloss_info << line.strip }
+
+        # Expected output will have a record for each repository in the storage, in the following format:
+        # @hashed/bc/52/bc52dd634277c4a34a2d6210994a9a5e2ab6d33bb4a3a8963410e00ca6c15a02.git:
+        #   Primary: gitaly1
+        #   In-Sync Storages:
+        #     gitaly1, assigned host
+        #     gitaly3, assigned host
+        #   Outdated Storages:
+        #     gitaly2 is behind by 1 change or less, assigned host
+        #
+        # Alternatively, if all repositories are in sync, a concise message is returned:
+        # Virtual storage: default
+        #   All repositories are fully available on all assigned storages!
+
+        # Extract the info for the project under test, if it is identified
+        start_index = dataloss_info.index { |line| line.include?("#{Digest::SHA256.hexdigest(project_id.to_s)}.git") }
+        unless start_index.nil?
+          dataloss_info = dataloss_info[start_index, 7]
+        end
+
+        dataloss_info&.each { |info| QA::Runtime::Logger.debug(info) }
+        dataloss_info
+      end
+
+      def praefect_dataloss_info_for_project(project_id)
+        dataloss_info = []
+        Support::Retrier.retry_until(max_duration: 60) do
+          dataloss_info = praefect_dataloss_information(project_id)
+          dataloss_info.include?("#{Digest::SHA256.hexdigest(project_id.to_s)}.git")
+        end
+      end
+
+      def wait_for_project_synced_across_all_storages(project_id)
+        Support::Retrier.retry_until(max_duration: 60) do
+          praefect_dataloss_information(project_id).include?('All repositories are fully available on all assigned storages!')
+        end
+      end
+
       def wait_for_health_check_all_nodes
         wait_for_gitaly_health_check(@primary_node)
         wait_for_gitaly_health_check(@secondary_node)
...
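A note on the Digest::SHA256.hexdigest(project_id.to_s) lookups above: GitLab's hashed storage names a project's repository directory after the SHA256 of its numeric project ID, so the praefect dataloss output keys each repository by that digest rather than by project name. A minimal sketch of the path derivation, with an illustrative project ID (not taken from this commit):

require 'digest'

# Hashed storage path derivation (sketch; the ID below is illustrative).
project_id = 1234
digest = Digest::SHA256.hexdigest(project_id.to_s)

# First two hex chars, next two hex chars, then the full digest, matching
# the "@hashed/bc/52/bc52dd63...git" record shown in the comment above.
repo_path = "@hashed/#{digest[0, 2]}/#{digest[2, 2]}/#{digest}.git"
puts repo_path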
qa/qa/specs/features/api/3_create/gitaly/praefect_dataloss_spec.rb (new file, mode 100644; +57, -0)

# frozen_string_literal: true

module QA
  RSpec.describe 'Create' do
    context 'Praefect dataloss commands', :orchestrated, :gitaly_cluster do
      let(:praefect_manager) { Service::PraefectManager.new }

      let(:project) do
        Resource::Project.fabricate! do |project|
          project.name = 'gitaly_cluster-dataloss-project'
          project.initialize_with_readme = true
        end
      end

      before do
        praefect_manager.start_all_nodes
      end

      it 'confirms that changes are synced across all storages',
         testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/352691' do
        expect { praefect_manager.praefect_dataloss_information(project.id) }
          .to(eventually_include('All repositories are fully available on all assigned storages!')
          .within(max_duration: 60))
      end

      it 'identifies how many changes are not in sync across storages',
         testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/352692' do
        # Ensure our test repository is replicated and in a consistent state prior to test
        praefect_manager.wait_for_project_synced_across_all_storages(project.id)

        # testing for gitaly2 'out of sync'
        praefect_manager.stop_secondary_node

        number_of_changes = 3
        1.upto(number_of_changes) do |i|
          Resource::Repository::Commit.fabricate_via_api! do |commit|
            commit.project = project
            commit.branch = "newbranch-#{SecureRandom.hex(8)}"
            commit.start_branch = project.default_branch
            commit.commit_message = 'Add new file'
            commit.add_files([
              { file_path: "new_file-#{SecureRandom.hex(8)}.txt", content: 'new file' }
            ])
          end
        end

        # testing for gitaly3 'in sync' but marked unhealthy
        praefect_manager.stop_tertiary_node

        project_data_loss = praefect_manager.praefect_dataloss_information(project.id)
        aggregate_failures "validate dataloss identified" do
          expect(project_data_loss).to include('gitaly1, assigned host')
          expect(project_data_loss).to include("gitaly2 is behind by #{number_of_changes} changes or less, assigned host, unhealthy")
          expect(project_data_loss).to include('gitaly3, assigned host, unhealthy')
        end
      end
    end
  end
end
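Both the spec and the new PraefectManager helpers poll rather than sleep: Support::Retrier.retry_until(max_duration: 60) and the eventually_include(...).within(max_duration: 60) matcher keep re-evaluating a condition until it holds or the time budget lapses. As a rough mental model only (this is not the framework's actual implementation; the polling interval and timeout error are assumptions), such a helper behaves like:

# Conceptual sketch of a retry_until-style helper: re-run the block until
# it returns a truthy value or the time budget runs out. sleep_interval and
# the timeout error are assumptions, not the real QA framework behavior.
def retry_until(max_duration:, sleep_interval: 1)
  deadline = Time.now + max_duration
  loop do
    result = yield
    return result if result
    raise "condition not met within #{max_duration}s" if Time.now >= deadline

    sleep sleep_interval
  end
end

# Usage mirroring wait_for_project_synced_across_all_storages:
# retry_until(max_duration: 60) do
#   praefect_dataloss_information(project_id)
#     .include?('All repositories are fully available on all assigned storages!')
# end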