Commit 7d97102f authored by Mark Lapierre, committed by Lin Jen-Shin

Run tests in parallel via parallel_tests

Uses the parallel_tests gem to execute tests in multiple processes
simultaneously on the same machine.

Adds the `--parallel` CLI option that instructs the QA framework
to use the parallel_tests executable.

Tests need access to global state held in `Runtime::Scenario`, so when
`--parallel` is used, `Runtime::Scenario` is serialized to an environment
variable, passed through parallel_tests to each test process, and then
deserialized in `spec_helper`.
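
In outline, the round-trip works like this (a minimal sketch using the
names introduced in this commit):

    # Parent process (ParallelRunner): serialize the scenario attributes into
    # an environment variable that parallel_tests hands to every child process.
    env = { 'QA_RUNTIME_SCENARIO_ATTRIBUTES' => QA::Runtime::Scenario.attributes.to_json }

    # Child process (spec_helper): restore the attributes before any spec runs.
    if QA::Runtime::Env.runtime_scenario_attributes
      QA::Runtime::Scenario.from_env(QA::Runtime::Env.runtime_scenario_attributes)
    end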
parent ebcf92c5
--color
--format documentation
--format ParallelTests::RSpec::SummaryLogger --out tmp/spec_summary.log
--format ParallelTests::RSpec::RuntimeLogger --out tmp/parallel_runtime_rspec.log
--require spec_helper
source 'https://rubygems.org'
gem 'gitlab-qa'
gem 'pry-byebug', '~> 3.5.1', platform: :mri
gem 'capybara', '~> 2.16.1'
gem 'capybara-screenshot', '~> 1.0.18'
@@ -11,3 +12,4 @@ gem 'nokogiri', '~> 1.10.3'
gem 'rspec-retry', '~> 0.6.1'
gem 'faker', '~> 1.6', '>= 1.6.6'
gem 'knapsack', '~> 1.17'
gem 'parallel_tests', '~> 2.29'
@@ -35,6 +35,7 @@ GEM
faker (1.9.3)
i18n (>= 0.7)
ffi (1.9.25)
gitlab-qa (4.0.0)
http-cookie (1.0.3)
domain_name (~> 0.5)
i18n (0.9.1)
@@ -53,6 +54,9 @@ GEM
netrc (0.11.0)
nokogiri (1.10.3)
mini_portile2 (~> 2.4.0)
parallel (1.17.0)
parallel_tests (2.29.0)
parallel
pry (0.11.3)
coderay (~> 1.1.0)
method_source (~> 0.9.0)
@@ -104,8 +108,10 @@ DEPENDENCIES
capybara (~> 2.16.1)
capybara-screenshot (~> 1.0.18)
faker (~> 1.6, >= 1.6.6)
gitlab-qa
knapsack (~> 1.17)
nokogiri (~> 1.10.3)
parallel_tests (~> 2.29)
pry-byebug (~> 3.5.1)
rake (~> 12.3.0)
rspec (~> 3.7)
......
@@ -360,6 +360,7 @@ module QA
module Specs
autoload :Config, 'qa/specs/config'
autoload :Runner, 'qa/specs/runner'
autoload :ParallelRunner, 'qa/specs/parallel_runner'
module Helpers
autoload :Quarantine, 'qa/specs/helpers/quarantine'
......
@@ -13,6 +13,8 @@ module QA
NotRespondingError = Class.new(RuntimeError)
CAPYBARA_MAX_WAIT_TIME = 10
def initialize
self.class.configure!
end
@@ -43,6 +45,8 @@ module QA
end
end
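# parallel_tests sets TEST_ENV_NUMBER for each process ('' for the first,
# then '2', '3', ...), so every process gets its own Capybara server port.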
Capybara.server_port = 9887 + ENV['TEST_ENV_NUMBER'].to_i
return if Capybara.drivers.include?(:chrome)
Capybara.register_driver QA::Runtime::Env.browser do |app|
@@ -119,7 +123,7 @@ module QA
Capybara.configure do |config|
config.default_driver = QA::Runtime::Env.browser
config.javascript_driver = QA::Runtime::Env.browser
config.default_max_wait_time = CAPYBARA_MAX_WAIT_TIME
# https://github.com/mattheworiordan/capybara-screenshot/issues/164
config.save_path = ::File.expand_path('../../tmp', __dir__)
end
......
# frozen_string_literal: true
require 'gitlab/qa'
module QA
module Runtime
module Env
@@ -7,6 +9,8 @@ module QA
attr_writer :personal_access_token, :ldap_username, :ldap_password
ENV_VARIABLES = Gitlab::QA::Runtime::Env::ENV_VARIABLES
# The environment variables used to indicate if the environment under test
# supports the given feature
SUPPORTED_FEATURES = {
@@ -201,6 +205,10 @@ module QA
enabled?(ENV[SUPPORTED_FEATURES[feature]], default: true)
end
def runtime_scenario_attributes
ENV['QA_RUNTIME_SCENARIO_ATTRIBUTES']
end
private
def remote_grid_credentials
......
# frozen_string_literal: true
require 'json'
module QA
module Runtime
##
@@ -24,6 +26,10 @@ module QA
end
end
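# Restores attributes that ParallelRunner serialized to JSON and passed along
# in the QA_RUNTIME_SCENARIO_ATTRIBUTES environment variable.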
def from_env(var)
JSON.parse(var).each { |k, v| define(k, v) }
end
def method_missing(name, *)
raise ArgumentError, "Scenario attribute `#{name}` not defined!"
end
......
@@ -7,6 +7,7 @@ module QA
attribute :gitlab_address, '--address URL', 'Address of the instance to test'
attribute :enable_feature, '--enable-feature FEATURE_FLAG', 'Enable a feature before running tests'
attribute :parallel, '--parallel', 'Execute tests in parallel'
end
end
end
# frozen_string_literal: true
require 'open3'
module QA
module Specs
module ParallelRunner
module_function
def run(args)
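# The arguments are forwarded to parallel_tests as "<rspec options> -- <test paths>",
# so add a `--` separator before the first test path unless the caller already
# provided one.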
unless args.include?('--')
index = args.index { |opt| opt.include?('features') }
args.insert(index, '--') if index
end
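# Pass through any supported GitLab QA environment variables that are already
# set, so the child processes see the same configuration as the parent.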
env = {}
Runtime::Env::ENV_VARIABLES.each_key do |key|
env[key] = ENV[key] if ENV[key]
end
env['QA_RUNTIME_SCENARIO_ATTRIBUTES'] = Runtime::Scenario.attributes.to_json
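# Fetch a personal access token once in the parent process so the child
# processes can reuse it instead of each requesting their own.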
env['GITLAB_QA_ACCESS_TOKEN'] = Runtime::API::Client.new(:gitlab).personal_access_token unless env['GITLAB_QA_ACCESS_TOKEN']
cmd = "bundle exec parallel_test -t rspec --combine-stderr --serialize-stdout -- #{args.flatten.join(' ')}"
::Open3.popen2e(env, cmd) do |_, out, wait|
out.each { |line| puts line }
exit wait.value.exitstatus
end
end
end
end
end
# frozen_string_literal: true
require 'knapsack'
require 'rspec/core'
require 'rspec/expectations'
module QA
module Specs
@@ -17,46 +17,58 @@ module QA
@options = []
end
def paths_from_knapsack
allocator = Knapsack::AllocatorBuilder.new(Knapsack::Adapters::RSpecAdapter).allocator
QA::Runtime::Logger.info ''
QA::Runtime::Logger.info 'Report specs:'
QA::Runtime::Logger.info allocator.report_node_tests.join(', ')
QA::Runtime::Logger.info ''
QA::Runtime::Logger.info 'Leftover specs:'
QA::Runtime::Logger.info allocator.leftover_node_tests.join(', ')
QA::Runtime::Logger.info ''
['--', allocator.node_tests]
end
def rspec_tags
tags_for_rspec = []
if tags.any?
tags.each { |tag| tags_for_rspec.push(['--tag', tag.to_s]) }
else
tags_for_rspec.push(%w[--tag ~orchestrated]) unless (%w[-t --tag] & options).any?
end
tags_for_rspec.push(%w[--tag ~skip_signup_disabled]) if QA::Runtime::Env.signup_disabled?
QA::Runtime::Env.supported_features.each_key do |key|
args.push(["--tag", "~requires_#{key}"]) unless QA::Runtime::Env.can_test? key
tags_for_rspec.push(%W[--tag ~requires_#{key}]) unless QA::Runtime::Env.can_test? key
end
tags_for_rspec
end
def perform
args = []
args.push('--tty') if tty
args.push(rspec_tags)
args.push(options)
if Runtime::Env.knapsack?
args.push(paths_from_knapsack)
else
args.push(DEFAULT_TEST_PATH_ARGS) unless options.any? { |opt| opt =~ %r{/features/} }
end
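# With --parallel, hand the assembled RSpec arguments over to parallel_tests;
# otherwise run RSpec in-process as before.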
if Runtime::Scenario.attributes[:parallel]
ParallelRunner.run(args.flatten)
else
RSpec::Core::Runner.run(args.flatten, $stderr, $stdout).tap do |status|
abort if status.nonzero?
end
end
end
end
end
end
@@ -91,26 +91,26 @@ describe QA::Support::Page::Logging do
it 'logs has_element?' do
expect { subject.has_element?(:element) }
.to output(/has_element\? :element \(wait: #{QA::Runtime::Browser::CAPYBARA_MAX_WAIT_TIME}\) returned: true/).to_stdout_from_any_process
end
it 'logs has_element? with text' do
expect { subject.has_element?(:element, text: "some text") }
.to output(/has_element\? :element with text \"some text\" \(wait: #{QA::Runtime::Browser::CAPYBARA_MAX_WAIT_TIME}\) returned: true/).to_stdout_from_any_process
end
it 'logs has_no_element?' do
allow(page).to receive(:has_no_css?).and_return(true)
expect { subject.has_no_element?(:element) }
.to output(/has_no_element\? :element \(wait: #{QA::Runtime::Browser::CAPYBARA_MAX_WAIT_TIME}\) returned: true/).to_stdout_from_any_process
end
it 'logs has_no_element? with text' do
allow(page).to receive(:has_no_css?).and_return(true)
expect { subject.has_no_element?(:element, text: "more text") }
.to output(/has_no_element\? :element with text \"more text\" \(wait: #{QA::Runtime::Browser::CAPYBARA_MAX_WAIT_TIME}\) returned: true/).to_stdout_from_any_process
end
it 'logs has_text?' do
......
@@ -8,6 +8,10 @@ if ENV['CI'] && QA::Runtime::Env.knapsack? && !ENV['NO_KNAPSACK']
Knapsack::Adapters::RSpecAdapter.bind
end
QA::Runtime::Browser.configure!
QA::Runtime::Scenario.from_env(QA::Runtime::Env.runtime_scenario_attributes) if QA::Runtime::Env.runtime_scenario_attributes
%w[helpers shared_examples].each do |d|
Dir[::File.join(__dir__, d, '**', '*.rb')].each { |f| require f }
end
......
# frozen_string_literal: true
describe QA::Specs::ParallelRunner do
include Helpers::StubENV
before do
allow(QA::Runtime::Scenario).to receive(:attributes).and_return(parallel: true)
stub_env('GITLAB_QA_ACCESS_TOKEN', 'skip_token_creation')
end
it 'passes args to parallel_tests' do
expect_cli_arguments(['--tag', '~orchestrated', *QA::Specs::Runner::DEFAULT_TEST_PATH_ARGS])
subject.run(['--tag', '~orchestrated', *QA::Specs::Runner::DEFAULT_TEST_PATH_ARGS])
end
it 'passes a given test path to parallel_tests and adds a separator' do
expect_cli_arguments(%w[-- qa/specs/features/foo])
subject.run(%w[qa/specs/features/foo])
end
it 'passes tags and test paths to parallel_tests and adds a separator' do
expect_cli_arguments(%w[--tag smoke -- qa/specs/features/foo qa/specs/features/bar])
subject.run(%w[--tag smoke qa/specs/features/foo qa/specs/features/bar])
end
it 'passes tags and test paths with separators to parallel_tests' do
expect_cli_arguments(%w[-- --tag smoke -- qa/specs/features/foo qa/specs/features/bar])
subject.run(%w[-- --tag smoke -- qa/specs/features/foo qa/specs/features/bar])
end
it 'passes supported environment variables' do
# Test only env vars starting with GITLAB because some of the others
# affect how the runner behaves, and we're not concerned with those
# behaviors in this test
gitlab_env_vars = QA::Runtime::Env::ENV_VARIABLES.select { |k, _| k.start_with?('GITLAB') }
gitlab_env_vars.each do |k, v|
stub_env(k, v)
end
gitlab_env_vars['QA_RUNTIME_SCENARIO_ATTRIBUTES'] = '{"parallel":true}'
expect_cli_arguments([], gitlab_env_vars)
subject.run([])
end
def expect_cli_arguments(arguments, env = { 'QA_RUNTIME_SCENARIO_ATTRIBUTES' => '{"parallel":true}' })
cmd = "bundle exec parallel_test -t rspec --combine-stderr --serialize-stdout -- #{arguments.join(' ')}"
expect(Open3).to receive(:popen2e)
.with(hash_including(env), cmd)
.and_return(0)
end
end
@@ -58,11 +58,11 @@ describe QA::Specs::Runner do
end
end
context 'when "-- qa/specs/features/foo" is set as options' do
subject { described_class.new.tap { |runner| runner.options = %w[-- qa/specs/features/foo] } }
context 'when "--tag smoke" and "qa/specs/features/foo" are set as options' do
subject { described_class.new.tap { |runner| runner.options = %w[--tag smoke qa/specs/features/foo] } }
it 'focuses on the given tag and includes the path without excluding the orchestrated tag' do
expect_rspec_runner_arguments(['--tag', 'smoke', 'qa/specs/features/foo'])
subject.perform
end
......