Commit 778772c8 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent a552864a
# frozen_string_literal: true
module Ci
module PipelineProcessing
class LegacyProcessingService
include Gitlab::Utils::StrongMemoize
attr_reader :pipeline
def initialize(pipeline)
@pipeline = pipeline
end
def execute(trigger_build_ids = nil)
success = process_stages_without_needs
# we evaluate dependent needs,
# only when another job has finished
success = process_builds_with_needs(trigger_build_ids) || success
@pipeline.update_status
success
end
private
def process_stages_without_needs
stage_indexes_of_created_processables_without_needs.flat_map do |index|
process_stage_without_needs(index)
end.any?
end
def process_stage_without_needs(index)
current_status = status_for_prior_stages(index)
return unless HasStatus::COMPLETED_STATUSES.include?(current_status)
created_processables_in_stage_without_needs(index).find_each.select do |build|
process_build(build, current_status)
end.any?
end
def process_builds_with_needs(trigger_build_ids)
return false unless trigger_build_ids.present?
return false unless Feature.enabled?(:ci_dag_support, project, default_enabled: true)
# we find processables that are dependent:
# 1. on one of the builds that have just finished,
trigger_build_names = pipeline.processables.latest
.for_ids(trigger_build_ids).names
# 2. and that do not depend on builds that are not yet complete
incomplete_build_names = pipeline.processables.latest
.incomplete.names
# Each found processable is guaranteed here to have a completed status
created_processables
.with_needs(trigger_build_names)
.without_needs(incomplete_build_names)
.find_each
.map(&method(:process_build_with_needs))
.any?
end
def process_build_with_needs(build)
current_status = status_for_build_needs(build.needs.map(&:name))
return unless HasStatus::COMPLETED_STATUSES.include?(current_status)
process_build(build, current_status)
end
def process_build(build, current_status)
Gitlab::OptimisticLocking.retry_lock(build) do |subject|
Ci::ProcessBuildService.new(project, subject.user)
.execute(subject, current_status)
end
end
def status_for_prior_stages(index)
pipeline.processables.status_for_prior_stages(index)
end
def status_for_build_needs(needs)
pipeline.processables.status_for_names(needs)
end
# rubocop: disable CodeReuse/ActiveRecord
def stage_indexes_of_created_processables_without_needs
created_processables_without_needs.order(:stage_idx)
.pluck(Arel.sql('DISTINCT stage_idx'))
end
# rubocop: enable CodeReuse/ActiveRecord
def created_processables_in_stage_without_needs(index)
created_processables_without_needs
.with_preloads
.for_stage(index)
end
def created_processables_without_needs
if Feature.enabled?(:ci_dag_support, project, default_enabled: true)
pipeline.processables.created.without_needs
else
pipeline.processables.created
end
end
def created_processables
pipeline.processables.created
end
def project
pipeline.project
end
end
end
end
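For reference, a minimal usage sketch of the new service, mirroring the call site that `Ci::ProcessPipelineService#execute` adopts later in this diff (`pipeline` is assumed to be any `Ci::Pipeline`; `trigger_build_ids` may be `nil` on the first processing pass):

```ruby
# Minimal sketch, not part of the commit itself.
success = Ci::PipelineProcessing::LegacyProcessingService
  .new(pipeline)
  .execute(trigger_build_ids)

# `success` is truthy when at least one job was moved forward in this pass.
```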
...@@ -2,8 +2,6 @@
module Ci
class ProcessPipelineService
include Gitlab::Utils::StrongMemoize
attr_reader :pipeline
def initialize(pipeline)
...@@ -13,105 +11,13 @@ module Ci
def execute(trigger_build_ids = nil)
update_retried
success = process_stages_without_needs
# we evaluate dependent needs,
# only when the another job has finished
success = process_builds_with_needs(trigger_build_ids) || success
@pipeline.update_status
success
Ci::PipelineProcessing::LegacyProcessingService
.new(pipeline)
.execute(trigger_build_ids)
end
private
def process_stages_without_needs
stage_indexes_of_created_processables_without_needs.flat_map do |index|
process_stage_without_needs(index)
end.any?
end
def process_stage_without_needs(index)
current_status = status_for_prior_stages(index)
return unless HasStatus::COMPLETED_STATUSES.include?(current_status)
created_processables_in_stage_without_needs(index).find_each.select do |build|
process_build(build, current_status)
end.any?
end
def process_builds_with_needs(trigger_build_ids)
return false unless trigger_build_ids.present?
return false unless Feature.enabled?(:ci_dag_support, project, default_enabled: true)
# we find processables that are dependent:
# 1. because of current dependency,
trigger_build_names = pipeline.processables.latest
.for_ids(trigger_build_ids).names
# 2. does not have builds that not yet complete
incomplete_build_names = pipeline.processables.latest
.incomplete.names
# Each found processable is guaranteed here to have completed status
created_processables
.with_needs(trigger_build_names)
.without_needs(incomplete_build_names)
.find_each
.map(&method(:process_build_with_needs))
.any?
end
def process_build_with_needs(build)
current_status = status_for_build_needs(build.needs.map(&:name))
return unless HasStatus::COMPLETED_STATUSES.include?(current_status)
process_build(build, current_status)
end
def process_build(build, current_status)
Gitlab::OptimisticLocking.retry_lock(build) do |subject|
Ci::ProcessBuildService.new(project, build.user)
.execute(subject, current_status)
end
end
def status_for_prior_stages(index)
pipeline.processables.status_for_prior_stages(index)
end
def status_for_build_needs(needs)
pipeline.processables.status_for_names(needs)
end
# rubocop: disable CodeReuse/ActiveRecord
def stage_indexes_of_created_processables_without_needs
created_processables_without_needs.order(:stage_idx)
.pluck(Arel.sql('DISTINCT stage_idx'))
end
# rubocop: enable CodeReuse/ActiveRecord
def created_processables_in_stage_without_needs(index)
created_processables_without_needs
.with_preloads
.for_stage(index)
end
def created_processables_without_needs
if Feature.enabled?(:ci_dag_support, project, default_enabled: true)
pipeline.processables.created.without_needs
else
pipeline.processables.created
end
end
def created_processables
pipeline.processables.created
end
# This method is for compatibility and data consistency and should be removed with 9.3 version of GitLab
# This replicates what is db/post_migrate/20170416103934_upate_retried_for_ci_build.rb
# and ensures that functionality will not be broken before migration is run
...@@ -131,9 +37,5 @@
.update_all(retried: true) if latest_statuses.any?
end
# rubocop: enable CodeReuse/ActiveRecord
def project
pipeline.project
end
end
end
...@@ -207,6 +207,21 @@ To use a custom Gitaly repository in CI, for instance if you want your
GitLab fork to always use your own Gitaly fork, set `GITALY_REPO_URL`
as a [CI environment variable](../ci/variables/README.md#gitlab-cicd-environment-variables).
### Use a locally modified version of Gitaly RPC client
If you are making changes to the RPC client, such as adding a new endpoint or adding a new
parameter to an existing endpoint, follow the guide for
[Gitaly proto](https://gitlab.com/gitlab-org/gitaly/blob/master/proto/README.md). After pushing
the branch with the changes (`new-feature-branch`, for example):
1. Change the `gitaly` line in the Rails' `Gemfile` to:
```ruby
gem 'gitaly', git: 'https://gitlab.com/gitlab-org/gitaly.git', branch: 'new-feature-branch'
```
1. Run `bundle install` to use the modified RPC client.
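As an optional sanity check (not part of the guide above), `bundle install` records the git source in `Gemfile.lock`; its `GIT` entry should now reference your branch, roughly:

```plaintext
GIT
  remote: https://gitlab.com/gitlab-org/gitaly.git
  revision: <sha of the branch head>
  branch: new-feature-branch
  specs:
    gitaly (x.y.z)
```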
---
[Return to Development documentation](README.md)
...
...@@ -22,8 +22,8 @@
"prettier-staged-save": "node ./scripts/frontend/prettier.js save",
"prettier-all": "node ./scripts/frontend/prettier.js check-all",
"prettier-all-save": "node ./scripts/frontend/prettier.js save-all",
"stylelint": "node node_modules/stylelint/bin/stylelint.js app/assets/stylesheets/**/*.* ee/app/assets/stylesheets/**/*.* !**/vendors/**",
"stylelint": "yarn stylelint-file app/assets/stylesheets/**/*.* ee/app/assets/stylesheets/**/*.* !**/vendors/**",
"stylelint-file": "node node_modules/stylelint/bin/stylelint.js",
"stylelint-file": "BROWSERSLIST_IGNORE_OLD_DATA=true node node_modules/stylelint/bin/stylelint.js",
"stylelint-create-utility-map": "node scripts/frontend/stylelint/stylelint-utility-map.js",
"test": "node scripts/frontend/test",
"webpack": "NODE_OPTIONS=\"--max-old-space-size=3584\" webpack --config config/webpack.config.js",
...@@ -39,8 +39,8 @@
"@babel/plugin-syntax-dynamic-import": "^7.2.0",
"@babel/plugin-syntax-import-meta": "^7.2.0",
"@babel/preset-env": "^7.6.2",
"@gitlab/svgs": "^1.88.0",
"@gitlab/svgs": "^1.89.0",
"@gitlab/ui": "8.10.0",
"@gitlab/ui": "8.15.0",
"@gitlab/visual-review-tools": "1.5.1",
"@sentry/browser": "^5.10.2",
"@sourcegraph/code-host-integration": "^0.0.18",
...
...@@ -2,7 +2,7 @@
module QA
context 'Non-devops' do
describe 'Performance bar display', :requires_admin, quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/196141' do
describe 'Performance bar display', :requires_admin do
context 'when logged in as an admin user' do
# 4 metrics: pg, gitaly, redis, total
let(:metrics_count) { 4 }
...
...@@ -2002,7 +2002,7 @@
},
{
"id": 31,
"title": "Libero nam magnam incidunt eaque placeat error et.",
"title": "issue_with_timelogs",
"author_id": 16,
"project_id": 5,
"created_at": "2016-06-14T15:02:07.280Z",
...@@ -2016,6 +2016,16 @@
"confidential": false,
"due_date": null,
"moved_to_id": null,
"timelogs": [
{
"id": 1,
"time_spent": 72000,
"user_id": 1,
"created_at": "2019-12-27T09:15:22.302Z",
"updated_at": "2019-12-27T09:15:22.302Z",
"spent_at": "2019-12-27T00:00:00.000Z"
}
],
"notes": [
{
"id": 423,
...@@ -2347,7 +2357,30 @@
]
}
],
"releases": [],
"releases": [
{
"id": 1,
"tag": "release-1.1",
"description": "Some release notes",
"project_id": 5,
"created_at": "2019-12-26T10:17:14.621Z",
"updated_at": "2019-12-26T10:17:14.621Z",
"author_id": 1,
"name": "release-1.1",
"sha": "901de3a8bd5573f4a049b1457d28bc1592ba6bf9",
"released_at": "2019-12-26T10:17:14.615Z",
"links": [
{
"id": 1,
"release_id" : 1,
"url": "http://localhost/namespace6/project6/-/jobs/140463678/artifacts/download",
"name": "release-1.1.dmg",
"created_at": "2019-12-26T10:17:14.621Z",
"updated_at": "2019-12-26T10:17:14.621Z"
}
]
}
]
"project_members": [
{
"id": 36,
...@@ -6816,6 +6849,40 @@
"duration": null,
"stages": [
]
},
{
"id": 42,
"project_id": 5,
"ref": "master",
"sha": "ce84140e8b878ce6e7c4d298c7202ff38170e3ac",
"before_sha": null,
"push_data": null,
"created_at": "2016-03-22T15:20:35.763Z",
"updated_at": "2016-03-22T15:20:35.763Z",
"tag": false,
"yaml_errors": null,
"committed_at": null,
"status": "failed",
"started_at": null,
"finished_at": null,
"duration": null,
"stages": [
],
"source": "external_pull_request_event",
"external_pull_request":
{
"id": 3,
"pull_request_iid": 4,
"source_branch": "feature",
"target_branch": "master",
"source_repository": "the-repository",
"target_repository": "the-repository",
"source_sha": "ce84140e8b878ce6e7c4d298c7202ff38170e3ac",
"target_sha": "a09386439ca39abe575675ffd4b89ae824fec22f",
"status": "open",
"created_at": "2016-03-22T15:20:35.763Z",
"updated_at": "2016-03-22T15:20:35.763Z"
}
}
],
"triggers": [
...@@ -6835,6 +6902,21 @@
"updated_at": "2017-01-16T15:25:29.637Z"
}
],
"pipeline_schedules": [
{
"id": 1,
"description": "Schedule Description",
"ref": "master",
"cron": "0 4 * * 0",
"cron_timezone": "UTC",
"next_run_at": "2019-12-29T04:19:00.000Z",
"project_id": 5,
"owner_id": 1,
"active": true,
"created_at": "2019-12-26T10:14:57.778Z",
"updated_at": "2019-12-26T10:14:57.778Z"
}
],
"container_expiration_policy": {
"created_at": "2019-12-13 13:45:04 UTC",
"updated_at": "2019-12-13 13:45:04 UTC",
...@@ -7354,6 +7436,33 @@
"ci_cd_settings": {
"group_runners_enabled": false
},
"auto_devops": {
"id": 1,
"created_at": "2017-10-19T15:36:23.466Z",
"updated_at": "2017-10-19T15:36:23.466Z",
"enabled": null,
"deploy_strategy": "continuous"
},
"error_tracking_setting": {
"api_url": "https://gitlab.example.com/api/0/projects/sentry-org/sentry-project",
"project_name": "Sentry Project",
"organization_name": "Sentry Org"
},
"external_pull_requests": [
{
"id": 3,
"pull_request_iid": 4,
"source_branch": "feature",
"target_branch": "master",
"source_repository": "the-repository",
"target_repository": "the-repository",
"source_sha": "ce84140e8b878ce6e7c4d298c7202ff38170e3ac",
"target_sha": "a09386439ca39abe575675ffd4b89ae824fec22f",
"status": "open",
"created_at": "2019-12-24T14:04:50.053Z",
"updated_at": "2019-12-24T14:05:18.138Z"
}
],
"boards": [
{
"id": 29,
...
...@@ -116,6 +116,15 @@ describe Gitlab::ImportExport::ProjectTreeRestorer do
expect(Issue.find_by(title: 'Issue without assignees').assignees).to be_empty
end
it 'restores timelogs for issues' do
timelog = Issue.find_by(title: 'issue_with_timelogs').timelogs.last
aggregate_failures do
expect(timelog.time_spent).to eq(72000)
expect(timelog.spent_at).to eq("2019-12-27T00:00:00.000Z")
end
end
it 'contains the merge access levels on a protected branch' do
expect(ProtectedBranch.first.merge_access_levels).not_to be_empty
end
...@@ -229,6 +238,11 @@ describe Gitlab::ImportExport::ProjectTreeRestorer do
expect(@project.ci_cd_settings.group_runners_enabled?).to eq(false)
end
it 'restores `auto_devops`' do
expect(@project.auto_devops_enabled?).to eq(true)
expect(@project.auto_devops.deploy_strategy).to eq('continuous')
end
it 'restores the correct service' do
expect(CustomIssueTrackerService.first).not_to be_nil
end
...@@ -268,6 +282,55 @@ describe Gitlab::ImportExport::ProjectTreeRestorer do
end
end
it 'restores error_tracking_setting' do
setting = @project.error_tracking_setting
aggregate_failures do
expect(setting.api_url).to eq("https://gitlab.example.com/api/0/projects/sentry-org/sentry-project")
expect(setting.project_name).to eq("Sentry Project")
expect(setting.organization_name).to eq("Sentry Org")
end
end
it 'restores external pull requests' do
external_pr = @project.external_pull_requests.last
aggregate_failures do
expect(external_pr.pull_request_iid).to eq(4)
expect(external_pr.source_branch).to eq("feature")
expect(external_pr.target_branch).to eq("master")
expect(external_pr.status).to eq("open")
end
end
it 'restores pipeline schedules' do
pipeline_schedule = @project.pipeline_schedules.last
aggregate_failures do
expect(pipeline_schedule.description).to eq('Schedule Description')
expect(pipeline_schedule.ref).to eq('master')
expect(pipeline_schedule.cron).to eq('0 4 * * 0')
expect(pipeline_schedule.cron_timezone).to eq('UTC')
expect(pipeline_schedule.active).to eq(true)
end
end
it 'restores releases with links' do
release = @project.releases.last
link = release.links.last
aggregate_failures do
expect(release.tag).to eq('release-1.1')
expect(release.description).to eq('Some release notes')
expect(release.name).to eq('release-1.1')
expect(release.sha).to eq('901de3a8bd5573f4a049b1457d28bc1592ba6bf9')
expect(release.released_at).to eq('2019-12-26T10:17:14.615Z')
expect(link.url).to eq('http://localhost/namespace6/project6/-/jobs/140463678/artifacts/download')
expect(link.name).to eq('release-1.1.dmg')
end
end
context 'Merge requests' do
it 'always has the new project as a target' do
expect(MergeRequest.find_by_title('MR1').target_project).to eq(@project)
...@@ -321,9 +384,9 @@ describe Gitlab::ImportExport::ProjectTreeRestorer do
end
it 'has the correct number of pipelines and statuses' do
expect(@project.ci_pipelines.size).to eq(6)
expect(@project.ci_pipelines.size).to eq(7)
@project.ci_pipelines.order(:id).zip([2, 2, 2, 2, 2, 0])
@project.ci_pipelines.order(:id).zip([2, 2, 2, 2, 2, 0, 0])
.each do |(pipeline, expected_status_size)|
expect(pipeline.statuses.size).to eq(expected_status_size)
end
...@@ -332,7 +395,7 @@ describe Gitlab::ImportExport::ProjectTreeRestorer do
context 'when restoring hierarchy of pipeline, stages and jobs' do
it 'restores pipelines' do
expect(Ci::Pipeline.all.count).to be 6
expect(Ci::Pipeline.all.count).to be 7
end
it 'restores pipeline stages' do
...@@ -358,6 +421,12 @@ describe Gitlab::ImportExport::ProjectTreeRestorer do
it 'restores a Hash for CommitStatus options' do
expect(CommitStatus.all.map(&:options).compact).to all(be_a(Hash))
end
it 'restores external pull request for the restored pipeline' do
pipeline_with_external_pr = @project.ci_pipelines.order(:id).last
expect(pipeline_with_external_pr.external_pull_request).to be_persisted
end
end
end
end
...
# frozen_string_literal: true
require 'spec_helper'
require_relative 'shared_processing_service.rb'
describe Ci::PipelineProcessing::LegacyProcessingService do
it_behaves_like 'Pipeline Processing Service'
end
# frozen_string_literal: true
shared_examples 'Pipeline Processing Service' do
let(:user) { create(:user) }
let(:project) { create(:project) }
let(:pipeline) do
create(:ci_empty_pipeline, ref: 'master', project: project)
end
before do
stub_ci_pipeline_to_return_yaml_file
stub_not_protect_default_branch
project.add_developer(user)
end
context 'when simple pipeline is defined' do
before do
create_build('linux', stage_idx: 0)
create_build('mac', stage_idx: 0)
create_build('rspec', stage_idx: 1)
create_build('rubocop', stage_idx: 1)
create_build('deploy', stage_idx: 2)
end
it 'processes a pipeline', :sidekiq_inline do
expect(process_pipeline).to be_truthy
succeed_pending
expect(builds.success.count).to eq(2)
succeed_pending
expect(builds.success.count).to eq(4)
succeed_pending
expect(builds.success.count).to eq(5)
end
it 'does not process pipeline if existing stage is running' do
expect(process_pipeline).to be_truthy
expect(builds.pending.count).to eq(2)
expect(process_pipeline).to be_falsey
expect(builds.pending.count).to eq(2)
end
end
context 'custom stage with first job allowed to fail' do
before do
create_build('clean_job', stage_idx: 0, allow_failure: true)
create_build('test_job', stage_idx: 1, allow_failure: true)
end
it 'automatically triggers a next stage when build finishes', :sidekiq_inline do
expect(process_pipeline).to be_truthy
expect(builds_statuses).to eq ['pending']
fail_running_or_pending
expect(builds_statuses).to eq %w(failed pending)
fail_running_or_pending
expect(pipeline.reload).to be_success
end
end
context 'when optional manual actions are defined', :sidekiq_inline do
before do
create_build('build', stage_idx: 0)
create_build('test', stage_idx: 1)
create_build('test_failure', stage_idx: 2, when: 'on_failure')
create_build('deploy', stage_idx: 3)
create_build('production', stage_idx: 3, when: 'manual', allow_failure: true)
create_build('cleanup', stage_idx: 4, when: 'always')
create_build('clear:cache', stage_idx: 4, when: 'manual', allow_failure: true)
end
context 'when builds are successful' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test deploy production)
expect(builds_statuses).to eq %w(success success pending manual)
succeed_running_or_pending
expect(builds_names).to eq %w(build test deploy production cleanup clear:cache)
expect(builds_statuses).to eq %w(success success success manual pending manual)
succeed_running_or_pending
expect(builds_statuses).to eq %w(success success success manual success manual)
expect(pipeline.reload.status).to eq 'success'
end
end
context 'when test job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
fail_running_or_pending
expect(builds_names).to eq %w(build test test_failure)
expect(builds_statuses).to eq %w(success failed pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test test_failure cleanup)
expect(builds_statuses).to eq %w(success failed success pending)
succeed_running_or_pending
expect(builds_statuses).to eq %w(success failed success success)
expect(pipeline.reload.status).to eq 'failed'
end
end
context 'when test and test_failure jobs fail' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
fail_running_or_pending
expect(builds_names).to eq %w(build test test_failure)
expect(builds_statuses).to eq %w(success failed pending)
fail_running_or_pending
expect(builds_names).to eq %w(build test test_failure cleanup)
expect(builds_statuses).to eq %w(success failed failed pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test test_failure cleanup)
expect(builds_statuses).to eq %w(success failed failed success)
expect(pipeline.reload.status).to eq('failed')
end
end
context 'when deploy job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test deploy production)
expect(builds_statuses).to eq %w(success success pending manual)
fail_running_or_pending
expect(builds_names).to eq %w(build test deploy production cleanup)
expect(builds_statuses).to eq %w(success success failed manual pending)
succeed_running_or_pending
expect(builds_statuses).to eq %w(success success failed manual success)
expect(pipeline.reload).to be_failed
end
end
context 'when build is canceled in the second stage' do
it 'does not schedule builds after build has been canceled' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds.running_or_pending).not_to be_empty
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
cancel_running_or_pending
expect(builds.running_or_pending).to be_empty
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success canceled]
expect(pipeline.reload).to be_canceled
end
end
context 'when listing optional manual actions' do
it 'returns only for skipped builds' do
# currently all builds are created
expect(process_pipeline).to be_truthy
expect(manual_actions).to be_empty
# succeed stage build
succeed_running_or_pending
expect(manual_actions).to be_empty
# succeed stage test
succeed_running_or_pending
expect(manual_actions).to be_one # production
# succeed stage deploy
succeed_running_or_pending
expect(manual_actions).to be_many # production and clear cache
end
end
end
context 'when delayed jobs are defined', :sidekiq_inline do
context 'when the scenario is a timed incremental rollout' do
before do
create_build('build', stage_idx: 0)
create_build('rollout10%', **delayed_options, stage_idx: 1)
create_build('rollout100%', **delayed_options, stage_idx: 2)
create_build('cleanup', stage_idx: 3)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
context 'when builds are successful' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('rollout10%')
end
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('rollout100%')
end
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'success' })
expect(pipeline.reload.status).to eq 'success'
end
end
context 'when build job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'build': 'failed' })
expect(pipeline.reload.status).to eq 'failed'
end
end
context 'when rollout 10% is unscheduled' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
unschedule
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'manual' })
expect(pipeline.reload.status).to eq 'manual'
end
context 'when user plays rollout 10%' do
it 'schedules rollout100%' do
process_pipeline
succeed_pending
unschedule
play_manual_action('rollout10%')
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
expect(pipeline.reload.status).to eq 'scheduled'
end
end
end
context 'when rollout 10% fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('rollout10%')
end
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'failed' })
expect(pipeline.reload.status).to eq 'failed'
end
context 'when user retries rollout 10%' do
it 'does not schedule rollout10% again' do
process_pipeline
succeed_pending
enqueue_scheduled('rollout10%')
fail_running_or_pending
retry_build('rollout10%')
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
expect(pipeline.reload.status).to eq 'running'
end
end
end
context 'when rollout 10% is played immediately' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
play_manual_action('rollout10%')
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
expect(pipeline.reload.status).to eq 'running'
end
end
end
context 'when only one scheduled job exists in a pipeline' do
before do
create_build('delayed', **delayed_options, stage_idx: 0)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
expect(pipeline.reload.status).to eq 'scheduled'
end
end
context 'when there are two delayed jobs in a stage' do
before do
create_build('delayed1', **delayed_options, stage_idx: 0)
create_build('delayed2', **delayed_options, stage_idx: 0)
create_build('job', stage_idx: 1)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'blocks the stage until all scheduled jobs finished' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed1': 'scheduled', 'delayed2': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('delayed1')
end
expect(builds_names_and_statuses).to eq({ 'delayed1': 'pending', 'delayed2': 'scheduled' })
expect(pipeline.reload.status).to eq 'running'
end
end
context 'when a delayed job is allowed to fail' do
before do
create_build('delayed', **delayed_options, allow_failure: true, stage_idx: 0)
create_build('job', stage_idx: 1)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'blocks the stage and continues after it failed' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('delayed')
end
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'delayed': 'failed', 'job': 'pending' })
expect(pipeline.reload.status).to eq 'pending'
end
end
end
context 'when an exception is raised during a persistent ref creation' do
before do
successful_build('test', stage_idx: 0)
allow_next_instance_of(Ci::PersistentRef) do |instance|
allow(instance).to receive(:delete_refs) { raise ArgumentError }
end
end
it 'processes the pipeline' do
expect { process_pipeline }.not_to raise_error
end
end
context 'when there are manual actions in earlier stages' do
context 'when first stage has only optional manual actions' do
before do
create_build('build', stage_idx: 0, when: 'manual', allow_failure: true)
create_build('check', stage_idx: 1)
create_build('test', stage_idx: 2)
process_pipeline
end
it 'starts from the second stage' do
expect(all_builds_statuses).to eq %w[manual pending created]
end
end
context 'when second stage has only optional manual actions' do
before do
create_build('check', stage_idx: 0)
create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
create_build('test', stage_idx: 2)
process_pipeline
end
it 'skips second stage and continues on third stage', :sidekiq_inline do
expect(all_builds_statuses).to eq(%w[pending created created])
builds.first.success
expect(all_builds_statuses).to eq(%w[success manual pending])
end
end
end
context 'when there are only manual actions in stages' do
before do
create_build('image', stage_idx: 0, when: 'manual', allow_failure: true)
create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
create_build('deploy', stage_idx: 2, when: 'manual')
create_build('check', stage_idx: 3)
process_pipeline
end
it 'processes all jobs until blocking actions encountered' do
expect(all_builds_statuses).to eq(%w[manual manual manual created])
expect(all_builds_names).to eq(%w[image build deploy check])
expect(pipeline.reload).to be_blocked
end
end
context 'when there is only one manual action' do
before do
create_build('deploy', stage_idx: 0, when: 'manual', allow_failure: true)
process_pipeline
end
it 'skips the pipeline' do
expect(pipeline.reload).to be_skipped
end
context 'when the action was played' do
before do
play_manual_action('deploy')
end
it 'queues the action and pipeline', :sidekiq_inline do
expect(all_builds_statuses).to eq(%w[pending])
expect(pipeline.reload).to be_pending
end
end
end
context 'when blocking manual actions are defined', :sidekiq_inline do
before do
create_build('code:test', stage_idx: 0)
create_build('staging:deploy', stage_idx: 1, when: 'manual')
create_build('staging:test', stage_idx: 2, when: 'on_success')
create_build('production:deploy', stage_idx: 3, when: 'manual')
create_build('production:test', stage_idx: 4, when: 'always')
end
context 'when first stage succeeds' do
it 'blocks pipeline on stage with first manual action' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
expect(pipeline.reload.status).to eq 'pending'
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy]
expect(builds_statuses).to eq %w[success manual]
expect(pipeline.reload).to be_manual
end
end
context 'when first stage fails' do
it 'does not take blocking action into account' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
expect(pipeline.reload.status).to eq 'pending'
fail_running_or_pending
expect(builds_names).to eq %w[code:test production:test]
expect(builds_statuses).to eq %w[failed pending]
succeed_running_or_pending
expect(builds_statuses).to eq %w[failed success]
expect(pipeline.reload).to be_failed
end
end
context 'when pipeline is promoted sequentially up to the end' do
before do
# Users need ability to merge into a branch in order to trigger
# protected manual actions.
#
create(:protected_branch, :developers_can_merge,
name: 'master', project: project)
end
it 'properly processes entire pipeline' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy]
expect(builds_statuses).to eq %w[success manual]
expect(pipeline.reload).to be_manual
play_manual_action('staging:deploy')
expect(builds_statuses).to eq %w[success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test]
expect(builds_statuses).to eq %w[success success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy]
expect(builds_statuses).to eq %w[success success success manual]
expect(pipeline.reload).to be_manual
expect(pipeline.reload).to be_blocked
expect(pipeline.reload).not_to be_active
expect(pipeline.reload).not_to be_complete
play_manual_action('production:deploy')
expect(builds_statuses).to eq %w[success success success pending]
expect(pipeline.reload).to be_running
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy production:test]
expect(builds_statuses).to eq %w[success success success success pending]
expect(pipeline.reload).to be_running
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy production:test]
expect(builds_statuses).to eq %w[success success success success success]
expect(pipeline.reload).to be_success
end
end
end
context 'when second stage has only on_failure jobs', :sidekiq_inline do
before do
create_build('check', stage_idx: 0)
create_build('build', stage_idx: 1, when: 'on_failure')
create_build('test', stage_idx: 2)
process_pipeline
end
it 'skips second stage and continues on third stage' do
expect(all_builds_statuses).to eq(%w[pending created created])
builds.first.success
expect(all_builds_statuses).to eq(%w[success skipped pending])
end
end
context 'when failed build in the middle stage is retried', :sidekiq_inline do
context 'when failed build is the only unsuccessful build in the stage' do
before do
create_build('build:1', stage_idx: 0)
create_build('build:2', stage_idx: 0)
create_build('test:1', stage_idx: 1)
create_build('test:2', stage_idx: 1)
create_build('deploy:1', stage_idx: 2)
create_build('deploy:2', stage_idx: 2)
end
it 'does trigger builds in the next stage' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build:1', 'build:2']
succeed_running_or_pending
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
pipeline.builds.find_by(name: 'test:1').success!
pipeline.builds.find_by(name: 'test:2').drop!
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
Ci::Build.retry(pipeline.builds.find_by(name: 'test:2'), user).reset.success!
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2',
'test:2', 'deploy:1', 'deploy:2']
end
end
end
context 'when builds with auto-retries are configured', :sidekiq_inline do
before do
create_build('build:1', stage_idx: 0, user: user, options: { script: 'aa', retry: 2 })
create_build('test:1', stage_idx: 1, user: user, when: :on_failure)
create_build('test:2', stage_idx: 1, user: user, options: { script: 'aa', retry: 1 })
end
it 'automatically retries builds in a valid order' do
expect(process_pipeline).to be_truthy
fail_running_or_pending
expect(builds_names).to eq %w[build:1 build:1]
expect(builds_statuses).to eq %w[failed pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build:1 build:1 test:2]
expect(builds_statuses).to eq %w[failed success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build:1 build:1 test:2]
expect(builds_statuses).to eq %w[failed success success]
expect(pipeline.reload).to be_success
end
end
context 'when pipeline with needs is created', :sidekiq_inline do
let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
let!(:mac_build) { create_build('mac:build', stage: 'build', stage_idx: 0) }
let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1) }
let!(:linux_rubocop) { create_build('linux:rubocop', stage: 'test', stage_idx: 1) }
let!(:mac_rspec) { create_build('mac:rspec', stage: 'test', stage_idx: 1) }
let!(:mac_rubocop) { create_build('mac:rubocop', stage: 'test', stage_idx: 1) }
let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2) }
let!(:linux_rspec_on_build) { create(:ci_build_need, build: linux_rspec, name: 'linux:build') }
let!(:linux_rubocop_on_build) { create(:ci_build_need, build: linux_rubocop, name: 'linux:build') }
let!(:mac_rspec_on_build) { create(:ci_build_need, build: mac_rspec, name: 'mac:build') }
let!(:mac_rubocop_on_build) { create(:ci_build_need, build: mac_rubocop, name: 'mac:build') }
it 'when linux:* finishes first it runs it out of order' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w(pending created created))
expect(builds.pending).to contain_exactly(linux_build, mac_build)
# we follow the single path of linux
linux_build.reset.success!
expect(stages).to eq(%w(running pending created))
expect(builds.success).to contain_exactly(linux_build)
expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
linux_rspec.reset.success!
expect(stages).to eq(%w(running running created))
expect(builds.success).to contain_exactly(linux_build, linux_rspec)
expect(builds.pending).to contain_exactly(mac_build, linux_rubocop)
linux_rubocop.reset.success!
expect(stages).to eq(%w(running running created))
expect(builds.success).to contain_exactly(linux_build, linux_rspec, linux_rubocop)
expect(builds.pending).to contain_exactly(mac_build)
mac_build.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w(success success pending))
expect(builds.success).to contain_exactly(
linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
expect(builds.pending).to contain_exactly(deploy)
end
context 'when feature ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'when linux:build finishes first it follows stages' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w(pending created created))
expect(builds.pending).to contain_exactly(linux_build, mac_build)
# we follow the single path of linux
linux_build.reset.success!
expect(stages).to eq(%w(running created created))
expect(builds.success).to contain_exactly(linux_build)
expect(builds.pending).to contain_exactly(mac_build)
mac_build.reset.success!
expect(stages).to eq(%w(success pending created))
expect(builds.success).to contain_exactly(linux_build, mac_build)
expect(builds.pending).to contain_exactly(
linux_rspec, linux_rubocop, mac_rspec, mac_rubocop)
linux_rspec.reset.success!
linux_rubocop.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w(success success pending))
expect(builds.success).to contain_exactly(
linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
expect(builds.pending).to contain_exactly(deploy)
end
end
context 'when one of the jobs is run on a failure' do
let!(:linux_notify) { create_build('linux:notify', stage: 'deploy', stage_idx: 2, when: 'on_failure') }
let!(:linux_notify_on_build) { create(:ci_build_need, build: linux_notify, name: 'linux:build') }
context 'when another job in build phase fails first' do
context 'when ci_dag_support is enabled' do
it 'does skip linux:notify' do
expect(process_pipeline).to be_truthy
mac_build.reset.drop!
linux_build.reset.success!
expect(linux_notify.reset).to be_skipped
end
end
context 'when ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
mac_build.reset.drop!
linux_build.reset.success!
expect(linux_notify.reset).to be_pending
end
end
end
context 'when linux:build job fails first' do
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
linux_build.reset.drop!
expect(linux_notify.reset).to be_pending
end
end
end
end
def process_pipeline
described_class.new(pipeline).execute
end
def all_builds
pipeline.builds.order(:stage_idx, :id)
end
def builds
all_builds.where.not(status: [:created, :skipped])
end
def stages
pipeline.reset.stages.map(&:status)
end
def builds_names
builds.pluck(:name)
end
def builds_names_and_statuses
builds.each_with_object({}) do |b, h|
h[b.name.to_sym] = b.status
h
end
end
def all_builds_names
all_builds.pluck(:name)
end
def builds_statuses
builds.pluck(:status)
end
def all_builds_statuses
all_builds.pluck(:status)
end
def succeed_pending
builds.pending.map(&:success)
end
def succeed_running_or_pending
pipeline.builds.running_or_pending.each(&:success)
end
def fail_running_or_pending
pipeline.builds.running_or_pending.each(&:drop)
end
def cancel_running_or_pending
pipeline.builds.running_or_pending.each(&:cancel)
end
def play_manual_action(name)
builds.find_by(name: name).play(user)
end
def enqueue_scheduled(name)
builds.scheduled.find_by(name: name).enqueue_scheduled
end
def retry_build(name)
Ci::Build.retry(builds.find_by(name: name), user)
end
def manual_actions
pipeline.manual_actions.reload
end
def create_build(name, **opts)
create(:ci_build, :created, pipeline: pipeline, name: name, **opts)
end
def successful_build(name, **opts)
create(:ci_build, :success, pipeline: pipeline, name: name, **opts)
end
def delayed_options
{ when: 'delayed', options: { script: %w(echo), start_in: '1 minute' } }
end
def unschedule
pipeline.builds.scheduled.map(&:unschedule)
end
end
...@@ -2,7 +2,7 @@
require 'spec_helper'
describe Ci::ProcessPipelineService, '#execute' do
describe Ci::ProcessPipelineService do
let(:user) { create(:user) }
let(:project) { create(:project) }
...@@ -18,668 +18,6 @@ describe Ci::ProcessPipelineService, '#execute' do
project.add_developer(user)
end
context 'when simple pipeline is defined' do
before do
create_build('linux', stage_idx: 0)
create_build('mac', stage_idx: 0)
create_build('rspec', stage_idx: 1)
create_build('rubocop', stage_idx: 1)
create_build('deploy', stage_idx: 2)
end
it 'processes a pipeline', :sidekiq_might_not_need_inline do
expect(process_pipeline).to be_truthy
succeed_pending
expect(builds.success.count).to eq(2)
succeed_pending
expect(builds.success.count).to eq(4)
succeed_pending
expect(builds.success.count).to eq(5)
end
it 'does not process pipeline if existing stage is running' do
expect(process_pipeline).to be_truthy
expect(builds.pending.count).to eq(2)
expect(process_pipeline).to be_falsey
expect(builds.pending.count).to eq(2)
end
end
context 'custom stage with first job allowed to fail' do
before do
create_build('clean_job', stage_idx: 0, allow_failure: true)
create_build('test_job', stage_idx: 1, allow_failure: true)
end
it 'automatically triggers a next stage when build finishes', :sidekiq_might_not_need_inline do
expect(process_pipeline).to be_truthy
expect(builds_statuses).to eq ['pending']
fail_running_or_pending
expect(builds_statuses).to eq %w(failed pending)
fail_running_or_pending
expect(pipeline.reload).to be_success
end
end
context 'when optional manual actions are defined', :sidekiq_might_not_need_inline do
before do
create_build('build', stage_idx: 0)
create_build('test', stage_idx: 1)
create_build('test_failure', stage_idx: 2, when: 'on_failure')
create_build('deploy', stage_idx: 3)
create_build('production', stage_idx: 3, when: 'manual', allow_failure: true)
create_build('cleanup', stage_idx: 4, when: 'always')
create_build('clear:cache', stage_idx: 4, when: 'manual', allow_failure: true)
end
context 'when builds are successful' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test deploy production)
expect(builds_statuses).to eq %w(success success pending manual)
succeed_running_or_pending
expect(builds_names).to eq %w(build test deploy production cleanup clear:cache)
expect(builds_statuses).to eq %w(success success success manual pending manual)
succeed_running_or_pending
expect(builds_statuses).to eq %w(success success success manual success manual)
expect(pipeline.reload.status).to eq 'success'
end
end
context 'when test job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
fail_running_or_pending
expect(builds_names).to eq %w(build test test_failure)
expect(builds_statuses).to eq %w(success failed pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test test_failure cleanup)
expect(builds_statuses).to eq %w(success failed success pending)
succeed_running_or_pending
expect(builds_statuses).to eq %w(success failed success success)
expect(pipeline.reload.status).to eq 'failed'
end
end
context 'when test and test_failure jobs fail' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
fail_running_or_pending
expect(builds_names).to eq %w(build test test_failure)
expect(builds_statuses).to eq %w(success failed pending)
fail_running_or_pending
expect(builds_names).to eq %w(build test test_failure cleanup)
expect(builds_statuses).to eq %w(success failed failed pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test test_failure cleanup)
expect(builds_statuses).to eq %w(success failed failed success)
expect(pipeline.reload.status).to eq('failed')
end
end
context 'when deploy job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
succeed_running_or_pending
expect(builds_names).to eq %w(build test deploy production)
expect(builds_statuses).to eq %w(success success pending manual)
fail_running_or_pending
expect(builds_names).to eq %w(build test deploy production cleanup)
expect(builds_statuses).to eq %w(success success failed manual pending)
succeed_running_or_pending
expect(builds_statuses).to eq %w(success success failed manual success)
expect(pipeline.reload).to be_failed
end
end
context 'when build is canceled in the second stage' do
it 'does not schedule builds after build has been canceled' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build']
expect(builds_statuses).to eq ['pending']
succeed_running_or_pending
expect(builds.running_or_pending).not_to be_empty
expect(builds_names).to eq %w(build test)
expect(builds_statuses).to eq %w(success pending)
cancel_running_or_pending
expect(builds.running_or_pending).to be_empty
expect(builds_names).to eq %w[build test]
expect(builds_statuses).to eq %w[success canceled]
expect(pipeline.reload).to be_canceled
end
end
context 'when listing optional manual actions' do
it 'returns only for skipped builds' do
# currently all builds are created
expect(process_pipeline).to be_truthy
expect(manual_actions).to be_empty
# succeed stage build
succeed_running_or_pending
expect(manual_actions).to be_empty
# succeed stage test
succeed_running_or_pending
expect(manual_actions).to be_one # production
# succeed stage deploy
succeed_running_or_pending
expect(manual_actions).to be_many # production and clear cache
end
end
end
context 'when delayed jobs are defined', :sidekiq_might_not_need_inline do
context 'when the scene is timed incremental rollout' do
before do
create_build('build', stage_idx: 0)
create_build('rollout10%', **delayed_options, stage_idx: 1)
create_build('rollout100%', **delayed_options, stage_idx: 2)
create_build('cleanup', stage_idx: 3)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
context 'when builds are successful' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('rollout10%')
end
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('rollout100%')
end
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'success', 'cleanup': 'success' })
expect(pipeline.reload.status).to eq 'success'
end
end
context 'when build job fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'build': 'failed' })
expect(pipeline.reload.status).to eq 'failed'
end
end
context 'when rollout 10% is unscheduled' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
unschedule
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'manual' })
expect(pipeline.reload.status).to eq 'manual'
end
context 'when user plays rollout 10%' do
it 'schedules rollout100%' do
process_pipeline
succeed_pending
unschedule
play_manual_action('rollout10%')
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'success', 'rollout100%': 'scheduled' })
expect(pipeline.reload.status).to eq 'scheduled'
end
end
end
context 'when rollout 10% fails' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('rollout10%')
end
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'failed' })
expect(pipeline.reload.status).to eq 'failed'
end
context 'when user retries rollout 10%' do
it 'does not schedule rollout10% again' do
process_pipeline
succeed_pending
enqueue_scheduled('rollout10%')
fail_running_or_pending
retry_build('rollout10%')
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
expect(pipeline.reload.status).to eq 'running'
end
end
end
context 'when rollout 10% is played immidiately' do
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'build': 'pending' })
succeed_pending
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'scheduled' })
play_manual_action('rollout10%')
expect(builds_names_and_statuses).to eq({ 'build': 'success', 'rollout10%': 'pending' })
expect(pipeline.reload.status).to eq 'running'
end
end
end
context 'when only one scheduled job exists in a pipeline' do
before do
create_build('delayed', **delayed_options, stage_idx: 0)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'properly processes the pipeline' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
expect(pipeline.reload.status).to eq 'scheduled'
end
end
context 'when there are two delayed jobs in a stage' do
before do
create_build('delayed1', **delayed_options, stage_idx: 0)
create_build('delayed2', **delayed_options, stage_idx: 0)
create_build('job', stage_idx: 1)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'blocks the stage until all scheduled jobs finished' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed1': 'scheduled', 'delayed2': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('delayed1')
end
expect(builds_names_and_statuses).to eq({ 'delayed1': 'pending', 'delayed2': 'scheduled' })
expect(pipeline.reload.status).to eq 'running'
end
end
context 'when a delayed job is allowed to fail' do
before do
create_build('delayed', **delayed_options, allow_failure: true, stage_idx: 0)
create_build('job', stage_idx: 1)
allow(Ci::BuildScheduleWorker).to receive(:perform_at)
end
it 'blocks the stage and continues after it failed' do
expect(process_pipeline).to be_truthy
expect(builds_names_and_statuses).to eq({ 'delayed': 'scheduled' })
Timecop.travel 2.minutes.from_now do
enqueue_scheduled('delayed')
end
fail_running_or_pending
expect(builds_names_and_statuses).to eq({ 'delayed': 'failed', 'job': 'pending' })
expect(pipeline.reload.status).to eq 'pending'
end
end
end
context 'when an exception is raised during a persistent ref creation' do
before do
successful_build('test', stage_idx: 0)
allow_next_instance_of(Ci::PersistentRef) do |instance|
allow(instance).to receive(:delete_refs) { raise ArgumentError }
end
end
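# delete_refs is stubbed to raise ArgumentError; the example below only
# asserts that pipeline processing does not let that error escape.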
it 'processes the pipeline' do
expect { process_pipeline }.not_to raise_error
end
end
context 'when there are manual actions in earlier stages' do
context 'when first stage has only optional manual actions' do
before do
create_build('build', stage_idx: 0, when: 'manual', allow_failure: true)
create_build('check', stage_idx: 1)
create_build('test', stage_idx: 2)
process_pipeline
end
it 'starts from the second stage' do
expect(all_builds_statuses).to eq %w[manual pending created]
end
end
context 'when second stage has only optional manual actions' do
before do
create_build('check', stage_idx: 0)
create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
create_build('test', stage_idx: 2)
process_pipeline
end
it 'skips second stage and continues on third stage', :sidekiq_might_not_need_inline do
expect(all_builds_statuses).to eq(%w[pending created created])
builds.first.success
expect(all_builds_statuses).to eq(%w[success manual pending])
end
end
end
context 'when there are only manual actions in stages' do
before do
create_build('image', stage_idx: 0, when: 'manual', allow_failure: true)
create_build('build', stage_idx: 1, when: 'manual', allow_failure: true)
create_build('deploy', stage_idx: 2, when: 'manual')
create_build('check', stage_idx: 3)
process_pipeline
end
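# 'image' and 'build' are optional manual jobs (allow_failure: true), while
# 'deploy' is a blocking manual action, so processing stops there and the
# pipeline reports itself as blocked instead of reaching 'check'.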
it 'processes all jobs until blocking actions are encountered' do
expect(all_builds_statuses).to eq(%w[manual manual manual created])
expect(all_builds_names).to eq(%w[image build deploy check])
expect(pipeline.reload).to be_blocked
end
end
context 'when there is only one manual action' do
before do
create_build('deploy', stage_idx: 0, when: 'manual', allow_failure: true)
process_pipeline
end
it 'skips the pipeline' do
expect(pipeline.reload).to be_skipped
end
context 'when the action was played' do
before do
play_manual_action('deploy')
end
it 'queues the action and pipeline', :sidekiq_might_not_need_inline do
expect(all_builds_statuses).to eq(%w[pending])
expect(pipeline.reload).to be_pending
end
end
end
context 'when blocking manual actions are defined', :sidekiq_might_not_need_inline do
before do
create_build('code:test', stage_idx: 0)
create_build('staging:deploy', stage_idx: 1, when: 'manual')
create_build('staging:test', stage_idx: 2, when: 'on_success')
create_build('production:deploy', stage_idx: 3, when: 'manual')
create_build('production:test', stage_idx: 4, when: 'always')
end
context 'when first stage succeeds' do
it 'blocks pipeline on stage with first manual action' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
expect(pipeline.reload.status).to eq 'pending'
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy]
expect(builds_statuses).to eq %w[success manual]
expect(pipeline.reload).to be_manual
end
end
context 'when first stage fails' do
it 'does not take blocking action into account' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
expect(pipeline.reload.status).to eq 'pending'
fail_running_or_pending
expect(builds_names).to eq %w[code:test production:test]
expect(builds_statuses).to eq %w[failed pending]
succeed_running_or_pending
expect(builds_statuses).to eq %w[failed success]
expect(pipeline.reload).to be_failed
end
end
context 'when pipeline is promoted sequentially up to the end' do
before do
# Users need the ability to merge into a branch in order to trigger
# protected manual actions.
#
create(:protected_branch, :developers_can_merge,
name: 'master', project: project)
end
it 'properly processes entire pipeline' do
process_pipeline
expect(builds_names).to eq %w[code:test]
expect(builds_statuses).to eq %w[pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy]
expect(builds_statuses).to eq %w[success manual]
expect(pipeline.reload).to be_manual
play_manual_action('staging:deploy')
expect(builds_statuses).to eq %w[success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test]
expect(builds_statuses).to eq %w[success success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy]
expect(builds_statuses).to eq %w[success success success manual]
expect(pipeline.reload).to be_manual
expect(pipeline.reload).to be_blocked
expect(pipeline.reload).not_to be_active
expect(pipeline.reload).not_to be_complete
play_manual_action('production:deploy')
expect(builds_statuses).to eq %w[success success success pending]
expect(pipeline.reload).to be_running
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy production:test]
expect(builds_statuses).to eq %w[success success success success pending]
expect(pipeline.reload).to be_running
succeed_running_or_pending
expect(builds_names).to eq %w[code:test staging:deploy staging:test
production:deploy production:test]
expect(builds_statuses).to eq %w[success success success success success]
expect(pipeline.reload).to be_success
end
end
end
context 'when second stage has only on_failure jobs', :sidekiq_might_not_need_inline do
before do
create_build('check', stage_idx: 0)
create_build('build', stage_idx: 1, when: 'on_failure')
create_build('test', stage_idx: 2)
process_pipeline
end
it 'skips second stage and continues on third stage' do
expect(all_builds_statuses).to eq(%w[pending created created])
builds.first.success
expect(all_builds_statuses).to eq(%w[success skipped pending])
end
end
context 'when failed build in the middle stage is retried', :sidekiq_might_not_need_inline do
context 'when failed build is the only unsuccessful build in the stage' do
before do
create_build('build:1', stage_idx: 0)
create_build('build:2', stage_idx: 0)
create_build('test:1', stage_idx: 1)
create_build('test:2', stage_idx: 1)
create_build('deploy:1', stage_idx: 2)
create_build('deploy:2', stage_idx: 2)
end
it 'does trigger builds in the next stage' do
expect(process_pipeline).to be_truthy
expect(builds_names).to eq ['build:1', 'build:2']
succeed_running_or_pending
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
pipeline.builds.find_by(name: 'test:1').success
pipeline.builds.find_by(name: 'test:2').drop
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2']
Ci::Build.retry(pipeline.builds.find_by(name: 'test:2'), user).success
expect(builds_names).to eq ['build:1', 'build:2', 'test:1', 'test:2',
'test:2', 'deploy:1', 'deploy:2']
end
end
end
context 'updates a list of retried builds' do
subject { described_class.retried.order(:id) }
...@@ -695,251 +33,15 @@ describe Ci::ProcessPipelineService, '#execute' do
end
end
context 'when builds with auto-retries are configured', :sidekiq_might_not_need_inline do
before do
create_build('build:1', stage_idx: 0, user: user, options: { script: 'aa', retry: 2 })
create_build('test:1', stage_idx: 1, user: user, when: :on_failure)
create_build('test:2', stage_idx: 1, user: user, options: { script: 'aa', retry: 1 })
end
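# The retry: option is what re-creates 'build:1' automatically after its
# first failure; because that retry succeeds, the on_failure job 'test:1'
# never appears in builds_names and only 'test:2' runs.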
it 'automatically retries builds in a valid order' do
expect(process_pipeline).to be_truthy
fail_running_or_pending
expect(builds_names).to eq %w[build:1 build:1]
expect(builds_statuses).to eq %w[failed pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build:1 build:1 test:2]
expect(builds_statuses).to eq %w[failed success pending]
succeed_running_or_pending
expect(builds_names).to eq %w[build:1 build:1 test:2]
expect(builds_statuses).to eq %w[failed success success]
expect(pipeline.reload).to be_success
end
end
context 'when pipeline with needs is created', :sidekiq_might_not_need_inline do
let!(:linux_build) { create_build('linux:build', stage: 'build', stage_idx: 0) }
let!(:mac_build) { create_build('mac:build', stage: 'build', stage_idx: 0) }
let!(:linux_rspec) { create_build('linux:rspec', stage: 'test', stage_idx: 1) }
let!(:linux_rubocop) { create_build('linux:rubocop', stage: 'test', stage_idx: 1) }
let!(:mac_rspec) { create_build('mac:rspec', stage: 'test', stage_idx: 1) }
let!(:mac_rubocop) { create_build('mac:rubocop', stage: 'test', stage_idx: 1) }
let!(:deploy) { create_build('deploy', stage: 'deploy', stage_idx: 2) }
let!(:linux_rspec_on_build) { create(:ci_build_need, build: linux_rspec, name: 'linux:build') }
let!(:linux_rubocop_on_build) { create(:ci_build_need, build: linux_rubocop, name: 'linux:build') }
let!(:mac_rspec_on_build) { create(:ci_build_need, build: mac_rspec, name: 'mac:build') }
let!(:mac_rubocop_on_build) { create(:ci_build_need, build: mac_rubocop, name: 'mac:build') }
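# The fixtures above model roughly this DAG (illustrative .gitlab-ci.yml
# sketch only, not part of the spec; job bodies omitted):
#
#   linux:rspec:   { stage: test, needs: ["linux:build"] }
#   linux:rubocop: { stage: test, needs: ["linux:build"] }
#   mac:rspec:     { stage: test, needs: ["mac:build"] }
#   mac:rubocop:   { stage: test, needs: ["mac:build"] }
#   deploy:        { stage: deploy } # no needs, waits for the whole test stage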
it 'runs jobs out of order when linux:* finishes first' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w(pending created created))
expect(builds.pending).to contain_exactly(linux_build, mac_build)
# we follow the single path of linux
linux_build.reset.success!
expect(stages).to eq(%w(running pending created))
expect(builds.success).to contain_exactly(linux_build)
expect(builds.pending).to contain_exactly(mac_build, linux_rspec, linux_rubocop)
linux_rspec.reset.success!
expect(stages).to eq(%w(running running created))
expect(builds.success).to contain_exactly(linux_build, linux_rspec)
expect(builds.pending).to contain_exactly(mac_build, linux_rubocop)
linux_rubocop.reset.success!
expect(stages).to eq(%w(running running created))
expect(builds.success).to contain_exactly(linux_build, linux_rspec, linux_rubocop)
expect(builds.pending).to contain_exactly(mac_build)
mac_build.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w(success success pending))
expect(builds.success).to contain_exactly(
linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
expect(builds.pending).to contain_exactly(deploy)
end
context 'when feature ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
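# With ci_dag_support disabled the needs relationships above no longer
# short-circuit stage ordering, so this example expects plain
# stage-by-stage processing.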
it 'still follows stage ordering when linux:build finishes first' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w(pending created created))
expect(builds.pending).to contain_exactly(linux_build, mac_build)
# we follow the single path of linux
linux_build.reset.success!
expect(stages).to eq(%w(running created created))
expect(builds.success).to contain_exactly(linux_build)
expect(builds.pending).to contain_exactly(mac_build)
mac_build.reset.success!
expect(stages).to eq(%w(success pending created))
expect(builds.success).to contain_exactly(linux_build, mac_build)
expect(builds.pending).to contain_exactly(
linux_rspec, linux_rubocop, mac_rspec, mac_rubocop)
linux_rspec.reset.success!
linux_rubocop.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w(success success pending))
expect(builds.success).to contain_exactly(
linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
expect(builds.pending).to contain_exactly(deploy)
end
end
context 'when one of the jobs is configured to run on_failure' do
let!(:linux_notify) { create_build('linux:notify', stage: 'deploy', stage_idx: 2, when: 'on_failure') }
let!(:linux_notify_on_build) { create(:ci_build_need, build: linux_notify, name: 'linux:build') }
context 'when another job in the build phase fails first' do
context 'when ci_dag_support is enabled' do
it 'does skip linux:notify' do
expect(process_pipeline).to be_truthy
mac_build.reset.drop!
linux_build.reset.success!
expect(linux_notify.reset).to be_skipped
end
end
context 'when ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
mac_build.reset.drop!
linux_build.reset.success!
expect(linux_notify.reset).to be_pending
end
end
end
context 'when linux:build job fails first' do
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
linux_build.reset.drop!
expect(linux_notify.reset).to be_pending
end
end
end
end
def process_pipeline
described_class.new(pipeline).execute
end
def all_builds
pipeline.builds.order(:stage_idx, :id)
end
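# Unlike all_builds, the builds scope below excludes jobs that are still
# :created or were :skipped, which keeps the name/status assertions focused
# on jobs the pipeline has actually acted on.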
def builds
all_builds.where.not(status: [:created, :skipped])
end
def stages
pipeline.reset.stages.map(&:status)
end
def builds_names
builds.pluck(:name)
end
def builds_names_and_statuses
builds.each_with_object({}) do |b, h|
h[b.name.to_sym] = b.status
end
end
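# Maps job name (as a symbol) to status, e.g. { build: 'success',
# 'rollout10%': 'scheduled' }, the shape the eq(...) expectations above
# compare against.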
def all_builds_names
all_builds.pluck(:name)
end
def builds_statuses
builds.pluck(:status)
end
def all_builds_statuses
all_builds.pluck(:status)
end
def succeed_pending
builds.pending.map(&:success)
end
def succeed_running_or_pending
pipeline.builds.running_or_pending.each(&:success)
end
def fail_running_or_pending
pipeline.builds.running_or_pending.each(&:drop)
end
def cancel_running_or_pending
pipeline.builds.running_or_pending.each(&:cancel)
end
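# These three helpers force a terminal status (success, failed, canceled)
# onto every currently running or pending job, simulating jobs finishing.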
def play_manual_action(name)
builds.find_by(name: name).play(user)
end
def enqueue_scheduled(name)
builds.scheduled.find_by(name: name).enqueue_scheduled
end
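# Moves a :scheduled build to :pending, standing in for its delayed timer
# firing; callers wrap it in Timecop.travel where the start_in delay matters.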
def retry_build(name)
Ci::Build.retry(builds.find_by(name: name), user)
end
def manual_actions
pipeline.manual_actions.reload
end
def create_build(name, **opts)
create(:ci_build, :created, pipeline: pipeline, name: name, **opts)
end
def successful_build(name, **opts)
create(:ci_build, :success, pipeline: pipeline, name: name, **opts)
end
def delayed_options
{ when: 'delayed', options: { script: %w(echo), start_in: '1 minute' } }
end
def unschedule
pipeline.builds.scheduled.map(&:unschedule)
end
end
...@@ -732,15 +732,15 @@
dependencies:
vue-eslint-parser "^6.0.4"
- "@gitlab/svgs@^1.88.0":
-   version "1.88.0"
-   resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-1.88.0.tgz#0a9b72e9591264fcac592ebf9944665c70f48de2"
-   integrity sha512-ZgepCvZoB/lFdgttHtu8+9YlRZlVc9MnHDbbqcQCFBvrfOjY1wq12ikxnNbwKj8QNA47TRJvSS0TkHgMWYnbsA==
+ "@gitlab/svgs@^1.89.0":
+   version "1.89.0"
+   resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-1.89.0.tgz#5bdaff1b0af1cc07ed34e89c21c34c7c6a3e1caa"
+   integrity sha512-vI6VobZs6mq2Bbiej5bYMHyvtn8kD1O/uHSlyY9jgJoa2TXU+jFI9DqUpJmx8EIHt+o0qm/8G3XsFGEr5gLb7Q==
- "@gitlab/ui@8.10.0":
-   version "8.10.0"
-   resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-8.10.0.tgz#885ea8fe695ccff859821bd4ad4cefbea086e3b6"
-   integrity sha512-bbq+7iiptsNUWtPBQs/ek5uDnkSQ7QGzJwddfZSvQPMuq+50fDSy3Gab1cwvZtSj1fSvAny0ksqbpQl1w+5AgA==
+ "@gitlab/ui@8.15.0":
+   version "8.15.0"
+   resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-8.15.0.tgz#51fa3f2b4ccb8454bcb9680acb334bc88fe15f3d"
+   integrity sha512-M9hnLVRMUF5DDfwPtR5CLsCyiWgjslqg2p37a6qwjdjZ+ST5t0Vr/44Mg4Lz4y2zxqjDaSmR4KtmipvykeQx1A==
dependencies:
"@babel/standalone" "^7.0.0"
"@gitlab/vue-toasted" "^1.3.0"
...@@ -750,6 +750,7 @@
highlight.js "^9.13.1"
js-beautify "^1.8.8"
lodash "^4.17.14"
+   portal-vue "^2.1.6"
resize-observer-polyfill "^1.5.1"
url-search-params-polyfill "^5.0.0"
vue "^2.6.10"
...@@ -8742,10 +8743,10 @@ popper.js@^1.14.7, popper.js@^1.15.0:
resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.15.0.tgz#5560b99bbad7647e9faa475c6b8056621f5a4ff2"
integrity sha512-w010cY1oCUmI+9KwwlWki+r5jxKfTFDVoadl7MSrIujHU5MJ5OR6HTDj6Xo8aoR/QsA56x8jKjA59qGH4ELtrA==
- portal-vue@^2.1.5:
-   version "2.1.5"
-   resolved "https://registry.yarnpkg.com/portal-vue/-/portal-vue-2.1.5.tgz#ecd0997cb32958205151cb72f40fd4f38d175e5c"
-   integrity sha512-vZmdMn0mOo7puvxoMQ5zju6S29aFD+9yygJxyWQtPaMXS9xunAeoYdnx6yzfL9J8HD8pMZYgSieEIbioAKhrSQ==
+ portal-vue@^2.1.5, portal-vue@^2.1.6:
+   version "2.1.7"
+   resolved "https://registry.yarnpkg.com/portal-vue/-/portal-vue-2.1.7.tgz#ea08069b25b640ca08a5b86f67c612f15f4e4ad4"
+   integrity sha512-+yCno2oB3xA7irTt0EU5Ezw22L2J51uKAacE/6hMPMoO/mx3h4rXFkkBkT4GFsMDv/vEe8TNKC3ujJJ0PTwb6g==
portfinder@^1.0.24:
version "1.0.24"
...