Commit 733befe9 authored by GitLab Bot's avatar GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 10213bf3
...@@ -24,6 +24,7 @@ const Api = { ...@@ -24,6 +24,7 @@ const Api = {
projectMergeRequestChangesPath: '/api/:version/projects/:id/merge_requests/:mrid/changes', projectMergeRequestChangesPath: '/api/:version/projects/:id/merge_requests/:mrid/changes',
projectMergeRequestVersionsPath: '/api/:version/projects/:id/merge_requests/:mrid/versions', projectMergeRequestVersionsPath: '/api/:version/projects/:id/merge_requests/:mrid/versions',
projectRunnersPath: '/api/:version/projects/:id/runners', projectRunnersPath: '/api/:version/projects/:id/runners',
projectProtectedBranchesPath: '/api/:version/projects/:id/protected_branches',
mergeRequestsPath: '/api/:version/merge_requests', mergeRequestsPath: '/api/:version/merge_requests',
groupLabelsPath: '/groups/:namespace_path/-/labels', groupLabelsPath: '/groups/:namespace_path/-/labels',
issuableTemplatePath: '/:namespace_path/:project_path/templates/:type/:key', issuableTemplatePath: '/:namespace_path/:project_path/templates/:type/:key',
...@@ -220,6 +221,22 @@ const Api = { ...@@ -220,6 +221,22 @@ const Api = {
return axios.get(url, config); return axios.get(url, config);
}, },
projectProtectedBranches(id, query = '') {
const url = Api.buildUrl(Api.projectProtectedBranchesPath).replace(
':id',
encodeURIComponent(id),
);
return axios
.get(url, {
params: {
search: query,
per_page: DEFAULT_PER_PAGE,
},
})
.then(({ data }) => data);
},
mergeRequests(params = {}) { mergeRequests(params = {}) {
const url = Api.buildUrl(Api.mergeRequestsPath); const url = Api.buildUrl(Api.mergeRequestsPath);
......
import _ from 'underscore'; import { escape as esc } from 'lodash';
import axios from '../lib/utils/axios_utils'; import axios from '../lib/utils/axios_utils';
import { s__ } from '../locale'; import { s__ } from '../locale';
import Flash from '../flash'; import Flash from '../flash';
...@@ -10,7 +10,7 @@ function generateErrorBoxContent(errors) { ...@@ -10,7 +10,7 @@ function generateErrorBoxContent(errors) {
const errorList = [].concat(errors).map( const errorList = [].concat(errors).map(
errorString => ` errorString => `
<li> <li>
${_.escape(errorString)} ${esc(errorString)}
</li> </li>
`, `,
); );
......
...@@ -4,6 +4,6 @@ import 'jquery'; ...@@ -4,6 +4,6 @@ import 'jquery';
import 'jquery-ujs'; import 'jquery-ujs';
import 'vendor/jquery.endless-scroll'; import 'vendor/jquery.endless-scroll';
import 'jquery.caret'; // must be imported before at.js import 'jquery.caret'; // must be imported before at.js
import 'at.js'; import '@gitlab/at.js';
import 'vendor/jquery.scrollTo'; import 'vendor/jquery.scrollTo';
import 'jquery.waitforimages'; import 'jquery.waitforimages';
<script> <script>
import _ from 'underscore'; import { debounce, uniq } from 'lodash';
import { mapActions, mapState, mapGetters } from 'vuex'; import { mapActions, mapState, mapGetters } from 'vuex';
import { GlLoadingIcon } from '@gitlab/ui'; import { GlLoadingIcon } from '@gitlab/ui';
import { GlAreaChart } from '@gitlab/ui/dist/charts'; import { GlAreaChart } from '@gitlab/ui/dist/charts';
...@@ -120,7 +120,7 @@ export default { ...@@ -120,7 +120,7 @@ export default {
return this.xAxisRange[this.xAxisRange.length - 1]; return this.xAxisRange[this.xAxisRange.length - 1];
}, },
charts() { charts() {
return _.uniq(this.individualCharts); return uniq(this.individualCharts);
}, },
}, },
mounted() { mounted() {
...@@ -171,7 +171,7 @@ export default { ...@@ -171,7 +171,7 @@ export default {
}); });
}) })
.catch(() => {}); .catch(() => {});
this.masterChart.on('datazoom', _.debounce(this.setIndividualChartsZoom, 200)); this.masterChart.on('datazoom', debounce(this.setIndividualChartsZoom, 200));
}, },
onIndividualChartCreated(chart) { onIndividualChartCreated(chart) {
this.individualCharts.push(chart); this.individualCharts.push(chart);
......
import $ from 'jquery'; import $ from 'jquery';
import 'at.js'; import '@gitlab/at.js';
import _ from 'underscore'; import _ from 'underscore';
import SidebarMediator from '~/sidebar/sidebar_mediator'; import SidebarMediator from '~/sidebar/sidebar_mediator';
import glRegexp from './lib/utils/regexp'; import glRegexp from './lib/utils/regexp';
......
...@@ -12,6 +12,11 @@ export default { ...@@ -12,6 +12,11 @@ export default {
type: Object, type: Object,
required: true, required: true,
}, },
deploymentCluster: {
type: Object,
required: false,
default: null,
},
iconStatus: { iconStatus: {
type: Object, type: Object,
required: true, required: true,
...@@ -61,14 +66,14 @@ export default { ...@@ -61,14 +66,14 @@ export default {
: ''; : '';
}, },
hasCluster() { hasCluster() {
return this.hasLastDeployment && this.lastDeployment.cluster; return Boolean(this.deploymentCluster) && Boolean(this.deploymentCluster.name);
}, },
clusterNameOrLink() { clusterNameOrLink() {
if (!this.hasCluster) { if (!this.hasCluster) {
return ''; return '';
} }
const { name, path } = this.lastDeployment.cluster; const { name, path } = this.deploymentCluster;
const escapedName = _.escape(name); const escapedName = _.escape(name);
const escapedPath = _.escape(path); const escapedPath = _.escape(path);
...@@ -86,6 +91,9 @@ export default { ...@@ -86,6 +91,9 @@ export default {
false, false,
); );
}, },
kubernetesNamespace() {
return this.hasCluster ? this.deploymentCluster.kubernetes_namespace : null;
},
}, },
methods: { methods: {
deploymentLink(name) { deploymentLink(name) {
...@@ -109,75 +117,153 @@ export default { ...@@ -109,75 +117,153 @@ export default {
); );
}, },
lastEnvironmentMessage() { lastEnvironmentMessage() {
const { environmentLink, clusterNameOrLink, hasCluster } = this; const { environmentLink, clusterNameOrLink, hasCluster, kubernetesNamespace } = this;
if (hasCluster) {
const message = hasCluster if (kubernetesNamespace) {
? __('This job is deployed to %{environmentLink} using cluster %{clusterNameOrLink}.') return sprintf(
: __('This job is deployed to %{environmentLink}.'); __(
'This job is deployed to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}.',
return sprintf(message, { environmentLink, clusterNameOrLink }, false); ),
{ environmentLink, clusterNameOrLink, kubernetesNamespace },
false,
);
}
// we know the cluster but not the namespace
return sprintf(
__('This job is deployed to %{environmentLink} using cluster %{clusterNameOrLink}.'),
{ environmentLink, clusterNameOrLink },
false,
);
}
// not a cluster deployment
return sprintf(__('This job is deployed to %{environmentLink}.'), { environmentLink }, false);
}, },
outOfDateEnvironmentMessage() { outOfDateEnvironmentMessage() {
const { hasLastDeployment, hasCluster, environmentLink, clusterNameOrLink } = this; const {
hasLastDeployment,
hasCluster,
environmentLink,
clusterNameOrLink,
kubernetesNamespace,
} = this;
if (hasLastDeployment) { if (hasLastDeployment) {
const message = hasCluster const deploymentLink = this.deploymentLink(__('most recent deployment'));
? __( if (hasCluster) {
'This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink}. View the %{deploymentLink}.', if (kubernetesNamespace) {
) return sprintf(
: __( __(
'This job is an out-of-date deployment to %{environmentLink}. View the %{deploymentLink}.', 'This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}. View the %{deploymentLink}.',
),
{ environmentLink, clusterNameOrLink, kubernetesNamespace, deploymentLink },
false,
); );
}
// we know the cluster but not the namespace
return sprintf(
__(
'This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink}. View the %{deploymentLink}.',
),
{ environmentLink, clusterNameOrLink, deploymentLink },
false,
);
}
// not a cluster deployment
return sprintf( return sprintf(
message, __(
{ 'This job is an out-of-date deployment to %{environmentLink}. View the %{deploymentLink}.',
environmentLink, ),
clusterNameOrLink, { environmentLink, deploymentLink },
deploymentLink: this.deploymentLink(__('most recent deployment')),
},
false, false,
); );
} }
// no last deployment, i.e. this is the first deployment
const message = hasCluster if (hasCluster) {
? __( if (kubernetesNamespace) {
return sprintf(
__(
'This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}.',
),
{ environmentLink, clusterNameOrLink, kubernetesNamespace },
false,
);
}
// we know the cluster but not the namespace
return sprintf(
__(
'This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink}.', 'This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink}.',
) ),
: __('This job is an out-of-date deployment to %{environmentLink}.'); { environmentLink, clusterNameOrLink },
false,
);
}
// not a cluster deployment
return sprintf( return sprintf(
message, __('This job is an out-of-date deployment to %{environmentLink}.'),
{ { environmentLink },
environmentLink,
clusterNameOrLink,
},
false, false,
); );
}, },
creatingEnvironmentMessage() { creatingEnvironmentMessage() {
const { hasLastDeployment, hasCluster, environmentLink, clusterNameOrLink } = this; const {
hasLastDeployment,
hasCluster,
environmentLink,
clusterNameOrLink,
kubernetesNamespace,
} = this;
if (hasLastDeployment) { if (hasLastDeployment) {
const message = hasCluster const deploymentLink = this.deploymentLink(__('latest deployment'));
? __( if (hasCluster) {
'This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink}. This will overwrite the %{deploymentLink}.', if (kubernetesNamespace) {
) return sprintf(
: __( __(
'This job is creating a deployment to %{environmentLink}. This will overwrite the %{deploymentLink}.', 'This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}. This will overwrite the %{deploymentLink}.',
),
{ environmentLink, clusterNameOrLink, kubernetesNamespace, deploymentLink },
false,
); );
}
// we know the cluster but not the namespace
return sprintf(
__(
'This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink}. This will overwrite the %{deploymentLink}.',
),
{ environmentLink, clusterNameOrLink, deploymentLink },
false,
);
}
// not a cluster deployment
return sprintf( return sprintf(
message, __(
{ 'This job is creating a deployment to %{environmentLink}. This will overwrite the %{deploymentLink}.',
environmentLink, ),
clusterNameOrLink, { environmentLink, deploymentLink },
deploymentLink: this.deploymentLink(__('latest deployment')),
},
false, false,
); );
} }
// no last deployment, i.e. this is the first deployment
if (hasCluster) {
if (kubernetesNamespace) {
return sprintf(
__(
'This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}.',
),
{ environmentLink, clusterNameOrLink, kubernetesNamespace },
false,
);
}
// we know the cluster but not the namespace
return sprintf(
__(
'This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink}.',
),
{ environmentLink, clusterNameOrLink },
false,
);
}
// not a cluster deployment
return sprintf( return sprintf(
__('This job is creating a deployment to %{environmentLink}.'), __('This job is creating a deployment to %{environmentLink}.'),
{ environmentLink }, { environmentLink },
......
...@@ -256,6 +256,7 @@ export default { ...@@ -256,6 +256,7 @@ export default {
v-if="hasEnvironment" v-if="hasEnvironment"
class="js-job-environment" class="js-job-environment"
:deployment-status="job.deployment_status" :deployment-status="job.deployment_status"
:deployment-cluster="job.deployment_cluster"
:icon-status="job.status" :icon-status="job.status"
/> />
......
...@@ -15,7 +15,7 @@ import { escape, uniqueId } from 'lodash'; ...@@ -15,7 +15,7 @@ import { escape, uniqueId } from 'lodash';
import Cookies from 'js-cookie'; import Cookies from 'js-cookie';
import Autosize from 'autosize'; import Autosize from 'autosize';
import 'jquery.caret'; // required by at.js import 'jquery.caret'; // required by at.js
import 'at.js'; import '@gitlab/at.js';
import Vue from 'vue'; import Vue from 'vue';
import { GlSkeletonLoading } from '@gitlab/ui'; import { GlSkeletonLoading } from '@gitlab/ui';
import AjaxCache from '~/lib/utils/ajax_cache'; import AjaxCache from '~/lib/utils/ajax_cache';
......
...@@ -26,18 +26,18 @@ export default class UsagePingPayload { ...@@ -26,18 +26,18 @@ export default class UsagePingPayload {
requestPayload() { requestPayload() {
if (this.isInserted) return this.showPayload(); if (this.isInserted) return this.showPayload();
this.spinner.classList.add('d-inline'); this.spinner.classList.add('d-inline-flex');
return axios return axios
.get(this.container.dataset.endpoint, { .get(this.container.dataset.endpoint, {
responseType: 'text', responseType: 'text',
}) })
.then(({ data }) => { .then(({ data }) => {
this.spinner.classList.remove('d-inline'); this.spinner.classList.remove('d-inline-flex');
this.insertPayload(data); this.insertPayload(data);
}) })
.catch(() => { .catch(() => {
this.spinner.classList.remove('d-inline'); this.spinner.classList.remove('d-inline-flex');
flash(__('Error fetching usage ping data.')); flash(__('Error fetching usage ping data.'));
}); });
} }
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
// like a table or typography then make changes in the framework/ directory. // like a table or typography then make changes in the framework/ directory.
// If you need to add unique style that should affect only one page - use pages/ // If you need to add unique style that should affect only one page - use pages/
// directory. // directory.
@import "at.js/dist/css/jquery.atwho"; @import "@gitlab/at.js/dist/css/jquery.atwho";
@import "dropzone/dist/basic"; @import "dropzone/dist/basic";
@import "select2/select2"; @import "select2/select2";
......
...@@ -63,7 +63,8 @@ ...@@ -63,7 +63,8 @@
display: block; display: block;
} }
.select2-choices { .select2-choices,
.select2-choice {
border-color: $red-500; border-color: $red-500;
} }
} }
......
...@@ -49,3 +49,9 @@ ...@@ -49,3 +49,9 @@
@include spinner-color($white); @include spinner-color($white);
} }
} }
.btn {
.spinner {
vertical-align: text-bottom;
}
}
...@@ -30,6 +30,7 @@ class Deployment < ApplicationRecord ...@@ -30,6 +30,7 @@ class Deployment < ApplicationRecord
validate :valid_ref, on: :create validate :valid_ref, on: :create
delegate :name, to: :environment, prefix: true delegate :name, to: :environment, prefix: true
delegate :kubernetes_namespace, to: :deployment_cluster, allow_nil: true
scope :for_environment, -> (environment) { where(environment_id: environment) } scope :for_environment, -> (environment) { where(environment_id: environment) }
scope :for_environment_name, -> (name) do scope :for_environment_name, -> (name) do
......
...@@ -22,6 +22,12 @@ class BuildDetailsEntity < JobEntity ...@@ -22,6 +22,12 @@ class BuildDetailsEntity < JobEntity
end end
end end
expose :deployment_cluster, if: -> (build) { build&.deployment&.cluster } do |build, options|
# Until data is copied over from deployments.cluster_id, this entity must represent Deployment instead of DeploymentCluster
# https://gitlab.com/gitlab-org/gitlab/issues/202628
DeploymentClusterEntity.represent(build.deployment, options)
end
expose :artifact, if: -> (*) { can?(current_user, :read_build, build) } do expose :artifact, if: -> (*) { can?(current_user, :read_build, build) } do
expose :download_path, if: -> (*) { build.artifacts? } do |build| expose :download_path, if: -> (*) { build.artifacts? } do |build|
download_project_job_artifacts_path(project, build) download_project_job_artifacts_path(project, build)
......
# frozen_string_literal: true
# Minimal serializer for a cluster: its name plus, when the requester is
# allowed to read the cluster, a link to its show page.
class ClusterBasicEntity < Grape::Entity
  include RequestAwareEntity
  # Cluster name is exposed unconditionally.
  expose :name
  # The show path is permission-gated via the request's current user.
  expose :path, if: -> (cluster) { can?(request.current_user, :read_cluster, cluster) } do |cluster|
    cluster.present(current_user: request.current_user).show_path
  end
end
# frozen_string_literal: true
# Serializes cluster details for a deployment: name, show path and the
# Kubernetes namespace used.
class DeploymentClusterEntity < Grape::Entity
  include RequestAwareEntity
  # Until data is copied over from deployments.cluster_id, this entity must represent Deployment instead of DeploymentCluster
  # https://gitlab.com/gitlab-org/gitlab/issues/202628
  # Name of the cluster the deployment targeted.
  expose :name do |deployment|
    deployment.cluster.name
  end
  # Link to the cluster page; only shown to users who can read the cluster.
  expose :path, if: -> (deployment) { can?(request.current_user, :read_cluster, deployment.cluster) } do |deployment|
    deployment.cluster.present(current_user: request.current_user).show_path
  end
  # Kubernetes namespace for the deployment, guarded by the same
  # read_cluster permission as :path.
  expose :kubernetes_namespace, if: -> (deployment) { can?(request.current_user, :read_cluster, deployment.cluster) } do |deployment|
    deployment.kubernetes_namespace
  end
end
...@@ -41,7 +41,11 @@ class DeploymentEntity < Grape::Entity ...@@ -41,7 +41,11 @@ class DeploymentEntity < Grape::Entity
JobEntity.represent(deployment.playable_build, options.merge(only: [:play_path, :retry_path])) JobEntity.represent(deployment.playable_build, options.merge(only: [:play_path, :retry_path]))
end end
expose :cluster, using: ClusterBasicEntity expose :cluster do |deployment, options|
# Until data is copied over from deployments.cluster_id, this entity must represent Deployment instead of DeploymentCluster
# https://gitlab.com/gitlab-org/gitlab/issues/202628
DeploymentClusterEntity.represent(deployment, options) unless deployment.cluster.nil?
end
private private
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
%p.mb-2= s_('%{usage_ping_link_start}Learn more%{usage_ping_link_end} about what information is shared with GitLab Inc.').html_safe % { usage_ping_link_start: usage_ping_link_start, usage_ping_link_end: '</a>'.html_safe } %p.mb-2= s_('%{usage_ping_link_start}Learn more%{usage_ping_link_end} about what information is shared with GitLab Inc.').html_safe % { usage_ping_link_start: usage_ping_link_start, usage_ping_link_end: '</a>'.html_safe }
%button.btn.js-usage-ping-payload-trigger{ type: 'button' } %button.btn.js-usage-ping-payload-trigger{ type: 'button' }
.js-spinner.d-none= icon('spinner spin') .spinner.js-spinner.d-none
.js-text.d-inline= _('Preview payload') .js-text.d-inline= _('Preview payload')
%pre.usage-data.js-usage-ping-payload.js-syntax-highlight.code.highlight.mt-2.d-none{ data: { endpoint: usage_data_admin_application_settings_path(format: :html) } } %pre.usage-data.js-usage-ping-payload.js-syntax-highlight.code.highlight.mt-2.d-none{ data: { endpoint: usage_data_admin_application_settings_path(format: :html) } }
- else - else
......
...@@ -9,6 +9,6 @@ ...@@ -9,6 +9,6 @@
milestone_merge_request_count: @milestone.merge_requests.count }, milestone_merge_request_count: @milestone.merge_requests.count },
disabled: true } disabled: true }
= _('Delete') = _('Delete')
= icon('spin spinner', class: 'js-loading-icon hidden' ) .spinner.js-loading-icon.hidden
#delete-milestone-modal #delete-milestone-modal
...@@ -98,10 +98,6 @@ ...@@ -98,10 +98,6 @@
human_time_estimate: @milestone.human_total_issue_time_estimate, human_time_estimate: @milestone.human_total_issue_time_estimate,
human_time_spent: @milestone.human_total_issue_time_spent, human_time_spent: @milestone.human_total_issue_time_spent,
limit_to_hours: Gitlab::CurrentSettings.time_tracking_limit_to_hours.to_s } } limit_to_hours: Gitlab::CurrentSettings.time_tracking_limit_to_hours.to_s } }
// Fallback while content is loading
.title.hide-collapsed
= _('Time tracking')
= icon('spinner spin')
= render_if_exists 'shared/milestones/weight', milestone: milestone = render_if_exists 'shared/milestones/weight', milestone: milestone
......
.text-center.prepend-top-default .text-center.prepend-top-default
= icon('spin spinner 2x', 'aria-hidden': 'true', 'aria-label': 'Loading tab content') .spinner.spinner-md
---
title: Replaced underscore with lodash for spec/javascripts/badges
merge_request: 25135
author: Shubham Pandey
type: other
---
title: Add experimental --queue-selector option to sidekiq-cluster
merge_request: 18877
author:
type: changed
---
title: Fix autocomplete limitation bug
merge_request: 25167
author:
type: fixed
---
title: Separate provider, platform and post receive entities into own class files
merge_request: 25119
author: Rajendra Kadam
type: added
---
title: Show Kubernetes namespace on job show page
merge_request: 20983
author:
type: added
...@@ -29,7 +29,7 @@ module.exports = { ...@@ -29,7 +29,7 @@ module.exports = {
'vuex', 'vuex',
'pikaday', 'pikaday',
'vue/dist/vue.esm.js', 'vue/dist/vue.esm.js',
'at.js', '@gitlab/at.js',
'jed', 'jed',
'mermaid', 'mermaid',
'katex', 'katex',
......
...@@ -82,6 +82,93 @@ you list: ...@@ -82,6 +82,93 @@ you list:
sudo gitlab-ctl reconfigure sudo gitlab-ctl reconfigure
``` ```
## Queue selector (experimental)
> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/issues/45) in [GitLab Starter](https://about.gitlab.com/pricing/) 12.8.
CAUTION: **Caution:**
As this is marked as **experimental**, it is subject to change at any
time, including **breaking backwards compatibility**. This is so that we
can react to changes we need for our GitLab.com deployment. We have a
tracking issue open to [remove the experimental
designation](https://gitlab.com/gitlab-com/gl-infra/scalability/issues/147)
from this feature; please comment there if you are interested in using
this in your own deployment.
In addition to selecting queues by name, as above, the
`experimental_queue_selector` option allows queue groups to be selected
in a more general way using the following components:
- Attributes that can be selected.
- Operators used to construct a query.
### Available attributes
From the [list of all available
attributes](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml),
`experimental_queue_selector` allows selecting of queues by the
following attributes:
- `feature_category` - the [GitLab feature
category](https://about.gitlab.com/direction/maturity/#category-maturity) the
queue belongs to. For example, the `merge` queue belongs to the
`source_code_management` category.
- `has_external_dependencies` - whether or not the queue connects to external
services. For example, all importers have this set to `true`.
- `latency_sensitive` - whether or not the queue is particularly sensitive to
latency, which also means that its jobs should run quickly. For example, the
`authorized_projects` queue is used to refresh user permissions, and is
latency sensitive.
- `name` - the queue name. The other attributes are typically more useful as
they are more general, but this is available in case a particular queue needs
to be selected.
- `resource_boundary` - if the worker is bound by `cpu`, `memory`, or
`unknown`. For example, the `project_export` queue is memory bound as it has
to load data in memory before saving it for export.
Both `has_external_dependencies` and `latency_sensitive` are boolean attributes:
only the exact string `true` is considered true, and everything else is
considered false.
### Available operators
`experimental_queue_selector` supports the following operators, listed
from highest to lowest precedence:
- `|` - the logical OR operator. For example, `query_a|query_b` (where `query_a`
and `query_b` are queries made up of the other operators here) will include
queues that match either query.
- `&` - the logical AND operator. For example, `query_a&query_b` (where
`query_a` and `query_b` are queries made up of the other operators here) will
only include queues that match both queries.
- `!=` - the NOT IN operator. For example, `feature_category!=issue_tracking`
excludes all queues from the `issue_tracking` feature category.
- `=` - the IN operator. For example, `resource_boundary=cpu` includes all
queues that are CPU bound.
- `,` - the concatenate set operator. For example,
  `feature_category=continuous_integration,pages` includes all queues from
  either the `continuous_integration` category or the `pages` category. The
  same result is possible with the OR operator, but this form is more
  concise and has lower precedence.
The operator precedence for this syntax is fixed: it's not possible to make AND
have higher precedence than OR.
### Example queries
In `/etc/gitlab/gitlab.rb`:
```ruby
sidekiq_cluster['enable'] = true
sidekiq_cluster['experimental_queue_selector'] = true
sidekiq_cluster['queue_groups'] = [
# Run all non-CPU-bound queues that are latency sensitive
'resource_boundary!=cpu&latency_sensitive=true',
# Run all continuous integration and pages queues that are not latency sensitive
'feature_category=continuous_integration,pages&latency_sensitive=false'
]
```
## Ignore all GitHub import queues ## Ignore all GitHub import queues
When [importing from GitHub](../../user/project/import/github.md), Sidekiq might When [importing from GitHub](../../user/project/import/github.md), Sidekiq might
......
...@@ -198,11 +198,11 @@ The Windows Shared Runners are currently in ...@@ -198,11 +198,11 @@ The Windows Shared Runners are currently in
[beta](https://about.gitlab.com/handbook/product/#beta) and should not be used [beta](https://about.gitlab.com/handbook/product/#beta) and should not be used
for production workloads. for production workloads.
During the beta period for groups and private projects the use of During the beta period, the
Windows Shared Runners will count towards the [shared runner pipeline [shared runner pipeline quota](../admin_area/settings/continuous_integration.md#shared-runners-pipeline-minutes-quota-starter-only)
quota](https://docs.gitlab.com/ee/user/admin_area/settings/continuous_integration.html#shared-runners-pipeline-minutes-quota-starter-only) will apply for groups and projects in the same way as Linux Runners.
as if they are Linux Runners, we do have plans to change this in This may change when the beta period ends, as discussed in this
[#30835](https://gitlab.com/gitlab-org/gitlab/issues/30834). [related issue](https://gitlab.com/gitlab-org/gitlab/issues/30834).
Windows Shared Runners on GitLab.com automatically autoscale by Windows Shared Runners on GitLab.com automatically autoscale by
launching virtual machines on the Google Cloud Platform. This solution uses launching virtual machines on the Google Cloud Platform. This solution uses
...@@ -321,18 +321,17 @@ test: ...@@ -321,18 +321,17 @@ test:
- All the limitations mentioned in our [beta - All the limitations mentioned in our [beta
definition](https://about.gitlab.com/handbook/product/#beta). definition](https://about.gitlab.com/handbook/product/#beta).
- The average provisioning time for a new Windows VM is 5 minutes. - The average provisioning time for a new Windows VM is 5 minutes.
This means that for the beta you will notice slower build start times This means that you may notice slower build start times
on the Windows Shared Runner fleet compared to Linux. In a future on the Windows Shared Runner fleet during the beta. In a future
release we will add the ability to the autoscaler which will enable release we will update the autoscaler to enable
the pre-warming of virtual machines. This will significantly reduce the pre-provisioning of virtual machines. This will significantly reduce
the time it takes to provision a VM on the Windows fleet. You can the time it takes to provision a VM on the Windows fleet. You can
follow along in this follow along in the [related issue](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler/issues/32).
[issue](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler/issues/32).
- The Windows Shared Runner fleet may be unavailable occasionally - The Windows Shared Runner fleet may be unavailable occasionally
for maintenance or updates. for maintenance or updates.
- The Windows Shared Runner virtual machine instances do not use the - The Windows Shared Runner virtual machine instances do not use the
GitLab Docker executor. This means that unlike the Linux Shared GitLab Docker executor. This means that you will not be able to specify
Runners, you will not be able to specify `image` and `services` in [`image`](../../ci/yaml/README.md#image) or [`services`](../../ci/yaml/README.md#services) in
your pipeline configuration. your pipeline configuration.
- For the beta release, we have included a set of software packages in - For the beta release, we have included a set of software packages in
the base VM image. If your CI job requires additional software that's the base VM image. If your CI job requires additional software that's
......
...@@ -130,7 +130,7 @@ Once selected, click the **Delete selected** button to confirm the deletion: ...@@ -130,7 +130,7 @@ Once selected, click the **Delete selected** button to confirm the deletion:
![Delete multiple designs](img/delete_multiple_designs_v12_4.png) ![Delete multiple designs](img/delete_multiple_designs_v12_4.png)
NOTE: **Note:** **Note:**
Only the latest version of the designs can be deleted. Only the latest version of the designs can be deleted.
Deleted designs are not permanently lost; they can be Deleted designs are not permanently lost; they can be
viewed by browsing previous versions. viewed by browsing previous versions.
...@@ -144,6 +144,9 @@ which you can start a new discussion: ...@@ -144,6 +144,9 @@ which you can start a new discussion:
![Starting a new discussion on design](img/adding_note_to_design_1.png) ![Starting a new discussion on design](img/adding_note_to_design_1.png)
From GitLab 12.8 on, when you are starting a new discussion, you can adjust the badge's position by
dragging it around the image.
Different discussions have different badge numbers: Different discussions have different badge numbers:
![Discussions on design annotations](img/adding_note_to_design_2.png) ![Discussions on design annotations](img/adding_note_to_design_2.png)
......
...@@ -243,39 +243,6 @@ module API ...@@ -243,39 +243,6 @@ module API
expose :startline expose :startline
expose :project_id expose :project_id
end end
module Platform
class Kubernetes < Grape::Entity
expose :api_url
expose :namespace
expose :authorization_type
expose :ca_cert
end
end
module Provider
class Gcp < Grape::Entity
expose :cluster_id
expose :status_name
expose :gcp_project_id
expose :zone
expose :machine_type
expose :num_nodes
expose :endpoint
end
end
module InternalPostReceive
class Message < Grape::Entity
expose :message
expose :type
end
class Response < Grape::Entity
expose :messages, using: Message
expose :reference_counter_decreased
end
end
end end
end end
......
# frozen_string_literal: true
module API
  module Entities
    module InternalPostReceive
      # Serializes a single post-receive message (its text and type)
      # returned to the internal API consumer.
      class Message < Grape::Entity
        expose :message
        expose :type
      end
    end
  end
end
# frozen_string_literal: true
module API
  module Entities
    module InternalPostReceive
      # Serializes the internal post-receive response: the list of messages
      # (see Entities::InternalPostReceive::Message) and whether the
      # reference counter was decreased.
      class Response < Grape::Entity
        expose :messages, using: Entities::InternalPostReceive::Message
        expose :reference_counter_decreased
      end
    end
  end
end
# frozen_string_literal: true
module API
  module Entities
    module Platform
      # Serializes the Kubernetes platform attributes of a cluster:
      # API endpoint, namespace, authorization type and CA certificate.
      class Kubernetes < Grape::Entity
        expose :api_url
        expose :namespace
        expose :authorization_type
        expose :ca_cert
      end
    end
  end
end
# frozen_string_literal: true
module API
  module Entities
    module Provider
      # Serializes the GCP provider attributes of a cluster (project,
      # zone, machine configuration and provisioning status).
      class Gcp < Grape::Entity
        expose :cluster_id
        expose :status_name
        expose :gcp_project_id
        expose :zone
        expose :machine_type
        expose :num_nodes
        expose :endpoint
      end
    end
  end
end
...@@ -18,17 +18,39 @@ module Gitlab ...@@ -18,17 +18,39 @@ module Gitlab
result result
end.freeze end.freeze
def worker_queues(rails_path = Rails.root.to_s) QUERY_OR_OPERATOR = '|'
QUERY_AND_OPERATOR = '&'
QUERY_CONCATENATE_OPERATOR = ','
QUERY_TERM_REGEX = %r{^(\w+)(!?=)([\w#{QUERY_CONCATENATE_OPERATOR}]+)}.freeze
QUERY_PREDICATES = {
feature_category: :to_sym,
has_external_dependencies: lambda { |value| value == 'true' },
latency_sensitive: lambda { |value| value == 'true' },
name: :to_s,
resource_boundary: :to_sym
}.freeze
QueryError = Class.new(StandardError)
InvalidTerm = Class.new(QueryError)
UnknownOperator = Class.new(QueryError)
UnknownPredicate = Class.new(QueryError)
def all_queues(rails_path = Rails.root.to_s)
@worker_queues ||= {} @worker_queues ||= {}
@worker_queues[rails_path] ||= QUEUE_CONFIG_PATHS.flat_map do |path| @worker_queues[rails_path] ||= QUEUE_CONFIG_PATHS.flat_map do |path|
full_path = File.join(rails_path, path) full_path = File.join(rails_path, path)
queues = File.exist?(full_path) ? YAML.load_file(full_path) : []
# https://gitlab.com/gitlab-org/gitlab/issues/199230 File.exist?(full_path) ? YAML.load_file(full_path) : []
queues.map { |queue| queue.is_a?(Hash) ? queue[:name] : queue }
end end
end end
# rubocop:enable Gitlab/ModuleWithInstanceVariables
# Returns the queue names for all workers, reducing hash-style queue
# metadata entries to their bare :name.
def worker_queues(rails_path = Rails.root.to_s)
  # https://gitlab.com/gitlab-org/gitlab/issues/199230
  worker_names(all_queues(rails_path))
end
def expand_queues(queues, all_queues = self.worker_queues) def expand_queues(queues, all_queues = self.worker_queues)
return [] if queues.empty? return [] if queues.empty?
...@@ -40,12 +62,64 @@ module Gitlab ...@@ -40,12 +62,64 @@ module Gitlab
end end
end end
# Filters +queues+ down to the workers matching +query_string+ and
# returns their queue names.
def query_workers(query_string, queues)
  matcher = query_string_to_lambda(query_string)
  matching = queues.select(&matcher)

  worker_names(matching)
end
def clear_memoization! def clear_memoization!
if instance_variable_defined?('@worker_queues') if instance_variable_defined?('@worker_queues')
remove_instance_variable('@worker_queues') remove_instance_variable('@worker_queues')
end end
end end
# rubocop:enable Gitlab/ModuleWithInstanceVariables
private
# Normalizes a list of queue entries to plain queue names: metadata
# entries are hashes keyed by :name, other entries are already names.
def worker_names(workers)
  workers.map do |entry|
    if entry.is_a?(Hash)
      entry[:name]
    else
      entry
    end
  end
end
# Compiles a query string into a single worker predicate. The string is
# a disjunction ('|') of clauses, each clause a conjunction ('&') of
# field terms; a worker matches when any clause's terms all hold.
def query_string_to_lambda(query_string)
  disjuncts = query_string.split(QUERY_OR_OPERATOR).map do |clause|
    conjuncts = clause.split(QUERY_AND_OPERATOR).map { |term| predicate_for_term(term) }

    ->(worker) { conjuncts.all? { |pred| pred.call(worker) } }
  end

  ->(worker) { disjuncts.any? { |pred| pred.call(worker) } }
end
# Parses one "<field><op><values>" term into a worker predicate.
def predicate_for_term(term)
  parsed = QUERY_TERM_REGEX.match(term)

  raise InvalidTerm.new("Invalid term: #{term}") if parsed.nil?

  lhs = parsed[1]
  op = parsed[2]
  rhs = parsed[3]

  predicate_for_op(op, predicate_factory(lhs, rhs.split(QUERY_CONCATENATE_OPERATOR)))
end
# Applies the term operator: '=' keeps the predicate, '!=' negates it.
def predicate_for_op(op, predicate)
  if op == '='
    predicate
  elsif op == '!='
    ->(worker) { !predicate.call(worker) }
  else
    # This is unreachable because InvalidTerm will be raised instead, but
    # keeping it allows to guard against that changing in future.
    raise UnknownOperator.new("Unknown operator: #{op}")
  end
end
# Builds a predicate that checks whether a worker's +lhs+ attribute is
# among +values+, after coercing each value per QUERY_PREDICATES.
def predicate_factory(lhs, values)
  field = lhs.to_sym
  coercion = QUERY_PREDICATES[field]

  raise UnknownPredicate.new("Unknown predicate: #{lhs}") if coercion.nil?

  ->(queue) { values.map(&coercion).include?(queue[field]) }
end
end end
end end
end end
...@@ -1958,6 +1958,9 @@ msgstr "" ...@@ -1958,6 +1958,9 @@ msgstr ""
msgid "Any Milestone" msgid "Any Milestone"
msgstr "" msgstr ""
msgid "Any branch"
msgstr ""
msgid "Any eligible user" msgid "Any eligible user"
msgstr "" msgstr ""
...@@ -2033,6 +2036,9 @@ msgstr "" ...@@ -2033,6 +2036,9 @@ msgstr ""
msgid "Apply template" msgid "Apply template"
msgstr "" msgstr ""
msgid "Apply this approval rule to any branch or a specific protected branch."
msgstr ""
msgid "Applying a template will replace the existing issue description. Any changes you have made will be lost." msgid "Applying a template will replace the existing issue description. Any changes you have made will be lost."
msgstr "" msgstr ""
...@@ -2086,6 +2092,9 @@ msgstr "" ...@@ -2086,6 +2092,9 @@ msgstr ""
msgid "ApprovalRule|Rule name" msgid "ApprovalRule|Rule name"
msgstr "" msgstr ""
msgid "ApprovalRule|Target branch"
msgstr ""
msgid "ApprovalRule|e.g. QA, Security, etc." msgid "ApprovalRule|e.g. QA, Security, etc."
msgstr "" msgstr ""
...@@ -13999,6 +14008,9 @@ msgstr "" ...@@ -13999,6 +14008,9 @@ msgstr ""
msgid "Please select a group." msgid "Please select a group."
msgstr "" msgstr ""
msgid "Please select a valid target branch"
msgstr ""
msgid "Please select and add a member" msgid "Please select and add a member"
msgstr "" msgstr ""
...@@ -19587,6 +19599,12 @@ msgstr "" ...@@ -19587,6 +19599,12 @@ msgstr ""
msgid "This job has not started yet" msgid "This job has not started yet"
msgstr "" msgstr ""
msgid "This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}."
msgstr ""
msgid "This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}. View the %{deploymentLink}."
msgstr ""
msgid "This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink}." msgid "This job is an out-of-date deployment to %{environmentLink} using cluster %{clusterNameOrLink}."
msgstr "" msgstr ""
...@@ -19602,6 +19620,15 @@ msgstr "" ...@@ -19602,6 +19620,15 @@ msgstr ""
msgid "This job is archived. Only the complete pipeline can be retried." msgid "This job is archived. Only the complete pipeline can be retried."
msgstr "" msgstr ""
msgid "This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}."
msgstr ""
msgid "This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}. This will overwrite the %{deploymentLink}."
msgstr ""
msgid "This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink}."
msgstr ""
msgid "This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink}. This will overwrite the %{deploymentLink}." msgid "This job is creating a deployment to %{environmentLink} using cluster %{clusterNameOrLink}. This will overwrite the %{deploymentLink}."
msgstr "" msgstr ""
...@@ -19611,6 +19638,9 @@ msgstr "" ...@@ -19611,6 +19638,9 @@ msgstr ""
msgid "This job is creating a deployment to %{environmentLink}. This will overwrite the %{deploymentLink}." msgid "This job is creating a deployment to %{environmentLink}. This will overwrite the %{deploymentLink}."
msgstr "" msgstr ""
msgid "This job is deployed to %{environmentLink} using cluster %{clusterNameOrLink} and namespace %{kubernetesNamespace}."
msgstr ""
msgid "This job is deployed to %{environmentLink} using cluster %{clusterNameOrLink}." msgid "This job is deployed to %{environmentLink} using cluster %{clusterNameOrLink}."
msgstr "" msgstr ""
......
# frozen_string_literal: true

# Factory for DeploymentCluster records, associating a cluster and a
# deployment with a Kubernetes namespace.
FactoryBot.define do
  factory :deployment_cluster, class: 'DeploymentCluster' do
    cluster
    deployment
    # Fixed value so specs can assert against a known namespace string.
    kubernetes_namespace { 'the-namespace' }
  end
end
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
"cluster": { "cluster": {
"oneOf": [ "oneOf": [
{ "type": "null" }, { "type": "null" },
{ "$ref": "cluster_basic.json" } { "$ref": "deployment_cluster.json" }
] ]
}, },
"manual_actions": { "manual_actions": {
......
...@@ -10,6 +10,12 @@ ...@@ -10,6 +10,12 @@
{ "type": "null" }, { "type": "null" },
{ "type": "string" } { "type": "string" }
] ]
},
"kubernetes_namespace": {
"oneOf": [
{ "type": "null" },
{ "type": "string" }
]
} }
}, },
"additionalProperties": false "additionalProperties": false
......
...@@ -15,6 +15,12 @@ ...@@ -15,6 +15,12 @@
"terminal_path": { "type": "string" }, "terminal_path": { "type": "string" },
"trigger": { "$ref": "trigger.json" }, "trigger": { "$ref": "trigger.json" },
"deployment_status": { "$ref": "deployment_status.json" }, "deployment_status": { "$ref": "deployment_status.json" },
"deployment_cluster": {
"oneOf": [
{ "$ref": "../deployment_cluster.json" },
{ "type": "null" }
]
},
"runner": { "$ref": "runner.json" }, "runner": { "$ref": "runner.json" },
"runners": { "$ref": "runners.json" }, "runners": { "$ref": "runners.json" },
"has_trace": { "type": "boolean" }, "has_trace": { "type": "boolean" },
......
...@@ -4,7 +4,7 @@ import $ from 'jquery'; ...@@ -4,7 +4,7 @@ import $ from 'jquery';
import GfmAutoComplete, { membersBeforeSave } from 'ee_else_ce/gfm_auto_complete'; import GfmAutoComplete, { membersBeforeSave } from 'ee_else_ce/gfm_auto_complete';
import 'jquery.caret'; import 'jquery.caret';
import 'at.js'; import '@gitlab/at.js';
import { TEST_HOST } from 'helpers/test_constants'; import { TEST_HOST } from 'helpers/test_constants';
import { getJSONFixture } from 'helpers/fixtures'; import { getJSONFixture } from 'helpers/fixtures';
......
import _ from 'underscore'; import { uniqueId } from 'lodash';
import { DUMMY_IMAGE_URL, TEST_HOST } from 'spec/test_constants'; import { DUMMY_IMAGE_URL, TEST_HOST } from 'spec/test_constants';
import { PROJECT_BADGE } from '~/badges/constants'; import { PROJECT_BADGE } from '~/badges/constants';
export const createDummyBadge = () => { export const createDummyBadge = () => {
const id = _.uniqueId(); const id = uniqueId();
return { return {
id, id,
name: 'TestBadge', name: 'TestBadge',
......
...@@ -4,6 +4,7 @@ import mountComponent from '../../helpers/vue_mount_component_helper'; ...@@ -4,6 +4,7 @@ import mountComponent from '../../helpers/vue_mount_component_helper';
const TEST_CLUSTER_NAME = 'test_cluster'; const TEST_CLUSTER_NAME = 'test_cluster';
const TEST_CLUSTER_PATH = 'path/to/test_cluster'; const TEST_CLUSTER_PATH = 'path/to/test_cluster';
const TEST_KUBERNETES_NAMESPACE = 'this-is-a-kubernetes-namespace';
describe('Environments block', () => { describe('Environments block', () => {
const Component = Vue.extend(component); const Component = Vue.extend(component);
...@@ -28,17 +29,18 @@ describe('Environments block', () => { ...@@ -28,17 +29,18 @@ describe('Environments block', () => {
last_deployment: { ...lastDeployment }, last_deployment: { ...lastDeployment },
}); });
const createEnvironmentWithCluster = () => ({ const createDeploymentWithCluster = () => ({ name: TEST_CLUSTER_NAME, path: TEST_CLUSTER_PATH });
...environment,
last_deployment: { const createDeploymentWithClusterAndKubernetesNamespace = () => ({
...lastDeployment, name: TEST_CLUSTER_NAME,
cluster: { name: TEST_CLUSTER_NAME, path: TEST_CLUSTER_PATH }, path: TEST_CLUSTER_PATH,
}, kubernetes_namespace: TEST_KUBERNETES_NAMESPACE,
}); });
const createComponent = (deploymentStatus = {}) => { const createComponent = (deploymentStatus = {}, deploymentCluster = {}) => {
vm = mountComponent(Component, { vm = mountComponent(Component, {
deploymentStatus, deploymentStatus,
deploymentCluster,
iconStatus: status, iconStatus: status,
}); });
}; };
...@@ -62,15 +64,36 @@ describe('Environments block', () => { ...@@ -62,15 +64,36 @@ describe('Environments block', () => {
expect(findText()).toEqual('This job is deployed to environment.'); expect(findText()).toEqual('This job is deployed to environment.');
}); });
it('renders info with cluster', () => { describe('when there is a cluster', () => {
createComponent({ it('renders info with cluster', () => {
status: 'last', createComponent(
environment: createEnvironmentWithCluster(), {
status: 'last',
environment: createEnvironmentWithLastDeployment(),
},
createDeploymentWithCluster(),
);
expect(findText()).toEqual(
`This job is deployed to environment using cluster ${TEST_CLUSTER_NAME}.`,
);
}); });
expect(findText()).toEqual( describe('when there is a kubernetes namespace', () => {
`This job is deployed to environment using cluster ${TEST_CLUSTER_NAME}.`, it('renders info with cluster', () => {
); createComponent(
{
status: 'last',
environment: createEnvironmentWithLastDeployment(),
},
createDeploymentWithClusterAndKubernetesNamespace(),
);
expect(findText()).toEqual(
`This job is deployed to environment using cluster ${TEST_CLUSTER_NAME} and namespace ${TEST_KUBERNETES_NAMESPACE}.`,
);
});
});
}); });
}); });
...@@ -89,15 +112,36 @@ describe('Environments block', () => { ...@@ -89,15 +112,36 @@ describe('Environments block', () => {
expect(findJobDeploymentLink().getAttribute('href')).toEqual('bar'); expect(findJobDeploymentLink().getAttribute('href')).toEqual('bar');
}); });
it('renders info with cluster', () => { describe('when there is a cluster', () => {
createComponent({ it('renders info with cluster', () => {
status: 'out_of_date', createComponent(
environment: createEnvironmentWithCluster(), {
status: 'out_of_date',
environment: createEnvironmentWithLastDeployment(),
},
createDeploymentWithCluster(),
);
expect(findText()).toEqual(
`This job is an out-of-date deployment to environment using cluster ${TEST_CLUSTER_NAME}. View the most recent deployment.`,
);
}); });
expect(findText()).toEqual( describe('when there is a kubernetes namespace', () => {
`This job is an out-of-date deployment to environment using cluster ${TEST_CLUSTER_NAME}. View the most recent deployment.`, it('renders info with cluster', () => {
); createComponent(
{
status: 'out_of_date',
environment: createEnvironmentWithLastDeployment(),
},
createDeploymentWithClusterAndKubernetesNamespace(),
);
expect(findText()).toEqual(
`This job is an out-of-date deployment to environment using cluster ${TEST_CLUSTER_NAME} and namespace ${TEST_KUBERNETES_NAMESPACE}. View the most recent deployment.`,
);
});
});
}); });
}); });
...@@ -143,7 +187,7 @@ describe('Environments block', () => { ...@@ -143,7 +187,7 @@ describe('Environments block', () => {
}); });
describe('without last deployment', () => { describe('without last deployment', () => {
it('renders info about failed deployment', () => { it('renders info about deployment being created', () => {
createComponent({ createComponent({
status: 'creating', status: 'creating',
environment, environment,
...@@ -151,6 +195,22 @@ describe('Environments block', () => { ...@@ -151,6 +195,22 @@ describe('Environments block', () => {
expect(findText()).toEqual('This job is creating a deployment to environment.'); expect(findText()).toEqual('This job is creating a deployment to environment.');
}); });
describe('when there is a cluster', () => {
it('inclues information about the cluster', () => {
createComponent(
{
status: 'creating',
environment,
},
createDeploymentWithCluster(),
);
expect(findText()).toEqual(
`This job is creating a deployment to environment using cluster ${TEST_CLUSTER_NAME}.`,
);
});
});
}); });
describe('without environment', () => { describe('without environment', () => {
...@@ -167,10 +227,13 @@ describe('Environments block', () => { ...@@ -167,10 +227,13 @@ describe('Environments block', () => {
describe('with a cluster', () => { describe('with a cluster', () => {
it('renders the cluster link', () => { it('renders the cluster link', () => {
createComponent({ createComponent(
status: 'last', {
environment: createEnvironmentWithCluster(), status: 'last',
}); environment: createEnvironmentWithLastDeployment(),
},
createDeploymentWithCluster(),
);
expect(findText()).toEqual( expect(findText()).toEqual(
`This job is deployed to environment using cluster ${TEST_CLUSTER_NAME}.`, `This job is deployed to environment using cluster ${TEST_CLUSTER_NAME}.`,
...@@ -181,18 +244,13 @@ describe('Environments block', () => { ...@@ -181,18 +244,13 @@ describe('Environments block', () => {
describe('when the cluster is missing the path', () => { describe('when the cluster is missing the path', () => {
it('renders the name without a link', () => { it('renders the name without a link', () => {
const cluster = { createComponent(
name: 'the-cluster', {
}; status: 'last',
createComponent({ environment: createEnvironmentWithLastDeployment(),
status: 'last', },
environment: Object.assign({}, environment, { { name: 'the-cluster' },
last_deployment: { );
...lastDeployment,
cluster,
},
}),
});
expect(findText()).toContain('using cluster the-cluster.'); expect(findText()).toContain('using cluster the-cluster.');
......
# frozen_string_literal: true # frozen_string_literal: true
require 'fast_spec_helper' require 'fast_spec_helper'
require 'rspec-parameterized'
describe Gitlab::SidekiqConfig::CliMethods do describe Gitlab::SidekiqConfig::CliMethods do
let(:dummy_root) { '/tmp/' } let(:dummy_root) { '/tmp/' }
...@@ -82,7 +83,7 @@ describe Gitlab::SidekiqConfig::CliMethods do ...@@ -82,7 +83,7 @@ describe Gitlab::SidekiqConfig::CliMethods do
end end
describe '.expand_queues' do describe '.expand_queues' do
let(:all_queues) do let(:worker_queues) do
['cronjob:stuck_import_jobs', 'cronjob:stuck_merge_jobs', 'post_receive'] ['cronjob:stuck_import_jobs', 'cronjob:stuck_merge_jobs', 'post_receive']
end end
...@@ -92,25 +93,125 @@ describe Gitlab::SidekiqConfig::CliMethods do ...@@ -92,25 +93,125 @@ describe Gitlab::SidekiqConfig::CliMethods do
expect(described_class.expand_queues(['cronjob'])) expect(described_class.expand_queues(['cronjob']))
.to contain_exactly('cronjob') .to contain_exactly('cronjob')
allow(described_class).to receive(:worker_queues).and_return(all_queues) allow(described_class).to receive(:worker_queues).and_return(worker_queues)
expect(described_class.expand_queues(['cronjob'])) expect(described_class.expand_queues(['cronjob']))
.to contain_exactly('cronjob', 'cronjob:stuck_import_jobs', 'cronjob:stuck_merge_jobs') .to contain_exactly('cronjob', 'cronjob:stuck_import_jobs', 'cronjob:stuck_merge_jobs')
end end
it 'expands queue namespaces to concrete queue names' do it 'expands queue namespaces to concrete queue names' do
expect(described_class.expand_queues(['cronjob'], all_queues)) expect(described_class.expand_queues(['cronjob'], worker_queues))
.to contain_exactly('cronjob', 'cronjob:stuck_import_jobs', 'cronjob:stuck_merge_jobs') .to contain_exactly('cronjob', 'cronjob:stuck_import_jobs', 'cronjob:stuck_merge_jobs')
end end
it 'lets concrete queue names pass through' do it 'lets concrete queue names pass through' do
expect(described_class.expand_queues(['post_receive'], all_queues)) expect(described_class.expand_queues(['post_receive'], worker_queues))
.to contain_exactly('post_receive') .to contain_exactly('post_receive')
end end
it 'lets unknown queues pass through' do it 'lets unknown queues pass through' do
expect(described_class.expand_queues(['unknown'], all_queues)) expect(described_class.expand_queues(['unknown'], worker_queues))
.to contain_exactly('unknown') .to contain_exactly('unknown')
end end
end end
describe '.query_workers' do
  using RSpec::Parameterized::TableSyntax

  # Four fake workers whose attributes differ pairwise, so each query in
  # the table below selects a distinct, verifiable subset.
  let(:queues) do
    [
      {
        name: 'a',
        feature_category: :category_a,
        has_external_dependencies: false,
        latency_sensitive: false,
        resource_boundary: :cpu
      },
      {
        name: 'a_2',
        feature_category: :category_a,
        has_external_dependencies: false,
        latency_sensitive: true,
        resource_boundary: :none
      },
      {
        name: 'b',
        feature_category: :category_b,
        has_external_dependencies: true,
        latency_sensitive: true,
        resource_boundary: :memory
      },
      {
        name: 'c',
        feature_category: :category_c,
        has_external_dependencies: false,
        latency_sensitive: false,
        resource_boundary: :memory
      }
    ]
  end

  context 'with valid input' do
    # Each row: query string | names of the workers it should select.
    where(:query, :selected_queues) do
      # feature_category
      'feature_category=category_a' | %w(a a_2)
      'feature_category=category_a,category_c' | %w(a a_2 c)
      'feature_category=category_a|feature_category=category_c' | %w(a a_2 c)
      'feature_category!=category_a' | %w(b c)

      # has_external_dependencies
      'has_external_dependencies=true' | %w(b)
      'has_external_dependencies=false' | %w(a a_2 c)
      'has_external_dependencies=true,false' | %w(a a_2 b c)
      'has_external_dependencies=true|has_external_dependencies=false' | %w(a a_2 b c)
      'has_external_dependencies!=true' | %w(a a_2 c)

      # latency_sensitive
      'latency_sensitive=true' | %w(a_2 b)
      'latency_sensitive=false' | %w(a c)
      'latency_sensitive=true,false' | %w(a a_2 b c)
      'latency_sensitive=true|latency_sensitive=false' | %w(a a_2 b c)
      'latency_sensitive!=true' | %w(a c)

      # name
      'name=a' | %w(a)
      'name=a,b' | %w(a b)
      'name=a,a_2|name=b' | %w(a a_2 b)
      'name!=a,a_2' | %w(b c)

      # resource_boundary
      'resource_boundary=memory' | %w(b c)
      'resource_boundary=memory,cpu' | %w(a b c)
      'resource_boundary=memory|resource_boundary=cpu' | %w(a b c)
      'resource_boundary!=memory,cpu' | %w(a_2)

      # combinations
      'feature_category=category_a&latency_sensitive=true' | %w(a_2)
      'feature_category=category_a&latency_sensitive=true|feature_category=category_c' | %w(a_2 c)
    end

    with_them do
      it do
        expect(described_class.query_workers(query, queues))
          .to match_array(selected_queues)
      end
    end
  end

  context 'with invalid input' do
    # Each row: malformed query string | the error class it should raise.
    where(:query, :error) do
      'feature_category="category_a"' | described_class::InvalidTerm
      'feature_category=' | described_class::InvalidTerm
      'feature_category~category_a' | described_class::InvalidTerm
      'worker_name=a' | described_class::UnknownPredicate
    end

    with_them do
      it do
        expect { described_class.query_workers(query, queues) }
          .to raise_error(error)
      end
    end
  end
end
end end
...@@ -18,6 +18,7 @@ describe Deployment do ...@@ -18,6 +18,7 @@ describe Deployment do
it { is_expected.to delegate_method(:commit).to(:project) } it { is_expected.to delegate_method(:commit).to(:project) }
it { is_expected.to delegate_method(:commit_title).to(:commit).as(:try) } it { is_expected.to delegate_method(:commit_title).to(:commit).as(:try) }
it { is_expected.to delegate_method(:manual_actions).to(:deployable).as(:try) } it { is_expected.to delegate_method(:manual_actions).to(:deployable).as(:try) }
it { is_expected.to delegate_method(:kubernetes_namespace).to(:deployment_cluster).as(:kubernetes_namespace) }
it { is_expected.to validate_presence_of(:ref) } it { is_expected.to validate_presence_of(:ref) }
it { is_expected.to validate_presence_of(:sha) } it { is_expected.to validate_presence_of(:sha) }
......
...@@ -2,9 +2,9 @@ ...@@ -2,9 +2,9 @@
require 'spec_helper' require 'spec_helper'
describe ClusterBasicEntity do describe DeploymentClusterEntity do
describe '#as_json' do describe '#as_json' do
subject { described_class.new(cluster, request: request).as_json } subject { described_class.new(deployment, request: request).as_json }
let(:maintainer) { create(:user) } let(:maintainer) { create(:user) }
let(:developer) { create(:user) } let(:developer) { create(:user) }
...@@ -12,26 +12,30 @@ describe ClusterBasicEntity do ...@@ -12,26 +12,30 @@ describe ClusterBasicEntity do
let(:request) { double(:request, current_user: current_user) } let(:request) { double(:request, current_user: current_user) }
let(:project) { create(:project) } let(:project) { create(:project) }
let(:cluster) { create(:cluster, name: 'the-cluster', projects: [project]) } let(:cluster) { create(:cluster, name: 'the-cluster', projects: [project]) }
let(:deployment) { create(:deployment, cluster: cluster) }
let!(:deployment_cluster) { create(:deployment_cluster, cluster: cluster, deployment: deployment) }
before do before do
project.add_maintainer(maintainer) project.add_maintainer(maintainer)
project.add_developer(developer) project.add_developer(developer)
end end
it 'matches cluster_basic entity schema' do it 'matches deployment_cluster entity schema' do
expect(subject.as_json).to match_schema('cluster_basic') expect(subject.as_json).to match_schema('deployment_cluster')
end end
it 'exposes the cluster details' do it 'exposes the cluster details' do
expect(subject[:name]).to eq('the-cluster') expect(subject[:name]).to eq('the-cluster')
expect(subject[:path]).to eq("/#{project.full_path}/-/clusters/#{cluster.id}") expect(subject[:path]).to eq("/#{project.full_path}/-/clusters/#{cluster.id}")
expect(subject[:kubernetes_namespace]).to eq(deployment_cluster.kubernetes_namespace)
end end
context 'when the user does not have permission to view the cluster' do context 'when the user does not have permission to view the cluster' do
let(:current_user) { developer } let(:current_user) { developer }
it 'does not include the path' do it 'does not include the path nor the namespace' do
expect(subject[:path]).to be_nil expect(subject[:path]).to be_nil
expect(subject[:kubernetes_namespace]).to be_nil
end end
end end
end end
......
...@@ -705,6 +705,11 @@ ...@@ -705,6 +705,11 @@
exec-sh "^0.3.2" exec-sh "^0.3.2"
minimist "^1.2.0" minimist "^1.2.0"
"@gitlab/at.js@^1.5.5":
version "1.5.5"
resolved "https://registry.yarnpkg.com/@gitlab/at.js/-/at.js-1.5.5.tgz#5f6bfe6baaef360daa9b038fa78798d7a6a916b4"
integrity sha512-282Dn3SPVsUHVDhMsXgfnv+Rzog0uxecjttxGRQvxh25es1+xvkGQFsvJfkSKJ3X1kHVkSjKf+Tt5Rra+Jhp9g==
"@gitlab/eslint-config@^2.1.2": "@gitlab/eslint-config@^2.1.2":
version "2.1.2" version "2.1.2"
resolved "https://registry.yarnpkg.com/@gitlab/eslint-config/-/eslint-config-2.1.2.tgz#9f4011d3bf15f3e2668a1faa754f0b9804f23f8f" resolved "https://registry.yarnpkg.com/@gitlab/eslint-config/-/eslint-config-2.1.2.tgz#9f4011d3bf15f3e2668a1faa754f0b9804f23f8f"
...@@ -740,10 +745,10 @@ ...@@ -740,10 +745,10 @@
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-1.96.0.tgz#1d32730389e94358dc245e8336912523446d1269" resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-1.96.0.tgz#1d32730389e94358dc245e8336912523446d1269"
integrity sha512-mhg6kndxDhwjWChKhs5utO6PowlOyFdaCXUrkkxxe2H3cd8DYa40QOEcJeUrSIhkmgIMVesUawesx5tt4Bnnnw== integrity sha512-mhg6kndxDhwjWChKhs5utO6PowlOyFdaCXUrkkxxe2H3cd8DYa40QOEcJeUrSIhkmgIMVesUawesx5tt4Bnnnw==
"@gitlab/ui@^9.6.0": "@gitlab/ui@^9.8.0":
version "9.6.0" version "9.8.0"
resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-9.6.0.tgz#13119a56a34be34fd07e761cab0af3c00462159d" resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-9.8.0.tgz#b1a0b5f1f6ac9fdb19b64d74f0f729e3ec182495"
integrity sha512-R0pUa30l/JX/+1K/rZGAjDvCLLoQuodwCxBNzQ5U1ylnnfGclVrM2rBlZT3UlWnMkb9BRhTPn6uoC/HBOAo37g== integrity sha512-0VjSTjCCtevdoeByxf5o/OimzV3zt1MMH5DlZSqakML38uoOM0WpgXI/4xAipzfYwiKUW+IWbuyZGJ3ucaJnhQ==
dependencies: dependencies:
"@babel/standalone" "^7.0.0" "@babel/standalone" "^7.0.0"
"@gitlab/vue-toasted" "^1.3.0" "@gitlab/vue-toasted" "^1.3.0"
...@@ -1774,11 +1779,6 @@ asynckit@^0.4.0: ...@@ -1774,11 +1779,6 @@ asynckit@^0.4.0:
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= integrity sha1-x57Zf380y48robyXkLzDZkdLS3k=
at.js@^1.5.4:
version "1.5.4"
resolved "https://registry.yarnpkg.com/at.js/-/at.js-1.5.4.tgz#8fc60cc80eadbe4874449b166a818e7ae1d784c1"
integrity sha512-G8mgUb/PqShPoH8AyjuxsTGvIr1o716BtQUKDM44C8qN2W615y7KGJ68MlTGamd0J0D/m28emUkzagaHTdrGZw==
atob@^2.1.1: atob@^2.1.1:
version "2.1.2" version "2.1.2"
resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment