Commit 3290d466 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent c6b3ec3f
@@ -30,7 +30,7 @@
     policy: pull

 .use-pg9:
-  image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.5-golang-1.14-git-2.24-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
+  image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.5-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
   services:
     - name: postgres:9.6.17
       command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
@@ -63,7 +63,7 @@
   key: "debian-stretch-ruby-2.6.5-pg11-node-12.x"

 .use-pg9-ee:
-  image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.5-golang-1.14-git-2.24-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
+  image: "registry.gitlab.com/gitlab-org/gitlab-build-images:ruby-2.6.5-golang-1.14-git-2.26-lfs-2.9-chrome-73.0-node-12.x-yarn-1.21-postgresql-9.6-graphicsmagick-1.3.34"
   services:
     - name: postgres:9.6.17
       command: ["postgres", "-c", "fsync=off", "-c", "synchronous_commit=off", "-c", "full_page_writes=off"]
......
@@ -81,7 +81,7 @@ review-build-cng:
 .review-workflow-base:
   extends:
     - .default-retry
-  image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-charts-build-base
+  image: registry.gitlab.com/gitlab-org/gitlab-build-images:gitlab-helm3-kubectl1.14
   variables:
     HOST_SUFFIX: "${CI_ENVIRONMENT_SLUG}"
     DOMAIN: "-${CI_ENVIRONMENT_SLUG}.${REVIEW_APPS_DOMAIN}"
@@ -113,7 +113,6 @@ review-deploy:
   script:
     - check_kube_domain
     - ensure_namespace
-    - install_tiller
     - install_external_dns
     - download_chart
     - date
@@ -149,6 +148,7 @@ review-stop-failed-deployment:
   stage: prepare
   script:
     - delete_failed_release
+    - delete_helm2_release

 review-stop:
   extends:
@@ -210,8 +210,11 @@ review-qa-all:
 review-performance:
   extends:
-    - .review-docker
+    - .default-retry
     - .review:rules:mr-and-schedule-auto-if-frontend-manual-otherwise
+  image:
+    name: sitespeedio/sitespeed.io:6.3.1
+    entrypoint: [""]
   stage: qa
   # This is needed so that manual jobs with needs don't block the pipeline.
   # See https://gitlab.com/gitlab-org/gitlab/-/issues/199979.
@@ -224,7 +227,7 @@ review-performance:
     - wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
     - mkdir -p sitespeed-results
   script:
-    - docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "${CI_ENVIRONMENT_URL}"
+    - /start.sh --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "${CI_ENVIRONMENT_URL}"
   after_script:
     - mv sitespeed-results/data/performance.json performance.json
   artifacts:
......
@@ -12,6 +12,20 @@ import { s__, sprintf } from '../../locale';
 import { PROMETHEUS_TIMEOUT } from '../constants';

+function prometheusMetricQueryParams(timeRange) {
+  const { start, end } = convertToFixedRange(timeRange);
+
+  const timeDiff = (new Date(end) - new Date(start)) / 1000;
+  const minStep = 60;
+  const queryDataPoints = 600;
+
+  return {
+    start_time: start,
+    end_time: end,
+    step: Math.max(minStep, Math.ceil(timeDiff / queryDataPoints)),
+  };
+}
+
 function backOffRequest(makeRequestCallback) {
   return backOff((next, stop) => {
     makeRequestCallback()
@@ -26,6 +40,20 @@ function backOffRequest(makeRequestCallback) {
   }, PROMETHEUS_TIMEOUT);
 }

+function getPrometheusMetricResult(prometheusEndpoint, params) {
+  return backOffRequest(() => axios.get(prometheusEndpoint, { params }))
+    .then(res => res.data)
+    .then(response => {
+      if (response.status === 'error') {
+        throw new Error(response.error);
+      }
+      return response.data.result;
+    });
+}
+
+// Setup
+
 export const setGettingStartedEmptyState = ({ commit }) => {
   commit(types.SET_GETTING_STARTED_EMPTY_STATE);
 };
@@ -47,56 +75,26 @@ export const setShowErrorBanner = ({ commit }, enabled) => {
   commit(types.SET_SHOW_ERROR_BANNER, enabled);
 };

-export const requestMetricsDashboard = ({ commit }) => {
-  commit(types.REQUEST_METRICS_DATA);
-};
-export const receiveMetricsDashboardSuccess = ({ commit, dispatch }, { response, params }) => {
-  const { all_dashboards, dashboard, metrics_data } = response;
-  commit(types.SET_ALL_DASHBOARDS, all_dashboards);
-  commit(types.RECEIVE_METRICS_DATA_SUCCESS, dashboard);
-  commit(types.SET_ENDPOINTS, convertObjectPropsToCamelCase(metrics_data));
-  return dispatch('fetchPrometheusMetrics', params);
-};
-export const receiveMetricsDashboardFailure = ({ commit }, error) => {
-  commit(types.RECEIVE_METRICS_DATA_FAILURE, error);
-};
-export const receiveDeploymentsDataSuccess = ({ commit }, data) =>
-  commit(types.RECEIVE_DEPLOYMENTS_DATA_SUCCESS, data);
-export const receiveDeploymentsDataFailure = ({ commit }) =>
-  commit(types.RECEIVE_DEPLOYMENTS_DATA_FAILURE);
-export const requestEnvironmentsData = ({ commit }) => commit(types.REQUEST_ENVIRONMENTS_DATA);
-export const receiveEnvironmentsDataSuccess = ({ commit }, data) =>
-  commit(types.RECEIVE_ENVIRONMENTS_DATA_SUCCESS, data);
-export const receiveEnvironmentsDataFailure = ({ commit }) =>
-  commit(types.RECEIVE_ENVIRONMENTS_DATA_FAILURE);
+// All Data

 export const fetchData = ({ dispatch }) => {
-  dispatch('fetchDashboard');
-  dispatch('fetchDeploymentsData');
   dispatch('fetchEnvironmentsData');
+  dispatch('fetchDashboard');
 };

+// Metrics dashboard
+
 export const fetchDashboard = ({ state, commit, dispatch }) => {
   dispatch('requestMetricsDashboard');

   const params = {};
-  if (state.timeRange) {
-    const { start, end } = convertToFixedRange(state.timeRange);
-    params.start_time = start;
-    params.end_time = end;
-  }
   if (state.currentDashboard) {
     params.dashboard = state.currentDashboard;
   }

   return backOffRequest(() => axios.get(state.dashboardEndpoint, { params }))
     .then(resp => resp.data)
-    .then(response => dispatch('receiveMetricsDashboardSuccess', { response, params }))
+    .then(response => dispatch('receiveMetricsDashboardSuccess', { response }))
     .catch(error => {
       Sentry.captureException(error);
@@ -120,61 +118,43 @@ export const fetchDashboard = ({ state, commit, dispatch }) => {
   });
 };

-function fetchPrometheusResult(prometheusEndpoint, params) {
-  return backOffRequest(() => axios.get(prometheusEndpoint, { params }))
-    .then(res => res.data)
-    .then(response => {
-      if (response.status === 'error') {
-        throw new Error(response.error);
-      }
-      return response.data.result;
-    });
-}
-
-/**
- * Returns list of metrics in data.result
- * {"status":"success", "data":{"resultType":"matrix","result":[]}}
- *
- * @param {metric} metric
- */
-export const fetchPrometheusMetric = ({ commit }, { metric, params }) => {
-  const { start_time, end_time } = params;
-  const timeDiff = (new Date(end_time) - new Date(start_time)) / 1000;
-  const minStep = 60;
-  const queryDataPoints = 600;
-  const step = metric.step ? metric.step : Math.max(minStep, Math.ceil(timeDiff / queryDataPoints));
-  const queryParams = {
-    start_time,
-    end_time,
-    step,
-  };
-  commit(types.REQUEST_METRIC_RESULT, { metricId: metric.metricId });
-  return fetchPrometheusResult(metric.prometheusEndpointPath, queryParams)
-    .then(result => {
-      commit(types.RECEIVE_METRIC_RESULT_SUCCESS, { metricId: metric.metricId, result });
-    })
-    .catch(error => {
-      Sentry.captureException(error);
-      commit(types.RECEIVE_METRIC_RESULT_FAILURE, { metricId: metric.metricId, error });
-      // Continue to throw error so the dashboard can notify using createFlash
-      throw error;
-    });
-};
+export const requestMetricsDashboard = ({ commit }) => {
+  commit(types.REQUEST_METRICS_DASHBOARD);
+};
+export const receiveMetricsDashboardSuccess = ({ commit, dispatch }, { response }) => {
+  const { all_dashboards, dashboard, metrics_data } = response;
+  commit(types.SET_ALL_DASHBOARDS, all_dashboards);
+  commit(types.RECEIVE_METRICS_DASHBOARD_SUCCESS, dashboard);
+  commit(types.SET_ENDPOINTS, convertObjectPropsToCamelCase(metrics_data));
+  return dispatch('fetchPrometheusMetrics');
+};
+export const receiveMetricsDashboardFailure = ({ commit }, error) => {
+  commit(types.RECEIVE_METRICS_DASHBOARD_FAILURE, error);
+};

-export const fetchPrometheusMetrics = ({ state, commit, dispatch, getters }, params) => {
-  commit(types.REQUEST_METRICS_DATA);
+// Metrics
+
+/**
+ * Loads timeseries data: Prometheus data points and deployment data from the project
+ * @param {Object} Vuex store
+ */
+export const fetchPrometheusMetrics = ({ state, dispatch, getters }) => {
+  dispatch('fetchDeploymentsData');
+
+  if (!state.timeRange) {
+    createFlash(s__(`Metrics|Invalid time range, please verify.`), 'warning');
+    return Promise.reject();
+  }
+
+  const defaultQueryParams = prometheusMetricQueryParams(state.timeRange);

   const promises = [];
   state.dashboard.panelGroups.forEach(group => {
     group.panels.forEach(panel => {
       panel.metrics.forEach(metric => {
-        promises.push(dispatch('fetchPrometheusMetric', { metric, params }));
+        promises.push(dispatch('fetchPrometheusMetric', { metric, defaultQueryParams }));
       });
     });
   });
@@ -192,6 +172,35 @@ export const fetchPrometheusMetrics = ({ state, commit, dispatch, getters }, par
   });
 };

+/**
+ * Returns list of metrics in data.result
+ * {"status":"success", "data":{"resultType":"matrix","result":[]}}
+ *
+ * @param {metric} metric
+ */
+export const fetchPrometheusMetric = ({ commit }, { metric, defaultQueryParams }) => {
+  const queryParams = { ...defaultQueryParams };
+  if (metric.step) {
+    queryParams.step = metric.step;
+  }
+
+  commit(types.REQUEST_METRIC_RESULT, { metricId: metric.metricId });
+
+  return getPrometheusMetricResult(metric.prometheusEndpointPath, queryParams)
+    .then(result => {
+      commit(types.RECEIVE_METRIC_RESULT_SUCCESS, { metricId: metric.metricId, result });
+    })
+    .catch(error => {
+      Sentry.captureException(error);
+      commit(types.RECEIVE_METRIC_RESULT_FAILURE, { metricId: metric.metricId, error });
+      // Continue to throw error so the dashboard can notify using createFlash
+      throw error;
+    });
+};
+
+// Deployments
+
 export const fetchDeploymentsData = ({ state, dispatch }) => {
   if (!state.deploymentsEndpoint) {
     return Promise.resolve([]);
@@ -212,6 +221,14 @@ export const fetchDeploymentsData = ({ state, dispatch }) => {
     createFlash(s__('Metrics|There was an error getting deployment information.'));
   });
 };
+
+export const receiveDeploymentsDataSuccess = ({ commit }, data) => {
+  commit(types.RECEIVE_DEPLOYMENTS_DATA_SUCCESS, data);
+};
+export const receiveDeploymentsDataFailure = ({ commit }) => {
+  commit(types.RECEIVE_DEPLOYMENTS_DATA_FAILURE);
+};
+
+// Environments

 export const fetchEnvironmentsData = ({ state, dispatch }) => {
   dispatch('requestEnvironmentsData');
@@ -241,6 +258,17 @@ export const fetchEnvironmentsData = ({ state, dispatch }) => {
     createFlash(s__('Metrics|There was an error getting environments information.'));
   });
 };
+
+export const requestEnvironmentsData = ({ commit }) => {
+  commit(types.REQUEST_ENVIRONMENTS_DATA);
+};
+export const receiveEnvironmentsDataSuccess = ({ commit }, data) => {
+  commit(types.RECEIVE_ENVIRONMENTS_DATA_SUCCESS, data);
+};
+export const receiveEnvironmentsDataFailure = ({ commit }) => {
+  commit(types.RECEIVE_ENVIRONMENTS_DATA_FAILURE);
+};
+
+// Dashboard manipulation
+
 /**
  * Set a new array of metrics to a panel group
......
-export const REQUEST_METRICS_DATA = 'REQUEST_METRICS_DATA';
-export const RECEIVE_METRICS_DATA_SUCCESS = 'RECEIVE_METRICS_DATA_SUCCESS';
-export const RECEIVE_METRICS_DATA_FAILURE = 'RECEIVE_METRICS_DATA_FAILURE';
+// Dashboard "skeleton", groups, panels and metrics
+export const REQUEST_METRICS_DASHBOARD = 'REQUEST_METRICS_DASHBOARD';
+export const RECEIVE_METRICS_DASHBOARD_SUCCESS = 'RECEIVE_METRICS_DASHBOARD_SUCCESS';
+export const RECEIVE_METRICS_DASHBOARD_FAILURE = 'RECEIVE_METRICS_DASHBOARD_FAILURE';

+// Git project deployments
 export const REQUEST_DEPLOYMENTS_DATA = 'REQUEST_DEPLOYMENTS_DATA';
 export const RECEIVE_DEPLOYMENTS_DATA_SUCCESS = 'RECEIVE_DEPLOYMENTS_DATA_SUCCESS';
 export const RECEIVE_DEPLOYMENTS_DATA_FAILURE = 'RECEIVE_DEPLOYMENTS_DATA_FAILURE';

+// Environments
 export const REQUEST_ENVIRONMENTS_DATA = 'REQUEST_ENVIRONMENTS_DATA';
 export const RECEIVE_ENVIRONMENTS_DATA_SUCCESS = 'RECEIVE_ENVIRONMENTS_DATA_SUCCESS';
 export const RECEIVE_ENVIRONMENTS_DATA_FAILURE = 'RECEIVE_ENVIRONMENTS_DATA_FAILURE';

+// Metric data points
 export const REQUEST_METRIC_RESULT = 'REQUEST_METRIC_RESULT';
 export const RECEIVE_METRIC_RESULT_SUCCESS = 'RECEIVE_METRIC_RESULT_SUCCESS';
 export const RECEIVE_METRIC_RESULT_FAILURE = 'RECEIVE_METRIC_RESULT_FAILURE';

+// Parameters and other information
 export const SET_TIME_RANGE = 'SET_TIME_RANGE';
 export const SET_ALL_DASHBOARDS = 'SET_ALL_DASHBOARDS';
 export const SET_ENDPOINTS = 'SET_ENDPOINTS';
......
@@ -74,18 +74,18 @@ export default {
   /**
    * Dashboard panels structure and global state
    */
-  [types.REQUEST_METRICS_DATA](state) {
+  [types.REQUEST_METRICS_DASHBOARD](state) {
     state.emptyState = 'loading';
     state.showEmptyState = true;
   },
-  [types.RECEIVE_METRICS_DATA_SUCCESS](state, dashboard) {
+  [types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, dashboard) {
     state.dashboard = mapToDashboardViewModel(dashboard);
     if (!state.dashboard.panelGroups.length) {
       state.emptyState = 'noData';
     }
   },
-  [types.RECEIVE_METRICS_DATA_FAILURE](state, error) {
+  [types.RECEIVE_METRICS_DASHBOARD_FAILURE](state, error) {
     state.emptyState = error ? 'unableToConnect' : 'noData';
     state.showEmptyState = true;
   },
......
 <script>
 import { GlIcon } from '@gitlab/ui';
 import { mapActions, mapGetters } from 'vuex';
 import { __ } from '~/locale';
+import LocalStorageSync from '~/vue_shared/components/local_storage_sync.vue';
 import Tracking from '~/tracking';
 import { ASC, DESC } from '../constants';
@@ -14,16 +16,20 @@ export default {
   SORT_OPTIONS,
   components: {
     GlIcon,
+    LocalStorageSync,
   },
   mixins: [Tracking.mixin()],
   computed: {
-    ...mapGetters(['sortDirection']),
+    ...mapGetters(['sortDirection', 'noteableType']),
     selectedOption() {
       return SORT_OPTIONS.find(({ key }) => this.sortDirection === key);
     },
     dropdownText() {
       return this.selectedOption.text;
     },
+    storageKey() {
+      return `sort_direction_${this.noteableType.toLowerCase()}`;
+    },
   },
   methods: {
     ...mapActions(['setDiscussionSortDirection']),
@@ -44,6 +50,11 @@ export default {
 <template>
   <div class="mr-2 d-inline-block align-bottom full-width-mobile">
+    <local-storage-sync
+      :value="sortDirection"
+      :storage-key="storageKey"
+      @input="setDiscussionSortDirection"
+    />
     <button class="btn btn-sm js-dropdown-text" data-toggle="dropdown" aria-expanded="false">
       {{ dropdownText }}
       <gl-icon name="chevron-down" />
......
@@ -4,6 +4,7 @@ import { SNIPPET_VISIBILITY_PUBLIC } from '../constants';
 import BlobHeader from '~/blob/components/blob_header.vue';
 import BlobContent from '~/blob/components/blob_content.vue';
 import { GlLoadingIcon } from '@gitlab/ui';
+import CloneDropdownButton from '~/vue_shared/components/clone_dropdown.vue';

 import GetSnippetBlobQuery from '../queries/snippet.blob.query.graphql';
 import GetBlobContent from '../queries/snippet.blob.content.query.graphql';
@@ -16,6 +17,7 @@ export default {
     BlobHeader,
     BlobContent,
     GlLoadingIcon,
+    CloneDropdownButton,
   },
   apollo: {
     blob: {
@@ -72,6 +74,9 @@ export default {
       const { richViewer, simpleViewer } = this.blob;
       return this.activeViewerType === RICH_BLOB_VIEWER ? richViewer : simpleViewer;
     },
+    canBeCloned() {
+      return this.snippet.sshUrlToRepo || this.snippet.httpUrlToRepo;
+    },
   },
   methods: {
     switchViewer(newViewer, respectHash = false) {
@@ -90,7 +95,15 @@ export default {
       class="prepend-top-20 append-bottom-20"
     />
     <article v-else class="file-holder snippet-file-content">
-      <blob-header :blob="blob" :active-viewer-type="viewer.type" @viewer-changed="switchViewer" />
+      <blob-header :blob="blob" :active-viewer-type="viewer.type" @viewer-changed="switchViewer">
+        <template #actions>
+          <clone-dropdown-button
+            v-if="canBeCloned"
+            :ssh-link="snippet.sshUrlToRepo"
+            :http-link="snippet.httpUrlToRepo"
+          />
+        </template>
+      </blob-header>
       <blob-content :loading="isContentLoading" :content="blobContent" :active-viewer="viewer" />
     </article>
   </div>
......
@@ -7,6 +7,8 @@ fragment SnippetBase on Snippet {
   updatedAt
   visibilityLevel
   webUrl
+  httpUrlToRepo
+  sshUrlToRepo
   userPermissions {
     adminSnippet
     updateSnippet
......
<script>
export default {
  props: {
    storageKey: {
      type: String,
      required: true,
    },
    value: {
      type: String,
      required: false,
      default: '',
    },
  },
  watch: {
    value(newVal) {
      this.saveValue(newVal);
    },
  },
  mounted() {
    // On mount, trigger update if we actually have a localStorageValue
    const value = this.getValue();
    if (value && this.value !== value) {
      this.$emit('input', value);
    }
  },
  methods: {
    getValue() {
      return localStorage.getItem(this.storageKey);
    },
    saveValue(val) {
      localStorage.setItem(this.storageKey, val);
    },
  },
  render() {
    return this.$slots.default;
  },
};
</script>
 .dropdown {
   position: relative;

+  // Once the new design (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/63499/designs)
+  // for Snippets is introduced and Clone button is relocated, we won't
+  // need this style.
+  // Issue for the refactoring: https://gitlab.com/gitlab-org/gitlab/-/issues/213327
+  &.gl-new-dropdown button.dropdown-toggle {
+    @include gl-display-inline-flex;
+  }

   .btn-link {
     &:hover {
       cursor: pointer;
......
@@ -59,6 +59,7 @@ module Clusters
     has_one_cluster_application :elastic_stack

     has_many :kubernetes_namespaces
+    has_many :metrics_dashboard_annotations, class_name: 'Metrics::Dashboard::Annotation', inverse_of: :cluster

     accepts_nested_attributes_for :provider_gcp, update_only: true
     accepts_nested_attributes_for :provider_aws, update_only: true
......
@@ -18,6 +18,7 @@ class Environment < ApplicationRecord
   has_many :successful_deployments, -> { success }, class_name: 'Deployment'
   has_many :active_deployments, -> { active }, class_name: 'Deployment'
   has_many :prometheus_alerts, inverse_of: :environment
+  has_many :metrics_dashboard_annotations, class_name: 'Metrics::Dashboard::Annotation', inverse_of: :environment
   has_many :self_managed_prometheus_alert_events, inverse_of: :environment

   has_one :last_deployment, -> { success.order('deployments.id DESC') }, class_name: 'Deployment'
......
# frozen_string_literal: true

module Metrics
  module Dashboard
    class Annotation < ApplicationRecord
      self.table_name = 'metrics_dashboard_annotations'

      belongs_to :environment, inverse_of: :metrics_dashboard_annotations
      belongs_to :cluster, class_name: 'Clusters::Cluster', inverse_of: :metrics_dashboard_annotations

      validates :starting_at, presence: true
      validates :description, presence: true, length: { maximum: 255 }
      validates :dashboard_path, presence: true, length: { maximum: 255 }
      validates :panel_xid, length: { maximum: 255 }
      validate :single_ownership
      validate :orphaned_annotation

      private

      def single_ownership
        return if cluster.nil? ^ environment.nil?

        errors.add(:base, s_("Metrics::Dashboard::Annotation|Annotation can't belong to both a cluster and an environment at the same time"))
      end

      def orphaned_annotation
        return if cluster.present? || environment.present?

        errors.add(:base, s_("Metrics::Dashboard::Annotation|Annotation must belong to a cluster or an environment"))
      end
    end
  end
end
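To make the two ownership validations above concrete, here is a hypothetical console sketch (not part of the commit; the `environment` and `cluster` records are assumed to exist):

```ruby
# Hypothetical console session illustrating single_ownership / orphaned_annotation.
annotation = Metrics::Dashboard::Annotation.new(
  description: 'Deployed v2.0',
  starting_at: Time.current,
  dashboard_path: '.gitlab/dashboards/custom.yml',
  environment: environment, # an existing Environment
  cluster: cluster          # an existing Clusters::Cluster
)
annotation.valid? # => false: single_ownership rejects two owners

annotation.cluster = nil
annotation.valid? # => true: exactly one owner remains

annotation.environment = nil
annotation.valid? # => false: orphaned_annotation requires an owner
```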
@@ -75,6 +75,9 @@ class GroupPolicy < BasePolicy
   rule { developer }.policy do
     enable :admin_milestone
     enable :read_package
+    enable :create_metrics_dashboard_annotation
+    enable :delete_metrics_dashboard_annotation
+    enable :update_metrics_dashboard_annotation
   end

   rule { reporter }.policy do
@@ -82,6 +85,7 @@ class GroupPolicy < BasePolicy
     enable :admin_label
     enable :admin_list
     enable :admin_issue
+    enable :read_metrics_dashboard_annotation
   end

   rule { maintainer }.policy do
......
# frozen_string_literal: true

module Metrics
  module Dashboard
    class AnnotationPolicy < BasePolicy
      delegate { @subject.cluster }
      delegate { @subject.environment }
    end
  end
end
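As a sketch of what this delegation implies (an assumption about DeclarativePolicy usage, not code from the commit), an ability check against an annotation resolves through the policy of whichever owner is set:

```ruby
# Hypothetical check: resolved through the delegated owner's policy,
# which picks up the *_metrics_dashboard_annotation rules added to
# GroupPolicy/ProjectPolicy in this commit.
Ability.allowed?(user, :delete_metrics_dashboard_annotation, annotation)
```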
@@ -224,6 +224,7 @@ class ProjectPolicy < BasePolicy
     enable :read_sentry_issue
     enable :update_sentry_issue
     enable :read_prometheus
+    enable :read_metrics_dashboard_annotation
   end

   # We define `:public_user_access` separately because there are cases in gitlab-ee
@@ -276,6 +277,9 @@ class ProjectPolicy < BasePolicy
     enable :update_deployment
     enable :create_release
     enable :update_release
+    enable :create_metrics_dashboard_annotation
+    enable :delete_metrics_dashboard_annotation
+    enable :update_metrics_dashboard_annotation
   end

   rule { can?(:developer_access) & user_confirmed? }.policy do
......
# frozen_string_literal: true

# Create Metrics::Dashboard::Annotation entry based on matched dashboard_path, environment, cluster
module Metrics
  module Dashboard
    module Annotations
      class CreateService < ::BaseService
        include Stepable

        steps :authorize_environment_access,
          :authorize_cluster_access,
          :parse_dashboard_path,
          :create

        def initialize(user, params)
          @user, @params = user, params
        end

        def execute
          execute_steps
        end

        private

        attr_reader :user, :params

        def authorize_environment_access(options)
          if environment.nil? || Ability.allowed?(user, :create_metrics_dashboard_annotation, project)
            options[:environment] = environment
            success(options)
          else
            error(s_('Metrics::Dashboard::Annotation|You are not authorized to create annotation for selected environment'))
          end
        end

        def authorize_cluster_access(options)
          if cluster.nil? || Ability.allowed?(user, :create_metrics_dashboard_annotation, cluster)
            options[:cluster] = cluster
            success(options)
          else
            error(s_('Metrics::Dashboard::Annotation|You are not authorized to create annotation for selected cluster'))
          end
        end

        def parse_dashboard_path(options)
          dashboard_path = params[:dashboard_path]

          Gitlab::Metrics::Dashboard::Finder.find_raw(project, dashboard_path: dashboard_path)
          options[:dashboard_path] = dashboard_path
          success(options)
        rescue Gitlab::Template::Finders::RepoTemplateFinder::FileNotFoundError
          error(s_('Metrics::Dashboard::Annotation|Dashboard with requested path can not be found'))
        end

        def create(options)
          annotation = Annotation.new(options.slice(:environment, :cluster, :dashboard_path).merge(params.slice(:description, :starting_at, :ending_at)))

          if annotation.save
            success(annotation: annotation)
          else
            error(annotation.errors)
          end
        end

        def environment
          params[:environment]
        end

        def cluster
          params[:cluster]
        end

        def project
          (environment || cluster)&.project
        end
      end
    end
  end
end
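For orientation, a hedged usage sketch of the interface this service provides (the `user` and `environment` objects are assumed; the `status`/`message`/`annotation` keys follow the `success`/`error` helpers used above):

```ruby
# Hypothetical invocation, e.g. from a controller or console.
result = Metrics::Dashboard::Annotations::CreateService.new(
  user,
  environment: environment,
  dashboard_path: '.gitlab/dashboards/custom.yml',
  description: 'Deployed v2.0',
  starting_at: Time.current
).execute

if result[:status] == :success
  result[:annotation] # the persisted Metrics::Dashboard::Annotation
else
  result[:message]    # authorization or validation error
end
```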
# frozen_string_literal: true

# Delete Metrics::Dashboard::Annotation entry
module Metrics
  module Dashboard
    module Annotations
      class DeleteService < ::BaseService
        include Stepable

        steps :authorize_action,
          :delete

        def initialize(user, annotation)
          @user, @annotation = user, annotation
        end

        def execute
          execute_steps
        end

        private

        attr_reader :user, :annotation

        def authorize_action(_options)
          if Ability.allowed?(user, :delete_metrics_dashboard_annotation, annotation)
            success
          else
            error(s_('Metrics::Dashboard::Annotation|You are not authorized to delete this annotation'))
          end
        end

        def delete(_options)
          if annotation.destroy
            success
          else
            error(s_('Metrics::Dashboard::Annotation|Annotation has not been deleted'))
          end
        end
      end
    end
  end
end
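And the matching delete call, again as an assumed usage sketch rather than code from this commit:

```ruby
# Hypothetical invocation: authorization runs first, then the destroy.
result = Metrics::Dashboard::Annotations::DeleteService.new(user, annotation).execute
result[:status] # => :success, or :error with result[:message]
```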
---
title: Added the clone button for Snippet view
merge_request: 28840
author:
type: added
---
title: Add metrics dashboard annotation model, relation, policy, and create and delete services to provide an interface for create and delete operations.
merge_request: 27583
author:
type: added
# frozen_string_literal: true

# See https://docs.gitlab.com/ee/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class CreateMetricsDashboardAnnotations < ActiveRecord::Migration[6.0]
  # Set this constant to true if this migration requires downtime.
  DOWNTIME = false

  def change
    create_table :metrics_dashboard_annotations do |t|
      t.datetime_with_timezone :starting_at, null: false
      t.datetime_with_timezone :ending_at
      t.references :environment, index: false, foreign_key: { on_delete: :cascade }, null: true
      t.references :cluster, index: false, foreign_key: { on_delete: :cascade }, null: true
      t.string :dashboard_path, null: false, limit: 255
      t.string :panel_xid, limit: 255
      t.text :description, null: false, limit: 255

      t.index %i(environment_id dashboard_path starting_at ending_at), where: 'environment_id IS NOT NULL', name: "index_metrics_dashboard_annotations_on_environment_id_and_3_col"
      t.index %i(cluster_id dashboard_path starting_at ending_at), where: 'cluster_id IS NOT NULL', name: "index_metrics_dashboard_annotations_on_cluster_id_and_3_columns"
    end
  end
end
@@ -3872,6 +3872,26 @@ CREATE SEQUENCE public.merge_trains_id_seq
 ALTER SEQUENCE public.merge_trains_id_seq OWNED BY public.merge_trains.id;

+CREATE TABLE public.metrics_dashboard_annotations (
+    id bigint NOT NULL,
+    starting_at timestamp with time zone NOT NULL,
+    ending_at timestamp with time zone,
+    environment_id bigint,
+    cluster_id bigint,
+    dashboard_path character varying(255) NOT NULL,
+    panel_xid character varying(255),
+    description text NOT NULL
+);
+
+CREATE SEQUENCE public.metrics_dashboard_annotations_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+ALTER SEQUENCE public.metrics_dashboard_annotations_id_seq OWNED BY public.metrics_dashboard_annotations.id;
+
 CREATE TABLE public.milestone_releases (
     milestone_id bigint NOT NULL,
     release_id bigint NOT NULL
@@ -7196,6 +7216,8 @@ ALTER TABLE ONLY public.merge_requests_closing_issues ALTER COLUMN id SET DEFAUL
 ALTER TABLE ONLY public.merge_trains ALTER COLUMN id SET DEFAULT nextval('public.merge_trains_id_seq'::regclass);

+ALTER TABLE ONLY public.metrics_dashboard_annotations ALTER COLUMN id SET DEFAULT nextval('public.metrics_dashboard_annotations_id_seq'::regclass);
+
 ALTER TABLE ONLY public.milestones ALTER COLUMN id SET DEFAULT nextval('public.milestones_id_seq'::regclass);

 ALTER TABLE ONLY public.namespace_statistics ALTER COLUMN id SET DEFAULT nextval('public.namespace_statistics_id_seq'::regclass);
@@ -7974,6 +7996,9 @@ ALTER TABLE ONLY public.merge_requests
 ALTER TABLE ONLY public.merge_trains
     ADD CONSTRAINT merge_trains_pkey PRIMARY KEY (id);

+ALTER TABLE ONLY public.metrics_dashboard_annotations
+    ADD CONSTRAINT metrics_dashboard_annotations_pkey PRIMARY KEY (id);
+
 ALTER TABLE ONLY public.milestones
     ADD CONSTRAINT milestones_pkey PRIMARY KEY (id);
@@ -9459,6 +9484,10 @@ CREATE INDEX index_merge_trains_on_pipeline_id ON public.merge_trains USING btre
 CREATE INDEX index_merge_trains_on_user_id ON public.merge_trains USING btree (user_id);

+CREATE INDEX index_metrics_dashboard_annotations_on_cluster_id_and_3_columns ON public.metrics_dashboard_annotations USING btree (cluster_id, dashboard_path, starting_at, ending_at) WHERE (cluster_id IS NOT NULL);
+
+CREATE INDEX index_metrics_dashboard_annotations_on_environment_id_and_3_col ON public.metrics_dashboard_annotations USING btree (environment_id, dashboard_path, starting_at, ending_at) WHERE (environment_id IS NOT NULL);
+
 CREATE INDEX index_milestone_releases_on_release_id ON public.milestone_releases USING btree (release_id);

 CREATE INDEX index_milestones_on_description_trigram ON public.milestones USING gin (description public.gin_trgm_ops);
@@ -11063,6 +11092,9 @@ ALTER TABLE ONLY public.suggestions
 ALTER TABLE ONLY public.requirements
     ADD CONSTRAINT fk_rails_33fed8aa4e FOREIGN KEY (author_id) REFERENCES public.users(id) ON DELETE SET NULL;

+ALTER TABLE ONLY public.metrics_dashboard_annotations
+    ADD CONSTRAINT fk_rails_345ab51043 FOREIGN KEY (cluster_id) REFERENCES public.clusters(id) ON DELETE CASCADE;
+
 ALTER TABLE ONLY public.wiki_page_slugs
     ADD CONSTRAINT fk_rails_358b46be14 FOREIGN KEY (wiki_page_meta_id) REFERENCES public.wiki_page_meta(id) ON DELETE CASCADE;
@@ -11582,6 +11614,9 @@ ALTER TABLE ONLY public.clusters
 ALTER TABLE ONLY public.analytics_cycle_analytics_group_stages
     ADD CONSTRAINT fk_rails_ae5da3409b FOREIGN KEY (group_id) REFERENCES public.namespaces(id) ON DELETE CASCADE;

+ALTER TABLE ONLY public.metrics_dashboard_annotations
+    ADD CONSTRAINT fk_rails_aeb11a7643 FOREIGN KEY (environment_id) REFERENCES public.environments(id) ON DELETE CASCADE;
+
 ALTER TABLE ONLY public.pool_repositories
     ADD CONSTRAINT fk_rails_af3f8c5d62 FOREIGN KEY (shard_id) REFERENCES public.shards(id) ON DELETE RESTRICT;
@@ -12911,6 +12946,7 @@ COPY "schema_migrations" (version) FROM STDIN;
 20200318175008
 20200319071702
 20200319123041
+20200319124127
 20200319203901
 20200320112455
 20200320123839
......
@@ -176,6 +176,14 @@ To set this limit on a self-managed installation, run the following in the
 Plan.default.limits.update!(ci_pipeline_schedules: 100)
 ```

+## Instance monitoring and metrics
+
+### Prometheus Alert JSON payloads
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/14929) in GitLab 12.6.
+
+Prometheus alert payloads sent to the `notify.json` endpoint are limited to 1 MB in size.
+
 ## Environment data on Deploy Boards

 [Deploy Boards](../user/project/deploy_boards.md) load information from Kubernetes about
......
@@ -455,11 +455,11 @@ bin/rake gettext:regenerate
 This command will update `locale/gitlab.pot` file with the newly externalized
 strings and remove any strings that aren't used anymore. You should check this
 file in. Once the changes are on master, they will be picked up by
-[Crowdin](https://translate.gitlab.com) and be presented for
+[CrowdIn](https://translate.gitlab.com) and be presented for
 translation.

 We don't need to check in any changes to the `locale/[language]/gitlab.po` files.
-They are updated automatically when [translations from Crowdin are merged](merging_translations.md).
+They are updated automatically when [translations from CrowdIn are merged](merging_translations.md).

 If there are merge conflicts in the `gitlab.pot` file, you can delete the file
 and regenerate it using the same command.
......
@@ -30,7 +30,7 @@ See [Externalization for GitLab](externalization.md).
 ### Translate strings

 The translation process is managed at <https://translate.gitlab.com>
-using [Crowdin](https://crowdin.com/).
+using [CrowdIn](https://crowdin.com/).
 You will need to create an account before you can submit translations.
 Once you are signed in, select the language you wish to contribute translations to.
@@ -51,4 +51,4 @@ able to proofread and instructions on becoming a proofreader yourself.
 Translations are typically included in the next major or minor release.

-See [Merging translations from Crowdin](merging_translations.md).
+See [Merging translations from CrowdIn](merging_translations.md).
-# Merging translations from Crowdin
+# Merging translations from CrowdIn

-Crowdin automatically syncs the `gitlab.pot` file with the Crowdin service, presenting
+CrowdIn automatically syncs the `gitlab.pot` file with the CrowdIn service, presenting
 newly added externalized strings to the community of translators.

-[GitLab Crowdin Bot](https://gitlab.com/gitlab-crowdin-bot) also creates merge requests
+[GitLab CrowdIn Bot](https://gitlab.com/gitlab-crowdin-bot) also creates merge requests
 to take newly approved translation submissions and merge them into the `locale/<language>/gitlab.po`
 files. Check the [merge requests created by `gitlab-crowdin-bot`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?scope=all&utf8=%E2%9C%93&state=opened&author_username=gitlab-crowdin-bot)
 to see new and merged merge requests.

 ## Validation

-By default Crowdin commits translations with `[skip ci]` in the commit
+By default CrowdIn commits translations with `[skip ci]` in the commit
 message. This is done to avoid a bunch of pipelines being run. Before
 merging translations, make sure to trigger a pipeline to validate
-translations, we have static analysis validating things Crowdin
+translations, we have static analysis validating things CrowdIn
 doesn't do. Create a new pipeline at `https://gitlab.com/gitlab-org/gitlab/pipelines/new`
 (need Developer access permissions) for the `master-i18n` branch.

 If there are validation errors, the easiest solution is to disapprove
-the offending string in Crowdin, leaving a comment with what is
+the offending string in CrowdIn, leaving a comment with what is
 required to fix the offense. There is an
 [issue](https://gitlab.com/gitlab-org/gitlab/issues/23256)
 suggesting to automate this process. Disapproving will exclude the
 invalid translation, the merge request will be updated within a few
 minutes.

-It might be handy to pause the integration on the Crowdin side for a
+It might be handy to pause the integration on the CrowdIn side for a
 little while so translations don't keep coming. This can be done by
-clicking `Pause sync` on the [Crowdin integration settings
+clicking `Pause sync` on the [CrowdIn integration settings
 page](https://translate.gitlab.com/project/gitlab-ee/settings#integration).

 When all failures are resolved, the translations need to be double
@@ -37,16 +37,16 @@ checked once more as discussed in [confidential issue](../../user/project/issues
 When all translations are found good and pipelines pass the
 translations can be merged into the master branch. When merging the translations,
-make sure to check the **Remove source branch** checkbox, so Crowdin recreates the
+make sure to check the **Remove source branch** checkbox, so CrowdIn recreates the
 `master-i18n` from master after the new translation was merged.

 We are discussing [automating this entire process](https://gitlab.com/gitlab-org/gitlab/issues/19896).

 ## Recreate the merge request

-Crowdin creates a new merge request as soon as the old one is closed
+CrowdIn creates a new merge request as soon as the old one is closed
 or merged. But it won't recreate the `master-i18n` branch every
-time. To force Crowdin to recreate the branch, close any [open merge
+time. To force CrowdIn to recreate the branch, close any [open merge
 request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?scope=all&utf8=%E2%9C%93&state=opened&author_username=gitlab-crowdin-bot)
 and delete the
 [`master-18n`](https://gitlab.com/gitlab-org/gitlab/-/branches/all?utf8=✓&search=master-i18n).
......
@@ -8,85 +8,85 @@ are very appreciative of the work done by translators and proofreaders!
 - Albanian
   - Proofreaders needed.
 - Amharic
-  - Tsegaselassie Tadesse - [GitLab](https://gitlab.com/tsega), [Crowdin](https://crowdin.com/profile/tsegaselassi/activity)
+  - Tsegaselassie Tadesse - [GitLab](https://gitlab.com/tsega), [CrowdIn](https://crowdin.com/profile/tsegaselassi/activity)
 - Arabic
   - Proofreaders needed.
 - Bulgarian
-  - Lyubomir Vasilev - [Crowdin](https://crowdin.com/profile/lyubomirv)
+  - Lyubomir Vasilev - [CrowdIn](https://crowdin.com/profile/lyubomirv)
 - Catalan
-  - David Planella - [GitLab](https://gitlab.com/dplanella), [Crowdin](https://crowdin.com/profile/dplanella)
+  - David Planella - [GitLab](https://gitlab.com/dplanella), [CrowdIn](https://crowdin.com/profile/dplanella)
 - Chinese Simplified 简体中文
-  - Huang Tao - [GitLab](https://gitlab.com/htve), [Crowdin](https://crowdin.com/profile/htve)
-  - Victor Wu - [GitLab](https://gitlab.com/victorwuky), [Crowdin](https://crowdin.com/profile/victorwu)
-  - Xiaogang Wen - [GitLab](https://gitlab.com/xiaogang_gitlab), [Crowdin](https://crowdin.com/profile/xiaogang_gitlab)
+  - Huang Tao - [GitLab](https://gitlab.com/htve), [CrowdIn](https://crowdin.com/profile/htve)
+  - Victor Wu - [GitLab](https://gitlab.com/victorwuky), [CrowdIn](https://crowdin.com/profile/victorwu)
+  - Xiaogang Wen - [GitLab](https://gitlab.com/xiaogang_gitlab), [CrowdIn](https://crowdin.com/profile/xiaogang_gitlab)
 - Chinese Traditional 繁體中文
-  - Weizhe Ding - [GitLab](https://gitlab.com/d.weizhe), [Crowdin](https://crowdin.com/profile/d.weizhe)
-  - Yi-Jyun Pan - [GitLab](https://gitlab.com/pan93412), [Crowdin](https://crowdin.com/profile/pan93412)
-  - Victor Wu - [GitLab](https://gitlab.com/victorwuky), [Crowdin](https://crowdin.com/profile/victorwu)
+  - Weizhe Ding - [GitLab](https://gitlab.com/d.weizhe), [CrowdIn](https://crowdin.com/profile/d.weizhe)
+  - Yi-Jyun Pan - [GitLab](https://gitlab.com/pan93412), [CrowdIn](https://crowdin.com/profile/pan93412)
+  - Victor Wu - [GitLab](https://gitlab.com/victorwuky), [CrowdIn](https://crowdin.com/profile/victorwu)
 - Chinese Traditional, Hong Kong 繁體中文 (香港)
-  - Victor Wu - [GitLab](https://gitlab.com/victorwuky), [Crowdin](https://crowdin.com/profile/victorwu)
-  - Ivan Ip - [GitLab](https://gitlab.com/lifehome), [Crowdin](https://crowdin.com/profile/lifehome)
+  - Victor Wu - [GitLab](https://gitlab.com/victorwuky), [CrowdIn](https://crowdin.com/profile/victorwu)
+  - Ivan Ip - [GitLab](https://gitlab.com/lifehome), [CrowdIn](https://crowdin.com/profile/lifehome)
 - Czech
-  - Jan Urbanec - [GitLab](https://gitlab.com/TatranskyMedved), [Crowdin](https://crowdin.com/profile/Tatranskymedved)
+  - Jan Urbanec - [GitLab](https://gitlab.com/TatranskyMedved), [CrowdIn](https://crowdin.com/profile/Tatranskymedved)
 - Danish
-  - Saederup92 - [GitLab](https://gitlab.com/Saederup92), [Crowdin](https://crowdin.com/profile/Saederup92)
+  - Saederup92 - [GitLab](https://gitlab.com/Saederup92), [CrowdIn](https://crowdin.com/profile/Saederup92)
 - Dutch
-  - Emily Hendle - [GitLab](https://gitlab.com/pundachan), [Crowdin](https://crowdin.com/profile/pandachan)
+  - Emily Hendle - [GitLab](https://gitlab.com/pundachan), [CrowdIn](https://crowdin.com/profile/pandachan)
 - Esperanto
-  - Lyubomir Vasilev - [Crowdin](https://crowdin.com/profile/lyubomirv)
+  - Lyubomir Vasilev - [CrowdIn](https://crowdin.com/profile/lyubomirv)
 - Estonian
   - Proofreaders needed.
 - Filipino
   - Proofreaders needed.
 - French
-  - Davy Defaud - [GitLab](https://gitlab.com/DevDef), [Crowdin](https://crowdin.com/profile/DevDef)
+  - Davy Defaud - [GitLab](https://gitlab.com/DevDef), [CrowdIn](https://crowdin.com/profile/DevDef)
 - Galician
-  - Antón Méixome - [Crowdin](https://crowdin.com/profile/meixome)
-  - Pedro Garcia - [GitLab](https://gitlab.com/pedgarrod), [Crowdin](https://crowdin.com/profile/breaking_pitt)
+  - Antón Méixome - [CrowdIn](https://crowdin.com/profile/meixome)
+  - Pedro Garcia - [GitLab](https://gitlab.com/pedgarrod), [CrowdIn](https://crowdin.com/profile/breaking_pitt)
 - German
-  - Michael Hahnle - [GitLab](https://gitlab.com/mhah), [Crowdin](https://crowdin.com/profile/mhah)
-  - Katrin Leinweber - [GitLab](https://gitlab.com/katrinleinweber/), [Crowdin](https://crowdin.com/profile/katrinleinweber)
+  - Michael Hahnle - [GitLab](https://gitlab.com/mhah), [CrowdIn](https://crowdin.com/profile/mhah)
+  - Katrin Leinweber - [GitLab](https://gitlab.com/katrinleinweber/), [CrowdIn](https://crowdin.com/profile/katrinleinweber)
 - Greek
   - Proofreaders needed.
 - Hebrew
-  - Yaron Shahrabani - [GitLab](https://gitlab.com/yarons), [Crowdin](https://crowdin.com/profile/YaronSh)
+  - Yaron Shahrabani - [GitLab](https://gitlab.com/yarons), [CrowdIn](https://crowdin.com/profile/YaronSh)
 - Hungarian
   - Proofreaders needed.
 - Indonesian
-  - Adi Ferdian - [GitLab](https://gitlab.com/adiferd), [Crowdin](https://crowdin.com/profile/adiferd)
-  - Ahmad Naufal Mukhtar - [GitLab](https://gitlab.com/anaufalm), [Crowdin](https://crowdin.com/profile/anaufalm)
+  - Adi Ferdian - [GitLab](https://gitlab.com/adiferd), [CrowdIn](https://crowdin.com/profile/adiferd)
+  - Ahmad Naufal Mukhtar - [GitLab](https://gitlab.com/anaufalm), [CrowdIn](https://crowdin.com/profile/anaufalm)
 - Italian
-  - Massimiliano Cuttini - [GitLab](https://gitlab.com/maxcuttins), [Crowdin](https://crowdin.com/profile/maxcuttins)
-  - Paolo Falomo - [GitLab](https://gitlab.com/paolofalomo), [Crowdin](https://crowdin.com/profile/paolo.falomo)
+  - Massimiliano Cuttini - [GitLab](https://gitlab.com/maxcuttins), [CrowdIn](https://crowdin.com/profile/maxcuttins)
+  - Paolo Falomo - [GitLab](https://gitlab.com/paolofalomo), [CrowdIn](https://crowdin.com/profile/paolo.falomo)
 - Japanese
-  - Hiroyuki Sato - [GitLab](https://gitlab.com/hiroponz), [Crowdin](https://crowdin.com/profile/hiroponz)
-  - Tomo Dote - [GitLab](https://gitlab.com/fu7mu4), [Crowdin](https://crowdin.com/profile/fu7mu4)
-  - Hiromi Nozawa - [GitLab](https://gitlab.com/hir0mi), [Crowdin](https://crowdin.com/profile/hir0mi)
+  - Hiroyuki Sato - [GitLab](https://gitlab.com/hiroponz), [CrowdIn](https://crowdin.com/profile/hiroponz)
+  - Tomo Dote - [GitLab](https://gitlab.com/fu7mu4), [CrowdIn](https://crowdin.com/profile/fu7mu4)
+  - Hiromi Nozawa - [GitLab](https://gitlab.com/hir0mi), [CrowdIn](https://crowdin.com/profile/hir0mi)
 - Korean
-  - Chang-Ho Cha - [GitLab](https://gitlab.com/changho-cha), [Crowdin](https://crowdin.com/profile/zzazang)
-  - Ji Hun Oh - [GitLab](https://gitlab.com/Baw-Appie), [Crowdin](https://crowdin.com/profile/BawAppie)
-  - Jeongwhan Choi - [GitLab](https://gitlab.com/jeongwhanchoi), [Crowdin](https://crowdin.com/profile/jeongwhanchoi)
+  - Chang-Ho Cha - [GitLab](https://gitlab.com/changho-cha), [CrowdIn](https://crowdin.com/profile/zzazang)
+  - Ji Hun Oh - [GitLab](https://gitlab.com/Baw-Appie), [CrowdIn](https://crowdin.com/profile/BawAppie)
+  - Jeongwhan Choi - [GitLab](https://gitlab.com/jeongwhanchoi), [CrowdIn](https://crowdin.com/profile/jeongwhanchoi)
 - Mongolian
   - Proofreaders needed.
 - Norwegian Bokmal
   - Proofreaders needed.
 - Polish
-  - Filip Mech - [GitLab](https://gitlab.com/mehenz), [Crowdin](https://crowdin.com/profile/mehenz)
-  - Maksymilian Roman - [GitLab](https://gitlab.com/villaincandle), [Crowdin](https://crowdin.com/profile/villaincandle)
+  - Filip Mech - [GitLab](https://gitlab.com/mehenz), [CrowdIn](https://crowdin.com/profile/mehenz)
+  - Maksymilian Roman - [GitLab](https://gitlab.com/villaincandle), [CrowdIn](https://crowdin.com/profile/villaincandle)
 - Portuguese
   - Proofreaders needed.
-  - Diogo Trindade - [GitLab](https://gitlab.com/luisdiogo2071317), [Crowdin](https://crowdin.com/profile/ldiogotrindade)
+  - Diogo Trindade - [GitLab](https://gitlab.com/luisdiogo2071317), [CrowdIn](https://crowdin.com/profile/ldiogotrindade)
 - Portuguese, Brazilian
-  - Paulo George Gomes Bezerra - [GitLab](https://gitlab.com/paulobezerra), [Crowdin](https://crowdin.com/profile/paulogomes.rep)
-  - André Gama - [GitLab](https://gitlab.com/andregamma), [Crowdin](https://crowdin.com/profile/ToeOficial)
+  - Paulo George Gomes Bezerra - [GitLab](https://gitlab.com/paulobezerra), [CrowdIn](https://crowdin.com/profile/paulogomes.rep)
+  - André Gama - [GitLab](https://gitlab.com/andregamma), [CrowdIn](https://crowdin.com/profile/ToeOficial)
 - Romanian
   - Proofreaders needed.
 - Russian
-  - Nikita Grylov - [GitLab](https://gitlab.com/nixel2007), [Crowdin](https://crowdin.com/profile/nixel2007)
-  - Alexy Lustin - [GitLab](https://gitlab.com/allustin), [Crowdin](https://crowdin.com/profile/lustin)
-  - Mark Minakou - [GitLab](https://gitlab.com/sandzhaj), [Crowdin](https://crowdin.com/profile/sandzhaj)
-  - NickVolynkin - [Crowdin](https://crowdin.com/profile/NickVolynkin)
-  - Andrey Komarov - [GitLab](https://gitlab.com/elkamarado), [Crowdin](https://crowdin.com/profile/kamarado)
+  - Nikita Grylov - [GitLab](https://gitlab.com/nixel2007), [CrowdIn](https://crowdin.com/profile/nixel2007)
+  - Alexy Lustin - [GitLab](https://gitlab.com/allustin), [CrowdIn](https://crowdin.com/profile/lustin)
+  - Mark Minakou - [GitLab](https://gitlab.com/sandzhaj), [CrowdIn](https://crowdin.com/profile/sandzhaj)
+  - NickVolynkin - [CrowdIn](https://crowdin.com/profile/NickVolynkin)
+  - Andrey Komarov - [GitLab](https://gitlab.com/elkamarado), [CrowdIn](https://crowdin.com/profile/kamarado)
 - Serbian (Cyrillic)
   - Proofreaders needed.
 - Serbian (Latin)
@@ -94,18 +94,18 @@ are very appreciative of the work done by translators and proofreaders!
 - Slovak
   - Proofreaders needed.
 - Spanish
-  - Pedro Garcia - [GitLab](https://gitlab.com/pedgarrod), [Crowdin](https://crowdin.com/profile/breaking_pitt)
+  - Pedro Garcia - [GitLab](https://gitlab.com/pedgarrod), [CrowdIn](https://crowdin.com/profile/breaking_pitt)
 - Turkish
-  - Ali Demirtaş - [GitLab](https://gitlab.com/alidemirtas), [Crowdin](https://crowdin.com/profile/alidemirtas)
+  - Ali Demirtaş - [GitLab](https://gitlab.com/alidemirtas), [CrowdIn](https://crowdin.com/profile/alidemirtas)
 - Ukrainian
-  - Volodymyr Sobotovych - [GitLab](https://gitlab.com/wheleph), [Crowdin](https://crowdin.com/profile/wheleph)
-  - Andrew Vityuk - [GitLab](https://gitlab.com/3_1_3_u), [Crowdin](https://crowdin.com/profile/andruwa13)
+  - Volodymyr Sobotovych - [GitLab](https://gitlab.com/wheleph), [CrowdIn](https://crowdin.com/profile/wheleph)
+  - Andrew Vityuk - [GitLab](https://gitlab.com/3_1_3_u), [CrowdIn](https://crowdin.com/profile/andruwa13)
 - Welsh
   - Proofreaders needed.

 ## Become a proofreader

-> **Note:** Before requesting Proofreader permissions in Crowdin please make
+> **Note:** Before requesting Proofreader permissions in CrowdIn please make
 > sure that you have a history of contributing translations to the GitLab
 > project.
......
# Translating GitLab # Translating GitLab
For managing the translation process we use [Crowdin](https://crowdin.com). For managing the translation process we use [CrowdIn](https://crowdin.com).
## Using Crowdin ## Using CrowdIn
The first step is to get familiar with Crowdin. The first step is to get familiar with CrowdIn.
### Sign In ### Sign In
To contribute translations at <https://translate.gitlab.com> To contribute translations at <https://translate.gitlab.com>
you must create a Crowdin account. you must create a CrowdIn account.
You may create a new account or use any of their supported sign in services. You may create a new account or use any of their supported sign in services.
### Language Selections ### Language Selections
...@@ -24,7 +24,7 @@ GitLab is being translated into many languages. ...@@ -24,7 +24,7 @@ GitLab is being translated into many languages.
The online translation editor is the easiest way to contribute translations. The online translation editor is the easiest way to contribute translations.
![Crowdin Editor](img/crowdin-editor.png) ![CrowdIn Editor](img/crowdin-editor.png)
1. Strings for translation are listed in the left panel 1. Strings for translation are listed in the left panel
1. Translations are entered into the central panel. 1. Translations are entered into the central panel.
......
...@@ -156,6 +156,15 @@ To ensure these are successful: ...@@ -156,6 +156,15 @@ To ensure these are successful:
For example: `11.11.x` -> `12.0.x` -> `12.8.x` For example: `11.11.x` -> `12.0.x` -> `12.8.x`
### Upgrades from old versions
- `8.11.x` and earlier: you might have to upgrade to `8.12.0` specifically before you can
upgrade to `8.17.7`. This was [reported in an issue](https://gitlab.com/gitlab-org/gitlab/-/issues/207259).
- [CI changes prior to version 8.0](https://docs.gitlab.com/omnibus/update/README.html#updating-gitlab-ci-from-prior-540-to-version-714-via-omnibus-gitlab),
  when GitLab CI was merged into GitLab.
- Version specific changes in
[the Omnibus documentation](https://docs.gitlab.com/omnibus/update/README.html#version-specific-changes).
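Pinning each intermediate version is what makes these paths work in practice. A minimal sketch for a Debian-based Omnibus installation (versions are illustrative, taken from the example paths below; use `gitlab-ee` for Enterprise Edition):

```shell
# Walk the upgrade path one pinned version at a time
# instead of installing the latest package.
sudo apt-get update
sudo apt-get install gitlab-ce=8.17.7-ce.0

# Wait for migrations to complete before taking the next hop:
sudo apt-get install gitlab-ce=9.4.5-ce.0
```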
### Example upgrade paths ### Example upgrade paths
Please see the table below for some examples: Please see the table below for some examples:
...@@ -165,7 +174,7 @@ Please see the table below for some examples: ...@@ -165,7 +174,7 @@ Please see the table below for some examples:
| 9.4.5 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.4.5` | `8.17.7` is the last version in version `8` | | 9.4.5 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.4.5` | `8.17.7` is the last version in version `8` |
| 10.1.4 | 8.13.4 | `8.13.4 -> 8.17.7 -> 9.5.10 -> 10.1.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9` | | 10.1.4 | 8.13.4 | `8.13.4 -> 8.17.7 -> 9.5.10 -> 10.1.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9` |
| 11.3.4 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.5.10` -> `10.8.7` -> `11.3.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9`, `10.8.7` is the last version in version `10` | | 11.3.4 | 8.13.4 | `8.13.4` -> `8.17.7` -> `9.5.10` -> `10.8.7` -> `11.3.4` | `8.17.7` is the last version in version `8`, `9.5.10` is the last version in version `9`, `10.8.7` is the last version in version `10` |
| 12.5.8 | 11.3.4 | `11.3.4` -> `11.11.8` -> `12.0.12` -> `12.5.8` | `11.11.8` is the last version in version `11`. `12.0.x` [is a required step](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23211#note_272842444). | | 12.5.10 | 11.3.4 | `11.3.4` -> `11.11.8` -> `12.0.12` -> `12.5.10` | `11.11.8` is the last version in version `11`. `12.0.x` [is a required step](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23211#note_272842444). |
| 12.8.5 | 9.2.6 | `9.2.6` -> `9.5.10` -> `10.8.7` -> `11.11.8` -> `12.0.12` -> `12.8.5` | Four intermediate versions are required: the final 9.5, 10.8, 11.11 releases, plus 12.0. | | 12.8.5 | 9.2.6 | `9.2.6` -> `9.5.10` -> `10.8.7` -> `11.11.8` -> `12.0.12` -> `12.8.5` | Four intermediate versions are required: the final 9.5, 10.8, 11.11 releases, plus 12.0. |
NOTE: **Note:** NOTE: **Note:**
...@@ -173,6 +182,21 @@ Instructions for installing a specific version of GitLab or downloading the pack ...@@ -173,6 +182,21 @@ Instructions for installing a specific version of GitLab or downloading the pack
## More information ## More information
Check [our release posts](https://about.gitlab.com/releases/categories/releases/).
Each month, we publish either a major or minor release of GitLab. At the end
of those release posts there are three sections to look for: deprecations, important notes,
and upgrade barometer. These will draw your attention to:
- Steps you need to perform as part of an upgrade.
For example [8.12](https://about.gitlab.com/releases/2016/09/22/gitlab-8-12-released/#upgrade-barometer)
required the Elasticsearch index to be recreated. Upgrading from any earlier version to 8.12 or later
requires this step.
- Changes to the versions of software we support such as
[ceasing support for IE11 in GitLab 13](https://about.gitlab.com/releases/2020/03/22/gitlab-12-9-released/#ending-support-for-internet-explorer-11).
You should check all the major and minor versions you're passing over.
More information about the release procedures can be found in our More information about the release procedures can be found in our
[release documentation](https://gitlab.com/gitlab-org/release/docs). You may also want to read our [release documentation](https://gitlab.com/gitlab-org/release/docs). You may also want to read our
[Responsible Disclosure Policy](https://about.gitlab.com/security/disclosure/). [Responsible Disclosure Policy](https://about.gitlab.com/security/disclosure/).
...@@ -14,7 +14,7 @@ for merging into production. ...@@ -14,7 +14,7 @@ for merging into production.
To access the Compliance Dashboard for a group, navigate to **{shield}** **Security & Compliance > Compliance** on the group's menu. To access the Compliance Dashboard for a group, navigate to **{shield}** **Security & Compliance > Compliance** on the group's menu.
![Compliance Dashboard](img/compliance_dashboard_v12_8.png) ![Compliance Dashboard](img/compliance_dashboard_v12_10.png)
## Use cases ## Use cases
...@@ -24,6 +24,7 @@ You can use the dashboard to: ...@@ -24,6 +24,7 @@ You can use the dashboard to:
- Get an overview of the latest Merge Request for each project. - Get an overview of the latest Merge Request for each project.
- See if Merge Requests were approved and by whom. - See if Merge Requests were approved and by whom.
- See the latest [CI Pipeline](../../../ci/pipelines/index.md) result for each Merge Request.
## Permissions ## Permissions
......
...@@ -5937,6 +5937,9 @@ msgstr "" ...@@ -5937,6 +5937,9 @@ msgstr ""
msgid "Create project label" msgid "Create project label"
msgstr "" msgstr ""
msgid "Create requirement"
msgstr ""
msgid "Create wildcard: %{searchTerm}" msgid "Create wildcard: %{searchTerm}"
msgstr "" msgstr ""
...@@ -6852,6 +6855,9 @@ msgstr "" ...@@ -6852,6 +6855,9 @@ msgstr ""
msgid "Describe the goal of the changes and what reviewers should be aware of." msgid "Describe the goal of the changes and what reviewers should be aware of."
msgstr "" msgstr ""
msgid "Describe the requirement here"
msgstr ""
msgid "Description" msgid "Description"
msgstr "" msgstr ""
...@@ -12698,6 +12704,27 @@ msgstr "" ...@@ -12698,6 +12704,27 @@ msgstr ""
msgid "Metrics for environment" msgid "Metrics for environment"
msgstr "" msgstr ""
msgid "Metrics::Dashboard::Annotation|Annotation can't belong to both a cluster and an environment at the same time"
msgstr ""
msgid "Metrics::Dashboard::Annotation|Annotation has not been deleted"
msgstr ""
msgid "Metrics::Dashboard::Annotation|Annotation must belong to a cluster or an environment"
msgstr ""
msgid "Metrics::Dashboard::Annotation|Dashboard with requested path can not be found"
msgstr ""
msgid "Metrics::Dashboard::Annotation|You are not authorized to create annotation for selected cluster"
msgstr ""
msgid "Metrics::Dashboard::Annotation|You are not authorized to create annotation for selected environment"
msgstr ""
msgid "Metrics::Dashboard::Annotation|You are not authorized to delete this annotation"
msgstr ""
msgid "Metrics|Add metric" msgid "Metrics|Add metric"
msgstr "" msgstr ""
...@@ -16988,6 +17015,9 @@ msgstr "" ...@@ -16988,6 +17015,9 @@ msgstr ""
msgid "Require users to prove ownership of custom domains" msgid "Require users to prove ownership of custom domains"
msgstr "" msgstr ""
msgid "Requirement"
msgstr ""
msgid "Requirements" msgid "Requirements"
msgstr "" msgstr ""
...@@ -18647,6 +18677,9 @@ msgstr "" ...@@ -18647,6 +18677,9 @@ msgstr ""
msgid "Something went wrong while closing the %{issuable}. Please try again later" msgid "Something went wrong while closing the %{issuable}. Please try again later"
msgstr "" msgstr ""
msgid "Something went wrong while creating a requirement."
msgstr ""
msgid "Something went wrong while deleting description changes. Please try again." msgid "Something went wrong while deleting description changes. Please try again."
msgstr "" msgstr ""
...@@ -18722,6 +18755,9 @@ msgstr "" ...@@ -18722,6 +18755,9 @@ msgstr ""
msgid "Something went wrong while stopping this environment. Please try again." msgid "Something went wrong while stopping this environment. Please try again."
msgstr "" msgstr ""
msgid "Something went wrong while updating a requirement."
msgstr ""
msgid "Something went wrong while updating your list settings" msgid "Something went wrong while updating your list settings"
msgstr "" msgstr ""
......
...@@ -146,7 +146,6 @@ prometheus: ...@@ -146,7 +146,6 @@ prometheus:
install: false install: false
redis: redis:
metrics: metrics:
resources:
enabled: false enabled: false
resources: resources:
requests: requests:
......
...@@ -7,7 +7,7 @@ function deploy_exists() { ...@@ -7,7 +7,7 @@ function deploy_exists() {
echoinfo "Checking if ${release} exists in the ${namespace} namespace..." true echoinfo "Checking if ${release} exists in the ${namespace} namespace..." true
helm status --tiller-namespace "${namespace}" "${release}" >/dev/null 2>&1 helm status --namespace "${namespace}" "${release}" >/dev/null 2>&1
deploy_exists=$? deploy_exists=$?
echoinfo "Deployment status for ${release} is ${deploy_exists}" echoinfo "Deployment status for ${release} is ${deploy_exists}"
...@@ -20,15 +20,15 @@ function previous_deploy_failed() { ...@@ -20,15 +20,15 @@ function previous_deploy_failed() {
echoinfo "Checking for previous deployment of ${release}" true echoinfo "Checking for previous deployment of ${release}" true
helm status --tiller-namespace "${namespace}" "${release}" >/dev/null 2>&1 helm status --namespace "${namespace}" "${release}" >/dev/null 2>&1
local status=$? local status=$?
# if `status` is `0`, deployment exists, has a status # if `status` is `0`, deployment exists, has a status
if [ $status -eq 0 ]; then if [ $status -eq 0 ]; then
echoinfo "Previous deployment found, checking status..." echoinfo "Previous deployment found, checking status..."
deployment_status=$(helm status --tiller-namespace "${namespace}" "${release}" | grep ^STATUS | cut -d' ' -f2) deployment_status=$(helm status --namespace "${namespace}" "${release}" | grep ^STATUS | cut -d' ' -f2)
echoinfo "Previous deployment state: ${deployment_status}" echoinfo "Previous deployment state: ${deployment_status}"
if [[ "$deployment_status" == "FAILED" || "$deployment_status" == "PENDING_UPGRADE" || "$deployment_status" == "PENDING_INSTALL" ]]; then if [[ "$deployment_status" == "failed" || "$deployment_status" == "pending-upgrade" || "$deployment_status" == "pending-install" ]]; then
status=0; status=0;
else else
status=1; status=1;
...@@ -58,7 +58,7 @@ function helm_delete_release() { ...@@ -58,7 +58,7 @@ function helm_delete_release() {
echoinfo "Deleting Helm release '${release}'..." true echoinfo "Deleting Helm release '${release}'..." true
helm delete --tiller-namespace "${namespace}" --purge "${release}" helm uninstall --namespace "${namespace}" "${release}"
} }
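# Migration note (a sketch for reference, not part of the original script):
# the Helm 2 invocations above map onto Helm 3 as follows.
#   helm status --tiller-namespace "$ns" "$rel"          ->  helm status --namespace "$ns" "$rel"
#   helm delete --tiller-namespace "$ns" --purge "$rel"  ->  helm uninstall --namespace "$ns" "$rel"
#   --timeout 900                                        ->  --timeout 900s  (Helm 3 expects a duration)
# Helm 3 also drops Tiller entirely, which is why install_tiller is removed below.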
function kubectl_cleanup_release() { function kubectl_cleanup_release() {
...@@ -95,6 +95,36 @@ function delete_failed_release() { ...@@ -95,6 +95,36 @@ function delete_failed_release() {
fi fi
} }
function helm2_deploy_exists() {
local namespace="${1}"
local release="${2}"
local deploy_exists
echoinfo "Checking if Helm 2 ${release} exists in the ${namespace} namespace..." true
kubectl get cm -l OWNER=TILLER -n "${namespace}" | grep "${release}" >/dev/null 2>&1
deploy_exists=$?
echoinfo "Helm 2 release for ${release} is ${deploy_exists}"
return $deploy_exists
}
function delete_helm2_release() {
local namespace="${KUBE_NAMESPACE}"
local release="${CI_ENVIRONMENT_SLUG}"
if [ -z "${release}" ]; then
echoerr "No release given, aborting the delete!"
return
fi
if ! helm2_deploy_exists "${namespace}" "${release}"; then
echoinfo "No Review App with ${release} is currently deployed by Helm 2."
else
echoinfo "Cleaning up ${release} installed by Helm 2"
kubectl_cleanup_release "${namespace}" "${release}"
fi
}
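# Background (sketch): Helm 2's Tiller stores each release revision as a
# ConfigMap labelled OWNER=TILLER (plus NAME, STATUS, and VERSION labels),
# which is exactly what helm2_deploy_exists greps for. To inspect them:
#   kubectl get configmap -n "${KUBE_NAMESPACE}" -l OWNER=TILLER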
function get_pod() { function get_pod() {
local namespace="${KUBE_NAMESPACE}" local namespace="${KUBE_NAMESPACE}"
...@@ -148,54 +178,22 @@ function ensure_namespace() { ...@@ -148,54 +178,22 @@ function ensure_namespace() {
kubectl describe namespace "${namespace}" || kubectl create namespace "${namespace}" kubectl describe namespace "${namespace}" || kubectl create namespace "${namespace}"
} }
function install_tiller() {
local namespace="${KUBE_NAMESPACE}"
echoinfo "Checking deployment/tiller-deploy status in the ${namespace} namespace..." true
echoinfo "Initiating the Helm client..."
helm init --client-only
# Set toleration for Tiller to be installed on a specific node pool
helm init \
--tiller-namespace "${namespace}" \
--wait \
--upgrade \
--force-upgrade \
--node-selectors "app=helm" \
--replicas 3 \
--override "spec.template.spec.tolerations[0].key"="dedicated" \
--override "spec.template.spec.tolerations[0].operator"="Equal" \
--override "spec.template.spec.tolerations[0].value"="helm" \
--override "spec.template.spec.tolerations[0].effect"="NoSchedule"
kubectl rollout status --namespace "${namespace}" --watch "deployment/tiller-deploy"
if ! helm version --tiller-namespace "${namespace}" --debug; then
echo "Failed to init Tiller."
return 1
fi
}
function install_external_dns() { function install_external_dns() {
local namespace="${KUBE_NAMESPACE}" local namespace="${KUBE_NAMESPACE}"
local release="dns-gitlab-review-app" local release="dns-gitlab-review-app-helm3"
local domain local domain
domain=$(echo "${REVIEW_APPS_DOMAIN}" | awk -F. '{printf "%s.%s", $(NF-1), $NF}') domain=$(echo "${REVIEW_APPS_DOMAIN}" | awk -F. '{printf "%s.%s", $(NF-1), $NF}')
echoinfo "Installing external DNS for domain ${domain}..." true echoinfo "Installing external DNS for domain ${domain}..." true
if ! deploy_exists "${namespace}" "${release}" || previous_deploy_failed "${namespace}" "${release}" ; then if ! deploy_exists "${namespace}" "${release}" || previous_deploy_failed "${namespace}" "${release}" ; then
echoinfo "Installing external-dns Helm chart" echoinfo "Installing external-dns Helm chart"
helm repo update --tiller-namespace "${namespace}" helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# Default requested: CPU => 0, memory => 0 # Default requested: CPU => 0, memory => 0
# Chart > 2.6.1 has a problem with AWS so we're pinning it for now. helm install "${release}" bitnami/external-dns \
# See https://gitlab.com/gitlab-org/gitlab/issues/37269 and https://github.com/kubernetes-sigs/external-dns/issues/1262
helm install stable/external-dns \
--tiller-namespace "${namespace}" \
--namespace "${namespace}" \ --namespace "${namespace}" \
--version '2.6.1' \ --version '2.13.3' \
--name "${release}" \
--set provider="aws" \ --set provider="aws" \
--set aws.credentials.secretKey="${REVIEW_APPS_AWS_SECRET_KEY}" \ --set aws.credentials.secretKey="${REVIEW_APPS_AWS_SECRET_KEY}" \
--set aws.credentials.accessKey="${REVIEW_APPS_AWS_ACCESS_KEY}" \ --set aws.credentials.accessKey="${REVIEW_APPS_AWS_ACCESS_KEY}" \
...@@ -289,11 +287,10 @@ function deploy() { ...@@ -289,11 +287,10 @@ function deploy() {
HELM_CMD=$(cat << EOF HELM_CMD=$(cat << EOF
helm upgrade \ helm upgrade \
--tiller-namespace="${namespace}" \
--namespace="${namespace}" \ --namespace="${namespace}" \
--install \ --install \
--wait \ --wait \
--timeout 900 \ --timeout 900s \
--set ci.branch="${CI_COMMIT_REF_NAME}" \ --set ci.branch="${CI_COMMIT_REF_NAME}" \
--set ci.commit.sha="${CI_COMMIT_SHORT_SHA}" \ --set ci.commit.sha="${CI_COMMIT_SHORT_SHA}" \
--set ci.job.url="${CI_JOB_URL}" \ --set ci.job.url="${CI_JOB_URL}" \
......
# frozen_string_literal: true
FactoryBot.define do
factory :metrics_dashboard_annotation, class: '::Metrics::Dashboard::Annotation' do
description { "Dashbaord annoation description" }
dashboard_path { "custom_dashbaord.yml" }
starting_at { Time.current }
environment
trait :with_cluster do
cluster
environment { nil }
end
end
end
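# Usage sketch (mirrors the model specs further down; no new API assumed):
#   build(:metrics_dashboard_annotation)                 # environment-scoped
#   build(:metrics_dashboard_annotation, :with_cluster)  # cluster-scoped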
# frozen_string_literal: true
require 'spec_helper'
describe 'Comment sort direction' do
let_it_be(:project) { create(:project, :public, :repository) }
let_it_be(:issue) { create(:issue, project: project) }
let_it_be(:comment_1) { create(:note_on_issue, noteable: issue, project: project, note: 'written first') }
let_it_be(:comment_2) { create(:note_on_issue, noteable: issue, project: project, note: 'written second') }
context 'on issue page', :js do
before do
visit project_issue_path(project, issue)
end
it 'saves sort order' do
# open dropdown, and select 'Newest first'
page.within('.issuable-details') do
click_button('Oldest first')
click_button('Newest first')
end
expect(first_comment).to have_content(comment_2.note)
expect(last_comment).to have_content(comment_1.note)
visit project_issue_path(project, issue)
wait_for_requests
expect(first_comment).to have_content(comment_2.note)
expect(last_comment).to have_content(comment_1.note)
end
end
def all_comments
all('.timeline > .note.timeline-entry')
end
def first_comment
all_comments.first
end
def last_comment
all_comments.last
end
end
...@@ -65,7 +65,7 @@ describe('Time series component', () => { ...@@ -65,7 +65,7 @@ describe('Time series component', () => {
store = createStore(); store = createStore();
store.commit( store.commit(
`monitoringDashboard/${types.RECEIVE_METRICS_DATA_SUCCESS}`, `monitoringDashboard/${types.RECEIVE_METRICS_DASHBOARD_SUCCESS}`,
metricsDashboardPayload, metricsDashboardPayload,
); );
......
...@@ -202,7 +202,7 @@ describe('Dashboard', () => { ...@@ -202,7 +202,7 @@ describe('Dashboard', () => {
createMountedWrapper({ hasMetrics: true }, { stubs: ['graph-group', 'panel-type'] }); createMountedWrapper({ hasMetrics: true }, { stubs: ['graph-group', 'panel-type'] });
wrapper.vm.$store.commit( wrapper.vm.$store.commit(
`monitoringDashboard/${types.RECEIVE_METRICS_DATA_SUCCESS}`, `monitoringDashboard/${types.RECEIVE_METRICS_DASHBOARD_SUCCESS}`,
metricsDashboardPayload, metricsDashboardPayload,
); );
wrapper.vm.$store.commit( wrapper.vm.$store.commit(
......
...@@ -32,7 +32,7 @@ export const propsData = { ...@@ -32,7 +32,7 @@ export const propsData = {
export const setupComponentStore = wrapper => { export const setupComponentStore = wrapper => {
wrapper.vm.$store.commit( wrapper.vm.$store.commit(
`monitoringDashboard/${types.RECEIVE_METRICS_DATA_SUCCESS}`, `monitoringDashboard/${types.RECEIVE_METRICS_DASHBOARD_SUCCESS}`,
metricsDashboardPayload, metricsDashboardPayload,
); );
......
...@@ -5,13 +5,13 @@ import axios from '~/lib/utils/axios_utils'; ...@@ -5,13 +5,13 @@ import axios from '~/lib/utils/axios_utils';
import statusCodes from '~/lib/utils/http_status'; import statusCodes from '~/lib/utils/http_status';
import * as commonUtils from '~/lib/utils/common_utils'; import * as commonUtils from '~/lib/utils/common_utils';
import createFlash from '~/flash'; import createFlash from '~/flash';
import { defaultTimeRange } from '~/vue_shared/constants';
import store from '~/monitoring/stores'; import store from '~/monitoring/stores';
import * as types from '~/monitoring/stores/mutation_types'; import * as types from '~/monitoring/stores/mutation_types';
import { import {
fetchDashboard, fetchDashboard,
receiveMetricsDashboardSuccess, receiveMetricsDashboardSuccess,
receiveMetricsDashboardFailure,
fetchDeploymentsData, fetchDeploymentsData,
fetchEnvironmentsData, fetchEnvironmentsData,
fetchPrometheusMetrics, fetchPrometheusMetrics,
...@@ -77,42 +77,40 @@ describe('Monitoring store actions', () => { ...@@ -77,42 +77,40 @@ describe('Monitoring store actions', () => {
}); });
describe('fetchDeploymentsData', () => { describe('fetchDeploymentsData', () => {
it('commits RECEIVE_DEPLOYMENTS_DATA_SUCCESS on error', done => { it('dispatches receiveDeploymentsDataSuccess on success', () => {
const dispatch = jest.fn();
const { state } = store; const { state } = store;
state.deploymentsEndpoint = '/success'; state.deploymentsEndpoint = '/success';
mock.onGet(state.deploymentsEndpoint).reply(200, { mock.onGet(state.deploymentsEndpoint).reply(200, {
deployments: deploymentData, deployments: deploymentData,
}); });
fetchDeploymentsData({
return testAction(
fetchDeploymentsData,
null,
state, state,
dispatch, [],
}) [{ type: 'receiveDeploymentsDataSuccess', payload: deploymentData }],
.then(() => { );
expect(dispatch).toHaveBeenCalledWith('receiveDeploymentsDataSuccess', deploymentData);
done();
})
.catch(done.fail);
}); });
it('commits RECEIVE_DEPLOYMENTS_DATA_FAILURE on error', done => { it('dispatches receiveDeploymentsDataFailure on error', () => {
const dispatch = jest.fn();
const { state } = store; const { state } = store;
state.deploymentsEndpoint = '/error'; state.deploymentsEndpoint = '/error';
mock.onGet(state.deploymentsEndpoint).reply(500); mock.onGet(state.deploymentsEndpoint).reply(500);
fetchDeploymentsData({
return testAction(
fetchDeploymentsData,
null,
state, state,
dispatch, [],
}) [{ type: 'receiveDeploymentsDataFailure' }],
.then(() => { () => {
expect(dispatch).toHaveBeenCalledWith('receiveDeploymentsDataFailure'); expect(createFlash).toHaveBeenCalled();
done(); },
}) );
.catch(done.fail);
}); });
}); });
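// The testAction helper used throughout this file (imported from
// 'helpers/vuex_action_helper' in GitLab's Jest setup) has roughly this
// shape -- a sketch, not the verbatim helper:
//
//   testAction(action, payload, state, expectedMutations, expectedActions)
//
// It calls the action with a stubbed commit/dispatch, records every call,
// asserts the recorded mutations and dispatched actions match the expected
// lists, and returns a promise, so tests can simply `return testAction(...)`
// instead of wiring up `done` callbacks.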
describe('fetchEnvironmentsData', () => { describe('fetchEnvironmentsData', () => {
const dispatch = jest.fn();
const { state } = store; const { state } = store;
state.projectPath = 'gitlab-org/gitlab-test'; state.projectPath = 'gitlab-org/gitlab-test';
...@@ -164,15 +162,19 @@ describe('Monitoring store actions', () => { ...@@ -164,15 +162,19 @@ describe('Monitoring store actions', () => {
state.environmentsSearchTerm = searchTerm; state.environmentsSearchTerm = searchTerm;
mockMutate.mockReturnValue(Promise.resolve()); mockMutate.mockReturnValue(Promise.resolve());
return fetchEnvironmentsData({ return testAction(
fetchEnvironmentsData,
null,
state, state,
dispatch, [],
}).then(() => { [{ type: 'requestEnvironmentsData' }, { type: 'receiveEnvironmentsDataFailure' }],
() => {
expect(mockMutate).toHaveBeenCalledWith(mutationVariables); expect(mockMutate).toHaveBeenCalledWith(mutationVariables);
}); },
);
}); });
it('commits RECEIVE_ENVIRONMENTS_DATA_SUCCESS on success', () => { it('dispatches receiveEnvironmentsDataSuccess on success', () => {
jest.spyOn(gqClient, 'mutate').mockReturnValue( jest.spyOn(gqClient, 'mutate').mockReturnValue(
Promise.resolve({ Promise.resolve({
data: { data: {
...@@ -185,26 +187,31 @@ describe('Monitoring store actions', () => { ...@@ -185,26 +187,31 @@ describe('Monitoring store actions', () => {
}), }),
); );
return fetchEnvironmentsData({ return testAction(
fetchEnvironmentsData,
null,
state, state,
dispatch, [],
}).then(() => { [
expect(dispatch).toHaveBeenCalledWith( { type: 'requestEnvironmentsData' },
'receiveEnvironmentsDataSuccess', {
parseEnvironmentsResponse(environmentData, state.projectPath), type: 'receiveEnvironmentsDataSuccess',
payload: parseEnvironmentsResponse(environmentData, state.projectPath),
},
],
); );
}); });
});
it('commits RECEIVE_ENVIRONMENTS_DATA_FAILURE on error', () => { it('dispatches receiveEnvironmentsDataFailure on error', () => {
jest.spyOn(gqClient, 'mutate').mockReturnValue(Promise.reject()); jest.spyOn(gqClient, 'mutate').mockReturnValue(Promise.reject());
return fetchEnvironmentsData({ return testAction(
fetchEnvironmentsData,
null,
state, state,
dispatch, [],
}).then(() => { [{ type: 'requestEnvironmentsData' }, { type: 'receiveEnvironmentsDataFailure' }],
expect(dispatch).toHaveBeenCalledWith('receiveEnvironmentsDataFailure'); );
});
}); });
}); });
...@@ -266,27 +273,24 @@ describe('Monitoring store actions', () => { ...@@ -266,27 +273,24 @@ describe('Monitoring store actions', () => {
state = storeState(); state = storeState();
state.dashboardEndpoint = '/dashboard'; state.dashboardEndpoint = '/dashboard';
}); });
it('on success, dispatches receive and success actions', done => {
const params = {}; it('on success, dispatches receive and success actions', () => {
document.body.dataset.page = 'projects:environments:metrics'; document.body.dataset.page = 'projects:environments:metrics';
mock.onGet(state.dashboardEndpoint).reply(200, response); mock.onGet(state.dashboardEndpoint).reply(200, response);
fetchDashboard(
{ return testAction(
fetchDashboard,
null,
state, state,
commit, [],
dispatch, [
{ type: 'requestMetricsDashboard' },
{
type: 'receiveMetricsDashboardSuccess',
payload: { response },
}, },
params, ],
) );
.then(() => {
expect(dispatch).toHaveBeenCalledWith('requestMetricsDashboard');
expect(dispatch).toHaveBeenCalledWith('receiveMetricsDashboardSuccess', {
response,
params,
});
done();
})
.catch(done.fail);
}); });
describe('on failure', () => { describe('on failure', () => {
...@@ -299,7 +303,7 @@ describe('Monitoring store actions', () => { ...@@ -299,7 +303,7 @@ describe('Monitoring store actions', () => {
}; };
}); });
it('dispatches a failure action', done => { it('dispatches a failure', done => {
result() result()
.then(() => { .then(() => {
expect(commit).toHaveBeenCalledWith( expect(commit).toHaveBeenCalledWith(
...@@ -351,31 +355,22 @@ describe('Monitoring store actions', () => { ...@@ -351,31 +355,22 @@ describe('Monitoring store actions', () => {
let commit; let commit;
let dispatch; let dispatch;
let state; let state;
beforeEach(() => { beforeEach(() => {
commit = jest.fn(); commit = jest.fn();
dispatch = jest.fn(); dispatch = jest.fn();
state = storeState(); state = storeState();
}); });
it('stores groups ', () => {
const params = {}; it('stores groups', () => {
const response = metricsDashboardResponse; const response = metricsDashboardResponse;
receiveMetricsDashboardSuccess( receiveMetricsDashboardSuccess({ state, commit, dispatch }, { response });
{
state,
commit,
dispatch,
},
{
response,
params,
},
);
expect(commit).toHaveBeenCalledWith( expect(commit).toHaveBeenCalledWith(
types.RECEIVE_METRICS_DATA_SUCCESS, types.RECEIVE_METRICS_DASHBOARD_SUCCESS,
metricsDashboardResponse.dashboard, metricsDashboardResponse.dashboard,
); );
expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetrics', params); expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetrics');
}); });
it('sets the dashboards loaded from the repository', () => { it('sets the dashboards loaded from the repository', () => {
const params = {}; const params = {};
...@@ -395,29 +390,7 @@ describe('Monitoring store actions', () => { ...@@ -395,29 +390,7 @@ describe('Monitoring store actions', () => {
expect(commit).toHaveBeenCalledWith(types.SET_ALL_DASHBOARDS, dashboardGitResponse); expect(commit).toHaveBeenCalledWith(types.SET_ALL_DASHBOARDS, dashboardGitResponse);
}); });
}); });
describe('receiveMetricsDashboardFailure', () => {
let commit;
beforeEach(() => {
commit = jest.fn();
});
it('commits failure action', () => {
receiveMetricsDashboardFailure({
commit,
});
expect(commit).toHaveBeenCalledWith(types.RECEIVE_METRICS_DATA_FAILURE, undefined);
});
it('commits failure action with error', () => {
receiveMetricsDashboardFailure(
{
commit,
},
'uh-oh',
);
expect(commit).toHaveBeenCalledWith(types.RECEIVE_METRICS_DATA_FAILURE, 'uh-oh');
});
});
describe('fetchPrometheusMetrics', () => { describe('fetchPrometheusMetrics', () => {
const params = {};
let commit; let commit;
let dispatch; let dispatch;
let state; let state;
...@@ -427,13 +400,15 @@ describe('Monitoring store actions', () => { ...@@ -427,13 +400,15 @@ describe('Monitoring store actions', () => {
commit = jest.fn(); commit = jest.fn();
dispatch = jest.fn(); dispatch = jest.fn();
state = storeState(); state = storeState();
state.timeRange = defaultTimeRange;
}); });
it('commits empty state when state.groups is empty', done => { it('commits empty state when state.groups is empty', done => {
const getters = { const getters = {
metricsWithData: () => [], metricsWithData: () => [],
}; };
fetchPrometheusMetrics({ state, commit, dispatch, getters }, params) fetchPrometheusMetrics({ state, commit, dispatch, getters })
.then(() => { .then(() => {
expect(Tracking.event).toHaveBeenCalledWith( expect(Tracking.event).toHaveBeenCalledWith(
document.body.dataset.page, document.body.dataset.page,
...@@ -444,7 +419,9 @@ describe('Monitoring store actions', () => { ...@@ -444,7 +419,9 @@ describe('Monitoring store actions', () => {
value: 0, value: 0,
}, },
); );
expect(dispatch).not.toHaveBeenCalled(); expect(dispatch).toHaveBeenCalledTimes(1);
expect(dispatch).toHaveBeenCalledWith('fetchDeploymentsData');
expect(createFlash).not.toHaveBeenCalled(); expect(createFlash).not.toHaveBeenCalled();
done(); done();
}) })
...@@ -460,11 +437,15 @@ describe('Monitoring store actions', () => { ...@@ -460,11 +437,15 @@ describe('Monitoring store actions', () => {
metricsWithData: () => [metric.id], metricsWithData: () => [metric.id],
}; };
fetchPrometheusMetrics({ state, commit, dispatch, getters }, params) fetchPrometheusMetrics({ state, commit, dispatch, getters })
.then(() => { .then(() => {
expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', { expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', {
metric, metric,
params, defaultQueryParams: {
start_time: expect.any(String),
end_time: expect.any(String),
step: expect.any(Number),
},
}); });
expect(Tracking.event).toHaveBeenCalledWith( expect(Tracking.event).toHaveBeenCalledWith(
...@@ -487,16 +468,22 @@ describe('Monitoring store actions', () => { ...@@ -487,16 +468,22 @@ describe('Monitoring store actions', () => {
state.dashboard.panelGroups = metricsDashboardViewModel.panelGroups; state.dashboard.panelGroups = metricsDashboardViewModel.panelGroups;
const metric = state.dashboard.panelGroups[0].panels[0].metrics[0]; const metric = state.dashboard.panelGroups[0].panels[0].metrics[0];
dispatch.mockResolvedValueOnce(); // fetchDeploymentsData
// Mock having one out of four metrics failing // Mock having one out of four metrics failing
dispatch.mockRejectedValueOnce(new Error('Error fetching this metric')); dispatch.mockRejectedValueOnce(new Error('Error fetching this metric'));
dispatch.mockResolvedValue(); dispatch.mockResolvedValue();
fetchPrometheusMetrics({ state, commit, dispatch }, params) fetchPrometheusMetrics({ state, commit, dispatch })
.then(() => { .then(() => {
expect(dispatch).toHaveBeenCalledTimes(9); // one per metric expect(dispatch).toHaveBeenCalledTimes(10); // one per metric plus 1 for deployments
expect(dispatch).toHaveBeenCalledWith('fetchDeploymentsData');
expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', { expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', {
metric, metric,
params, defaultQueryParams: {
start_time: expect.any(String),
end_time: expect.any(String),
step: expect.any(Number),
},
}); });
expect(createFlash).toHaveBeenCalledTimes(1); expect(createFlash).toHaveBeenCalledTimes(1);
...@@ -508,9 +495,10 @@ describe('Monitoring store actions', () => { ...@@ -508,9 +495,10 @@ describe('Monitoring store actions', () => {
}); });
}); });
describe('fetchPrometheusMetric', () => { describe('fetchPrometheusMetric', () => {
const params = { const defaultQueryParams = {
start_time: '2019-08-06T12:40:02.184Z', start_time: '2019-08-06T12:40:02.184Z',
end_time: '2019-08-06T20:40:02.184Z', end_time: '2019-08-06T20:40:02.184Z',
step: 60,
}; };
let metric; let metric;
let state; let state;
...@@ -532,7 +520,7 @@ describe('Monitoring store actions', () => { ...@@ -532,7 +520,7 @@ describe('Monitoring store actions', () => {
testAction( testAction(
fetchPrometheusMetric, fetchPrometheusMetric,
{ metric, params }, { metric, defaultQueryParams },
state, state,
[ [
{ {
...@@ -569,7 +557,7 @@ describe('Monitoring store actions', () => { ...@@ -569,7 +557,7 @@ describe('Monitoring store actions', () => {
testAction( testAction(
fetchPrometheusMetric, fetchPrometheusMetric,
{ metric, params }, { metric, defaultQueryParams },
state, state,
[ [
{ {
...@@ -611,7 +599,7 @@ describe('Monitoring store actions', () => { ...@@ -611,7 +599,7 @@ describe('Monitoring store actions', () => {
testAction( testAction(
fetchPrometheusMetric, fetchPrometheusMetric,
{ metric, params }, { metric, defaultQueryParams },
state, state,
[ [
{ {
...@@ -646,7 +634,7 @@ describe('Monitoring store actions', () => { ...@@ -646,7 +634,7 @@ describe('Monitoring store actions', () => {
testAction( testAction(
fetchPrometheusMetric, fetchPrometheusMetric,
{ metric, params }, { metric, defaultQueryParams },
state, state,
[ [
{ {
...@@ -682,7 +670,7 @@ describe('Monitoring store actions', () => { ...@@ -682,7 +670,7 @@ describe('Monitoring store actions', () => {
testAction( testAction(
fetchPrometheusMetric, fetchPrometheusMetric,
{ metric, params }, { metric, defaultQueryParams },
state, state,
[ [
{ {
......
...@@ -51,7 +51,7 @@ describe('Monitoring store Getters', () => { ...@@ -51,7 +51,7 @@ describe('Monitoring store Getters', () => {
setupState({ setupState({
dashboard: { panelGroups: [] }, dashboard: { panelGroups: [] },
}); });
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
groups = state.dashboard.panelGroups; groups = state.dashboard.panelGroups;
}); });
...@@ -60,21 +60,21 @@ describe('Monitoring store Getters', () => { ...@@ -60,21 +60,21 @@ describe('Monitoring store Getters', () => {
}); });
it('on an empty metric with no result, returns NO_DATA', () => { it('on an empty metric with no result, returns NO_DATA', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedEmptyThroughputResult); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedEmptyThroughputResult);
expect(getMetricStates()).toEqual([metricStates.NO_DATA]); expect(getMetricStates()).toEqual([metricStates.NO_DATA]);
}); });
it('on a metric with a result, returns OK', () => { it('on a metric with a result, returns OK', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
expect(getMetricStates()).toEqual([metricStates.OK]); expect(getMetricStates()).toEqual([metricStates.OK]);
}); });
it('on a metric with an error, returns an error', () => { it('on a metric with an error, returns an error', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, { mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, {
metricId: groups[0].panels[0].metrics[0].metricId, metricId: groups[0].panels[0].metrics[0].metricId,
}); });
...@@ -83,7 +83,7 @@ describe('Monitoring store Getters', () => { ...@@ -83,7 +83,7 @@ describe('Monitoring store Getters', () => {
}); });
it('on multiple metrics with results, returns OK', () => { it('on multiple metrics with results, returns OK', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode);
...@@ -94,7 +94,7 @@ describe('Monitoring store Getters', () => { ...@@ -94,7 +94,7 @@ describe('Monitoring store Getters', () => {
expect(getMetricStates(state.dashboard.panelGroups[2].key)).toEqual([]); expect(getMetricStates(state.dashboard.panelGroups[2].key)).toEqual([]);
}); });
it('on multiple metrics errors', () => { it('on multiple metrics errors', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, { mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, {
metricId: groups[0].panels[0].metrics[0].metricId, metricId: groups[0].panels[0].metrics[0].metricId,
...@@ -113,7 +113,7 @@ describe('Monitoring store Getters', () => { ...@@ -113,7 +113,7 @@ describe('Monitoring store Getters', () => {
}); });
it('on multiple metrics with errors', () => { it('on multiple metrics with errors', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
// A success in 1 group // A success in 1 group
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
...@@ -175,27 +175,27 @@ describe('Monitoring store Getters', () => { ...@@ -175,27 +175,27 @@ describe('Monitoring store Getters', () => {
}); });
it('no loaded metric returns empty', () => { it('no loaded metric returns empty', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
expect(metricsWithData()).toEqual([]); expect(metricsWithData()).toEqual([]);
}); });
it('an empty metric, returns empty', () => { it('an empty metric, returns empty', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedEmptyThroughputResult); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedEmptyThroughputResult);
expect(metricsWithData()).toEqual([]); expect(metricsWithData()).toEqual([]);
}); });
it('a metric with results, it returns a metric', () => { it('a metric with results, it returns a metric', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
expect(metricsWithData()).toEqual([mockedQueryResultFixture.metricId]); expect(metricsWithData()).toEqual([mockedQueryResultFixture.metricId]);
}); });
it('multiple metrics with results, it return multiple metrics', () => { it('multiple metrics with results, it return multiple metrics', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode);
...@@ -206,7 +206,7 @@ describe('Monitoring store Getters', () => { ...@@ -206,7 +206,7 @@ describe('Monitoring store Getters', () => {
}); });
it('multiple metrics with results, it returns metrics filtered by group', () => { it('multiple metrics with results, it returns metrics filtered by group', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, metricsDashboardPayload);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode);
...@@ -291,7 +291,7 @@ describe('Monitoring store Getters', () => { ...@@ -291,7 +291,7 @@ describe('Monitoring store Getters', () => {
}); });
it('return no metrics when dashboard is not persisted', () => { it('return no metrics when dashboard is not persisted', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, mockData); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, mockData);
metricsSavedToDb = getters.metricsSavedToDb(state); metricsSavedToDb = getters.metricsSavedToDb(state);
expect(metricsSavedToDb).toEqual([]); expect(metricsSavedToDb).toEqual([]);
...@@ -304,7 +304,7 @@ describe('Monitoring store Getters', () => { ...@@ -304,7 +304,7 @@ describe('Monitoring store Getters', () => {
metric.metric_id = id; metric.metric_id = id;
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, mockData); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, mockData);
metricsSavedToDb = getters.metricsSavedToDb(state); metricsSavedToDb = getters.metricsSavedToDb(state);
expect(metricsSavedToDb).toEqual([`${id}_${metric.id}`]); expect(metricsSavedToDb).toEqual([`${id}_${metric.id}`]);
...@@ -321,7 +321,7 @@ describe('Monitoring store Getters', () => { ...@@ -321,7 +321,7 @@ describe('Monitoring store Getters', () => {
metric1.metric_id = id1; metric1.metric_id = id1;
metric2.metric_id = id2; metric2.metric_id = id2;
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, mockData); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](state, mockData);
metricsSavedToDb = getters.metricsSavedToDb(state); metricsSavedToDb = getters.metricsSavedToDb(state);
expect(metricsSavedToDb).toEqual([`${id1}_${metric1.id}`, `${id2}_${metric2.id}`]); expect(metricsSavedToDb).toEqual([`${id1}_${metric1.id}`, `${id2}_${metric2.id}`]);
......
...@@ -20,7 +20,7 @@ describe('Monitoring mutations', () => { ...@@ -20,7 +20,7 @@ describe('Monitoring mutations', () => {
stateCopy = state(); stateCopy = state();
}); });
describe('RECEIVE_METRICS_DATA_SUCCESS', () => { describe('RECEIVE_METRICS_DASHBOARD_SUCCESS', () => {
let payload; let payload;
const getGroups = () => stateCopy.dashboard.panelGroups; const getGroups = () => stateCopy.dashboard.panelGroups;
...@@ -29,7 +29,7 @@ describe('Monitoring mutations', () => { ...@@ -29,7 +29,7 @@ describe('Monitoring mutations', () => {
payload = metricsDashboardPayload; payload = metricsDashboardPayload;
}); });
it('adds a key to the group', () => { it('adds a key to the group', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, payload);
const groups = getGroups(); const groups = getGroups();
expect(groups[0].key).toBe('system-metrics-kubernetes-0'); expect(groups[0].key).toBe('system-metrics-kubernetes-0');
...@@ -37,7 +37,7 @@ describe('Monitoring mutations', () => { ...@@ -37,7 +37,7 @@ describe('Monitoring mutations', () => {
expect(groups[2].key).toBe('response-metrics-nginx-ingress-2'); expect(groups[2].key).toBe('response-metrics-nginx-ingress-2');
}); });
it('normalizes values', () => { it('normalizes values', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, payload);
const expectedLabel = 'Pod average (MB)'; const expectedLabel = 'Pod average (MB)';
const { label, queryRange } = getGroups()[0].panels[2].metrics[0]; const { label, queryRange } = getGroups()[0].panels[2].metrics[0];
...@@ -45,7 +45,7 @@ describe('Monitoring mutations', () => { ...@@ -45,7 +45,7 @@ describe('Monitoring mutations', () => {
expect(queryRange.length).toBeGreaterThan(0); expect(queryRange.length).toBeGreaterThan(0);
}); });
it('contains six groups, with panels with a metric each', () => { it('contains six groups, with panels with a metric each', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, payload);
const groups = getGroups(); const groups = getGroups();
...@@ -61,7 +61,7 @@ describe('Monitoring mutations', () => { ...@@ -61,7 +61,7 @@ describe('Monitoring mutations', () => {
expect(groups[1].panels[0].metrics).toHaveLength(1); expect(groups[1].panels[0].metrics).toHaveLength(1);
}); });
it('assigns metrics a metric id', () => { it('assigns metrics a metric id', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, payload);
const groups = getGroups(); const groups = getGroups();
...@@ -195,7 +195,7 @@ describe('Monitoring mutations', () => { ...@@ -195,7 +195,7 @@ describe('Monitoring mutations', () => {
describe('REQUEST_METRIC_RESULT', () => { describe('REQUEST_METRIC_RESULT', () => {
beforeEach(() => { beforeEach(() => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, dashboard); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, dashboard);
}); });
it('stores a loading state on a metric', () => { it('stores a loading state on a metric', () => {
expect(stateCopy.showEmptyState).toBe(true); expect(stateCopy.showEmptyState).toBe(true);
...@@ -218,7 +218,7 @@ describe('Monitoring mutations', () => { ...@@ -218,7 +218,7 @@ describe('Monitoring mutations', () => {
describe('RECEIVE_METRIC_RESULT_SUCCESS', () => { describe('RECEIVE_METRIC_RESULT_SUCCESS', () => {
beforeEach(() => { beforeEach(() => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, dashboard); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, dashboard);
}); });
it('clears empty state', () => { it('clears empty state', () => {
expect(stateCopy.showEmptyState).toBe(true); expect(stateCopy.showEmptyState).toBe(true);
...@@ -251,7 +251,7 @@ describe('Monitoring mutations', () => { ...@@ -251,7 +251,7 @@ describe('Monitoring mutations', () => {
describe('RECEIVE_METRIC_RESULT_FAILURE', () => { describe('RECEIVE_METRIC_RESULT_FAILURE', () => {
beforeEach(() => { beforeEach(() => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, dashboard); mutations[types.RECEIVE_METRICS_DASHBOARD_SUCCESS](stateCopy, dashboard);
}); });
it('maintains the loading state when a metric fails', () => { it('maintains the loading state when a metric fails', () => {
expect(stateCopy.showEmptyState).toBe(true); expect(stateCopy.showEmptyState).toBe(true);
......
import { shallowMount, createLocalVue } from '@vue/test-utils'; import { shallowMount, createLocalVue } from '@vue/test-utils';
import Vuex from 'vuex'; import Vuex from 'vuex';
import SortDiscussion from '~/notes/components/sort_discussion.vue'; import SortDiscussion from '~/notes/components/sort_discussion.vue';
import LocalStorageSync from '~/vue_shared/components/local_storage_sync.vue';
import createStore from '~/notes/stores'; import createStore from '~/notes/stores';
import { ASC, DESC } from '~/notes/constants'; import { ASC, DESC } from '~/notes/constants';
import Tracking from '~/tracking'; import Tracking from '~/tracking';
...@@ -21,6 +22,8 @@ describe('Sort Discussion component', () => { ...@@ -21,6 +22,8 @@ describe('Sort Discussion component', () => {
}); });
}; };
const findLocalStorageSync = () => wrapper.find(LocalStorageSync);
beforeEach(() => { beforeEach(() => {
store = createStore(); store = createStore();
jest.spyOn(Tracking, 'event'); jest.spyOn(Tracking, 'event');
...@@ -31,6 +34,22 @@ describe('Sort Discussion component', () => { ...@@ -31,6 +34,22 @@ describe('Sort Discussion component', () => {
wrapper = null; wrapper = null;
}); });
describe('default', () => {
beforeEach(() => {
createComponent();
});
it('has local storage sync', () => {
expect(findLocalStorageSync().exists()).toBe(true);
});
it('calls setDiscussionSortDirection when update is emitted', () => {
findLocalStorageSync().vm.$emit('input', ASC);
expect(store.dispatch).toHaveBeenCalledWith('setDiscussionSortDirection', ASC);
});
});
describe('when asc', () => { describe('when asc', () => {
describe('when the dropdown is clicked', () => { describe('when the dropdown is clicked', () => {
it('calls the right actions', () => { it('calls the right actions', () => {
......
import { shallowMount } from '@vue/test-utils';
import LocalStorageSync from '~/vue_shared/components/local_storage_sync.vue';
describe('Local Storage Sync', () => {
let wrapper;
const createComponent = ({ props = {}, slots = {} } = {}) => {
wrapper = shallowMount(LocalStorageSync, {
propsData: props,
slots,
});
};
afterEach(() => {
wrapper.destroy();
wrapper = null;
localStorage.clear();
});
it('is a renderless component', () => {
const html = '<div class="test-slot"></div>';
createComponent({
props: {
storageKey: 'key',
},
slots: {
default: html,
},
});
expect(wrapper.html()).toBe(html);
});
describe('localStorage empty', () => {
const storageKey = 'issue_list_order';
it('does not emit input event', () => {
createComponent({
props: {
storageKey,
value: 'ascending',
},
});
expect(wrapper.emitted('input')).toBeFalsy();
});
it('saves updated value to localStorage', () => {
createComponent({
props: {
storageKey,
value: 'ascending',
},
});
const newValue = 'descending';
wrapper.setProps({
value: newValue,
});
return wrapper.vm.$nextTick().then(() => {
expect(localStorage.getItem(storageKey)).toBe(newValue);
});
});
it('does not save default value', () => {
const value = 'ascending';
createComponent({
props: {
storageKey,
value,
},
});
expect(localStorage.getItem(storageKey)).toBe(null);
});
});
describe('localStorage has saved value', () => {
const storageKey = 'issue_list_order_by';
const savedValue = 'last_updated';
beforeEach(() => {
localStorage.setItem(storageKey, savedValue);
});
it('emits input event with saved value', () => {
createComponent({
props: {
storageKey,
value: 'ascending',
},
});
expect(wrapper.emitted('input')[0][0]).toBe(savedValue);
});
it('does not overwrite localStorage with prop value', () => {
createComponent({
props: {
storageKey,
value: 'created',
},
});
expect(localStorage.getItem(storageKey)).toBe(savedValue);
});
it('updating the value updates localStorage', () => {
createComponent({
props: {
storageKey,
value: 'created',
},
});
const newValue = 'last_updated';
wrapper.setProps({
value: newValue,
});
return wrapper.vm.$nextTick().then(() => {
expect(localStorage.getItem(storageKey)).toBe(newValue);
});
});
});
});
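For reference, a minimal sketch of the renderless component this spec exercises (an assumption based on the behavior tested above; the real `~/vue_shared/components/local_storage_sync.vue` may differ in detail):

// Sketch only: keep a `value` prop in sync with localStorage under `storageKey`.
export default {
  props: {
    storageKey: { type: String, required: true },
    value: { type: String, required: false, default: '' },
  },
  watch: {
    value(newValue) {
      // Persist every update; the initial/default value is never written.
      localStorage.setItem(this.storageKey, newValue);
    },
  },
  mounted() {
    // Emit a previously saved value so the parent can adopt it.
    const saved = localStorage.getItem(this.storageKey);
    if (saved !== null && saved !== this.value) {
      this.$emit('input', saved);
    }
  },
  render() {
    // Renderless: pass the default slot straight through.
    return this.$slots.default;
  },
};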
...@@ -39,7 +39,7 @@ const propsData = { ...@@ -39,7 +39,7 @@ const propsData = {
function setupComponentStore(component) { function setupComponentStore(component) {
// Load 2 panel groups // Load 2 panel groups
component.$store.commit( component.$store.commit(
`monitoringDashboard/${types.RECEIVE_METRICS_DATA_SUCCESS}`, `monitoringDashboard/${types.RECEIVE_METRICS_DASHBOARD_SUCCESS}`,
metricsDashboardPayload, metricsDashboardPayload,
); );
......
...@@ -27,6 +27,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do ...@@ -27,6 +27,7 @@ describe Clusters::Cluster, :use_clean_rails_memory_store_caching do
it { is_expected.to have_many(:kubernetes_namespaces) } it { is_expected.to have_many(:kubernetes_namespaces) }
it { is_expected.to have_one(:cluster_project) } it { is_expected.to have_one(:cluster_project) }
it { is_expected.to have_many(:deployment_clusters) } it { is_expected.to have_many(:deployment_clusters) }
it { is_expected.to have_many(:metrics_dashboard_annotations) }
it { is_expected.to delegate_method(:status).to(:provider) } it { is_expected.to delegate_method(:status).to(:provider) }
it { is_expected.to delegate_method(:status_reason).to(:provider) } it { is_expected.to delegate_method(:status_reason).to(:provider) }
......
...@@ -17,6 +17,7 @@ describe Environment, :use_clean_rails_memory_store_caching do ...@@ -17,6 +17,7 @@ describe Environment, :use_clean_rails_memory_store_caching do
it { is_expected.to belong_to(:project).required } it { is_expected.to belong_to(:project).required }
it { is_expected.to have_many(:deployments) } it { is_expected.to have_many(:deployments) }
it { is_expected.to have_many(:metrics_dashboard_annotations) }
it { is_expected.to delegate_method(:stop_action).to(:last_deployment) } it { is_expected.to delegate_method(:stop_action).to(:last_deployment) }
it { is_expected.to delegate_method(:manual_actions).to(:last_deployment) } it { is_expected.to delegate_method(:manual_actions).to(:last_deployment) }
......
# frozen_string_literal: true
require 'spec_helper'
describe Metrics::Dashboard::Annotation do
describe 'associations' do
it { is_expected.to belong_to(:environment).inverse_of(:metrics_dashboard_annotations) }
it { is_expected.to belong_to(:cluster).class_name('Clusters::Cluster').inverse_of(:metrics_dashboard_annotations) }
end
describe 'validation' do
it { is_expected.to validate_presence_of(:description) }
it { is_expected.to validate_presence_of(:dashboard_path) }
it { is_expected.to validate_presence_of(:starting_at) }
it { is_expected.to validate_length_of(:dashboard_path).is_at_most(255) }
it { is_expected.to validate_length_of(:panel_xid).is_at_most(255) }
it { is_expected.to validate_length_of(:description).is_at_most(255) }
context 'orphaned annotation' do
subject { build(:metrics_dashboard_annotation, environment: nil) }
it { is_expected.not_to be_valid }
it 'reports an error about the missing relations' do
subject.valid?
expect(subject.errors.full_messages).to include(/Annotation must belong to a cluster or an environment/)
end
end
context 'environments annotation' do
subject { build(:metrics_dashboard_annotation) }
it { is_expected.to be_valid }
end
context 'clusters annotation' do
subject { build(:metrics_dashboard_annotation, :with_cluster) }
it { is_expected.to be_valid }
end
context 'annotation with shared ownership' do
subject { build(:metrics_dashboard_annotation, :with_cluster, environment: build(:environment)) }
it 'reports error about shared ownership' do
subject.valid?
expect(subject.errors.full_messages).to include(/Annotation can't belong to both a cluster and an environment at the same time/)
end
end
end
end
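The examples above pin down the annotation's shape: exactly one owner (cluster or environment), required description, dashboard_path, and starting_at, plus 255-character caps. A speculative sketch of a model that would satisfy them; the table name, validator name, and optional: true are assumptions, while the error strings are copied from the expectations above:

# Speculative sketch, not the shipped implementation.
module Metrics
  module Dashboard
    class Annotation < ApplicationRecord
      self.table_name = 'metrics_dashboard_annotations'

      # Both owners are nullable; exactly one must be present (see below).
      belongs_to :environment, inverse_of: :metrics_dashboard_annotations, optional: true
      belongs_to :cluster, class_name: 'Clusters::Cluster', inverse_of: :metrics_dashboard_annotations, optional: true

      validates :description, presence: true, length: { maximum: 255 }
      validates :dashboard_path, presence: true, length: { maximum: 255 }
      validates :starting_at, presence: true
      validates :panel_xid, length: { maximum: 255 }
      validate :single_ownership

      private

      # XOR: valid only when exactly one of cluster/environment is set.
      def single_ownership
        return if cluster.present? ^ environment.present?

        if cluster.present? # both owners set
          errors.add(:base, "Annotation can't belong to both a cluster and an environment at the same time")
        else # neither owner set
          errors.add(:base, 'Annotation must belong to a cluster or an environment')
        end
      end
    end
  end
end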
# frozen_string_literal: true

require 'spec_helper'

describe Metrics::Dashboard::AnnotationPolicy, :models do
  shared_examples 'metrics dashboard annotation policy' do
    context 'when guest' do
      before do
        project.add_guest(user)
      end

      it { expect(policy).to be_disallowed :read_metrics_dashboard_annotation }
      it { expect(policy).to be_disallowed :create_metrics_dashboard_annotation }
      it { expect(policy).to be_disallowed :update_metrics_dashboard_annotation }
      it { expect(policy).to be_disallowed :delete_metrics_dashboard_annotation }
    end

    context 'when reporter' do
      before do
        project.add_reporter(user)
      end

      it { expect(policy).to be_allowed :read_metrics_dashboard_annotation }
      it { expect(policy).to be_disallowed :create_metrics_dashboard_annotation }
      it { expect(policy).to be_disallowed :update_metrics_dashboard_annotation }
      it { expect(policy).to be_disallowed :delete_metrics_dashboard_annotation }
    end

    context 'when developer' do
      before do
        project.add_developer(user)
      end

      it { expect(policy).to be_allowed :read_metrics_dashboard_annotation }
      it { expect(policy).to be_allowed :create_metrics_dashboard_annotation }
      it { expect(policy).to be_allowed :update_metrics_dashboard_annotation }
      it { expect(policy).to be_allowed :delete_metrics_dashboard_annotation }
    end

    context 'when maintainer' do
      before do
        project.add_maintainer(user)
      end

      it { expect(policy).to be_allowed :read_metrics_dashboard_annotation }
      it { expect(policy).to be_allowed :create_metrics_dashboard_annotation }
      it { expect(policy).to be_allowed :update_metrics_dashboard_annotation }
      it { expect(policy).to be_allowed :delete_metrics_dashboard_annotation }
    end
  end

  describe 'rules' do
    context 'environments annotation' do
      let(:annotation) { create(:metrics_dashboard_annotation, environment: environment) }
      let(:environment) { create(:environment) }
      let!(:project) { environment.project }
      let(:user) { create(:user) }
      let(:policy) { described_class.new(user, annotation) }

      it_behaves_like 'metrics dashboard annotation policy'
    end

    context 'cluster annotation' do
      let(:annotation) { create(:metrics_dashboard_annotation, environment: nil, cluster: cluster) }
      let(:cluster) { create(:cluster, :project) }
      let(:project) { cluster.project }
      let(:user) { create(:user) }
      let(:policy) { described_class.new(user, annotation) }

      it_behaves_like 'metrics dashboard annotation policy'
    end
  end
end
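The shared examples grant nothing on the annotation directly, which suggests the policy simply delegates to the owning record so that project and group rules decide the outcome. A sketch under that assumption (the shipped policy may be structured differently):

# Sketch: delegation via GitLab's DeclarativePolicy framework.
module Metrics
  module Dashboard
    class AnnotationPolicy < BasePolicy
      # Only one of the two owners is ever set; its policy chain resolves
      # to project (or group) rules, where the *_metrics_dashboard_annotation
      # abilities are granted per role.
      delegate { @subject.cluster }
      delegate { @subject.environment }
    end
  end
end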
@@ -28,7 +28,7 @@ describe ProjectPolicy do
      download_code fork_project create_snippet update_issue
      admin_issue admin_label admin_list read_commit_status read_build
      read_container_image read_pipeline read_environment read_deployment
-     read_merge_request download_wiki_code read_sentry_issue
+     read_merge_request download_wiki_code read_sentry_issue read_metrics_dashboard_annotation
    ]
  end
@@ -43,6 +43,7 @@ describe ProjectPolicy do
      update_pipeline create_merge_request_from create_wiki push_code
      resolve_note create_container_image update_container_image destroy_container_image
      create_environment update_environment create_deployment update_deployment create_release update_release
+     create_metrics_dashboard_annotation delete_metrics_dashboard_annotation update_metrics_dashboard_annotation
    ]
  end
......
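These list changes imply two role-scoped grants in ProjectPolicy: read for reporters, full management for developers; the GroupPolicy shared-context hunk further down reflects the same split at group level. A hedged sketch of rules that would produce these permission lists (placement and grouping within the real policy file are assumptions):

# Sketch: role-based grants matching the updated permission lists.
class ProjectPolicy < BasePolicy
  rule { can?(:reporter_access) }.policy do
    enable :read_metrics_dashboard_annotation
  end

  rule { can?(:developer_access) }.policy do
    enable :create_metrics_dashboard_annotation
    enable :delete_metrics_dashboard_annotation
    enable :update_metrics_dashboard_annotation
  end
end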
# frozen_string_literal: true

require 'spec_helper'

describe Metrics::Dashboard::Annotations::CreateService do
  let_it_be(:user) { create(:user) }
  let(:description) { 'test annotation' }
  let(:dashboard_path) { 'config/prometheus/common_metrics.yml' }
  let(:starting_at) { 15.minutes.ago }
  let(:ending_at) { nil }
  let(:service_instance) { described_class.new(user, annotation_params) }
  let(:annotation_params) do
    {
      environment: environment,
      cluster: cluster,
      description: description,
      dashboard_path: dashboard_path,
      starting_at: starting_at,
      ending_at: ending_at
    }
  end

  shared_examples 'executed annotation creation' do
    it 'returns success response', :aggregate_failures do
      annotation = instance_double(::Metrics::Dashboard::Annotation)

      allow(::Metrics::Dashboard::Annotation).to receive(:new).and_return(annotation)
      allow(annotation).to receive(:save).and_return(true)

      response = service_instance.execute

      expect(response[:status]).to be :success
      expect(response[:annotation]).to be annotation
    end

    it 'creates annotation', :aggregate_failures do
      annotation = instance_double(::Metrics::Dashboard::Annotation)

      expect(::Metrics::Dashboard::Annotation)
        .to receive(:new).with(annotation_params).and_return(annotation)
      expect(annotation).to receive(:save).and_return(true)

      service_instance.execute
    end
  end

  shared_examples 'prevented annotation creation' do |message|
    it 'returns error response', :aggregate_failures do
      response = service_instance.execute

      expect(response[:status]).to be :error
      expect(response[:message]).to eql message
    end

    it 'does not change db state' do
      expect(::Metrics::Dashboard::Annotation).not_to receive(:new)

      service_instance.execute
    end
  end

  shared_examples 'annotation creation failure' do
    it 'returns error response', :aggregate_failures do
      annotation = instance_double(::Metrics::Dashboard::Annotation)

      expect(annotation).to receive(:errors).and_return('Model validation error')
      expect(::Metrics::Dashboard::Annotation)
        .to receive(:new).with(annotation_params).and_return(annotation)
      expect(annotation).to receive(:save).and_return(false)

      response = service_instance.execute

      expect(response[:status]).to be :error
      expect(response[:message]).to eql 'Model validation error'
    end
  end

  describe '.execute' do
    context 'with environment' do
      let(:environment) { create(:environment) }
      let(:cluster) { nil }

      context 'with anonymous user' do
        it_behaves_like 'prevented annotation creation', 'You are not authorized to create annotation for selected environment'
      end

      context 'with maintainer user' do
        before do
          environment.project.add_maintainer(user)
        end

        it_behaves_like 'executed annotation creation'
      end
    end

    context 'with cluster' do
      let(:environment) { nil }

      context 'with anonymous user' do
        let(:cluster) { create(:cluster, :project) }

        it_behaves_like 'prevented annotation creation', 'You are not authorized to create annotation for selected cluster'
      end

      context 'with maintainer user' do
        let(:cluster) { create(:cluster, :project) }

        before do
          cluster.project.add_maintainer(user)
        end

        it_behaves_like 'executed annotation creation'
      end

      context 'with owner user' do
        let(:cluster) { create(:cluster, :group) }

        before do
          cluster.group.add_owner(user)
        end

        it_behaves_like 'executed annotation creation'
      end
    end

    context 'neither cluster nor environment is supplied' do
      let(:environment) { nil }
      let(:cluster) { nil }

      it_behaves_like 'annotation creation failure'
    end

    context 'missing dashboard_path' do
      let(:cluster) { create(:cluster, :project) }
      let(:environment) { nil }
      let(:dashboard_path) { nil }

      context 'with maintainer user' do
        before do
          cluster.project.add_maintainer(user)
        end

        it_behaves_like 'annotation creation failure'
      end
    end

    context 'incorrect dashboard_path' do
      let(:cluster) { create(:cluster, :project) }
      let(:environment) { nil }
      let(:dashboard_path) { 'something_incorrect.yml' }

      context 'with maintainer user' do
        before do
          cluster.project.add_maintainer(user)
        end

        it_behaves_like 'prevented annotation creation', 'Dashboard with requested path can not be found'
      end
    end
  end
end
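Read together, the contexts describe a service that authorizes against whichever parent was supplied, validates the dashboard path, then builds and saves the record, returning a status hash. A rough sketch under those assumptions; the error strings come from the examples above, and the dashboard-path lookup is deliberately elided:

# Rough sketch, not the shipped service.
module Metrics
  module Dashboard
    module Annotations
      class CreateService
        def initialize(user, params)
          @user = user
          @params = params
        end

        def execute
          if environment && !authorized?(environment)
            return error('You are not authorized to create annotation for selected environment')
          end

          if cluster && !authorized?(cluster)
            return error('You are not authorized to create annotation for selected cluster')
          end

          # Dashboard-path validation elided: the shipped service also checks
          # that :dashboard_path resolves to an existing dashboard and returns
          # 'Dashboard with requested path can not be found' when it does not.

          annotation = ::Metrics::Dashboard::Annotation.new(@params)

          if annotation.save
            { status: :success, annotation: annotation }
          else
            error(annotation.errors)
          end
        end

        private

        def environment
          @params[:environment]
        end

        def cluster
          @params[:cluster]
        end

        def authorized?(parent)
          Ability.allowed?(@user, :create_metrics_dashboard_annotation, parent)
        end

        def error(message)
          { status: :error, message: message }
        end
      end
    end
  end
end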
# frozen_string_literal: true

require 'spec_helper'

describe Metrics::Dashboard::Annotations::DeleteService do
  let(:user) { create(:user) }
  let(:service_instance) { described_class.new(user, annotation) }

  shared_examples 'executed annotation deletion' do
    it 'returns success response', :aggregate_failures do
      expect(annotation).to receive(:destroy).and_return(true)

      response = service_instance.execute

      expect(response[:status]).to be :success
    end
  end

  shared_examples 'prevented annotation deletion' do |message|
    it 'returns error response', :aggregate_failures do
      response = service_instance.execute

      expect(response[:status]).to be :error
      expect(response[:message]).to eql message
    end

    it 'does not change db state' do
      expect(annotation).not_to receive(:destroy)

      service_instance.execute
    end
  end

  describe '.execute' do
    context 'with specific environment' do
      let(:annotation) { create(:metrics_dashboard_annotation, environment: environment) }
      let(:environment) { create(:environment) }

      context 'with anonymous user' do
        it_behaves_like 'prevented annotation deletion', 'You are not authorized to delete this annotation'
      end

      context 'with maintainer user' do
        before do
          environment.project.add_maintainer(user)
        end

        it_behaves_like 'executed annotation deletion'

        context 'annotation failed to delete' do
          it 'returns error response', :aggregate_failures do
            allow(annotation).to receive(:destroy).and_return(false)

            response = service_instance.execute

            expect(response[:status]).to be :error
            expect(response[:message]).to eql 'Annotation has not been deleted'
          end
        end
      end
    end

    context 'with specific cluster' do
      let(:annotation) { create(:metrics_dashboard_annotation, cluster: cluster, environment: nil) }

      context 'with anonymous user' do
        let(:cluster) { create(:cluster, :project) }

        it_behaves_like 'prevented annotation deletion', 'You are not authorized to delete this annotation'
      end

      context 'with maintainer user' do
        let(:cluster) { create(:cluster, :project) }

        before do
          cluster.project.add_maintainer(user)
        end

        it_behaves_like 'executed annotation deletion'
      end

      context 'with owner user' do
        let(:cluster) { create(:cluster, :group) }

        before do
          cluster.group.add_owner(user)
        end

        it_behaves_like 'executed annotation deletion'
      end
    end
  end
end
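The deletion spec implies a much smaller service: one ability check, one destroy, and the same response-hash convention. A sketch under the assumption that authorization runs through Ability.allowed? against the annotation itself:

# Sketch only; error strings are taken from the spec above.
module Metrics
  module Dashboard
    module Annotations
      class DeleteService
        def initialize(user, annotation)
          @user = user
          @annotation = annotation
        end

        def execute
          unless Ability.allowed?(@user, :delete_metrics_dashboard_annotation, @annotation)
            return error('You are not authorized to delete this annotation')
          end

          if @annotation.destroy
            { status: :success }
          else
            error('Annotation has not been deleted')
          end
        end

        private

        def error(message)
          { status: :error, message: message }
        end
      end
    end
  end
end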
@@ -18,8 +18,8 @@ RSpec.shared_context 'GroupPolicy context' do
    ]
  end

  let(:read_group_permissions) { %i[read_label read_list read_milestone read_board] }
- let(:reporter_permissions) { %i[admin_label read_container_image] }
+ let(:reporter_permissions) { %i[admin_label read_container_image read_metrics_dashboard_annotation] }
- let(:developer_permissions) { [:admin_milestone] }
+ let(:developer_permissions) { %i[admin_milestone create_metrics_dashboard_annotation delete_metrics_dashboard_annotation update_metrics_dashboard_annotation] }
  let(:maintainer_permissions) do
    %i[
      create_projects
......