Commit 209bd8cf authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent a9ced7da
@@ -326,6 +326,7 @@ export default {
  },
  [types.SET_SHOW_WHITESPACE](state, showWhitespace) {
    state.showWhitespace = showWhitespace;
+   state.diffFiles = [];
  },
  [types.TOGGLE_FILE_FINDER_VISIBLE](state, visible) {
    state.fileFinderVisible = visible;
...
@@ -5,6 +5,7 @@ import { highCountTrim } from '~/lib/utils/text_utility';
import SetStatusModalTrigger from './set_status_modal/set_status_modal_trigger.vue';
import SetStatusModalWrapper from './set_status_modal/set_status_modal_wrapper.vue';
import { parseBoolean } from '~/lib/utils/common_utils';
+import Tracking from '~/tracking';
/**
 * Updates todo counter when todos are toggled.
@@ -73,6 +74,24 @@ function initStatusTriggers() {
  }
}
+export function initNavUserDropdownTracking() {
+  const el = document.querySelector('.js-nav-user-dropdown');
+  const buyEl = document.querySelector('.js-buy-ci-minutes-link');
+  if (el && buyEl) {
+    const { trackLabel, trackProperty } = buyEl.dataset;
+    const trackEvent = 'show_buy_ci_minutes';
+    $(el).on('shown.bs.dropdown', () => {
+      Tracking.event(undefined, trackEvent, {
+        label: trackLabel,
+        property: trackProperty,
+      });
+    });
+  }
+}
document.addEventListener('DOMContentLoaded', () => {
  requestIdleCallback(initStatusTriggers);
+  initNavUserDropdownTracking();
});
...
/**
* @param {String} queryLabel - Default query label for chart
* @param {Object} metricAttributes - Default metric attribute values (e.g. method, instance)
* @returns {String} The formatted query label
* @example
* singleAttributeLabel('app', {__name__: "up", app: "prometheus"}) -> "app: prometheus"
*/
const singleAttributeLabel = (queryLabel, metricAttributes) => {
if (!queryLabel) return '';
const relevantAttribute = queryLabel.toLowerCase().replace(' ', '_');
const value = metricAttributes[relevantAttribute];
if (!value) return '';
return `${queryLabel}: ${value}`;
};
/**
* @param {String} queryLabel - Default query label for chart
* @param {Object} metricAttributes - Default metric attribute values (e.g. method, instance)
* @returns {String} The formatted query label
* @example
* templatedLabel('__name__', {__name__: "up", app: "prometheus"}) -> "__name__"
*/
const templatedLabel = (queryLabel, metricAttributes) => {
if (!queryLabel) return '';
// eslint-disable-next-line array-callback-return
Object.entries(metricAttributes).map(([templateVar, label]) => {
const regex = new RegExp(`{{\\s*${templateVar}\\s*}}`, 'g');
// eslint-disable-next-line no-param-reassign
queryLabel = queryLabel.replace(regex, label);
});
return queryLabel;
};
/**
* @param {Object} metricAttributes - Default metric attribute values (e.g. method, instance)
* @returns {String} The formatted query label
* @example
* multiMetricLabel('', {__name__: "up", app: "prometheus"}) -> "__name__: up, app: prometheus"
*/
const multiMetricLabel = metricAttributes => {
return Object.entries(metricAttributes)
.map(([templateVar, label]) => `${templateVar}: ${label}`)
.join(', ');
};
/**
* @param {String} queryLabel - Default query label for chart
* @param {Object} metricAttributes - Default metric attribute values (e.g. method, instance)
* @returns {String} The formatted query label
*/
const getSeriesLabel = (queryLabel, metricAttributes) => {
return (
singleAttributeLabel(queryLabel, metricAttributes) ||
templatedLabel(queryLabel, metricAttributes) ||
multiMetricLabel(metricAttributes) ||
queryLabel
);
};
/**
 * @param {Array} queryResults - Array of Result objects
 * @param {Object} defaultConfig - Default chart config values (e.g. lineStyle, name)
@@ -12,21 +72,11 @@ export const makeDataSeries = (queryResults, defaultConfig) =>
    if (!data.length) {
      return null;
    }
-   const relevantMetric = defaultConfig.name.toLowerCase().replace(' ', '_');
-   const name = result.metric[relevantMetric];
    const series = { data };
-   if (name) {
-     series.name = `${defaultConfig.name}: ${name}`;
-   } else {
-     Object.keys(result.metric).forEach(templateVar => {
-       const value = result.metric[templateVar];
-       const regex = new RegExp(`{{\\s*${templateVar}\\s*}}`, 'g');
-       series.name = series.name.replace(regex, value);
-     });
-   }
-   return { ...defaultConfig, ...series };
+   return {
+     ...defaultConfig,
+     ...series,
+     name: getSeriesLabel(defaultConfig.name, result.metric),
+   };
  })
  .filter(series => series !== null);
@@ -58,7 +58,7 @@ export default {
  },
  methods: {
    formatLegendLabel(query) {
-     return `${query.label}`;
+     return query.label;
    },
    onResize() {
      if (!this.$refs.barChart) return;
...
@@ -76,7 +76,7 @@ export default {
  },
  methods: {
    formatLegendLabel(query) {
-     return `${query.label}`;
+     return query.label;
    },
    onResize() {
      if (!this.$refs.columnChart) return;
...
@@ -251,7 +251,7 @@ export default {
  },
  methods: {
    formatLegendLabel(query) {
-     return `${query.label}`;
+     return query.label;
    },
    isTooltipOfType(tooltipType, defaultType) {
      return tooltipType === defaultType;
...
...@@ -68,12 +68,11 @@ export const parseEnvironmentsResponse = (response = [], projectPath) => ...@@ -68,12 +68,11 @@ export const parseEnvironmentsResponse = (response = [], projectPath) =>
* https://gitlab.com/gitlab-org/gitlab/issues/207198 * https://gitlab.com/gitlab-org/gitlab/issues/207198
* *
* @param {Array} metrics - Array of prometheus metrics * @param {Array} metrics - Array of prometheus metrics
* @param {String} defaultLabel - Default label for metrics
* @returns {Object} * @returns {Object}
*/ */
const mapToMetricsViewModel = (metrics, defaultLabel) => const mapToMetricsViewModel = metrics =>
metrics.map(({ label, id, metric_id, query_range, prometheus_endpoint_path, ...metric }) => ({ metrics.map(({ label, id, metric_id, query_range, prometheus_endpoint_path, ...metric }) => ({
label: label || defaultLabel, label,
queryRange: query_range, queryRange: query_range,
prometheusEndpointPath: prometheus_endpoint_path, prometheusEndpointPath: prometheus_endpoint_path,
metricId: uniqMetricsId({ metric_id, id }), metricId: uniqMetricsId({ metric_id, id }),
......
@@ -12,7 +12,7 @@ export default function createRouter(base, baseRef) {
    base: joinPaths(gon.relative_url_root || '', base),
    routes: [
      {
-       path: `(/-)?/tree/(${encodeURIComponent(baseRef)}|${baseRef})/:path*`,
+       path: `(/-)?/tree/(${encodeURIComponent(baseRef).replace(/%2F/g, '/')}|${baseRef})/:path*`,
        name: 'treePath',
        component: TreePage,
        props: route => ({
...
<script>
import { GlNewButton } from '@gitlab/ui';
export default {
components: {
GlNewButton,
},
props: {
saveable: {
type: Boolean,
required: false,
default: false,
},
},
};
</script>
<template>
<div class="d-flex bg-light border-top justify-content-between align-items-center py-3 px-4">
<gl-new-button variant="success" :disabled="!saveable">
{{ __('Submit Changes') }}
</gl-new-button>
</div>
</template>
@@ -3,27 +3,29 @@ import { mapState, mapGetters, mapActions } from 'vuex';
import { GlSkeletonLoader } from '@gitlab/ui';
import EditArea from './edit_area.vue';
+import Toolbar from './publish_toolbar.vue';
export default {
  components: {
    EditArea,
    GlSkeletonLoader,
+   Toolbar,
  },
  computed: {
    ...mapState(['content', 'isLoadingContent']),
-   ...mapGetters(['isContentLoaded']),
+   ...mapGetters(['isContentLoaded', 'contentChanged']),
  },
  mounted() {
    this.loadContent();
  },
  methods: {
-   ...mapActions(['loadContent']),
+   ...mapActions(['loadContent', 'setContent']),
  },
};
</script>
<template>
- <div class="d-flex justify-content-center h-100">
-   <div v-if="isLoadingContent" class="w-50 h-50 mt-2">
+ <div class="d-flex justify-content-center h-100 pt-2">
+   <div v-if="isLoadingContent" class="w-50 h-50">
      <gl-skeleton-loader :width="500" :height="102">
        <rect width="500" height="16" rx="4" />
        <rect y="20" width="375" height="16" rx="4" />
@@ -33,6 +35,13 @@ export default {
        <rect x="410" y="40" width="90" height="16" rx="4" />
      </gl-skeleton-loader>
    </div>
-   <edit-area v-if="isContentLoaded" class="w-75 h-100 shadow-none" :value="content" />
+   <div v-if="isContentLoaded" class="d-flex flex-grow-1 flex-column">
+     <edit-area
+       class="w-75 h-100 shadow-none align-self-center"
+       :value="content"
+       @input="setContent"
+     />
+     <toolbar :saveable="contentChanged" />
+   </div>
  </div>
</template>
@@ -15,4 +15,8 @@ export const loadContent = ({ commit, state: { sourcePath, projectId } }) => {
  });
};
+export const setContent = ({ commit }, content) => {
+  commit(mutationTypes.SET_CONTENT, content);
+};
export default () => {};
...
-// eslint-disable-next-line import/prefer-default-export
-export const isContentLoaded = ({ content }) => Boolean(content);
+export const isContentLoaded = ({ originalContent }) => Boolean(originalContent);
+export const contentChanged = ({ originalContent, content }) => originalContent !== content;
...
export const LOAD_CONTENT = 'loadContent';
export const RECEIVE_CONTENT_SUCCESS = 'receiveContentSuccess';
export const RECEIVE_CONTENT_ERROR = 'receiveContentError';
+export const SET_CONTENT = 'setContent';
@@ -8,8 +8,12 @@ export default {
    state.isLoadingContent = false;
    state.title = title;
    state.content = content;
+   state.originalContent = content;
  },
  [types.RECEIVE_CONTENT_ERROR](state) {
    state.isLoadingContent = false;
  },
+ [types.SET_CONTENT](state, content) {
+   state.content = content;
+ },
};
@@ -3,7 +3,9 @@ const createState = (initialState = {}) => ({
  sourcePath: null,
  isLoadingContent: false,
+ isSavingChanges: false,
+ originalContent: '',
  content: '',
  title: '',
...
@@ -67,7 +67,6 @@ module Ci
    end
    def from_needs(scope)
-     return scope unless Feature.enabled?(:ci_dag_support, project, default_enabled: true)
      return scope unless processable.scheduling_type_dag?
      needs_names = processable.needs.artifacts.select(:name)
...
@@ -25,8 +25,6 @@ module Ci
    end
    def self.select_with_aggregated_needs(project)
-     return all unless Feature.enabled?(:ci_dag_support, project, default_enabled: true)
      aggregated_needs_names = Ci::BuildNeed
        .scoped_build
        .select("ARRAY_AGG(name)")
...
@@ -555,22 +555,28 @@ class MergeRequest < ApplicationRecord
    end
  end
+ def diff_stats
+   return unless diff_refs
+   strong_memoize(:diff_stats) do
+     project.repository.diff_stats(diff_refs.base_sha, diff_refs.head_sha)
+   end
+ end
  def diff_size
    # Calling `merge_request_diff.diffs.real_size` will also perform
    # highlighting, which we don't need here.
-   merge_request_diff&.real_size || diffs.real_size
+   merge_request_diff&.real_size || diff_stats&.real_size || diffs.real_size
  end
  def modified_paths(past_merge_request_diff: nil)
-   diffs = if past_merge_request_diff
-             past_merge_request_diff
-           elsif compare
-             compare
-           else
-             self.merge_request_diff
-           end
-   diffs.modified_paths
+   if past_merge_request_diff
+     past_merge_request_diff.modified_paths
+   elsif compare
+     diff_stats&.paths || compare.modified_paths
+   else
+     merge_request_diff.modified_paths
+   end
  end
  def new_paths
...
@@ -267,7 +267,7 @@ class Snippet < ApplicationRecord
  def repository_size_checker
    strong_memoize(:repository_size_checker) do
      ::Gitlab::RepositorySizeChecker.new(
-       current_size_proc: -> { repository._uncached_size.megabytes },
+       current_size_proc: -> { repository.size.megabytes },
        limit: Gitlab::CurrentSettings.snippet_size_limit
      )
    end
...
@@ -8,7 +8,8 @@ module Ci
  # issue: https://gitlab.com/gitlab-org/gitlab/issues/34224
  class CompareReportsBaseService < ::BaseService
    def execute(base_pipeline, head_pipeline)
-     comparer = comparer_class.new(get_report(base_pipeline), get_report(head_pipeline))
+     comparer = build_comparer(base_pipeline, head_pipeline)
      {
        status: :parsed,
        key: key(base_pipeline, head_pipeline),
@@ -28,6 +29,12 @@ module Ci
      data&.fetch(:key, nil) == key(base_pipeline, head_pipeline)
    end
+   protected
+   def build_comparer(base_pipeline, head_pipeline)
+     comparer_class.new(get_report(base_pipeline), get_report(head_pipeline))
+   end
    private
    def key(base_pipeline, head_pipeline)
...
@@ -93,7 +93,7 @@ module Ci
    end
    def processable_status(processable)
-     if Feature.enabled?(:ci_dag_support, project, default_enabled: true) && processable.scheduling_type_dag?
+     if processable.scheduling_type_dag?
        # Processable uses DAG, get status of all dependent needs
        @collection.status_for_names(processable.aggregated_needs_names.to_a)
      else
...
@@ -43,8 +43,6 @@ module Ci
    end
    def process_dag_builds_without_needs
-     return false unless Feature.enabled?(:ci_dag_support, project, default_enabled: true)
      created_processables.scheduling_type_dag.without_needs.each do |build|
        process_build(build, 'success')
      end
@@ -52,7 +50,6 @@
    def process_dag_builds_with_needs(trigger_build_ids)
      return false unless trigger_build_ids.present?
-     return false unless Feature.enabled?(:ci_dag_support, project, default_enabled: true)
      # we find processables that are dependent:
      # 1. because of current dependency,
@@ -110,11 +107,7 @@
    end
    def created_stage_scheduled_processables
-     if Feature.enabled?(:ci_dag_support, project, default_enabled: true)
-       created_processables.scheduling_type_stage
-     else
-       created_processables
-     end
+     created_processables.scheduling_type_stage
    end
    def created_processables
...
@@ -65,7 +65,7 @@
    .dropdown-menu.dropdown-menu-right
      = render 'layouts/header/help_dropdown'
  - if header_link?(:user_dropdown)
-   %li.nav-item.header-user.dropdown{ data: { track_label: "profile_dropdown", track_event: "click_dropdown", track_value: "", qa_selector: 'user_menu' }, class: ('mr-0' if has_impersonation_link) }
+   %li.nav-item.header-user.js-nav-user-dropdown.dropdown{ data: { track_label: "profile_dropdown", track_event: "click_dropdown", track_value: "", qa_selector: 'user_menu' }, class: ('mr-0' if has_impersonation_link) }
      = link_to current_user, class: user_dropdown_class, data: { toggle: "dropdown" } do
        = image_tag avatar_icon_for_user(current_user, 23), width: 23, height: 23, class: "header-user-avatar qa-user-avatar"
        = render_if_exists 'layouts/header/user_notification_dot', project: project, namespace: group
...
-#static-site-editor{ data: {} }
+#static-site-editor{ data: { project_id: '8', path: 'README.md' } }
@@ -77,12 +77,8 @@ class PostReceive # rubocop:disable Scalability/IdempotentWorker
    return false unless user
-   # At the moment, we only expires the repository caches.
-   # In the future we might need to call ProjectCacheWorker
-   # (or the custom class we create) to update the snippet
-   # repository size or any other key.
-   # We might also need to update the repository statistics.
    expire_caches(post_received, snippet.repository)
+   snippet.repository.expire_statistics_caches
  end
  # Expire the repository status, branch, and tag cache once per push.
...
---
title: Add an endpoint to allow group admin users to purge the dependency proxy for a group
merge_request: 27843
author:
type: added
---
title: Update query labels dynamically for embedded charts
merge_request: 29034
author:
type: other
---
title: Use diff-stats for calculating raw diffs modified paths
merge_request: 29134
author:
type: performance
---
title: Remove blobs_fetch_in_batches feature flag
merge_request: 29069
author:
type: added
---
title: Fixed whitespace toggle not showing the correct diff
merge_request:
author:
type: fixed
---
title: Validate dependency on job generating a CI config when using dynamic child pipelines
merge_request: 28901
author:
type: added
---
title: Remove `ci_dag_support` feature flag
merge_request: 28863
author: Lee Tickett
type: added
---
title: Update GitLab Elasticsearch Indexer
merge_request: 29256
author:
type: other
@@ -66,6 +66,8 @@
  - 1
- - delete_user
  - 1
- - dependency_proxy
  - 1
- - deployment
  - 3
- - design_management_new_version
...
@@ -73,7 +73,7 @@ for Wiki and Design Repository cases.
GitLab stores files and blobs such as Issue attachments or LFS objects into either:
- The filesystem in a specific location.
-- An Object Storage solution. Object Storage solutions can be:
+- An [Object Storage](../../object_storage.md) solution. Object Storage solutions can be:
  - Cloud based like Amazon S3 Google Cloud Storage.
  - Hosted by you (like MinIO).
  - A Storage Appliance that exposes an Object Storage-compatible API.
...
@@ -12,6 +12,8 @@ To have:
- GitLab manage replication, follow [Enabling GitLab replication](#enabling-gitlab-managed-object-storage-replication).
- Third-party services manage replication, follow [Third-party replication services](#third-party-replication-services).
+[Read more about using object storage with GitLab](../../object_storage.md).
## Enabling GitLab managed object storage replication
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/10586) in GitLab 12.4.
...
@@ -92,6 +92,8 @@ Use an object storage option like AWS S3 to store job artifacts.
DANGER: **Danger:**
If you configure GitLab to store CI logs and artifacts on object storage, you must also enable [incremental logging](job_logs.md#new-incremental-logging-architecture). Otherwise, job logs will disappear or not be saved.
+[Read more about using object storage with GitLab](object_storage.md).
#### Object Storage Settings
For source installations the following settings are nested under `artifacts:` and then `object_store:`. On Omnibus GitLab installs they are prefixed by `artifacts_object_store_`.
...
@@ -61,6 +61,8 @@ You can also use external object storage in a private local network. For example
GitLab provides two different options for the uploading mechanism: "Direct upload" and "Background upload".
+[Read more about using object storage with GitLab](../object_storage.md).
**Option 1. Direct upload**
1. User pushes an `lfs` file to the GitLab instance
...
@@ -68,6 +68,8 @@ Instead of storing the external diffs on disk, we recommended the use of an obje
store like AWS S3 instead. This configuration relies on valid AWS credentials to
be configured already.
+[Read more about using object storage with GitLab](object_storage.md).
## Object Storage Settings
For source installations, these settings are nested under `external_diffs:` and
...
@@ -21,9 +21,6 @@ Object storage options that GitLab has tested, or is aware of customers using in
For configuring GitLab to use Object Storage refer to the following guides:
-1. Make sure the [`git` user home directory](https://docs.gitlab.com/omnibus/settings/configuration.html#moving-the-home-directory-for-a-user) is on local disk.
-1. Configure [database lookup of SSH keys](operations/fast_ssh_key_lookup.md)
-   to eliminate the need for a shared `authorized_keys` file.
1. Configure [object storage for backups](../raketasks/backup_restore.md#uploading-backups-to-a-remote-cloud-storage).
1. Configure [object storage for job artifacts](job_artifacts.md#using-object-storage)
   including [incremental logging](job_logs.md#new-incremental-logging-architecture).
@@ -36,6 +33,19 @@ For configuring GitLab to use Object Storage refer to the following guides:
1. Configure [object storage for Dependency Proxy](packages/dependency_proxy.md#using-object-storage) (optional feature). **(PREMIUM ONLY)**
1. Configure [object storage for Pseudonymizer](pseudonymizer.md#configuration) (optional feature). **(ULTIMATE ONLY)**
1. Configure [object storage for autoscale Runner caching](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) (optional - for improved performance).
+1. Configure [object storage for Terraform state files](terraform_state.md#using-object-storage-core-only)
+### Other alternatives to filesystem storage
+If you're working to [scale out](scaling/index.md) your GitLab implementation,
+or add [fault tolerance and redundancy](high_availability/README.md) you may be
+looking at removing dependencies on block or network filesystems.
+See the following guides and
+[note that Pages requires disk storage](#gitlab-pages-requires-nfs):
+1. Make sure the [`git` user home directory](https://docs.gitlab.com/omnibus/settings/configuration.html#moving-the-home-directory-for-a-user) is on local disk.
+1. Configure [database lookup of SSH keys](operations/fast_ssh_key_lookup.md)
+   to eliminate the need for a shared `authorized_keys` file.
## Warnings, limitations, and known issues
@@ -67,8 +77,9 @@ with the Fog library that GitLab uses. Symptoms include:
### GitLab Pages requires NFS
-If you're working to [scale out](high_availability/README.md) your GitLab implementation and
-one of your requirements is [GitLab Pages](../user/project/pages/index.md) this currently requires
+If you're working to add more GitLab servers for [scaling](scaling/index.md) or
+[fault tolerance](high_availability/README.md) and one of your requirements
+is [GitLab Pages](../user/project/pages/index.md) this currently requires
NFS. There is [work in progress](https://gitlab.com/gitlab-org/gitlab-pages/issues/196)
to remove this dependency. In the future, GitLab Pages may use
[object storage](https://gitlab.com/gitlab-org/gitlab/-/issues/208135).
...
@@ -367,6 +367,8 @@ The different supported drivers are:
Read more about the individual driver's config options in the
[Docker Registry docs](https://docs.docker.com/registry/configuration/#storage).
+[Read more about using object storage with GitLab](../object_storage.md).
CAUTION: **Warning:** GitLab will not backup Docker images that are not stored on the
filesystem. Remember to enable backups with your object storage provider if
desired.
...
@@ -77,7 +77,9 @@ To change the local storage path:
### Using object storage
Instead of relying on the local storage, you can use an object storage to
-upload the blobs of the dependency proxy:
+store the blobs of the dependency proxy.
+[Read more about using object storage with GitLab](../object_storage.md).
**Omnibus GitLab installations**
...
@@ -86,7 +86,9 @@ To change the local storage path:
### Using object storage
Instead of relying on the local storage, you can use an object storage to
-upload packages:
+store packages.
+[Read more about using object storage with GitLab](../object_storage.md).
**Omnibus GitLab installations**
...
@@ -26,6 +26,8 @@ To configure the pseudonymizer, you need to:
  Alternatively, you can use an absolute file path.
- Use an object storage and specify the connection parameters in the `pseudonymizer.upload.connection` configuration option.
+[Read more about using object storage with GitLab](object_storage.md).
**For Omnibus installations:**
1. Edit `/etc/gitlab/gitlab.rb` and add the following lines by replacing with
...
@@ -7,6 +7,8 @@ After [configuring the object storage](../../uploads.md#using-object-storage-cor
>**Note:**
All of the processing will be done in a background worker and requires **no downtime**.
+[Read more about using object storage with GitLab](../../object_storage.md).
### All-in-one Rake task
GitLab provides a wrapper Rake task that migrates all uploaded files - avatars,
...
@@ -51,6 +51,8 @@ Instead of storing Terraform state files on disk, we recommend the use of an obj
store that is S3-compatible instead. This configuration relies on valid credentials to
be configured already.
+[Read more about using object storage with GitLab](object_storage.md).
### Object storage settings
The following settings are:
...
@@ -55,6 +55,8 @@ If you don't want to use the local disk where GitLab is installed to store the
uploads, you can use an object storage provider like AWS S3 instead.
This configuration relies on valid AWS credentials to be configured already.
+[Read more about using object storage with GitLab](object_storage.md).
## Object Storage Settings
For source installations the following settings are nested under `uploads:` and then `object_store:`. On Omnibus GitLab installs they are prefixed by `uploads_object_store_`.
...
# Dependency Proxy API **(PREMIUM)**
## Purge the dependency proxy for a group
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/11631) in GitLab 12.10.
Deletes the cached blobs for a group. This endpoint requires group admin access.
```plaintext
DELETE /groups/:id/dependency_proxy/cache
```
| Attribute | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the group](README.md#namespaced-path-encoding) owned by the authenticated user |
Example request:
```shell
curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/5/dependency_proxy/cache"
```
@@ -162,7 +162,7 @@ When the user is authenticated and `simple` is not set this returns something li
    "merge_method": "merge",
    "autoclose_referenced_issues": true,
    "suggestion_commit_message": null,
-   "marked_for_deletion_at": "2020-04-03",
+   "marked_for_deletion_at": "2020-04-03", // to be deprecated in GitLab 13.0 in favor of marked_for_deletion_on
    "marked_for_deletion_on": "2020-04-03",
    "statistics": {
      "commit_count": 37,
@@ -287,6 +287,9 @@ When the user is authenticated and `simple` is not set this returns something li
]
```
+NOTE: **Note:**
+For users on GitLab [Silver, Premium, or higher](https://about.gitlab.com/pricing/) the `marked_for_deletion_at` attribute will be deprecated in GitLab 13.0 in favor of the `marked_for_deletion_on` attribute.
Users on GitLab [Starter, Bronze, or higher](https://about.gitlab.com/pricing/) will also see
the `approvals_before_merge` parameter:
@@ -408,7 +411,7 @@ This endpoint supports [keyset pagination](README.md#keyset-based-pagination) fo
    "merge_method": "merge",
    "autoclose_referenced_issues": true,
    "suggestion_commit_message": null,
-   "marked_for_deletion_at": "2020-04-03",
+   "marked_for_deletion_at": "2020-04-03", // to be deprecated in GitLab 13.0 in favor of marked_for_deletion_on
    "marked_for_deletion_on": "2020-04-03",
    "statistics": {
      "commit_count": 37,
@@ -874,7 +877,7 @@ GET /projects/:id
    "service_desk_address": null,
    "autoclose_referenced_issues": true,
    "suggestion_commit_message": null,
-   "marked_for_deletion_at": "2020-04-03",
+   "marked_for_deletion_at": "2020-04-03", // to be deprecated in GitLab 13.0 in favor of marked_for_deletion_on
    "marked_for_deletion_on": "2020-04-03",
    "statistics": {
      "commit_count": 37,
...
@@ -4,7 +4,7 @@ type: reference
# Directed Acyclic Graph
-> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/47063) in GitLab 12.2 (enabled by `ci_dag_support` feature flag).
+> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/47063) in GitLab 12.2.
A [directed acyclic graph](https://www.techopedia.com/definition/5739/directed-acyclic-graph-dag) can be
used in the context of a CI/CD pipeline to build relationships between jobs such that
...
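As an editorial aside, a minimal `.gitlab-ci.yml` sketch of the DAG relationships described in the hunk above (the job names and scripts are invented for illustration, not taken from this commit): with `needs`, a job starts as soon as the jobs it depends on finish, instead of waiting for its whole stage.

```yaml
# Illustrative only: job names and scripts are assumptions.
build_a:
  stage: build
  script: ./build-a.sh

build_b:
  stage: build
  script: ./build-b.sh

test_a:
  stage: test
  needs: ["build_a"]   # starts as soon as build_a succeeds, without waiting for build_b
  script: ./test-a.sh
```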
@@ -136,12 +136,11 @@ your own script to generate a YAML file, which is then [used to trigger a child
This technique can be very powerful in generating pipelines targeting content that changed or to
build a matrix of targets and architectures.
-In GitLab 12.9, the child pipeline could fail to be created in certain cases, causing the parent pipeline to fail.
-This is [resolved in GitLab 12.10](https://gitlab.com/gitlab-org/gitlab/-/issues/209070).
## Limitations
A parent pipeline can trigger many child pipelines, but a child pipeline cannot trigger
further child pipelines. See the [related issue](https://gitlab.com/gitlab-org/gitlab/issues/29651)
for discussion on possible future improvements.
+When triggering dynamic child pipelines, if the job containing the CI config artifact is not a predecessor of the
+trigger job, the child pipeline will fail to be created, causing also the parent pipeline to fail.
+In the future we want to validate the trigger job's dependencies [at the time the parent pipeline is created](https://gitlab.com/gitlab-org/gitlab/-/issues/209070) rather than when the child pipeline is created.
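To make that limitation concrete, here is a hedged `.gitlab-ci.yml` sketch (job and file names are assumptions for this note): the job that generates the child pipeline configuration runs in an earlier stage than the trigger job, so its artifact is available when the child pipeline is created.

```yaml
# Illustrative only: names are assumptions, not from the commit.
generate-config:
  stage: build
  script: ./generate-ci-config.sh > generated-config.yml
  artifacts:
    paths:
      - generated-config.yml

run-child-pipeline:
  stage: deploy              # a later stage, so generate-config is a predecessor
  trigger:
    include:
      - artifact: generated-config.yml
        job: generate-config
```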
@@ -555,7 +555,7 @@ In `/etc/ssh/sshd_config` update the following:
#### Amazon S3 object storage
-Since we're not using NFS for shared storage, we will use [Amazon S3](https://aws.amazon.com/s3/) buckets to store backups, artifacts, LFS objects, uploads, merge request diffs, container registry images, and more. For instructions on how to configure each of these, please see [Cloud Object Storage](../../administration/high_availability/object_storage.md).
+Since we're not using NFS for shared storage, we will use [Amazon S3](https://aws.amazon.com/s3/) buckets to store backups, artifacts, LFS objects, uploads, merge request diffs, container registry images, and more. Our [documentation includes configuration instructions](../../administration/object_storage.md) for each of these, and other information about using object storage with GitLab.
Remember to run `sudo gitlab-ctl reconfigure` after saving the changes to the `gitlab.rb` file.
@@ -580,90 +580,55 @@ On the EC2 dashboard:
Now we have a custom AMI that we'll use to create our launch configuration the next step.
-## Deploying GitLab inside an auto scaling group
-We'll use AWS's wizard to deploy GitLab and then SSH into the instance to
-configure the PostgreSQL and Redis connections.
-The Auto Scaling Group option is available through the EC2 dashboard on the left
-sidebar.
-1. Click **Create Auto Scaling group**.
-1. Create a new launch configuration.
-### Choose the AMI
-Choose the AMI:
-1. Go to the Community AMIs and search for `GitLab EE <version>`
-   where `<version>` the latest version as seen on the
-   [releases page](https://about.gitlab.com/releases/).
-![Choose AMI](img/choose_ami.png)
-### Choose an instance type
-You should choose an instance type based on your workload. Consult
-[the hardware requirements](../requirements.md#hardware-requirements) to choose
-one that fits your needs (at least `c5.xlarge`, which is enough to accommodate 100 users):
-1. Choose the your instance type.
-1. Click **Next: Configure Instance Details**.
-### Configure details
-In this step we'll configure some details:
-1. Enter a name (`gitlab-autoscaling`).
-1. Select the IAM role we created.
-1. Optionally, enable CloudWatch and the EBS-optimized instance settings.
-1. In the "Advanced Details" section, set the IP address type to
-   "Do not assign a public IP address to any instances."
-1. Click **Next: Add Storage**.
-### Add storage
-The root volume is 8GB by default and should be enough given that we won't store any data there.
-### Configure security group
-As a last step, configure the security group:
-1. Select the existing load balancer security group we have [created](#load-balancer).
-1. Select **Review**.
-### Review and launch
-Now is a good time to review all the previous settings. When ready, click
-**Create launch configuration** and select the SSH key pair with which you will
-connect to the instance.
-### Create Auto Scaling Group
-We are now able to start creating our Auto Scaling Group:
-1. Give it a group name.
-1. Set the group size to 2 as we want to always start with two instances.
-1. Assign it our network VPC and add the **private subnets**.
-1. In the "Advanced Details" section, choose to receive traffic from ELBs
-   and select our ELB.
-1. Choose the ELB health check.
-1. Click **Next: Configure scaling policies**.
-This is the really great part of Auto Scaling; we get to choose when AWS
-launches new instances and when it removes them. For this group we'll
-scale between 2 and 4 instances where one instance will be added if CPU
+## Deploy GitLab inside an auto scaling group
+### Create a launch configuration
+From the EC2 dashboard:
+1. Select **Launch Configurations** from the left menu and click **Create launch configuration**.
+1. Select **My AMIs** from the left menu and select the `GitLab` custom AMI we created above.
+1. Select an instance type best suited for your needs (at least a `c5.xlarge`) and click **Configure details**.
+1. Enter a name for your launch configuration (we'll use `gitlab-ha-launch-config`).
+1. **Do not** check **Request Spot Instance**.
+1. From the **IAM Role** dropdown, pick the `GitLabAdmin` instance role we [created earlier](#creating-an-iam-ec2-instance-role-and-profile).
+1. Leave the rest as defaults and click **Add Storage**.
+1. The root volume is 8GiB by default and should be enough given that we won’t store any data there. Click **Configure Security Group**.
+1. Check **Select and existing security group** and select the `gitlab-loadbalancer-sec-group` we created earlier.
+1. Click **Review**, review your changes, and click **Create launch configuration**.
+1. Acknowledge that you have access to the private key or create a new one. Click **Create launch configuration**.
+### Create an auto scaling group
+1. As soon as the launch configuration is created, you'll see an option to **Create an Auto Scaling group using this launch configuration**. Click that to start creating the auto scaling group.
+1. Enter a **Group name** (we'll use `gitlab-auto-scaling-group`).
+1. For **Group size**, enter the number of instances you want to start with (we'll enter `2`).
+1. Select the `gitlab-vpc` from the **Network** dropdown.
+1. Add both the private [subnets we created earlier](#subnets).
+1. Expand the **Advanced Details** section and check the **Receive traffic from one or more load balancers** option.
+1. From the **Classic Load Balancers** dropdown, Select the load balancer we created earlier.
+1. For **Health Check Type**, select **ELB**.
+1. We'll leave our **Health Check Grace Period** as the default `300` seconds. Click **Configure scaling policies**.
+1. Check **Use scaling policies to adjust the capacity of this group**.
+1. For this group we'll scale between 2 and 4 instances where one instance will be added if CPU
   utilization is greater than 60% and one instance is removed if it falls
   to less than 45%.
   ![Auto scaling group policies](img/policies.png)
-Finally, configure notifications and tags as you see fit, and create the
+1. Finally, configure notifications and tags as you see fit, review your changes, and create the
   auto scaling group.
-You'll notice that after we save the configuration, AWS starts launching our two
-instances in different AZs and without a public IP which is exactly what
-we intended.
+As the auto scaling group is created, you'll see your new instances spinning up in your EC2 dashboard. You'll also see the new instances added to your load balancer. Once the instances pass the heath check, they are ready to start receiving traffic from the load balancer.
+Since our instances are created by the auto scaling group, go back to your instances and terminate the [instance we created manually above](#install-gitlab). We only needed this instance to create our custom AMI.
+### Log in for the first time
+Using the domain name you used when setting up [DNS for the load balancer](#configure-dns-for-load-balancer), you should now be able to visit GitLab in your browser. The very first time you will be asked to set up a password
+for the `root` user which has admin privileges on the GitLab instance.
+After you set it up, login with username `root` and the newly created password.
## Health check and monitoring with Prometheus
...
@@ -309,6 +309,8 @@ In the example below we use Amazon S3 for storage, but Fog also lets you use
for AWS, Google, OpenStack Swift, Rackspace and Aliyun as well. A local driver is
[also available](#uploading-to-locally-mounted-shares).
+[Read more about using object storage with GitLab](../administration/object_storage.md).
#### Using Amazon S3
For Omnibus GitLab packages:
...
-# Partial Clone for Large Repositories
-CAUTION: **Alpha:**
-Partial Clone is an experimental feature, and will significantly increase
-Gitaly resource utilization when performing a partial clone, and decrease
-performance of subsequent fetch operations.
-As Git repositories become very large, usability decreases as performance
-decreases. One major challenge is cloning the repository, because Git will
-download the entire repository including every commit and every version of
-every object. This can be slow to transfer, and require large amounts of disk
-space.
-Historically, performing a **shallow clone**
-([`--depth`](https://www.git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt))
-has been the only way to reduce the amount of data transferred when cloning
-a Git repository. This does not, however, allow filtering by sub-tree which is
-important for monolithic repositories containing many projects, or by object
-size preventing unnecessary large objects being downloaded.
+# Partial Clone
+As Git repositories grow in size, they can become cumbersome to work with
+because of the large amount of history that must be downloaded, and the large
+amount of disk space they require.
[Partial clone](https://github.com/git/git/blob/master/Documentation/technical/partial-clone.txt)
is a performance optimization that "allows Git to function without having a
complete copy of the repository. The goal of this work is to allow Git better
handle extremely large repositories."
-Specifically, using partial clone, it should be possible for Git to natively
-support:
-- large objects, instead of using [Git LFS](https://git-lfs.github.com/)
-- enormous repositories
-Briefly, partial clone works by:
-- excluding objects from being transferred when cloning or fetching a
-  repository using a new `--filter` flag
-- downloading missing objects on demand
-Follow [Git for enormous repositories](https://gitlab.com/groups/gitlab-org/-/epics/773) for roadmap and updates.
-## Enabling partial clone
-> [Introduced](https://gitlab.com/gitlab-org/gitaly/issues/1553) in GitLab 12.4.
-To enable partial clone, use the [feature flags API](../../api/features.md).
-For example:
+## Filter by file size
+> [Introduced](https://gitlab.com/gitlab-org/gitaly/-/issues/2553) in GitLab 12.10.
+Storing large binary files in Git is normally discouraged, because every large
+file added will be downloaded by everyone who clones or fetches changes
+thereafter. This is slow, if not a complete obstruction when working from a slow
+or unreliable internet connection.
+Using partial clone with a file size filter solves this problem, by excluding
+troublesome large files from clones and fetches. When Git encounters a missing
+file, it will be downloaded on demand.
+When cloning a repository, use the `--filter=blob:limit=<size>` argument. For example,
+to clone the repository excluding files larger than 1 megabyte:
```shell
-curl --data "value=true" --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example.com/api/v4/features/gitaly_upload_pack_filter
+git clone --filter=blob:limit=1m git@gitlab.com:gitlab-com/www-gitlab-com.git
```
-Alternatively, flip the switch and enable the feature flag:
-```ruby
-Feature.enable(:gitaly_upload_pack_filter)
+This would produce the following output:
+```plaintext
+Cloning into 'www-gitlab-com'...
+remote: Enumerating objects: 832467, done.
+remote: Counting objects: 100% (832467/832467), done.
+remote: Compressing objects: 100% (207226/207226), done.
+remote: Total 832467 (delta 585563), reused 826624 (delta 580099), pack-reused 0
+Receiving objects: 100% (832467/832467), 2.34 GiB | 5.05 MiB/s, done.
+Resolving deltas: 100% (585563/585563), done.
+remote: Enumerating objects: 146, done.
+remote: Counting objects: 100% (146/146), done.
+remote: Compressing objects: 100% (138/138), done.
+remote: Total 146 (delta 8), reused 144 (delta 8), pack-reused 0
+Receiving objects: 100% (146/146), 471.45 MiB | 4.60 MiB/s, done.
+Resolving deltas: 100% (8/8), done.
+Updating files: 100% (13008/13008), done.
+Filtering content: 100% (3/3), 131.24 MiB | 4.65 MiB/s, done.
```
-## Excluding objects by size
-Partial Clone allows large objects to be stored directly in the Git repository,
-and be excluded from clones as desired by the user. This eliminates the error
-prone process of deciding which objects should be stored in LFS or not. Using
-partial clone, all files – large or small – may be treated the same.
-With the `uploadpack.allowFilter` and `uploadpack.allowAnySHA1InWant` options
-enabled on the Git server:
-```shell
-# clone the repo, excluding blobs larger than 1 megabyte
-git clone --filter=blob:limit=1m <url>
-# in the checkout step of the clone, and any subsequent operations
-# any blobs that are needed will be downloaded on demand
-git checkout feature-branch
-```
-## Excluding objects by path
-Partial Clone allows clones to be filtered by path using a format similar to a
-`.gitignore` file stored inside the repository.
+The output will be longer because Git will first clone the repository excluding
+files larger than 1 megabyte, and second download any missing large files needed
+to checkout the `master` branch.
+When changing branches, Git may need to download more missing files.
+## Filter by object type
+> [Introduced](https://gitlab.com/gitlab-org/gitaly/-/issues/2553) in GitLab 12.10.
+For enormous repositories with millions of files, and long history, it may be
+helpful to exclude all files and use in combination with `sparse-checkout` to
+reduce the size of your working copy.
+```plaintext
+# Clone the repo excluding all files
+$ git clone --filter=blob:none --sparse git@gitlab.com:gitlab-com/www-gitlab-com/git
+Cloning into 'www-gitlab-com'...
+remote: Enumerating objects: 678296, done.
+remote: Counting objects: 100% (678296/678296), done.
+remote: Compressing objects: 100% (165915/165915), done.
+remote: Total 678296 (delta 472342), reused 673292 (delta 467476), pack-reused 0
+Receiving objects: 100% (678296/678296), 81.06 MiB | 5.74 MiB/s, done.
+Resolving deltas: 100% (472342/472342), done.
+remote: Enumerating objects: 28, done.
+remote: Counting objects: 100% (28/28), done.
+remote: Compressing objects: 100% (25/25), done.
+remote: Total 28 (delta 0), reused 12 (delta 0), pack-reused 0
+Receiving objects: 100% (28/28), 140.29 KiB | 341.00 KiB/s, done.
+Updating files: 100% (28/28), done.
+$ cd www-gitlab-com
+$ git sparse-checkout init --cone
+$ git sparse-checkout add data
+remote: Enumerating objects: 301, done.
+remote: Counting objects: 100% (301/301), done.
+remote: Compressing objects: 100% (292/292), done.
+remote: Total 301 (delta 16), reused 102 (delta 9), pack-reused 0
+Receiving objects: 100% (301/301), 1.15 MiB | 608.00 KiB/s, done.
+Resolving deltas: 100% (16/16), done.
+Updating files: 100% (302/302), done.
+```
+For more details, see the Git documentation for
+[`sparse-checkout`](https://git-scm.com/docs/git-sparse-checkout).
+## Filter by file path
+CAUTION: **Experimental:**
+Partial Clone using `sparse` filters is experimental, slow, and will
+significantly increase Gitaly resource utilization when cloning and fetching.
+Deeper integration between Partial Clone and Sparse Checkout is being explored
+through the `--filter=sparse:oid=<blob-ish>` filter spec, but this is highly
+experimental. This mode of filtering uses a format similar to a `.gitignore`
+file to specify which files should be included when cloning and fetching.
+For more details, see the Git documentation for
+[`rev-list-options`](https://gitlab.com/gitlab-org/git/-/blob/9fadedd637b312089337d73c3ed8447e9f0aa775/Documentation/rev-list-options.txt#L735-780).
With the `uploadpack.allowFilter` and `uploadpack.allowAnySHA1InWant` options
enabled on the Git server:
...
@@ -1116,3 +1116,22 @@ To avoid installation errors:
kubectl get secrets/tiller-secret -n gitlab-managed-apps -o "jsonpath={.data['ca\.crt']}" | base64 -d > b.pem
diff a.pem b.pem
```
### Error installing managed apps on EKS cluster
If you're using a managed cluster on AWS EKS and you are not able to install some of the managed
apps, check the logs first.
You can view the logs by running the following commands:
```shell
kubectl get pods --all-namespaces
kubectl get services --all-namespaces
```
If you are getting the `Failed to assign an IP address to container` error, it's probably caused by the
instance type you've specified in the AWS configuration:
the number and size of the nodes might not provide enough IP addresses to run or install those pods.
For reference, all the AWS instance IP limits are found
[in this AWS repository on GitHub](https://github.com/aws/amazon-vpc-cni-k8s/blob/master/pkg/awsutils/vpc_ip_resource_limit.go) (search for `InstanceENIsAvailable`).
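To see how many pods each node can actually schedule, a figure derived from the ENI and IP limits linked above, one quick check (assuming you have `kubectl` access to the cluster) is:

```shell
# List every node with its pod capacity and allocatable pod count
kubectl describe nodes | grep -E "^Name:|pods:"
```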
...@@ -65,6 +65,13 @@ from GitLab.
The blobs are kept forever, and there is no hard limit on how much data can be
stored.
## Clearing the cache
It is possible to use the GitLab API to purge the dependency proxy cache for a
given group and reclaim disk space taken up by image blobs that are no longer
needed. See the [dependency proxy API documentation](../../../api/dependency_proxy.md)
for more details.
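As an illustration only, and assuming the purge endpoint described in that documentation (`DELETE /groups/:id/dependency_proxy/cache`), clearing the cache for a group with ID `5` might look like:

```shell
curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" \
     "https://gitlab.example.com/api/v4/groups/5/dependency_proxy/cache"
```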
## Limitations
The following limitations apply:
......
...@@ -15,6 +15,14 @@ module Gitlab
validations do
validates :config, hash_or_string: true
validates :config, allowed_keys: ALLOWED_KEYS
validate do
next unless config.is_a?(Hash)
if config[:artifact] && config[:job].blank?
errors.add(:config, "must specify the job where to fetch the artifact from")
end
end
end
end
end
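For reference, a `.gitlab-ci.yml` fragment that satisfies this validation might look like the sketch below; it mirrors the specs added later in this commit, where a hypothetical `generator` job publishes `generated.yml` as an artifact:

```yaml
generator:
  stage: build
  script: generate-child-config > generated.yml
  artifacts:
    paths:
      - generated.yml

trigger-child:
  stage: test
  trigger:
    include:
      - artifact: generated.yml
        job: generator
```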
......
...@@ -142,6 +142,7 @@ module Gitlab
validate_job_stage!(name, job)
validate_job_dependencies!(name, job)
validate_job_needs!(name, job)
validate_dynamic_child_pipeline_dependencies!(name, job)
validate_job_environment!(name, job)
end
end
...@@ -163,35 +164,48 @@ module Gitlab
def validate_job_dependencies!(name, job)
return unless job[:dependencies]
job[:dependencies].each do |dependency|
validate_job_dependency!(name, dependency)
end
end
def validate_dynamic_child_pipeline_dependencies!(name, job)
return unless includes = job.dig(:trigger, :include)
Array(includes).each do |included|
next unless included.is_a?(Hash)
next unless dependency = included[:job]
validate_job_dependency!(name, dependency)
end
end
def validate_job_needs!(name, job)
return unless needs = job.dig(:needs, :job)
needs.each do |need|
validate_job_dependency!(name, need[:name], 'need')
end
end
def validate_job_dependency!(name, dependency, dependency_type = 'dependency')
unless @jobs[dependency.to_sym]
raise ValidationError, "#{name} job: undefined #{dependency_type}: #{dependency}"
end
job_stage_index = stage_index(name)
dependency_stage_index = stage_index(dependency)
# A dependency might be defined later in the configuration
# with a stage that does not exist
unless dependency_stage_index.present? && dependency_stage_index < job_stage_index
raise ValidationError, "#{name} job: #{dependency_type} #{dependency} is not defined in prior stages"
end
end
def stage_index(name)
stage = @jobs.dig(name.to_sym, :stage)
@stages.index(stage)
end
def validate_job_environment!(name, job)
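To make the prior-stages rule concrete, here is a sketch of a configuration the validator above rejects: `test1` pulls its child pipeline configuration from an artifact of `generator`, but `generator` runs in a later stage, so it is not defined in prior stages (the job names are illustrative only):

```yaml
stages:
  - test
  - deploy

test1:
  stage: test
  trigger:
    include:
      - artifact: generated.yml
        job: generator

generator:
  stage: deploy
  script: generate-child-config > generated.yml
  artifacts:
    paths:
      - generated.yml
```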
......
...@@ -86,13 +86,9 @@ module Gitlab
# to the caller to limit the number of blobs and blob_size_limit.
#
def batch(repository, blob_references, blob_size_limit: MAX_DATA_DISPLAY_SIZE)
blob_references.each_slice(BATCH_SIZE).flat_map do |refs|
repository.gitaly_blob_client.get_blobs(refs, blob_size_limit).to_a
end
end
# Returns an array of Blob instances just with the metadata, that means # Returns an array of Blob instances just with the metadata, that means
......
...@@ -22,6 +22,15 @@ module Gitlab
@collection.map(&:path)
end
def real_size
max_files = ::Commit.max_diff_options[:max_files]
if paths.size > max_files
"#{max_files}+"
else
paths.size.to_s
end
end
private
def indexed_by_path
......
...@@ -2470,7 +2470,7 @@ msgid "AutoDevOps|Learn more in the %{link_to_documentation}"
msgstr "Erfahre mehr in der %{link_to_documentation}"
msgid "AutoDevOps|The Auto DevOps pipeline has been enabled and will be used if no alternative CI configuration file is found. %{more_information_link}"
msgstr "Die Auto-DevOps-Pipeline wurde aktiviert und wird verwendet, falls keine alternative CI-Konfigurationsdatei gefunden wurde. %{more_information_link}"
msgid "Autocomplete"
msgstr "Autovervollständigung"
......
...@@ -19388,6 +19388,9 @@ msgstr ""
msgid "Subkeys"
msgstr ""
msgid "Submit Changes"
msgstr ""
msgid "Submit a review"
msgstr ""
...@@ -20510,7 +20513,7 @@ msgstr ""
msgid "There was an error while fetching value stream analytics duration median data."
msgstr ""
msgid "There was an error while fetching value stream analytics recent activity data."
msgstr ""
msgid "There was an error with the reCAPTCHA. Please solve the reCAPTCHA again."
......
...@@ -796,11 +796,13 @@ describe('DiffsStoreMutations', () => {
it('sets showWhitespace', () => {
const state = {
showWhitespace: true,
diffFiles: ['test'],
};
mutations[types.SET_SHOW_WHITESPACE](state, false);
expect(state.showWhitespace).toBe(false);
expect(state.diffFiles).toEqual([]);
});
});
......
import $ from 'jquery';
import initTodoToggle, { initNavUserDropdownTracking } from '~/header';
import { mockTracking, unmockTracking } from 'helpers/tracking_helper';
describe('Header', () => {
describe('Todos notification', () => {
const todosPendingCount = '.todos-count';
const fixtureTemplate = 'issues/open-issue.html';
...@@ -50,4 +52,36 @@ describe('Header', () => {
expect($(todosPendingCount).text()).toEqual('99+');
});
});
});
describe('Track user dropdown open', () => {
let trackingSpy;
beforeEach(() => {
setFixtures(`
<li class="js-nav-user-dropdown">
<a class="js-buy-ci-minutes-link" data-track-event="click_buy_ci_minutes" data-track-label="free" data-track-property="user_dropdown">Buy CI minutes
</a>
</li>`);
trackingSpy = mockTracking('_category_', $('.js-nav-user-dropdown').element, jest.spyOn);
document.body.dataset.page = 'some:page';
initNavUserDropdownTracking();
});
afterEach(() => {
unmockTracking();
});
it('sends a tracking event when the dropdown is opened and contains Buy CI minutes link', () => {
$('.js-nav-user-dropdown').trigger('shown.bs.dropdown');
expect(trackingSpy).toHaveBeenCalledTimes(1);
expect(trackingSpy).toHaveBeenCalledWith(undefined, 'show_buy_ci_minutes', {
label: 'free',
property: 'user_dropdown',
});
});
});
});
...@@ -56,6 +56,32 @@ describe('monitor helper', () => {
expect(result.name).toEqual('brpop');
});
it('supports a multi metric label template expression', () => {
const config = {
...defaultConfig,
name: '',
};
const [result] = monitorHelper.makeDataSeries(
[
{
metric: {
backend: 'HA Server',
frontend: 'BA Server',
app: 'prometheus',
instance: 'k8 cluster 1',
},
values: series,
},
],
config,
);
expect(result.name).toBe(
'backend: HA Server, frontend: BA Server, app: prometheus, instance: k8 cluster 1',
);
});
it('supports space-padded template expressions', () => {
const config = {
...defaultConfig,
......
...@@ -251,7 +251,7 @@ describe('mapToDashboardViewModel', () => {
};
it('creates a metric', () => {
const dashboard = dashboardWithMetric({ label: 'Panel Label' });
expect(getMappedMetric(dashboard)).toEqual({
label: expect.any(String),
...@@ -268,11 +268,11 @@ describe('mapToDashboardViewModel', () => {
expect(getMappedMetric(dashboard).metricId).toEqual('1_http_responses');
});
it('creates a metric without a default label', () => {
const dashboard = dashboardWithMetric({});
expect(getMappedMetric(dashboard)).toMatchObject({
label: undefined,
});
});
......
...@@ -4,14 +4,15 @@ import createRouter from '~/repository/router';
describe('Repository router spec', () => {
it.each`
path                                     | branch              | component    | componentName
${'/'}                                   | ${'master'}         | ${IndexPage} | ${'IndexPage'}
${'/tree/master'}                        | ${'master'}         | ${TreePage}  | ${'TreePage'}
${'/-/tree/master'}                      | ${'master'}         | ${TreePage}  | ${'TreePage'}
${'/-/tree/master/app/assets'}           | ${'master'}         | ${TreePage}  | ${'TreePage'}
${'/-/tree/feature/test-%23/app/assets'} | ${'feature/test-#'} | ${TreePage}  | ${'TreePage'}
${'/-/tree/123/app/assets'}              | ${'master'}         | ${null}      | ${'null'}
`('sets component as $componentName for path "$path"', ({ path, component, branch }) => {
const router = createRouter('', branch);
const componentsForRoute = router.getMatchedComponents(path);
......
import { shallowMount } from '@vue/test-utils';
import { GlNewButton } from '@gitlab/ui';
import PublishToolbar from '~/static_site_editor/components/publish_toolbar.vue';
describe('Static Site Editor Toolbar', () => {
let wrapper;
const buildWrapper = (propsData = {}) => {
wrapper = shallowMount(PublishToolbar, {
propsData: {
saveable: false,
...propsData,
},
});
};
const findSaveChangesButton = () => wrapper.find(GlNewButton);
beforeEach(() => {
buildWrapper();
});
afterEach(() => {
wrapper.destroy();
});
it('renders Submit Changes button', () => {
expect(findSaveChangesButton().exists()).toBe(true);
});
it('disables Submit Changes button', () => {
expect(findSaveChangesButton().attributes('disabled')).toBe('true');
});
describe('when saveable', () => {
it('enables Submit Changes button', () => {
buildWrapper({ saveable: true });
expect(findSaveChangesButton().attributes('disabled')).toBeFalsy();
});
});
});
...@@ -7,6 +7,7 @@ import createState from '~/static_site_editor/store/state';
import StaticSiteEditor from '~/static_site_editor/components/static_site_editor.vue';
import EditArea from '~/static_site_editor/components/edit_area.vue';
import PublishToolbar from '~/static_site_editor/components/publish_toolbar.vue';
const localVue = createLocalVue();
...@@ -16,18 +17,31 @@ describe('StaticSiteEditor', () => {
let wrapper;
let store;
let loadContentActionMock;
let setContentActionMock;
const buildStore = ({ initialState, getters } = {}) => {
loadContentActionMock = jest.fn();
setContentActionMock = jest.fn();
store = new Vuex.Store({
state: createState(initialState),
getters: {
isContentLoaded: () => false,
contentChanged: () => false,
...getters,
},
actions: {
loadContent: loadContentActionMock,
setContent: setContentActionMock,
},
});
};
const buildContentLoadedStore = ({ initialState, getters } = {}) => {
buildStore({
initialState,
getters: {
isContentLoaded: () => true,
...getters,
},
});
};
...@@ -40,6 +54,8 @@ describe('StaticSiteEditor', () => {
};
const findEditArea = () => wrapper.find(EditArea);
const findPublishToolbar = () => wrapper.find(PublishToolbar);
const findSkeletonLoader = () => wrapper.find(GlSkeletonLoader);
beforeEach(() => {
buildStore();
...@@ -54,6 +70,10 @@ describe('StaticSiteEditor', () => {
it('does not render edit area', () => {
expect(findEditArea().exists()).toBe(false);
});
it('does not render toolbar', () => {
expect(findPublishToolbar().exists()).toBe(false);
});
});
describe('when content is loaded', () => {
...@@ -68,19 +88,49 @@ describe('StaticSiteEditor', () => {
expect(findEditArea().exists()).toBe(true);
});
it('does not render skeleton loader', () => {
expect(findSkeletonLoader().exists()).toBe(false);
});
it('passes page content to edit area', () => {
expect(findEditArea().props('value')).toBe(content);
});
it('renders toolbar', () => {
expect(findPublishToolbar().exists()).toBe(true);
});
});
it('sets toolbar as saveable when content changes', () => {
buildContentLoadedStore({
getters: {
contentChanged: () => true,
},
});
buildWrapper();
expect(findPublishToolbar().props('saveable')).toBe(true);
});
it('displays skeleton loader when loading content', () => {
buildStore({ initialState: { isLoadingContent: true } });
buildWrapper();
expect(findSkeletonLoader().exists()).toBe(true);
});
it('dispatches load content action', () => {
expect(loadContentActionMock).toHaveBeenCalled();
});
it('dispatches setContent action when edit area emits input event', () => {
const content = 'new content';
buildContentLoadedStore();
buildWrapper();
findEditArea().vm.$emit('input', content);
expect(setContentActionMock).toHaveBeenCalledWith(expect.anything(), content, undefined);
});
});
...@@ -73,4 +73,15 @@ describe('Static Site Editor Store actions', () => {
});
});
});
describe('setContent', () => {
it('commits setContent mutation', () => {
testAction(actions.setContent, content, state, [
{
type: mutationTypes.SET_CONTENT,
payload: content,
},
]);
});
});
});
import createState from '~/static_site_editor/store/state';
import { isContentLoaded, contentChanged } from '~/static_site_editor/store/getters';
import { sourceContent as content } from '../mock_data';
describe('Static Site Editor Store getters', () => {
describe('isContentLoaded', () => {
it('returns true when originalContent is not empty', () => {
expect(isContentLoaded(createState({ originalContent: content }))).toBe(true);
});
it('returns false when originalContent is empty', () => {
expect(isContentLoaded(createState({ originalContent: '' }))).toBe(false);
});
});
describe('contentChanged', () => {
it('returns true when content and originalContent are different', () => {
const state = createState({ content, originalContent: 'something else' });
expect(contentChanged(state)).toBe(true);
});
it('returns false when content and originalContent are the same', () => {
const state = createState({ content, originalContent: content });
expect(contentChanged(state)).toBe(false);
}); });
}); });
});
...@@ -35,8 +35,9 @@ describe('Static Site Editor Store mutations', () => {
expect(state.title).toBe(payload.title);
});
it('sets originalContent and content', () => {
expect(state.content).toBe(payload.content);
expect(state.originalContent).toBe(payload.content);
});
});
...@@ -49,4 +50,12 @@ describe('Static Site Editor Store mutations', () => {
expect(state.isLoadingContent).toBe(false);
});
});
describe('setContent', () => {
it('sets content', () => {
mutations[types.SET_CONTENT](state, content);
expect(state.content).toBe(content);
});
});
});
# frozen_string_literal: true
require 'spec_helper'
describe ::Gitlab::Ci::Config::Entry::Include do
subject(:include_entry) { described_class.new(config) }
describe 'validations' do
before do
include_entry.compose!
end
context 'when value is a string' do
let(:config) { 'test.yml' }
it { is_expected.to be_valid }
end
context 'when value is hash' do
context 'when using not allowed keys' do
let(:config) do
{ not_allowed: 'key' }
end
it { is_expected.not_to be_valid }
end
context 'when using "local"' do
let(:config) { { local: 'test.yml' } }
it { is_expected.to be_valid }
end
context 'when using "file"' do
let(:config) { { file: 'test.yml' } }
it { is_expected.to be_valid }
end
context 'when using "template"' do
let(:config) { { template: 'test.yml' } }
it { is_expected.to be_valid }
end
context 'when using "artifact"' do
context 'and specifying "job"' do
let(:config) { { artifact: 'test.yml', job: 'generator' } }
it { is_expected.to be_valid }
end
context 'without "job"' do
let(:config) { { artifact: 'test.yml' } }
it { is_expected.not_to be_valid }
it 'has specific error' do
expect(include_entry.errors)
.to include('include config must specify the job where to fetch the artifact from')
end
end
end
end
context 'when value is something else' do
let(:config) { 123 }
it { is_expected.not_to be_valid }
end
end
end
...@@ -2052,6 +2052,54 @@ module Gitlab
end
end
describe 'with parent-child pipeline' do
context 'when artifact and job are specified' do
let(:config) do
YAML.dump({
build1: { stage: 'build', script: 'test' },
test1: { stage: 'test', trigger: {
include: [{ artifact: 'generated.yml', job: 'build1' }]
} }
})
end
it { expect { subject }.not_to raise_error }
end
context 'when job is not specified while artifact is' do
let(:config) do
YAML.dump({
build1: { stage: 'build', script: 'test' },
test1: { stage: 'test', trigger: {
include: [{ artifact: 'generated.yml' }]
} }
})
end
it do
expect { subject }.to raise_error(
described_class::ValidationError,
/include config must specify the job where to fetch the artifact from/)
end
end
context 'when include is a string' do
let(:config) do
YAML.dump({
build1: { stage: 'build', script: 'test' },
test1: {
stage: 'test',
trigger: {
include: 'generated.yml'
}
}
})
end
it { expect { subject }.not_to raise_error }
end
end
describe "Error handling" do describe "Error handling" do
it "fails to parse YAML" do it "fails to parse YAML" do
expect do expect do
......
...@@ -301,7 +301,6 @@ describe Gitlab::Git::Blob, :seed_helper do
stub_const('Gitlab::Git::Blob::BATCH_SIZE', 2)
end
context 'blobs_fetch_in_batches is enabled' do
it 'fetches the blobs in batches' do
expect(client).to receive(:get_blobs).with(first_batch, limit).ordered
expect(client).to receive(:get_blobs).with(second_batch, limit).ordered
...@@ -310,19 +309,6 @@ describe Gitlab::Git::Blob, :seed_helper do
subject
end
end
context 'blobs_fetch_in_batches is disabled' do
before do
stub_feature_flags(blobs_fetch_in_batches: false)
end
it 'fetches the blobs in a single batch' do
expect(client).to receive(:get_blobs).with(blob_references, limit)
subject
end
end
end
end
describe '.batch_metadata' do describe '.batch_metadata' do
......
...@@ -29,4 +29,16 @@ describe Gitlab::Git::DiffStatsCollection do
expect(collection.paths).to eq %w[foo bar]
end
end
describe '#real_size' do
it 'returns the number of modified files' do
expect(collection.real_size).to eq('2')
end
it 'returns capped number when it is bigger than max_files' do
allow(::Commit).to receive(:max_diff_options).and_return(max_files: 1)
expect(collection.real_size).to eq('1+')
end
end
end
...@@ -96,14 +96,6 @@ describe Ci::BuildDependencies do
end
it { is_expected.to contain_exactly(build, rspec_test, staging) }
context 'when ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it { is_expected.to contain_exactly(build, rspec_test, rubocop_test, staging) }
end
end
context 'when need artifacts are defined' do
......
...@@ -25,20 +25,6 @@ describe Ci::Processable do
it 'returns all needs' do
expect(with_aggregated_needs.first.aggregated_needs_names).to contain_exactly('test1', 'test2')
end
context 'with ci_dag_support disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'returns all processables' do
expect(with_aggregated_needs).to contain_exactly(processable)
end
it 'returns empty needs' do
expect(with_aggregated_needs.first.aggregated_needs_names).to be_nil
end
end
end
context 'without needs' do
......
...@@ -874,7 +874,7 @@ describe MergeRequest do
subject(:merge_request) { build(:merge_request) }
before do
allow(diff).to receive(:modified_paths).and_return(paths)
end
context 'when past_merge_request_diff is specified' do
...@@ -890,13 +890,32 @@ describe MergeRequest do
let(:compare) { double(:compare) }
let(:diff) { compare }
before do
merge_request.compare = compare
expect(merge_request).to receive(:diff_stats).and_return(diff_stats)
end
context 'and diff_stats are not present' do
let(:diff_stats) { nil }
it 'returns affected file paths from compare' do
expect(merge_request.modified_paths).to eq(paths)
end
end
context 'and diff_stats are present' do
let(:diff_stats) { double(:diff_stats) }
it 'returns affected file paths from compare' do
diff_stats_path = double(:diff_stats_paths)
expect(diff_stats).to receive(:paths).and_return(diff_stats_path)
expect(merge_request.modified_paths).to eq(diff_stats_path)
end
end
end
context 'when no arguments provided' do
let(:diff) { merge_request.merge_request_diff }
......
...@@ -703,7 +703,7 @@ describe Snippet do
let(:current_size) { 60 }
before do
allow(subject.repository).to receive(:size).and_return(current_size)
end
it 'sets up size checker', :aggregate_failures do
......
...@@ -18,25 +18,10 @@ describe Ci::CreatePipelineService, '#execute' do
before do
project.add_developer(user)
stub_ci_pipeline_yaml_file(config)
end
describe 'child pipeline triggers' do
before do
stub_ci_pipeline_yaml_file <<~YAML
test:
script: rspec
deploy:
variables:
CROSS: downstream
stage: deploy
trigger:
include:
- local: path/to/child.yml
YAML
end
shared_examples 'successful creation' do
it 'creates bridge jobs correctly' do
pipeline = create_pipeline!
...@@ -48,21 +33,57 @@ describe Ci::CreatePipelineService, '#execute' do
expect(bridge).to be_a Ci::Bridge
expect(bridge.stage).to eq 'deploy'
expect(pipeline.statuses).to match_array [test, bridge]
expect(bridge.options).to eq(expected_bridge_options)
expect(bridge.yaml_variables)
.to include(key: 'CROSS', value: 'downstream', public: true)
end
end
shared_examples 'creation failure' do
it 'returns errors' do
pipeline = create_pipeline!
expect(pipeline.errors.full_messages.first).to match(expected_error)
expect(pipeline.failure_reason).to eq 'config_error'
expect(pipeline).to be_persisted
expect(pipeline.status).to eq 'failed'
end
end
describe 'child pipeline triggers' do describe 'child pipeline triggers' do
context 'when YAML is valid' do let(:config) do
before do <<~YAML
stub_ci_pipeline_yaml_file <<~YAML
test:
script: rspec
deploy:
variables:
CROSS: downstream
stage: deploy
trigger:
include:
- local: path/to/child.yml
YAML
end
it_behaves_like 'successful creation' do
let(:expected_bridge_options) do
{
'trigger' => {
'include' => [
{ 'local' => 'path/to/child.yml' }
]
}
}
end
end
end
describe 'child pipeline triggers' do
context 'when YAML is valid' do
let(:config) do
<<~YAML
test:
script: rspec
deploy:
variables:
CROSS: downstream
...@@ -73,33 +94,79 @@ describe Ci::CreatePipelineService, '#execute' do
YAML
end
it 'creates bridge jobs correctly' do it_behaves_like 'successful creation' do
pipeline = create_pipeline! let(:expected_bridge_options) do
{
'trigger' => {
'include' => [
{ 'local' => 'path/to/child.yml' }
]
}
}
end
end
test = pipeline.statuses.find_by(name: 'test') context 'when trigger:include is specified as a string' do
bridge = pipeline.statuses.find_by(name: 'deploy') let(:config) do
<<~YAML
test:
script: rspec
deploy:
variables:
CROSS: downstream
stage: deploy
trigger:
include: path/to/child.yml
YAML
end
expect(pipeline).to be_persisted it_behaves_like 'successful creation' do
expect(test).to be_a Ci::Build let(:expected_bridge_options) do
expect(bridge).to be_a Ci::Bridge {
expect(bridge.stage).to eq 'deploy' 'trigger' => {
expect(pipeline.statuses).to match_array [test, bridge] 'include' => 'path/to/child.yml'
expect(bridge.options).to eq( }
'trigger' => { 'include' => [{ 'local' => 'path/to/child.yml' }] } }
) end
expect(bridge.yaml_variables)
.to include(key: 'CROSS', value: 'downstream', public: true)
end end
end end
context 'when YAML is invalid' do context 'when trigger:include is specified as array of strings' do
let(:config) do let(:config) do
<<~YAML
test:
script: rspec
deploy:
variables:
CROSS: downstream
stage: deploy
trigger:
include:
- path/to/child.yml
- path/to/child2.yml
YAML
end
it_behaves_like 'successful creation' do
let(:expected_bridge_options) do
{ {
'trigger' => {
'include' => ['path/to/child.yml', 'path/to/child2.yml']
}
}
end
end
end
end
context 'when limit of includes is reached' do
let(:config) do
YAML.dump({
test: { script: 'rspec' },
deploy: {
trigger: { include: included_files }
}
})
end
let(:included_files) do
...@@ -112,17 +179,46 @@ describe Ci::CreatePipelineService, '#execute' do
Gitlab::Ci::Config::Entry::Trigger::ComplexTrigger::SameProjectTrigger::INCLUDE_MAX_SIZE
end
it_behaves_like 'creation failure' do
let(:expected_error) { /trigger:include config is too long/ }
end
end
context 'when including configs from artifact' do
context 'when specified dependency is in the wrong order' do
let(:config) do
<<~YAML
test:
trigger:
include:
- job: generator
artifact: 'generated.yml'
generator:
stage: 'deploy'
script: 'generator'
YAML
end
it_behaves_like 'creation failure' do
let(:expected_error) { /test job: dependency generator is not defined in prior stages/ }
end
end
context 'when specified dependency is missing :job key' do
let(:config) do
<<~YAML
test:
trigger:
include:
- artifact: 'generated.yml'
YAML
end
it_behaves_like 'creation failure' do
let(:expected_error) do
/include config must specify the job where to fetch the artifact from/
end
end
end
end
end
......
...@@ -757,50 +757,12 @@ shared_examples 'Pipeline Processing Service' do
expect(builds.pending).to contain_exactly(deploy)
end
context 'when feature ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'when linux:build finishes first it follows stages' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w(pending created created))
expect(builds.pending).to contain_exactly(linux_build, mac_build)
# we follow the single path of linux
linux_build.reset.success!
expect(stages).to eq(%w(running created created))
expect(builds.success).to contain_exactly(linux_build)
expect(builds.pending).to contain_exactly(mac_build)
mac_build.reset.success!
expect(stages).to eq(%w(success pending created))
expect(builds.success).to contain_exactly(linux_build, mac_build)
expect(builds.pending).to contain_exactly(
linux_rspec, linux_rubocop, mac_rspec, mac_rubocop)
linux_rspec.reset.success!
linux_rubocop.reset.success!
mac_rspec.reset.success!
mac_rubocop.reset.success!
expect(stages).to eq(%w(success success pending))
expect(builds.success).to contain_exactly(
linux_build, linux_rspec, linux_rubocop, mac_build, mac_rspec, mac_rubocop)
expect(builds.pending).to contain_exactly(deploy)
end
end
context 'when one of the jobs is run on a failure' do
let!(:linux_notify) { create_build('linux:notify', stage: 'deploy', stage_idx: 2, when: 'on_failure', scheduling_type: :dag) }
let!(:linux_notify_on_build) { create(:ci_build_need, build: linux_notify, name: 'linux:build') }
context 'when another job in build phase fails first' do
context 'when ci_dag_support is enabled' do
it 'does skip linux:notify' do
expect(process_pipeline).to be_truthy
...@@ -811,22 +773,6 @@ shared_examples 'Pipeline Processing Service' do
end
end
context 'when ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
mac_build.reset.drop!
linux_build.reset.success!
expect(linux_notify.reset).to be_pending
end
end
end
context 'when linux:build job fails first' do
it 'does run linux:notify' do
expect(process_pipeline).to be_truthy
...@@ -864,19 +810,6 @@ shared_examples 'Pipeline Processing Service' do
expect(stages).to eq(%w(success success running))
expect(builds.pending).to contain_exactly(deploy)
end
context 'when ci_dag_support is disabled' do
before do
stub_feature_flags(ci_dag_support: false)
end
it 'does run deploy_pages at the start' do
expect(process_pipeline).to be_truthy
expect(stages).to eq(%w(pending created created))
expect(builds.pending).to contain_exactly(linux_build, mac_build)
end
end
end
end
......
...@@ -394,6 +394,7 @@ describe PostReceive do
it 'expires the status cache' do
expect(snippet.repository).to receive(:empty?).and_return(true)
expect(snippet.repository).to receive(:expire_status_cache)
expect(snippet.repository).to receive(:expire_statistics_caches)
perform
end
......