Commit 45380bfb authored by GitLab Bot's avatar GitLab Bot

Automatic merge of gitlab-org/gitlab master

parents ab96c015 db32e7a9
...@@ -1031,6 +1031,7 @@ Gitlab/NamespacedClass: ...@@ -1031,6 +1031,7 @@ Gitlab/NamespacedClass:
- 'app/finders/joined_groups_finder.rb' - 'app/finders/joined_groups_finder.rb'
- 'app/finders/keys_finder.rb' - 'app/finders/keys_finder.rb'
- 'app/finders/labels_finder.rb' - 'app/finders/labels_finder.rb'
- 'app/finders/lfs_pointers_finder.rb'
- 'app/finders/license_template_finder.rb' - 'app/finders/license_template_finder.rb'
- 'app/finders/members_finder.rb' - 'app/finders/members_finder.rb'
- 'app/finders/merge_request_target_project_finder.rb' - 'app/finders/merge_request_target_project_finder.rb'
......
...@@ -28,6 +28,10 @@ export default { ...@@ -28,6 +28,10 @@ export default {
GlLoadingIcon, GlLoadingIcon,
GlIcon, GlIcon,
UserAccessRoleBadge, UserAccessRoleBadge,
ComplianceFrameworkLabel: () =>
import(
'ee_component/vue_shared/components/compliance_framework_label/compliance_framework_label.vue'
),
itemCaret, itemCaret,
itemTypeIcon, itemTypeIcon,
itemStats, itemStats,
...@@ -67,6 +71,9 @@ export default { ...@@ -67,6 +71,9 @@ export default {
hasAvatar() { hasAvatar() {
return this.group.avatarUrl !== null; return this.group.avatarUrl !== null;
}, },
hasComplianceFramework() {
return Boolean(this.group.complianceFramework?.name);
},
isGroup() { isGroup() {
return this.group.type === 'group'; return this.group.type === 'group';
}, },
...@@ -82,6 +89,9 @@ export default { ...@@ -82,6 +89,9 @@ export default {
microdata() { microdata() {
return this.group.microdata || {}; return this.group.microdata || {};
}, },
complianceFramework() {
return this.group.complianceFramework;
},
}, },
methods: { methods: {
onClickRowGroup(e) { onClickRowGroup(e) {
...@@ -167,6 +177,13 @@ export default { ...@@ -167,6 +177,13 @@ export default {
<user-access-role-badge v-if="group.permission" class="gl-mt-3"> <user-access-role-badge v-if="group.permission" class="gl-mt-3">
{{ group.permission }} {{ group.permission }}
</user-access-role-badge> </user-access-role-badge>
<compliance-framework-label
v-if="hasComplianceFramework"
class="gl-mt-3"
:name="complianceFramework.name"
:color="complianceFramework.color"
:description="complianceFramework.description"
/>
</div> </div>
<div v-if="group.description" class="description"> <div v-if="group.description" class="description">
<span <span
......
import { normalizeHeaders, parseIntPagination } from '../../lib/utils/common_utils'; import { isEmpty } from 'lodash';
import { normalizeHeaders, parseIntPagination } from '~/lib/utils/common_utils';
import { getGroupItemMicrodata } from './utils'; import { getGroupItemMicrodata } from './utils';
export default class GroupsStore { export default class GroupsStore {
...@@ -70,7 +71,7 @@ export default class GroupsStore { ...@@ -70,7 +71,7 @@ export default class GroupsStore {
? rawGroupItem.subgroup_count ? rawGroupItem.subgroup_count
: rawGroupItem.children_count; : rawGroupItem.children_count;
return { const groupItem = {
id: rawGroupItem.id, id: rawGroupItem.id,
name: rawGroupItem.name, name: rawGroupItem.name,
fullName: rawGroupItem.full_name, fullName: rawGroupItem.full_name,
...@@ -98,6 +99,16 @@ export default class GroupsStore { ...@@ -98,6 +99,16 @@ export default class GroupsStore {
pendingRemoval: rawGroupItem.marked_for_deletion, pendingRemoval: rawGroupItem.marked_for_deletion,
microdata: this.showSchemaMarkup ? getGroupItemMicrodata(rawGroupItem) : {}, microdata: this.showSchemaMarkup ? getGroupItemMicrodata(rawGroupItem) : {},
}; };
if (!isEmpty(rawGroupItem.compliance_management_framework)) {
groupItem.complianceFramework = {
name: rawGroupItem.compliance_management_framework.name,
color: rawGroupItem.compliance_management_framework.color,
description: rawGroupItem.compliance_management_framework.description,
};
}
return groupItem;
} }
removeGroup(group, parentGroup) { removeGroup(group, parentGroup) {
......
...@@ -35,7 +35,6 @@ import GlFieldErrors from './gl_field_errors'; ...@@ -35,7 +35,6 @@ import GlFieldErrors from './gl_field_errors';
import initUserPopovers from './user_popovers'; import initUserPopovers from './user_popovers';
import initBroadcastNotifications from './broadcast_notification'; import initBroadcastNotifications from './broadcast_notification';
import { initTopNav } from './nav'; import { initTopNav } from './nav';
import navEventHub, { EVENT_RESPONSIVE_TOGGLE } from './nav/event_hub';
import 'ee_else_ce/main_ee'; import 'ee_else_ce/main_ee';
...@@ -203,11 +202,7 @@ $body.on('ajax:complete, ajax:beforeSend, submit', 'form', function ajaxComplete ...@@ -203,11 +202,7 @@ $body.on('ajax:complete, ajax:beforeSend, submit', 'form', function ajaxComplete
}); });
$('.navbar-toggler').on('click', () => { $('.navbar-toggler').on('click', () => {
// The order is important. The `menu-expanded` is used as a source of truth for now. document.body.classList.toggle('top-nav-responsive-open');
// This can be simplified when the :combined_menu feature flag is removed.
// https://gitlab.com/gitlab-org/gitlab/-/issues/333180
$('.header-content').toggleClass('menu-expanded');
navEventHub.$emit(EVENT_RESPONSIVE_TOGGLE);
}); });
/** /**
......
...@@ -2,8 +2,7 @@ ...@@ -2,8 +2,7 @@
import { FREQUENT_ITEMS_PROJECTS, FREQUENT_ITEMS_GROUPS } from '~/frequent_items/constants'; import { FREQUENT_ITEMS_PROJECTS, FREQUENT_ITEMS_GROUPS } from '~/frequent_items/constants';
import { BV_DROPDOWN_SHOW, BV_DROPDOWN_HIDE } from '~/lib/utils/constants'; import { BV_DROPDOWN_SHOW, BV_DROPDOWN_HIDE } from '~/lib/utils/constants';
import KeepAliveSlots from '~/vue_shared/components/keep_alive_slots.vue'; import KeepAliveSlots from '~/vue_shared/components/keep_alive_slots.vue';
import eventHub, { EVENT_RESPONSIVE_TOGGLE } from '../event_hub'; import { resetMenuItemsActive } from '../utils';
import { resetMenuItemsActive, hasMenuExpanded } from '../utils';
import ResponsiveHeader from './responsive_header.vue'; import ResponsiveHeader from './responsive_header.vue';
import ResponsiveHome from './responsive_home.vue'; import ResponsiveHome from './responsive_home.vue';
import TopNavContainerView from './top_nav_container_view.vue'; import TopNavContainerView from './top_nav_container_view.vue';
...@@ -33,25 +32,14 @@ export default { ...@@ -33,25 +32,14 @@ export default {
}, },
}, },
created() { created() {
eventHub.$on(EVENT_RESPONSIVE_TOGGLE, this.updateResponsiveOpen);
this.$root.$on(BV_DROPDOWN_SHOW, this.showMobileOverlay); this.$root.$on(BV_DROPDOWN_SHOW, this.showMobileOverlay);
this.$root.$on(BV_DROPDOWN_HIDE, this.hideMobileOverlay); this.$root.$on(BV_DROPDOWN_HIDE, this.hideMobileOverlay);
this.updateResponsiveOpen();
}, },
beforeDestroy() { beforeDestroy() {
eventHub.$off(EVENT_RESPONSIVE_TOGGLE, this.onToggle);
this.$root.$off(BV_DROPDOWN_SHOW, this.showMobileOverlay); this.$root.$off(BV_DROPDOWN_SHOW, this.showMobileOverlay);
this.$root.$off(BV_DROPDOWN_HIDE, this.hideMobileOverlay); this.$root.$off(BV_DROPDOWN_HIDE, this.hideMobileOverlay);
}, },
methods: { methods: {
updateResponsiveOpen() {
if (hasMenuExpanded()) {
document.body.classList.add('top-nav-responsive-open');
} else {
document.body.classList.remove('top-nav-responsive-open');
}
},
onMenuItemClick({ view }) { onMenuItemClick({ view }) {
if (view) { if (view) {
this.activeView = view; this.activeView = view;
......
import eventHubFactory from '~/helpers/event_hub_factory';
export const EVENT_RESPONSIVE_TOGGLE = 'top-nav-responsive-toggle';
export default eventHubFactory();
export const hasMenuExpanded = () =>
Boolean(document.querySelector('.header-content.menu-expanded'));
export * from './has_menu_expanded';
export * from './reset_menu_items_active'; export * from './reset_menu_items_active';
<script> <script>
/* eslint-disable vue/no-v-html */ import {
import { GlFormGroup, GlButton, GlModal, GlToast, GlToggle, GlLink } from '@gitlab/ui'; GlFormGroup,
GlButton,
GlModal,
GlToast,
GlToggle,
GlLink,
GlSafeHtmlDirective,
} from '@gitlab/ui';
import Vue from 'vue'; import Vue from 'vue';
import { mapState, mapActions } from 'vuex'; import { mapState, mapActions } from 'vuex';
import { helpPagePath } from '~/helpers/help_page_helper'; import { helpPagePath } from '~/helpers/help_page_helper';
...@@ -18,6 +25,9 @@ export default { ...@@ -18,6 +25,9 @@ export default {
GlToggle, GlToggle,
GlLink, GlLink,
}, },
directives: {
SafeHtml: GlSafeHtmlDirective,
},
formLabels: { formLabels: {
createProject: __('Self monitoring'), createProject: __('Self monitoring'),
}, },
...@@ -137,7 +147,7 @@ export default { ...@@ -137,7 +147,7 @@ export default {
</div> </div>
<div class="settings-content"> <div class="settings-content">
<form name="self-monitoring-form"> <form name="self-monitoring-form">
<p ref="selfMonitoringFormText" v-html="selfMonitoringFormText"></p> <p ref="selfMonitoringFormText" v-safe-html="selfMonitoringFormText"></p>
<gl-form-group> <gl-form-group>
<gl-toggle <gl-toggle
v-model="selfMonitorEnabled" v-model="selfMonitorEnabled"
......
...@@ -27,17 +27,6 @@ $top-nav-hover-bg: var(--indigo-900-alpha-008, $indigo-900-alpha-008) !important ...@@ -27,17 +27,6 @@ $top-nav-hover-bg: var(--indigo-900-alpha-008, $indigo-900-alpha-008) !important
display: none; display: none;
} }
.menu-expanded {
.more-icon {
display: none;
}
.close-icon {
display: block;
margin: auto;
}
}
.header-content { .header-content {
width: 100%; width: 100%;
display: flex; display: flex;
...@@ -103,18 +92,6 @@ $top-nav-hover-bg: var(--indigo-900-alpha-008, $indigo-900-alpha-008) !important ...@@ -103,18 +92,6 @@ $top-nav-hover-bg: var(--indigo-900-alpha-008, $indigo-900-alpha-008) !important
.navbar-collapse > ul.nav > li:not(.d-none) { .navbar-collapse > ul.nav > li:not(.d-none) {
margin: 0 2px; margin: 0 2px;
} }
&.menu-expanded {
@include media-breakpoint-down(xs) {
.hide-when-menu-expanded {
display: none;
}
.navbar-collapse {
display: flex;
}
}
}
} }
.navbar-collapse { .navbar-collapse {
...@@ -673,19 +650,30 @@ $top-nav-hover-bg: var(--indigo-900-alpha-008, $indigo-900-alpha-008) !important ...@@ -673,19 +650,30 @@ $top-nav-hover-bg: var(--indigo-900-alpha-008, $indigo-900-alpha-008) !important
} }
.top-nav-responsive-open { .top-nav-responsive-open {
.hide-when-top-nav-responsive-open { .more-icon {
@include media-breakpoint-down(xs) { display: none;
}
.close-icon {
display: block;
margin: auto;
}
@include media-breakpoint-down(xs) {
.navbar-collapse {
display: flex;
}
.hide-when-top-nav-responsive-open {
display: none !important; display: none !important;
} }
}
.top-nav-responsive { .top-nav-responsive {
@include media-breakpoint-down(xs) {
@include gl-display-block; @include gl-display-block;
} }
}
.navbar-gitlab .header-content .title-container { .navbar-gitlab .header-content .title-container {
flex: 0; flex: 0;
}
} }
} }
# frozen_string_literal: true

# Finds the IDs of Git LFS pointer blobs located under a given path on a
# repository's root ref.
class LfsPointersFinder
  def initialize(repository, path)
    @repository = repository
    @path = path
  end

  # Returns an array of blob IDs that are LFS pointers, or [] when the
  # repository has no root ref (e.g. an empty repository).
  def execute
    return [] unless ref

    candidate_ids = tree.blobs.map(&:id)

    # When current endpoint is a Blob then `tree.blobs` will be empty, it means we need to analyze
    # the current Blob in order to determine if it's a LFS object
    candidate_ids = Array.wrap(current_blob&.id) if candidate_ids.empty?

    Gitlab::Git::Blob.batch_lfs_pointers(repository, candidate_ids).map(&:id)
  end

  private

  attr_reader :repository, :path

  # Root ref name; nil for empty repositories.
  def ref
    repository.root_ref
  end

  # Tree at `path` on the root ref.
  def tree
    repository.tree(ref, path)
  end

  # Blob at `path` on the root ref, when `path` names a file.
  def current_blob
    repository.blob_at(ref, path)
  end
end
...@@ -59,8 +59,8 @@ module Projects ...@@ -59,8 +59,8 @@ module Projects
# @return [Array<[user_id, access_level]>] # @return [Array<[user_id, access_level]>]
def user_ids_and_access_levels_from_all_memberships def user_ids_and_access_levels_from_all_memberships
strong_memoize(:user_ids_and_access_levels_from_all_memberships) do strong_memoize(:user_ids_and_access_levels_from_all_memberships) do
all_possible_avenues_of_membership.flat_map do |relation| all_possible_avenues_of_membership.flat_map do |members|
relation.pluck(*USER_ID_AND_ACCESS_LEVEL) # rubocop: disable CodeReuse/ActiveRecord apply_scopes(members).pluck(*USER_ID_AND_ACCESS_LEVEL) # rubocop: disable CodeReuse/ActiveRecord
end end
end end
end end
...@@ -86,7 +86,7 @@ module Projects ...@@ -86,7 +86,7 @@ module Projects
members << Member.from_union(members_per_batch) members << Member.from_union(members_per_batch)
end end
members.flatten Member.from_union(members)
end end
def project_owner_acting_as_maintainer def project_owner_acting_as_maintainer
...@@ -120,6 +120,10 @@ module Projects ...@@ -120,6 +120,10 @@ module Projects
Arel.sql(column_alias) Arel.sql(column_alias)
) )
end end
def apply_scopes(members)
members
end
end end
end end
end end
# frozen_string_literal: true

module Projects
  module Members
    # Variant of EffectiveAccessLevelFinder that restricts the computed
    # memberships to a single user.
    class EffectiveAccessLevelPerUserFinder < EffectiveAccessLevelFinder
      # NOTE(review): does not call super; assumes the parent only assigns
      # @project in its initializer — confirm if the parent gains more state.
      def initialize(project, user)
        @project = project
        @user = user
      end

      private

      attr_reader :user

      # Hook invoked by the parent finder on each membership relation;
      # narrows the rows to the given user.
      def apply_scopes(members)
        super.where(user_id: user.id) # rubocop: disable CodeReuse/ActiveRecord
      end
    end
  end
end
# frozen_string_literal: true

module AuthorizedProjectUpdate
  # Variant of ProjectRecalculateService that recalculates project
  # authorizations for a single user instead of all members.
  class ProjectRecalculatePerUserService < ProjectRecalculateService
    # NOTE(review): does not call super; assumes the parent only assigns
    # @project in its initializer — confirm if the parent gains more state.
    def initialize(project, user)
      @project = project
      @user = user
    end

    private

    attr_reader :user

    # Hook used by the parent service; narrows the current authorization
    # rows to the given user.
    def apply_scopes(project_authorizations)
      super.where(user_id: user.id) # rubocop: disable CodeReuse/ActiveRecord
    end

    # Computes fresh access levels for the given user only, via the
    # per-user finder counterpart.
    def effective_access_levels
      Projects::Members::EffectiveAccessLevelPerUserFinder.new(project, user).execute
    end
  end
end
...@@ -26,7 +26,7 @@ module AuthorizedProjectUpdate ...@@ -26,7 +26,7 @@ module AuthorizedProjectUpdate
def current_authorizations def current_authorizations
strong_memoize(:current_authorizations) do strong_memoize(:current_authorizations) do
project.project_authorizations apply_scopes(project.project_authorizations)
.pluck(:user_id, :access_level) # rubocop: disable CodeReuse/ActiveRecord .pluck(:user_id, :access_level) # rubocop: disable CodeReuse/ActiveRecord
end end
end end
...@@ -35,8 +35,7 @@ module AuthorizedProjectUpdate ...@@ -35,8 +35,7 @@ module AuthorizedProjectUpdate
strong_memoize(:fresh_authorizations) do strong_memoize(:fresh_authorizations) do
result = [] result = []
Projects::Members::EffectiveAccessLevelFinder.new(project) effective_access_levels
.execute
.each_batch(of: BATCH_SIZE, column: :user_id) do |member_batch| .each_batch(of: BATCH_SIZE, column: :user_id) do |member_batch|
result += member_batch.pluck(:user_id, 'MAX(access_level)') # rubocop: disable CodeReuse/ActiveRecord result += member_batch.pluck(:user_id, 'MAX(access_level)') # rubocop: disable CodeReuse/ActiveRecord
end end
...@@ -76,5 +75,13 @@ module AuthorizedProjectUpdate ...@@ -76,5 +75,13 @@ module AuthorizedProjectUpdate
end end
end end
end end
def apply_scopes(project_authorizations)
project_authorizations
end
def effective_access_levels
Projects::Members::EffectiveAccessLevelFinder.new(project).execute
end
end end
end end
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
%a.gl-sr-only.gl-accessibility{ href: "#content-body" } Skip to content %a.gl-sr-only.gl-accessibility{ href: "#content-body" } Skip to content
.container-fluid .container-fluid
.header-content .header-content
.title-container.hide-when-menu-expanded .title-container.hide-when-top-nav-responsive-open
%h1.title %h1.title
%span.gl-sr-only GitLab %span.gl-sr-only GitLab
= link_to root_path, title: _('Dashboard'), id: 'logo', **tracking_attrs('main_navigation', 'click_gitlab_logo_link', 'navigation') do = link_to root_path, title: _('Dashboard'), id: 'logo', **tracking_attrs('main_navigation', 'click_gitlab_logo_link', 'navigation') do
......
# frozen_string_literal: true

# Finalizes the int4 -> int8 conversion of push_event_payloads.event_id by
# swapping the original column with the backfilled
# event_id_convert_to_bigint column, along with its index, foreign key,
# default, and primary-key constraint.
class FinalizePushEventPayloadsBigintConversion2 < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  disable_ddl_transaction!

  TABLE_NAME = 'push_event_payloads'
  INDEX_NAME = 'index_push_event_payloads_on_event_id_convert_to_bigint'

  def up
    # Abort unless the batched background migration that backfilled
    # event_id_convert_to_bigint has completed.
    ensure_batched_background_migration_is_finished(
      job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
      table_name: TABLE_NAME,
      column_name: 'event_id',
      job_arguments: [["event_id"], ["event_id_convert_to_bigint"]]
    )

    swap_columns
  end

  # Swapping again restores the original layout, so down is the same
  # operation as up.
  def down
    swap_columns
  end

  private

  # Atomically (under ACCESS EXCLUSIVE lock) exchanges the int4 and bigint
  # columns and moves every dependent object (index, FK, default, PK) over.
  def swap_columns
    add_concurrent_index TABLE_NAME, :event_id_convert_to_bigint, unique: true, name: INDEX_NAME

    # Add a foreign key on `event_id_convert_to_bigint` before we swap the columns and drop the old FK (fk_36c74129da)
    add_concurrent_foreign_key TABLE_NAME, :events, column: :event_id_convert_to_bigint, on_delete: :cascade

    with_lock_retries(raise_on_exhaustion: true) do
      # We'll need ACCESS EXCLUSIVE lock on the related tables,
      # lets make sure it can be acquired from the start
      execute "LOCK TABLE #{TABLE_NAME}, events IN ACCESS EXCLUSIVE MODE"

      # Swap column names (three-way rename through a temporary name)
      temp_name = 'event_id_tmp'
      execute "ALTER TABLE #{quote_table_name(TABLE_NAME)} RENAME COLUMN #{quote_column_name(:event_id)} TO #{quote_column_name(temp_name)}"
      execute "ALTER TABLE #{quote_table_name(TABLE_NAME)} RENAME COLUMN #{quote_column_name(:event_id_convert_to_bigint)} TO #{quote_column_name(:event_id)}"
      execute "ALTER TABLE #{quote_table_name(TABLE_NAME)} RENAME COLUMN #{quote_column_name(temp_name)} TO #{quote_column_name(:event_id_convert_to_bigint)}"

      # We need to update the trigger function in order to make PostgreSQL to
      # regenerate the execution plan for it. This is to avoid type mismatch errors like
      # "type of parameter 15 (bigint) does not match that when preparing the plan (integer)"
      function_name = Gitlab::Database::UnidirectionalCopyTrigger.on_table(TABLE_NAME).name(:event_id, :event_id_convert_to_bigint)
      execute "ALTER FUNCTION #{quote_table_name(function_name)} RESET ALL"

      # Swap defaults: the live column has no default, the shadow column keeps 0
      change_column_default TABLE_NAME, :event_id, nil
      change_column_default TABLE_NAME, :event_id_convert_to_bigint, 0

      # Swap PK constraint: promote the new unique index to primary key
      execute "ALTER TABLE #{TABLE_NAME} DROP CONSTRAINT push_event_payloads_pkey"
      rename_index TABLE_NAME, INDEX_NAME, 'push_event_payloads_pkey'
      execute "ALTER TABLE #{TABLE_NAME} ADD CONSTRAINT push_event_payloads_pkey PRIMARY KEY USING INDEX push_event_payloads_pkey"

      # Drop original FK on the old int4 `event_id` (fk_36c74129da)
      remove_foreign_key TABLE_NAME, name: concurrent_foreign_key_name(TABLE_NAME, :event_id)

      # We swapped the columns but the FK for event_id is still using the old name for the event_id_convert_to_bigint column
      # So we have to also swap the FK name now that we dropped the other one with the same
      rename_constraint(
        TABLE_NAME,
        concurrent_foreign_key_name(TABLE_NAME, :event_id_convert_to_bigint),
        concurrent_foreign_key_name(TABLE_NAME, :event_id)
      )
    end
  end
end
d35079b6d6ed38ce8f212a09e684988f7499d456d28f70b6178914b1b17eee5b
\ No newline at end of file
...@@ -17503,7 +17503,7 @@ ALTER SEQUENCE protected_tags_id_seq OWNED BY protected_tags.id; ...@@ -17503,7 +17503,7 @@ ALTER SEQUENCE protected_tags_id_seq OWNED BY protected_tags.id;
CREATE TABLE push_event_payloads ( CREATE TABLE push_event_payloads (
commit_count bigint NOT NULL, commit_count bigint NOT NULL,
event_id integer NOT NULL, event_id_convert_to_bigint integer DEFAULT 0 NOT NULL,
action smallint NOT NULL, action smallint NOT NULL,
ref_type smallint NOT NULL, ref_type smallint NOT NULL,
commit_from bytea, commit_from bytea,
...@@ -17511,7 +17511,7 @@ CREATE TABLE push_event_payloads ( ...@@ -17511,7 +17511,7 @@ CREATE TABLE push_event_payloads (
ref text, ref text,
commit_title character varying(70), commit_title character varying(70),
ref_count integer, ref_count integer,
event_id_convert_to_bigint bigint DEFAULT 0 NOT NULL event_id bigint NOT NULL
); );
CREATE TABLE push_rules ( CREATE TABLE push_rules (
...@@ -161,7 +161,7 @@ If the **primary** and **secondary** nodes have a checksum verification mismatch ...@@ -161,7 +161,7 @@ If the **primary** and **secondary** nodes have a checksum verification mismatch
![Project administration page](img/checksum-differences-admin-project-page.png) ![Project administration page](img/checksum-differences-admin-project-page.png)
1. Go to the project's repository directory on both **primary** and **secondary** nodes 1. Go to the project's repository directory on both **primary** and **secondary** nodes
(the path is usually `/var/opt/gitlab/git-data/repositories`). Note that if `git_data_dirs` (the path is usually `/var/opt/gitlab/git-data/repositories`). If `git_data_dirs`
is customized, check the directory layout on your server to be sure: is customized, check the directory layout on your server to be sure:
```shell ```shell
......
...@@ -94,7 +94,7 @@ follow these steps to avoid unnecessary data loss: ...@@ -94,7 +94,7 @@ follow these steps to avoid unnecessary data loss:
1. Until a [read-only mode](https://gitlab.com/gitlab-org/gitlab/-/issues/14609) 1. Until a [read-only mode](https://gitlab.com/gitlab-org/gitlab/-/issues/14609)
is implemented, updates must be prevented from happening manually to the is implemented, updates must be prevented from happening manually to the
**primary**. Note that your **secondary** node still needs read-only **primary**. Your **secondary** node still needs read-only
access to the **primary** node during the maintenance window: access to the **primary** node during the maintenance window:
1. At the scheduled time, using your cloud provider or your node's firewall, block 1. At the scheduled time, using your cloud provider or your node's firewall, block
......
...@@ -79,7 +79,7 @@ follow these steps to avoid unnecessary data loss: ...@@ -79,7 +79,7 @@ follow these steps to avoid unnecessary data loss:
1. Until a [read-only mode](https://gitlab.com/gitlab-org/gitlab/-/issues/14609) 1. Until a [read-only mode](https://gitlab.com/gitlab-org/gitlab/-/issues/14609)
is implemented, updates must be prevented from happening manually to the is implemented, updates must be prevented from happening manually to the
**primary**. Note that your **secondary** node still needs read-only **primary**. Your **secondary** node still needs read-only
access to the **primary** node during the maintenance window: access to the **primary** node during the maintenance window:
1. At the scheduled time, using your cloud provider or your node's firewall, block 1. At the scheduled time, using your cloud provider or your node's firewall, block
......
...@@ -101,12 +101,12 @@ From the perspective of a user performing Git operations: ...@@ -101,12 +101,12 @@ From the perspective of a user performing Git operations:
- The **primary** site behaves as a full read-write GitLab instance. - The **primary** site behaves as a full read-write GitLab instance.
- **Secondary** sites are read-only but proxy Git push operations to the **primary** site. This makes **secondary** sites appear to support push operations themselves. - **Secondary** sites are read-only but proxy Git push operations to the **primary** site. This makes **secondary** sites appear to support push operations themselves.
To simplify the diagram, some necessary components are omitted. Note that: To simplify the diagram, some necessary components are omitted.
- Git over SSH requires [`gitlab-shell`](https://gitlab.com/gitlab-org/gitlab-shell) and OpenSSH. - Git over SSH requires [`gitlab-shell`](https://gitlab.com/gitlab-org/gitlab-shell) and OpenSSH.
- Git over HTTPS required [`gitlab-workhorse`](https://gitlab.com/gitlab-org/gitlab-workhorse). - Git over HTTPS required [`gitlab-workhorse`](https://gitlab.com/gitlab-org/gitlab-workhorse).
Note that a **secondary** site needs two different PostgreSQL databases: A **secondary** site needs two different PostgreSQL databases:
- A read-only database instance that streams data from the main GitLab database. - A read-only database instance that streams data from the main GitLab database.
- [Another database instance](#geo-tracking-database) used internally by the **secondary** site to record what data has been replicated. - [Another database instance](#geo-tracking-database) used internally by the **secondary** site to record what data has been replicated.
......
...@@ -286,9 +286,9 @@ The two most obvious issues that can become apparent in the dashboard are: ...@@ -286,9 +286,9 @@ The two most obvious issues that can become apparent in the dashboard are:
- You are using a custom certificate or custom CA (see the [troubleshooting document](troubleshooting.md)). - You are using a custom certificate or custom CA (see the [troubleshooting document](troubleshooting.md)).
- The instance is firewalled (check your firewall rules). - The instance is firewalled (check your firewall rules).
Please note that disabling a **secondary** site stops the synchronization process. Disabling a **secondary** site stops the synchronization process.
Please note that if `git_data_dirs` is customized on the **primary** site for multiple If `git_data_dirs` is customized on the **primary** site for multiple
repository shards you must duplicate the same configuration on each **secondary** site. repository shards you must duplicate the same configuration on each **secondary** site.
Point your users to the [Using a Geo Site guide](usage.md). Point your users to the [Using a Geo Site guide](usage.md).
......
...@@ -204,7 +204,7 @@ successfully, you must replicate their data using some other means. ...@@ -204,7 +204,7 @@ successfully, you must replicate their data using some other means.
|[Server-side Git hooks](../../server_hooks.md) | [No](https://gitlab.com/groups/gitlab-org/-/epics/1867) | No | No | | |[Server-side Git hooks](../../server_hooks.md) | [No](https://gitlab.com/groups/gitlab-org/-/epics/1867) | No | No | |
|[Elasticsearch integration](../../../integration/elasticsearch.md) | [No](https://gitlab.com/gitlab-org/gitlab/-/issues/1186) | No | No | | |[Elasticsearch integration](../../../integration/elasticsearch.md) | [No](https://gitlab.com/gitlab-org/gitlab/-/issues/1186) | No | No | |
|[GitLab Pages](../../pages/index.md) | [No](https://gitlab.com/groups/gitlab-org/-/epics/589) | No | Via Object Storage provider if supported. **No** native Geo support (Beta). | | |[GitLab Pages](../../pages/index.md) | [No](https://gitlab.com/groups/gitlab-org/-/epics/589) | No | Via Object Storage provider if supported. **No** native Geo support (Beta). | |
|[Dependency proxy images](../../../user/packages/dependency_proxy/index.md) | [No](https://gitlab.com/gitlab-org/gitlab/-/issues/259694) | No | No | Blocked on [Geo: Secondary Mimicry](https://gitlab.com/groups/gitlab-org/-/epics/1528). Note that replication of this cache is not needed for Disaster Recovery purposes because it can be recreated from external sources. | |[Dependency proxy images](../../../user/packages/dependency_proxy/index.md) | [No](https://gitlab.com/gitlab-org/gitlab/-/issues/259694) | No | No | Blocked on [Geo: Secondary Mimicry](https://gitlab.com/groups/gitlab-org/-/epics/1528). Replication of this cache is not needed for Disaster Recovery purposes because it can be recreated from external sources. |
|[Vulnerability Export](../../../user/application_security/vulnerability_report/#export-vulnerability-details) | [Not planned](https://gitlab.com/groups/gitlab-org/-/epics/3111) | No | | Not planned because they are ephemeral and sensitive. They can be regenerated on demand. | |[Vulnerability Export](../../../user/application_security/vulnerability_report/#export-vulnerability-details) | [Not planned](https://gitlab.com/groups/gitlab-org/-/epics/3111) | No | | Not planned because they are ephemeral and sensitive. They can be regenerated on demand. |
#### Limitation of verification for files in Object Storage #### Limitation of verification for files in Object Storage
......
...@@ -154,7 +154,7 @@ the **primary** database. Use the following as a guide. ...@@ -154,7 +154,7 @@ the **primary** database. Use the following as a guide.
1. Generate an MD5 hash of the desired password for the database user that the 1. Generate an MD5 hash of the desired password for the database user that the
GitLab application uses to access the read-replica database: GitLab application uses to access the read-replica database:
Note that the username (`gitlab` by default) is incorporated into the hash. The username (`gitlab` by default) is incorporated into the hash.
```shell ```shell
gitlab-ctl pg-password-md5 gitlab gitlab-ctl pg-password-md5 gitlab
...@@ -203,7 +203,7 @@ the **primary** database. Use the following as a guide. ...@@ -203,7 +203,7 @@ the **primary** database. Use the following as a guide.
geo_postgresql['enable'] = false geo_postgresql['enable'] = false
## ##
## Disable all other services that aren't needed. Note that we had to enable ## Disable all other services that aren't needed. We had to enable
## geo_secondary_role to cause some configuration changes to postgresql, but ## geo_secondary_role to cause some configuration changes to postgresql, but
## the role enables single-node services by default. ## the role enables single-node services by default.
## ##
...@@ -241,7 +241,7 @@ Configure the tracking database. ...@@ -241,7 +241,7 @@ Configure the tracking database.
1. Generate an MD5 hash of the desired password for the database user that the 1. Generate an MD5 hash of the desired password for the database user that the
GitLab application uses to access the tracking database: GitLab application uses to access the tracking database:
Note that the username (`gitlab_geo` by default) is incorporated into the The username (`gitlab_geo` by default) is incorporated into the
hash. hash.
```shell ```shell
......
...@@ -242,7 +242,7 @@ the tracking database on port 5432. ...@@ -242,7 +242,7 @@ the tracking database on port 5432.
1. Save the file and [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) 1. Save the file and [reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure)
1. The reconfigure should automatically create the database. If needed, you can perform this task manually. Note that this task (whether run by itself or during reconfigure) requires the database user to be a superuser. 1. The reconfigure should automatically create the database. If needed, you can perform this task manually. This task (whether run by itself or during reconfigure) requires the database user to be a superuser.
```shell ```shell
gitlab-rake geo:db:create gitlab-rake geo:db:create
......
...@@ -952,7 +952,7 @@ result as you did at the start. For example: ...@@ -952,7 +952,7 @@ result as you did at the start. For example:
{enforced="true",status="ok"} 4424.985419441742 {enforced="true",status="ok"} 4424.985419441742
``` ```
Note that `enforced="true"` means that authentication is being enforced. `enforced="true"` means that authentication is being enforced.
## Pack-objects cache **(FREE SELF)** ## Pack-objects cache **(FREE SELF)**
...@@ -1076,7 +1076,7 @@ cache hit and the average amount of storage used by cache files. ...@@ -1076,7 +1076,7 @@ cache hit and the average amount of storage used by cache files.
Entries older than `max_age` get evicted from the in-memory metadata Entries older than `max_age` get evicted from the in-memory metadata
store, and deleted from disk. store, and deleted from disk.
Note that eviction does not interfere with ongoing requests, so it is OK Eviction does not interfere with ongoing requests, so it is OK
for `max_age` to be less than the time it takes to do a fetch over a for `max_age` to be less than the time it takes to do a fetch over a
slow connection. This is because Unix filesystems do not truly delete slow connection. This is because Unix filesystems do not truly delete
a file until all processes that are reading the deleted file have a file until all processes that are reading the deleted file have
......
...@@ -195,7 +195,7 @@ instructions only work on Omnibus-provided PostgreSQL: ...@@ -195,7 +195,7 @@ instructions only work on Omnibus-provided PostgreSQL:
``` ```
Replace `<PRAEFECT_SQL_PASSWORD_HASH>` with the hash of the password you generated in the Replace `<PRAEFECT_SQL_PASSWORD_HASH>` with the hash of the password you generated in the
preparation step. Note that it is prefixed with `md5` literal. preparation step. It is prefixed with `md5` literal.
1. The PgBouncer that is shipped with Omnibus is configured to use [`auth_query`](https://www.pgbouncer.org/config.html#generic-settings) 1. The PgBouncer that is shipped with Omnibus is configured to use [`auth_query`](https://www.pgbouncer.org/config.html#generic-settings)
and uses `pg_shadow_lookup` function. You need to create this function in `praefect_production` and uses `pg_shadow_lookup` function. You need to create this function in `praefect_production`
......
...@@ -80,7 +80,7 @@ GitLab displays your link in the **Menu > Admin > Monitoring > Metrics Dashboard ...@@ -80,7 +80,7 @@ GitLab displays your link in the **Menu > Admin > Monitoring > Metrics Dashboard
When setting up Grafana through the process above, no scope shows in the screen at When setting up Grafana through the process above, no scope shows in the screen at
**Menu >** **{admin}** **Admin > Applications > GitLab Grafana**. However, the `read_user` scope is **Menu >** **{admin}** **Admin > Applications > GitLab Grafana**. However, the `read_user` scope is
required and is provided to the application automatically. Note that setting any scope other than required and is provided to the application automatically. Setting any scope other than
`read_user` without also including `read_user` leads to this error when you try to log in using `read_user` without also including `read_user` leads to this error when you try to log in using
GitLab as the OAuth provider: GitLab as the OAuth provider:
......
...@@ -48,7 +48,7 @@ lookups via database lookup. ...@@ -48,7 +48,7 @@ lookups via database lookup.
As part of [setting up Geo](../geo/index.md#setup-instructions), As part of [setting up Geo](../geo/index.md#setup-instructions),
you are required to follow the steps outlined below for both the primary and you are required to follow the steps outlined below for both the primary and
secondary nodes, but note that the `Write to "authorized keys" file` checkbox secondary nodes, but the `Write to "authorized keys" file` checkbox
only needs to be unchecked on the primary node since it is reflected only needs to be unchecked on the primary node since it is reflected
automatically on the secondary if database replication is working. automatically on the secondary if database replication is working.
......
...@@ -41,7 +41,7 @@ which you can set it up: ...@@ -41,7 +41,7 @@ which you can set it up:
the Pages daemon is installed, so you must share it through the network. the Pages daemon is installed, so you must share it through the network.
- Run the Pages daemon in the same server as GitLab, listening on the same IP - Run the Pages daemon in the same server as GitLab, listening on the same IP
but on different ports. In that case, you must proxy the traffic with but on different ports. In that case, you must proxy the traffic with
a load balancer. If you choose that route note that you should use TCP load a load balancer. If you choose that route, you should use TCP load
balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing), the balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing), the
pages can't be served with user-provided certificates. For pages can't be served with user-provided certificates. For
HTTP it's OK to use HTTP or TCP load balancing. HTTP it's OK to use HTTP or TCP load balancing.
......
...@@ -41,7 +41,7 @@ which you can set it up: ...@@ -41,7 +41,7 @@ which you can set it up:
the Pages daemon is installed, so you must share it through the network. the Pages daemon is installed, so you must share it through the network.
1. Run the Pages daemon in the same server as GitLab, listening on the same IP 1. Run the Pages daemon in the same server as GitLab, listening on the same IP
but on different ports. In that case, you must proxy the traffic with but on different ports. In that case, you must proxy the traffic with
a load balancer. If you choose that route, note that you should use TCP load a load balancer. If you choose that route, you should use TCP load
balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing), the balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing), the
pages aren't able to be served with user-provided certificates. For pages aren't able to be served with user-provided certificates. For
HTTP, it's OK to use HTTP or TCP load balancing. HTTP, it's OK to use HTTP or TCP load balancing.
......
...@@ -889,7 +889,7 @@ with: ...@@ -889,7 +889,7 @@ with:
sudo gitlab-ctl stop patroni sudo gitlab-ctl stop patroni
``` ```
Note that stopping or restarting Patroni service on the leader node will trigger the automatic failover. If you Stopping or restarting Patroni service on the leader node will trigger the automatic failover. If you
want to signal Patroni to reload its configuration or restart PostgreSQL process without triggering the failover, you want to signal Patroni to reload its configuration or restart PostgreSQL process without triggering the failover, you
must use the `reload` or `restart` sub-commands of `gitlab-ctl patroni` instead. These two sub-commands are wrappers of must use the `reload` or `restart` sub-commands of `gitlab-ctl patroni` instead. These two sub-commands are wrappers of
the same `patronictl` commands. the same `patronictl` commands.
...@@ -1015,7 +1015,7 @@ Here are a few key facts that you must consider before upgrading PostgreSQL: ...@@ -1015,7 +1015,7 @@ Here are a few key facts that you must consider before upgrading PostgreSQL:
- Upgrading PostgreSQL creates a new data directory with a new control data. From Patroni's perspective - Upgrading PostgreSQL creates a new data directory with a new control data. From Patroni's perspective
this is a new cluster that needs to be bootstrapped again. Therefore, as part of the upgrade procedure, this is a new cluster that needs to be bootstrapped again. Therefore, as part of the upgrade procedure,
the cluster state (stored in Consul) is wiped out. Once the upgrade is completed, Patroni the cluster state (stored in Consul) is wiped out. Once the upgrade is completed, Patroni
bootstraps a new cluster. **Note that this changes your _cluster ID_**. bootstraps a new cluster. **This changes your _cluster ID_**.
- The procedures for upgrading leader and replicas are not the same. That is why it is important to use the - The procedures for upgrading leader and replicas are not the same. That is why it is important to use the
right procedure on each node. right procedure on each node.
......
...@@ -21,7 +21,7 @@ There are 3 things that are checked to determine integrity. ...@@ -21,7 +21,7 @@ There are 3 things that are checked to determine integrity.
1. Check for `config.lock` in the repository directory. 1. Check for `config.lock` in the repository directory.
1. Check for any branch/references lock files in `refs/heads`. 1. Check for any branch/references lock files in `refs/heads`.
It's important to note that the existence of `config.lock` or reference locks The existence of `config.lock` or reference locks
alone do not necessarily indicate a problem. Lock files are routinely created alone do not necessarily indicate a problem. Lock files are routinely created
and removed as Git and GitLab perform operations on the repository. They serve and removed as Git and GitLab perform operations on the repository. They serve
to prevent data integrity issues. However, if a Git operation is interrupted these to prevent data integrity issues. However, if a Git operation is interrupted these
......
...@@ -683,8 +683,6 @@ To make this work with Sentinel: ...@@ -683,8 +683,6 @@ To make this work with Sentinel:
] ]
``` ```
Note that:
- Redis URLs should be in the format: `redis://:PASSWORD@SENTINEL_PRIMARY_NAME`, where: - Redis URLs should be in the format: `redis://:PASSWORD@SENTINEL_PRIMARY_NAME`, where:
- `PASSWORD` is the plaintext password for the Redis instance. - `PASSWORD` is the plaintext password for the Redis instance.
- `SENTINEL_PRIMARY_NAME` is the Sentinel primary name set with `redis['master_name']`, - `SENTINEL_PRIMARY_NAME` is the Sentinel primary name set with `redis['master_name']`,
...@@ -731,7 +729,7 @@ redis_master_role['enable'] = true # enable only one of them ...@@ -731,7 +729,7 @@ redis_master_role['enable'] = true # enable only one of them
redis_replica_role['enable'] = true # enable only one of them redis_replica_role['enable'] = true # enable only one of them
# When Redis primary or Replica role are enabled, the following services are # When Redis primary or Replica role are enabled, the following services are
# enabled/disabled. Note that if Redis and Sentinel roles are combined, both # enabled/disabled. If Redis and Sentinel roles are combined, both
# services are enabled. # services are enabled.
# The following services are disabled # The following services are disabled
......
...@@ -345,7 +345,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`. ...@@ -345,7 +345,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
The Internal Load Balancer is used to balance any internal connections the GitLab environment requires The Internal Load Balancer is used to balance any internal connections the GitLab environment requires
such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster). such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster).
Note that it's a separate node from the External Load Balancer and shouldn't have any access externally. It's a separate node from the External Load Balancer and shouldn't have any access externally.
The following IP will be used as an example: The following IP will be used as an example:
......
...@@ -347,7 +347,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`. ...@@ -347,7 +347,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
The Internal Load Balancer is used to balance any internal connections the GitLab environment requires The Internal Load Balancer is used to balance any internal connections the GitLab environment requires
such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster). such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster).
Note that it's a separate node from the External Load Balancer and shouldn't have any access externally. It's a separate node from the External Load Balancer and shouldn't have any access externally.
The following IP will be used as an example: The following IP will be used as an example:
......
...@@ -346,7 +346,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`. ...@@ -346,7 +346,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
The Internal Load Balancer is used to balance any internal connections the GitLab environment requires The Internal Load Balancer is used to balance any internal connections the GitLab environment requires
such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster). such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster).
Note that it's a separate node from the External Load Balancer and shouldn't have any access externally. It's a separate node from the External Load Balancer and shouldn't have any access externally.
The following IP will be used as an example: The following IP will be used as an example:
...@@ -2086,7 +2086,7 @@ but with smaller performance requirements, several modifications can be consider ...@@ -2086,7 +2086,7 @@ but with smaller performance requirements, several modifications can be consider
- Combining select nodes: Some nodes can be combined to reduce complexity at the cost of some performance: - Combining select nodes: Some nodes can be combined to reduce complexity at the cost of some performance:
- GitLab Rails and Sidekiq: Sidekiq nodes can be removed and the component instead enabled on the GitLab Rails nodes. - GitLab Rails and Sidekiq: Sidekiq nodes can be removed and the component instead enabled on the GitLab Rails nodes.
- PostgreSQL and PgBouncer: PgBouncer nodes can be removed and the component instead enabled on PostgreSQL with the Internal Load Balancer pointing to them instead. - PostgreSQL and PgBouncer: PgBouncer nodes can be removed and the component instead enabled on PostgreSQL with the Internal Load Balancer pointing to them instead.
- Reducing the node counts: Some node types do not need consensus and can run with fewer nodes (but more than one for redundancy). Note that this will also lead to reduced performance. - Reducing the node counts: Some node types do not need consensus and can run with fewer nodes (but more than one for redundancy). This will also lead to reduced performance.
- GitLab Rails and Sidekiq: Stateless services don't have a minimum node count. Two are enough for redundancy. - GitLab Rails and Sidekiq: Stateless services don't have a minimum node count. Two are enough for redundancy.
- Gitaly and Praefect: A quorum is not strictly necessary. Two Gitaly nodes and two Praefect nodes are enough for redundancy. - Gitaly and Praefect: A quorum is not strictly necessary. Two Gitaly nodes and two Praefect nodes are enough for redundancy.
- Running select components in reputable Cloud PaaS solutions: Select components of the GitLab setup can instead be run on Cloud Provider PaaS solutions. By doing this, additional dependent components can also be removed: - Running select components in reputable Cloud PaaS solutions: Select components of the GitLab setup can instead be run on Cloud Provider PaaS solutions. By doing this, additional dependent components can also be removed:
......
...@@ -354,7 +354,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`. ...@@ -354,7 +354,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
The Internal Load Balancer is used to balance any internal connections the GitLab environment requires The Internal Load Balancer is used to balance any internal connections the GitLab environment requires
such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster). such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster).
Note that it's a separate node from the External Load Balancer and shouldn't have any access externally. It's a separate node from the External Load Balancer and shouldn't have any access externally.
The following IP will be used as an example: The following IP will be used as an example:
......
...@@ -338,7 +338,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`. ...@@ -338,7 +338,7 @@ Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
The Internal Load Balancer is used to balance any internal connections the GitLab environment requires The Internal Load Balancer is used to balance any internal connections the GitLab environment requires
such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster). such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configure-praefect) (Gitaly Cluster).
Note that it's a separate node from the External Load Balancer and shouldn't have any access externally. It's a separate node from the External Load Balancer and shouldn't have any access externally.
The following IP will be used as an example: The following IP will be used as an example:
......
...@@ -45,7 +45,7 @@ session by running: ...@@ -45,7 +45,7 @@ session by running:
ActiveRecord::Base.connection.execute('SET statement_timeout TO 0') ActiveRecord::Base.connection.execute('SET statement_timeout TO 0')
``` ```
Note that this change only affects the current Rails console session and will This change only affects the current Rails console session and will
not be persisted in the GitLab production environment or in the next Rails not be persisted in the GitLab production environment or in the next Rails
console session. console session.
...@@ -213,7 +213,7 @@ downtime. Otherwise skip to the next section. ...@@ -213,7 +213,7 @@ downtime. Otherwise skip to the next section.
exit exit
``` ```
Note that if the Puma process terminates before you are able to run these If the Puma process terminates before you are able to run these
commands, GDB will report an error. To buy more time, you can always raise the commands, GDB will report an error. To buy more time, you can always raise the
Puma worker timeout. For Omnibus users, you can edit `/etc/gitlab/gitlab.rb` and Puma worker timeout. For Omnibus users, you can edit `/etc/gitlab/gitlab.rb` and
increase it from 60 seconds to 600: increase it from 60 seconds to 600:
......
...@@ -22,7 +22,7 @@ guidance of a Support Engineer, or running them in a test environment with a ...@@ -22,7 +22,7 @@ guidance of a Support Engineer, or running them in a test environment with a
backup of the instance ready to be restored, just in case. backup of the instance ready to be restored, just in case.
WARNING: WARNING:
Please also note that as GitLab changes, changes to the code are inevitable, As GitLab changes, changes to the code are inevitable,
and so some scripts may not work as they once used to. These are not kept and so some scripts may not work as they once used to. These are not kept
up-to-date as these scripts/commands were added as they were found/needed. As up-to-date as these scripts/commands were added as they were found/needed. As
mentioned above, we recommend running these scripts under the supervision of a mentioned above, we recommend running these scripts under the supervision of a
......
...@@ -135,7 +135,7 @@ and they will assist you with any issues you are having. ...@@ -135,7 +135,7 @@ and they will assist you with any issues you are having.
# source-style commands should also work # source-style commands should also work
cd /srv/gitlab && bundle exec rake gitlab:check RAILS_ENV=production cd /srv/gitlab && bundle exec rake gitlab:check RAILS_ENV=production
# run GitLab check. Note that the output can be confusing and invalid because of the specific structure of GitLab installed via helm chart # run GitLab check. The output can be confusing and invalid because of the specific structure of GitLab installed via helm chart
/usr/local/bin/gitlab-rake gitlab:check /usr/local/bin/gitlab-rake gitlab:check
# open console without entering pod # open console without entering pod
......
...@@ -265,7 +265,7 @@ user.save!(validate: false) ...@@ -265,7 +265,7 @@ user.save!(validate: false)
This is not recommended, as validations are usually put in place to ensure the This is not recommended, as validations are usually put in place to ensure the
integrity and consistency of user-provided data. integrity and consistency of user-provided data.
Note that a validation error will prevent the entire object from being saved to A validation error will prevent the entire object from being saved to
the database. We'll see a little of this in the next section. If you're getting the database. We'll see a little of this in the next section. If you're getting
a mysterious red banner in the GitLab UI when submitting a form, this can often a mysterious red banner in the GitLab UI when submitting a form, this can often
be the fastest way to get to the root of the problem. be the fastest way to get to the root of the problem.
......
...@@ -199,7 +199,7 @@ To fix this problem: ...@@ -199,7 +199,7 @@ To fix this problem:
git config --global http.sslCAInfo ~/.ssl/gitlab.domain.tld.crt git config --global http.sslCAInfo ~/.ssl/gitlab.domain.tld.crt
``` ```
- Disable SSL verification in your Git client. Note that this intended as a - Disable SSL verification in your Git client. This is intended as a
temporary measure, as it could be considered a security risk. temporary measure, as it could be considered a security risk.
```shell ```shell
......
...@@ -57,7 +57,7 @@ Available types for the `action` parameter, and the resources that might be affe ...@@ -57,7 +57,7 @@ Available types for the `action` parameter, and the resources that might be affe
- Design - Design
- Wiki page - Wiki page
Note that these options are in lower case. These options are in lowercase.
### Target Types ### Target Types
...@@ -71,7 +71,7 @@ Available target types for the `target_type` parameter are: ...@@ -71,7 +71,7 @@ Available target types for the `target_type` parameter are:
- `snippet` - `snippet`
- `user` - `user`
Note that these options are in lower case. These options are in lowercase.
### Date formatting ### Date formatting
......
...@@ -130,7 +130,7 @@ POST /features/:name ...@@ -130,7 +130,7 @@ POST /features/:name
| `project` | string | no | A projects path, for example `gitlab-org/gitlab-foss` | | `project` | string | no | A projects path, for example `gitlab-org/gitlab-foss` |
| `force` | boolean | no | Skip feature flag validation checks, such as a YAML definition | | `force` | boolean | no | Skip feature flag validation checks, such as a YAML definition |
Note that you can enable or disable a feature for a `feature_group`, a `user`, You can enable or disable a feature for a `feature_group`, a `user`,
a `group`, and a `project` in a single API call. a `group`, and a `project` in a single API call.
```shell ```shell
......
...@@ -33,7 +33,7 @@ To use this in a [`script` definition](../ci/yaml/index.md#script) inside ...@@ -33,7 +33,7 @@ To use this in a [`script` definition](../ci/yaml/index.md#script) inside
- The `JOB-TOKEN` header with the GitLab-provided `CI_JOB_TOKEN` variable. - The `JOB-TOKEN` header with the GitLab-provided `CI_JOB_TOKEN` variable.
For example, the following job downloads the artifacts of the job with ID For example, the following job downloads the artifacts of the job with ID
`42`. Note that the command is wrapped into single quotes because it contains a `42`. The command is wrapped in single quotes because it contains a
colon (`:`): colon (`:`):
```yaml ```yaml
...@@ -98,7 +98,7 @@ To use this in a [`script` definition](../ci/yaml/index.md#script) inside ...@@ -98,7 +98,7 @@ To use this in a [`script` definition](../ci/yaml/index.md#script) inside
- The `JOB-TOKEN` header with the GitLab-provided `CI_JOB_TOKEN` variable. - The `JOB-TOKEN` header with the GitLab-provided `CI_JOB_TOKEN` variable.
For example, the following job downloads the artifacts of the `test` job For example, the following job downloads the artifacts of the `test` job
of the `main` branch. Note that the command is wrapped into single quotes of the `main` branch. The command is wrapped in single quotes
because it contains a colon (`:`): because it contains a colon (`:`):
```yaml ```yaml
......
...@@ -85,7 +85,7 @@ Supported attributes: ...@@ -85,7 +85,7 @@ Supported attributes:
## Get a blob from repository ## Get a blob from repository
Allows you to receive information about blob in repository like size and Allows you to receive information about blob in repository like size and
content. Note that blob content is Base64 encoded. This endpoint can be accessed content. Blob content is Base64 encoded. This endpoint can be accessed
without authentication if the repository is publicly accessible. without authentication if the repository is publicly accessible.
```plaintext ```plaintext
...@@ -149,7 +149,7 @@ curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.com/api/v4/pr ...@@ -149,7 +149,7 @@ curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.com/api/v4/pr
## Compare branches, tags or commits ## Compare branches, tags or commits
This endpoint can be accessed without authentication if the repository is This endpoint can be accessed without authentication if the repository is
publicly accessible. Note that diffs could have an empty diff string if [diff limits](../development/diffs.md#diff-limits) are reached. publicly accessible. Diffs can have an empty diff string if [diff limits](../development/diffs.md#diff-limits) are reached.
```plaintext ```plaintext
GET /projects/:id/repository/compare GET /projects/:id/repository/compare
...@@ -607,7 +607,7 @@ template: | ...@@ -607,7 +607,7 @@ template: |
{% end %} {% end %}
``` ```
Note that when specifying the template you should use `template: |` and not When specifying the template you should use `template: |` and not
`template: >`, as the latter doesn't preserve newlines in the template. `template: >`, as the latter doesn't preserve newlines in the template.
### Template data ### Template data
......
...@@ -24,7 +24,7 @@ in the following table. ...@@ -24,7 +24,7 @@ in the following table.
## Get file from repository ## Get file from repository
Allows you to receive information about file in repository like name, size, Allows you to receive information about file in repository like name, size,
content. Note that file content is Base64 encoded. This endpoint can be accessed content. File content is Base64 encoded. This endpoint can be accessed
without authentication if the repository is publicly accessible. without authentication if the repository is publicly accessible.
```plaintext ```plaintext
......
...@@ -259,7 +259,7 @@ GET /projects/:id/services/buildkite ...@@ -259,7 +259,7 @@ GET /projects/:id/services/buildkite
## Campfire ## Campfire
Send notifications about push events to Campfire chat rooms. Send notifications about push events to Campfire chat rooms.
Note that [new users can no longer sign up for Campfire](https://basecamp.com/retired/campfire). [New users can no longer sign up for Campfire](https://basecamp.com/retired/campfire).
### Create/Edit Campfire service ### Create/Edit Campfire service
......
...@@ -408,7 +408,7 @@ users. Either `password`, `reset_password`, or `force_random_password` ...@@ -408,7 +408,7 @@ users. Either `password`, `reset_password`, or `force_random_password`
must be specified. If `reset_password` and `force_random_password` are must be specified. If `reset_password` and `force_random_password` are
both `false`, then `password` is required. both `false`, then `password` is required.
Note that `force_random_password` and `reset_password` take priority `force_random_password` and `reset_password` take priority
over `password`. In addition, `reset_password` and over `password`. In addition, `reset_password` and
`force_random_password` can be used together. `force_random_password` can be used together.
...@@ -1600,7 +1600,7 @@ Example response: ...@@ -1600,7 +1600,7 @@ Example response:
> Requires admin permissions. > Requires admin permissions.
> Token values are returned once. Make sure you save it - you can't access it again. > Token values are returned once. Make sure you save it - you can't access it again.
It creates a new impersonation token. Note that only administrators can do this. It creates a new impersonation token. Only administrators can do this.
You are only able to create impersonation tokens to impersonate the user and perform You are only able to create impersonation tokens to impersonate the user and perform
both API calls and Git reads and writes. The user can't see these tokens in their profile both API calls and Git reads and writes. The user can't see these tokens in their profile
settings page. settings page.
......
...@@ -8,7 +8,7 @@ group: database ...@@ -8,7 +8,7 @@ group: database
This document is a proposal to work towards reducing and limiting table sizes on GitLab.com. We establish a **measurable target** by limiting table size to a certain threshold. This will be used as an indicator to drive database focus and decision making. With GitLab.com growing, we continuously re-evaluate which tables need to be worked on to prevent or otherwise fix violations. This document is a proposal to work towards reducing and limiting table sizes on GitLab.com. We establish a **measurable target** by limiting table size to a certain threshold. This will be used as an indicator to drive database focus and decision making. With GitLab.com growing, we continuously re-evaluate which tables need to be worked on to prevent or otherwise fix violations.
Note that this is not meant to be a hard rule but rather a strong indication that work needs to be done to break a table apart or otherwise reduce its size. This is not meant to be a hard rule but rather a strong indication that work needs to be done to break a table apart or otherwise reduce its size.
This is meant to be read in context with the [Database Sharding blueprint](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/64115), This is meant to be read in context with the [Database Sharding blueprint](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/64115),
which paints the bigger picture. This proposal here is thought to be part of the "debloating step" below, as we aim to reduce storage requirements and improve data modeling. Partitioning is part of the standard tool-belt: where possible, we can already use partitioning as a solution to cut physical table sizes significantly. Both will help to prepare efforts like decomposition (database usage is already optimized) and sharding (database is already partitioned along an identified data access dimension). which paints the bigger picture. This proposal here is thought to be part of the "debloating step" below, as we aim to reduce storage requirements and improve data modeling. Partitioning is part of the standard tool-belt: where possible, we can already use partitioning as a solution to cut physical table sizes significantly. Both will help to prepare efforts like decomposition (database usage is already optimized) and sharding (database is already partitioned along an identified data access dimension).
...@@ -124,7 +124,7 @@ In order to maintain and improve operational stability and lessen development bu ...@@ -124,7 +124,7 @@ In order to maintain and improve operational stability and lessen development bu
1. Indexes are smaller, can be maintained more efficiently and fit better into memory 1. Indexes are smaller, can be maintained more efficiently and fit better into memory
1. Data migrations are easier to reason about, take less time to implement and execute 1. Data migrations are easier to reason about, take less time to implement and execute
Note that this target is *pragmatic*: We understand table sizes depend on feature usage, code changes and other factors - which all change over time. We may not always find solutions where we can tightly limit the size of physical tables once and for all. That is acceptable though and we primarily aim to keep the situation on GitLab.com under control. We adapt our efforts to the situation present on GitLab.com and will re-evaluate frequently. This target is *pragmatic*: We understand table sizes depend on feature usage, code changes and other factors - which all change over time. We may not always find solutions where we can tightly limit the size of physical tables once and for all. That is acceptable though and we primarily aim to keep the situation on GitLab.com under control. We adapt our efforts to the situation present on GitLab.com and will re-evaluate frequently.
While there are changes we can make that lead to a constant maximum physical table size over time, this doesn't need to be the case necessarily. Consider for example hash partitioning, which breaks a table down into a static number of partitions. With data growth over time, individual partitions will also grow in size and may eventually reach the threshold size again. We strive to get constant table sizes, but it is acceptable to ship easier solutions that don't have this characteristic but improve the situation for a considerable amount of time. While there are changes we can make that lead to a constant maximum physical table size over time, this doesn't need to be the case necessarily. Consider for example hash partitioning, which breaks a table down into a static number of partitions. With data growth over time, individual partitions will also grow in size and may eventually reach the threshold size again. We strive to get constant table sizes, but it is acceptable to ship easier solutions that don't have this characteristic but improve the situation for a considerable amount of time.
......
...@@ -64,7 +64,7 @@ We already use Database Lab from [postgres.ai](https://postgres.ai/), which is a ...@@ -64,7 +64,7 @@ We already use Database Lab from [postgres.ai](https://postgres.ai/), which is a
Internally, this is based on ZFS and implements a "thin-cloning technology". That is, ZFS snapshots are being used to clone the data and it exposes a full read/write PostgreSQL cluster based on the cloned data. This is called a *thin clone*. It is rather short lived and is going to be destroyed again shortly after we are finished using it. Internally, this is based on ZFS and implements a "thin-cloning technology". That is, ZFS snapshots are being used to clone the data and it exposes a full read/write PostgreSQL cluster based on the cloned data. This is called a *thin clone*. It is rather short lived and is going to be destroyed again shortly after we are finished using it.
It is important to note that a thin clone is fully read/write. This allows us to execute migrations on top of it. A thin clone is fully read/write. This allows us to execute migrations on top of it.
Database Lab provides an API we can interact with to manage thin clones. In order to automate the migration and query testing, we add steps to the `gitlab/gitlab-org` CI pipeline. This triggers automation that performs the following steps for a given merge request: Database Lab provides an API we can interact with to manage thin clones. In order to automate the migration and query testing, we add steps to the `gitlab/gitlab-org` CI pipeline. This triggers automation that performs the following steps for a given merge request:
......
...@@ -79,7 +79,7 @@ GitLab CI/CD supports numerous configuration options: ...@@ -79,7 +79,7 @@ GitLab CI/CD supports numerous configuration options:
| [Optimize GitLab and GitLab Runner for large repositories](large_repositories/index.md) | Recommended strategies for handling large repositories. | | [Optimize GitLab and GitLab Runner for large repositories](large_repositories/index.md) | Recommended strategies for handling large repositories. |
| [`.gitlab-ci.yml` full reference](yaml/index.md) | All the attributes you can use with GitLab CI/CD. | | [`.gitlab-ci.yml` full reference](yaml/index.md) | All the attributes you can use with GitLab CI/CD. |
Note that certain operations can only be performed according to the Certain operations can only be performed according to the
[user](../user/permissions.md#gitlab-cicd-permissions) and [job](../user/permissions.md#job-permissions) permissions. [user](../user/permissions.md#gitlab-cicd-permissions) and [job](../user/permissions.md#job-permissions) permissions.
## Feature set ## Feature set
......
...@@ -45,7 +45,7 @@ ENTITY_TITLE ...@@ -45,7 +45,7 @@ ENTITY_TITLE
You can [disable comments](#disable-comments-on-jira-issues) on issues. You can [disable comments](#disable-comments-on-jira-issues) on issues.
### Require associated Jira issue for merge requests to be merged ### Require associated Jira issue for merge requests to be merged **(ULTIMATE)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/280766) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 13.12 behind a feature flag, disabled by default. > - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/280766) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 13.12 behind a feature flag, disabled by default.
> - [Deployed behind a feature flag](../../user/feature_flags.md), disabled by default. > - [Deployed behind a feature flag](../../user/feature_flags.md), disabled by default.
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
class Projects::PathLocksController < Projects::ApplicationController class Projects::PathLocksController < Projects::ApplicationController
include PathLocksHelper include PathLocksHelper
include ExtractsPath
# Authorize # Authorize
before_action :require_non_empty_project before_action :require_non_empty_project
...@@ -10,8 +9,6 @@ class Projects::PathLocksController < Projects::ApplicationController ...@@ -10,8 +9,6 @@ class Projects::PathLocksController < Projects::ApplicationController
before_action :authorize_push_code!, only: [:toggle] before_action :authorize_push_code!, only: [:toggle]
before_action :check_license before_action :check_license
before_action :assign_ref_vars, only: :toggle
before_action :lfs_blob_ids, only: :toggle
feature_category :source_code_management feature_category :source_code_management
...@@ -21,7 +18,7 @@ class Projects::PathLocksController < Projects::ApplicationController ...@@ -21,7 +18,7 @@ class Projects::PathLocksController < Projects::ApplicationController
# rubocop: disable CodeReuse/ActiveRecord # rubocop: disable CodeReuse/ActiveRecord
def toggle def toggle
path_lock = @project.path_locks.find_by(path: params[:path]) path_lock = @project.path_locks.find_by(path: path)
if path_lock if path_lock
unlock_file(path_lock) unlock_file(path_lock)
...@@ -62,13 +59,13 @@ class Projects::PathLocksController < Projects::ApplicationController ...@@ -62,13 +59,13 @@ class Projects::PathLocksController < Projects::ApplicationController
end end
def lock_file def lock_file
path_lock = PathLocks::LockService.new(project, current_user).execute(params[:path]) path_lock = PathLocks::LockService.new(project, current_user).execute(path)
if path_lock.persisted? && sync_with_lfs? if path_lock.persisted? && sync_with_lfs?
Lfs::LockFileService.new( Lfs::LockFileService.new(
project, project,
current_user, current_user,
path: params[:path], path: path,
create_path_lock: false create_path_lock: false
).execute ).execute
end end
...@@ -82,23 +79,21 @@ class Projects::PathLocksController < Projects::ApplicationController ...@@ -82,23 +79,21 @@ class Projects::PathLocksController < Projects::ApplicationController
end end
end end
# Override get_id from ExtractsPath.
# We don't support file locking per branch, that's why we use the root branch.
def get_id
id = project.repository.root_ref
id += "/#{params[:path]}" if params[:path].present?
id
end
def lfs_file? def lfs_file?
blob = project.repository.blob_at_branch(@ref, @path) blob = repository.blob_at_branch(repository.root_ref, path)
return false unless blob return false unless blob
@lfs_blob_ids.include?(blob.id) lfs_blob_ids = LfsPointersFinder.new(repository, path).execute
lfs_blob_ids.include?(blob.id)
end end
def sync_with_lfs? def sync_with_lfs?
project.lfs_enabled? && lfs_file? project.lfs_enabled? && lfs_file?
end end
def path
params[:path]
end
end end
...@@ -9,6 +9,16 @@ module EE ...@@ -9,6 +9,16 @@ module EE
expose :marked_for_deletion do |instance| expose :marked_for_deletion do |instance|
instance.marked_for_deletion? instance.marked_for_deletion?
end end
expose :compliance_management_framework, if: lambda { |_instance, _options| compliance_framework_available? }
end
private
def compliance_framework_available?
return unless project?
object.licensed_feature_available?(:compliance_framework)
end end
end end
end end
...@@ -22,16 +22,29 @@ module EE ...@@ -22,16 +22,29 @@ module EE
override :perform! override :perform!
def perform! def perform!
return unless limit.exceeded? if limit.exceeded?
limit.log_error!(log_attrs)
limit.log_error!(project_id: project.id, plan: project.actual_plan_name) error(limit.message, drop_reason: :size_limit_exceeded)
error(limit.message, drop_reason: :size_limit_exceeded) elsif limit.log_exceeded_limit?
limit.log_error!(log_attrs)
end
end end
override :break? override :break?
def break? def break?
limit.exceeded? limit.exceeded?
end end
private
def log_attrs
{
pipeline_source: pipeline.source,
plan: project.actual_plan_name,
project_id: project.id,
project_full_path: project.full_path
}
end
end end
end end
end end
......
...@@ -9,6 +9,8 @@ module EE ...@@ -9,6 +9,8 @@ module EE
include ::Gitlab::Utils::StrongMemoize include ::Gitlab::Utils::StrongMemoize
include ActionView::Helpers::TextHelper include ActionView::Helpers::TextHelper
LOGGABLE_JOBS_COUNT = 2000 # log large pipelines to determine a future global pipeline size limit
def initialize(namespace, pipeline, command) def initialize(namespace, pipeline, command)
@namespace = namespace @namespace = namespace
@pipeline = pipeline @pipeline = pipeline
...@@ -25,9 +27,11 @@ module EE ...@@ -25,9 +27,11 @@ module EE
seeds_size > ci_pipeline_size_limit seeds_size > ci_pipeline_size_limit
end end
def message def log_exceeded_limit?
return unless exceeded? seeds_size > LOGGABLE_JOBS_COUNT
end
def message
"Pipeline has too many jobs! Requested #{seeds_size}, but the limit is #{ci_pipeline_size_limit}." "Pipeline has too many jobs! Requested #{seeds_size}, but the limit is #{ci_pipeline_size_limit}."
end end
......
...@@ -3,14 +3,19 @@ ...@@ -3,14 +3,19 @@
require 'spec_helper' require 'spec_helper'
RSpec.describe Projects::PathLocksController do RSpec.describe Projects::PathLocksController do
let(:project) { create(:project, :repository, :public) } let_it_be(:project) { create(:project, :repository, :public) }
let(:user) { project.owner } let_it_be(:user) { project.owner }
let(:file_path) { 'files/lfs/lfs_object.iso' } let(:file_path) { 'files/lfs/lfs_object.iso' }
let(:lfs_enabled) { true }
before do before do
sign_in(user) sign_in(user)
allow_any_instance_of(Repository).to receive(:root_ref).and_return('lfs') allow_any_instance_of(Repository).to receive(:root_ref).and_return('lfs')
allow_next_found_instance_of(Project) do |project|
allow(project).to receive(:lfs_enabled?) { lfs_enabled }
end
end end
describe 'GET #index' do describe 'GET #index' do
...@@ -34,9 +39,7 @@ RSpec.describe Projects::PathLocksController do ...@@ -34,9 +39,7 @@ RSpec.describe Projects::PathLocksController do
describe 'POST #toggle' do describe 'POST #toggle' do
context 'when LFS is enabled' do context 'when LFS is enabled' do
before do let(:lfs_enabled) { true }
allow_any_instance_of(Project).to receive(:lfs_enabled?).and_return(true)
end
context 'when locking a file' do context 'when locking a file' do
it 'locks the file' do it 'locks the file' do
...@@ -71,6 +74,21 @@ RSpec.describe Projects::PathLocksController do ...@@ -71,6 +74,21 @@ RSpec.describe Projects::PathLocksController do
end end
end end
context 'when file does not exist' do
let(:file_path) { 'unknown-file' }
it 'locks the file' do
toggle_lock(file_path)
expect(PathLock.count).to eq(1)
expect(response).to have_gitlab_http_status(:ok)
end
it 'does not lock the file in LFS' do
expect { toggle_lock(file_path) }.not_to change { LfsFileLock.count }
end
end
context 'when unlocking a file' do context 'when unlocking a file' do
context 'with files' do context 'with files' do
before do before do
...@@ -87,6 +105,24 @@ RSpec.describe Projects::PathLocksController do ...@@ -87,6 +105,24 @@ RSpec.describe Projects::PathLocksController do
expect { toggle_lock(file_path) }.to change { LfsFileLock.count }.to(0) expect { toggle_lock(file_path) }.to change { LfsFileLock.count }.to(0)
end end
end end
context 'when file does not exist' do
let(:file_path) { 'unknown-file' }
before do
toggle_lock(file_path)
end
it 'unlocks the file' do
expect { toggle_lock(file_path) }.to change { PathLock.count }.to(0)
expect(response).to have_gitlab_http_status(:ok)
end
it 'does not unlock the file in LFS' do
expect { toggle_lock(file_path) }.not_to change { LfsFileLock.count }
end
end
end end
context 'when unlocking a directory' do context 'when unlocking a directory' do
...@@ -109,6 +145,8 @@ RSpec.describe Projects::PathLocksController do ...@@ -109,6 +145,8 @@ RSpec.describe Projects::PathLocksController do
end end
context 'when LFS is not enabled' do context 'when LFS is not enabled' do
let(:lfs_enabled) { false }
it 'locks the file' do it 'locks the file' do
expect { toggle_lock(file_path) }.to change { PathLock.count }.to(1) expect { toggle_lock(file_path) }.to change { PathLock.count }.to(1)
......
import { shallowMount } from '@vue/test-utils';
import ComplianceFrameworkLabel from 'ee_component/vue_shared/components/compliance_framework_label/compliance_framework_label.vue';
import waitForPromises from 'helpers/wait_for_promises';
import GroupFolder from '~/groups/components/group_folder.vue';
import GroupItem from '~/groups/components/group_item.vue';
import { mockParentGroupItem, mockChildren } from '../mock_data';
// Mounts GroupItem shallowly; callers override/extend props per test
// (e.g. passing a `group` with or without a compliance framework).
const createComponent = (props = {}) =>
  shallowMount(GroupItem, {
    propsData: { parentGroup: mockParentGroupItem, ...props },
    components: { GroupFolder },
  });
// EE-specific rendering of the compliance framework label on a group row.
describe('GroupItemComponent', () => {
  let wrapper;

  const findComplianceFrameworkLabel = () => wrapper.findComponent(ComplianceFrameworkLabel);

  afterEach(() => {
    wrapper.destroy();
  });

  describe('Compliance framework label', () => {
    it('does not render if the item does not have a compliance framework', async () => {
      wrapper = createComponent({ group: mockChildren[0] });
      // The label component is loaded asynchronously; let imports settle.
      await waitForPromises();

      expect(findComplianceFrameworkLabel().exists()).toBe(false);
    });

    it('renders if the item has a compliance framework', async () => {
      wrapper = createComponent({ group: mockChildren[1] });
      await waitForPromises();

      const { name, color, description } = mockChildren[1].complianceFramework;
      expect(findComplianceFrameworkLabel().props()).toStrictEqual({ name, color, description });
    });
  });
});
// Formatted (camelCase) group item used as the `parentGroup` prop in
// group_item_spec.js. Deliberately carries no `complianceFramework` key.
export const mockParentGroupItem = {
  id: 55,
  name: 'hardware',
  description: '',
  visibility: 'public',
  fullName: 'platform / hardware',
  relativePath: '/platform/hardware',
  canEdit: true,
  type: 'group',
  avatarUrl: null,
  permission: 'Owner',
  editPath: '/groups/platform/hardware/edit',
  childrenCount: 3,
  leavePath: '/groups/platform/hardware/group_members/leave',
  parentId: 54,
  memberCount: '1',
  projectCount: 1,
  subgroupCount: 2,
  canLeave: false,
  children: [],
  isOpen: true,
  isChildrenLoading: false,
  isBeingRemoved: false,
  updatedAt: '2017-04-09T18:40:39.101Z',
};
// Formatted (camelCase) child group items, shaped like the output of
// GroupsStore#formatGroupItem, used as the `group` prop in group_item_spec.js.
export const mockChildren = [
  // Entry 0: `complianceFramework` is an empty object (no `name`), so the
  // compliance framework label must NOT render for this item.
  {
    id: 57,
    name: 'bsp',
    description: '',
    visibility: 'public',
    fullName: 'platform / hardware / bsp',
    relativePath: '/platform/hardware/bsp',
    canEdit: true,
    type: 'group',
    avatarUrl: null,
    permission: 'Owner',
    editPath: '/groups/platform/hardware/bsp/edit',
    childrenCount: 6,
    leavePath: '/groups/platform/hardware/bsp/group_members/leave',
    parentId: 55,
    memberCount: '1',
    projectCount: 4,
    subgroupCount: 2,
    canLeave: false,
    children: [],
    isOpen: true,
    isChildrenLoading: false,
    isBeingRemoved: false,
    updatedAt: '2017-04-09T18:40:39.101Z',
    complianceFramework: {},
  },
  // Entry 1: fully populated `complianceFramework`, so the label SHOULD
  // render with these name/description/color props.
  {
    id: 57,
    name: 'bsp',
    description: '',
    visibility: 'public',
    fullName: 'platform / hardware / bsp',
    relativePath: '/platform/hardware/bsp',
    canEdit: true,
    type: 'group',
    avatarUrl: null,
    permission: 'Owner',
    editPath: '/groups/platform/hardware/bsp/edit',
    childrenCount: 6,
    leavePath: '/groups/platform/hardware/bsp/group_members/leave',
    parentId: 55,
    memberCount: '1',
    projectCount: 4,
    subgroupCount: 2,
    canLeave: false,
    children: [],
    isOpen: true,
    isChildrenLoading: false,
    isBeingRemoved: false,
    updatedAt: '2017-04-09T18:40:39.101Z',
    complianceFramework: {
      name: 'GDPR',
      description: 'General Data Protection Regulation',
      color: '#009966',
    },
  },
];
// Raw (snake_case) API payloads, as consumed by GroupsStore#formatGroupItem
// in groups_store_spec.js. Entry 0 omits `compliance_management_framework`;
// entry 1 includes one, from which only name/color/description are mapped.
export const mockRawChildren = [
  {
    id: 57,
    name: 'bsp',
    description: '',
    visibility: 'public',
    full_name: 'platform / hardware / bsp',
    relative_path: '/platform/hardware/bsp',
    can_edit: true,
    type: 'group',
    avatar_url: null,
    permission: 'Owner',
    edit_path: '/groups/platform/hardware/bsp/edit',
    children_count: 6,
    leave_path: '/groups/platform/hardware/bsp/group_members/leave',
    parent_id: 55,
    number_users_with_delimiter: '1',
    project_count: 4,
    subgroup_count: 2,
    can_leave: false,
    children: [],
    updated_at: '2017-04-09T18:40:39.101Z',
  },
  {
    id: 57,
    name: 'bsp',
    description: '',
    visibility: 'public',
    full_name: 'platform / hardware / bsp',
    relative_path: '/platform/hardware/bsp',
    can_edit: true,
    type: 'group',
    avatar_url: null,
    permission: 'Owner',
    edit_path: '/groups/platform/hardware/bsp/edit',
    children_count: 6,
    leave_path: '/groups/platform/hardware/bsp/group_members/leave',
    parent_id: 55,
    number_users_with_delimiter: '1',
    project_count: 4,
    subgroup_count: 2,
    can_leave: false,
    children: [],
    updated_at: '2017-04-09T18:40:39.101Z',
    compliance_management_framework: {
      id: 1,
      namespace_id: 1,
      name: 'GDPR',
      description: 'General Data Protection Regulation',
      color: '#009966',
      pipeline_configuration_full_path: null,
      regulated: true,
    },
  },
];
import GroupsStore from '~/groups/store/groups_store';
import { mockRawChildren } from '../mock_data';

// EE behaviour of GroupsStore: `formatGroupItem` must expose a
// `complianceFramework` (name/color/description) when the raw payload
// contains `compliance_management_framework`, and leave it undefined otherwise.
// NOTE: the describe label previously said 'ee/ProjectsStore' — a copy-paste
// mistake, since this file exercises GroupsStore.
describe('ee/GroupsStore', () => {
  describe('formatGroupItem', () => {
    it('without a compliance framework', () => {
      const store = new GroupsStore();
      const updatedGroupItem = store.formatGroupItem(mockRawChildren[0]);

      expect(updatedGroupItem.complianceFramework).toBeUndefined();
    });

    it('with a compliance framework', () => {
      const store = new GroupsStore();
      const updatedGroupItem = store.formatGroupItem(mockRawChildren[1]);

      // Only the fields the UI needs are mapped; ids and the other raw
      // attributes are intentionally dropped.
      expect(updatedGroupItem.complianceFramework).toStrictEqual({
        name: mockRawChildren[1].compliance_management_framework.name,
        color: mockRawChildren[1].compliance_management_framework.color,
        description: mockRawChildren[1].compliance_management_framework.description,
      });
    });
  });
});
...@@ -84,10 +84,32 @@ RSpec.describe EE::Gitlab::Ci::Pipeline::Quota::Size do ...@@ -84,10 +84,32 @@ RSpec.describe EE::Gitlab::Ci::Pipeline::Quota::Size do
context 'when limit is exceeded' do context 'when limit is exceeded' do
include_context 'pipeline size limit exceeded' include_context 'pipeline size limit exceeded'
it 'returns infor about pipeline size limit exceeded' do it 'returns info about pipeline size limit exceeded' do
expect(subject.message) expect(subject.message)
.to eq "Pipeline has too many jobs! Requested 2, but the limit is 1." .to eq "Pipeline has too many jobs! Requested 2, but the limit is 1."
end end
end end
end end
describe '#log_exceeded_limit?' do
context 'when there are more than 2000 jobs in the pipeline' do
let(:command) do
double(:command, pipeline_seed: double(:pipeline_seed, size: 2001))
end
it 'returns true' do
expect(subject.log_exceeded_limit?).to be_truthy
end
end
context 'when there are 2000 or less jobs in the pipeline' do
let(:command) do
double(:command, pipeline_seed: double(:pipeline_seed, size: 2000))
end
it 'returns false' do
expect(subject.log_exceeded_limit?).to be_falsey
end
end
end
end end
...@@ -54,7 +54,7 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Activity do ...@@ -54,7 +54,7 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Activity do
end end
it 'logs the error' do it 'logs the error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).with( expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
instance_of(Gitlab::Ci::Limit::LimitExceededError), instance_of(Gitlab::Ci::Limit::LimitExceededError),
project_id: project.id, plan: namespace.actual_plan_name project_id: project.id, plan: namespace.actual_plan_name
) )
......
...@@ -56,7 +56,7 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::JobActivity do ...@@ -56,7 +56,7 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::JobActivity do
end end
it 'logs the error' do it 'logs the error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).with( expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
instance_of(Gitlab::Ci::Limit::LimitExceededError), instance_of(Gitlab::Ci::Limit::LimitExceededError),
project_id: project.id, plan: namespace.actual_plan_name project_id: project.id, plan: namespace.actual_plan_name
) )
......
...@@ -68,9 +68,10 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Size do ...@@ -68,9 +68,10 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Size do
end end
it 'logs the error' do it 'logs the error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).with( expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
instance_of(Gitlab::Ci::Limit::LimitExceededError), instance_of(Gitlab::Ci::Limit::LimitExceededError),
project_id: project.id, plan: namespace.actual_plan_name project_id: project.id, plan: namespace.actual_plan_name,
project_full_path: project.full_path, pipeline_source: pipeline.source
) )
subject subject
...@@ -132,4 +133,31 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Size do ...@@ -132,4 +133,31 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Size do
subject subject
end end
end end
context 'when pipeline size limit is disabled' do
before do
ultimate_plan = create(:ultimate_plan)
create(:plan_limits, plan: ultimate_plan, ci_pipeline_size: 0)
create(:gitlab_subscription, namespace: namespace, hosted_plan: ultimate_plan)
end
context 'when global pipeline size limit is exceeded' do
let(:command) do
double(:command,
project: project,
current_user: user,
pipeline_seed: double(:seed, size: 2001))
end
it 'logs the pipeline' do
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
instance_of(Gitlab::Ci::Limit::LimitExceededError),
project_id: project.id, plan: namespace.actual_plan_name,
project_full_path: project.full_path, pipeline_source: pipeline.source
)
subject
end
end
end
end end
# frozen_string_literal: true
require 'spec_helper'
# Verifies that GroupChildEntity exposes :compliance_management_framework only
# for projects, and only when the :compliance_framework feature is licensed.
RSpec.describe GroupChildEntity do
  include ExternalAuthorizationServiceHelpers
  include Gitlab::Routing.url_helpers

  let_it_be(:user) { create(:user) }
  let_it_be(:project) { create(:project, :with_sox_compliance_framework) }
  let_it_be(:project_without_compliance_framework) { create(:project) }
  let_it_be(:group) { create(:group) }

  let(:request) { double('request') }
  let(:entity) { described_class.new(object, request: request) }

  subject(:json) { entity.as_json }

  before do
    allow(request).to receive(:current_user).and_return(user)
    stub_commonmark_sourcepos_disabled
  end

  describe 'with compliance framework' do
    shared_examples 'does not have the compliance framework' do
      it do
        expect(json[:compliance_management_framework]).to be_nil
      end
    end

    context 'disabled' do
      before do
        stub_licensed_features(compliance_framework: false)
      end

      context 'for a project' do
        let(:object) { project }

        it_behaves_like 'does not have the compliance framework'
      end

      context 'for a group' do
        let(:object) { group }

        it_behaves_like 'does not have the compliance framework'
      end
    end

    # Was `describe 'enabled'`; changed to `context` for consistency with the
    # sibling `context 'disabled'` block above.
    context 'enabled' do
      before do
        stub_licensed_features(compliance_framework: true)
      end

      context 'for a project' do
        let(:object) { project }

        it 'has the compliance framework' do
          expect(json[:compliance_management_framework]['name']).to eq('SOX')
        end
      end

      context 'for a project without a compliance framework' do
        let(:object) { project_without_compliance_framework }

        it_behaves_like 'does not have the compliance framework'
      end

      # The entity only exposes the field for projects, so groups get nothing
      # even when the feature is licensed.
      context 'for a group' do
        let(:object) { group }

        it_behaves_like 'does not have the compliance framework'
      end
    end
  end
end
...@@ -47,16 +47,6 @@ module ExtractsPath ...@@ -47,16 +47,6 @@ module ExtractsPath
end end
# rubocop:enable Gitlab/ModuleWithInstanceVariables # rubocop:enable Gitlab/ModuleWithInstanceVariables
def lfs_blob_ids
blob_ids = tree.blobs.map(&:id)
# When current endpoint is a Blob then `tree.blobs` will be empty, it means we need to analyze
# the current Blob in order to determine if it's a LFS object
blob_ids = Array.wrap(@repo.blob_at(@commit.id, @path)&.id) if blob_ids.empty? # rubocop:disable Gitlab/ModuleWithInstanceVariables
@lfs_blob_ids = Gitlab::Git::Blob.batch_lfs_pointers(repository_container.repository, blob_ids).map(&:id) # rubocop:disable Gitlab/ModuleWithInstanceVariables
end
private private
# Override in controllers to determine which actions are subject to the redirect # Override in controllers to determine which actions are subject to the redirect
......
...@@ -24,10 +24,13 @@ module Gitlab ...@@ -24,10 +24,13 @@ module Gitlab
end end
def log_error!(extra_context = {}) def log_error!(extra_context = {})
error = LimitExceededError.new(message) ::Gitlab::ErrorTracking.log_exception(limit_exceeded_error, extra_context)
# TODO: change this to Gitlab::ErrorTracking.log_exception(error, extra_context) end
# https://gitlab.com/gitlab-org/gitlab/issues/32906
::Gitlab::ErrorTracking.track_exception(error, extra_context) protected
def limit_exceeded_error
LimitExceededError.new(message)
end end
end end
end end
......
...@@ -24,7 +24,7 @@ module Gitlab ...@@ -24,7 +24,7 @@ module Gitlab
name = :gitlab_ci_pipeline_size_builds name = :gitlab_ci_pipeline_size_builds
comment = 'Pipeline size' comment = 'Pipeline size'
labels = { source: nil } labels = { source: nil }
buckets = [0, 1, 5, 10, 20, 50, 100, 200, 500, 1000] buckets = [0, 1, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 3000]
::Gitlab::Metrics.histogram(name, comment, labels, buckets) ::Gitlab::Metrics.histogram(name, comment, labels, buckets)
end end
......
...@@ -14,8 +14,8 @@ module Gitlab ...@@ -14,8 +14,8 @@ module Gitlab
@observations = [] @observations = []
end end
def observe(migration, &block) def observe(version:, name:, &block)
observation = Observation.new(migration) observation = Observation.new(version, name)
observation.success = true observation.success = true
exception = nil exception = nil
......
...@@ -4,7 +4,8 @@ module Gitlab ...@@ -4,7 +4,8 @@ module Gitlab
module Database module Database
module Migrations module Migrations
Observation = Struct.new( Observation = Struct.new(
:migration, :version,
:name,
:walltime, :walltime,
:success, :success,
:total_database_size_change, :total_database_size_change,
......
...@@ -23,7 +23,7 @@ module Gitlab ...@@ -23,7 +23,7 @@ module Gitlab
end end
def record(observation) def record(observation)
File.rename(@file_path, File.join(Instrumentation::RESULT_DIR, "#{observation.migration}-query-details.json")) File.rename(@file_path, File.join(Instrumentation::RESULT_DIR, "#{observation.version}_#{observation.name}-query-details.json"))
end end
def record_sql_event(_name, started, finished, _unique_id, payload) def record_sql_event(_name, started, finished, _unique_id, payload)
......
...@@ -18,7 +18,7 @@ module Gitlab ...@@ -18,7 +18,7 @@ module Gitlab
end end
def record(observation) def record(observation)
File.rename(@log_file_path, File.join(Instrumentation::RESULT_DIR, "#{observation.migration}.log")) File.rename(@log_file_path, File.join(Instrumentation::RESULT_DIR, "#{observation.version}_#{observation.name}.log"))
end end
end end
end end
......
...@@ -36,14 +36,17 @@ module Gitlab ...@@ -36,14 +36,17 @@ module Gitlab
end end
end end
def default_project_namespace(slug) def default_project_namespace(environment_slug)
namespace_slug = "#{project.path}-#{project.id}".downcase maybe_environment_suffix = cluster.namespace_per_environment? ? "-#{environment_slug}" : ''
suffix = "-#{project.id}#{maybe_environment_suffix}"
if cluster.namespace_per_environment? namespace = project_path_slug(63 - suffix.length) + suffix
namespace_slug += "-#{slug}" Gitlab::NamespaceSanitizer.sanitize(namespace)
end end
Gitlab::NamespaceSanitizer.sanitize(namespace_slug) def project_path_slug(max_length)
Gitlab::NamespaceSanitizer
.sanitize(project.path.downcase)
.first(max_length)
end end
## ##
......
...@@ -220,7 +220,7 @@ namespace :gitlab do ...@@ -220,7 +220,7 @@ namespace :gitlab do
instrumentation = Gitlab::Database::Migrations::Instrumentation.new instrumentation = Gitlab::Database::Migrations::Instrumentation.new
pending_migrations.each do |migration| pending_migrations.each do |migration|
instrumentation.observe(migration.version) do instrumentation.observe(version: migration.version, name: migration.name) do
ActiveRecord::Migrator.new(:up, ctx.migrations, ctx.schema_migration, migration.version).run ActiveRecord::Migrator.new(:up, ctx.migrations, ctx.schema_migration, migration.version).run
end end
end end
......
# frozen_string_literal: true
require 'spec_helper'
# LfsPointersFinder returns the blob ids of LFS pointers under a path (file or
# directory) of a repository.
RSpec.describe LfsPointersFinder do
  subject(:finder) { described_class.new(repository, path) }

  let_it_be(:project) { create(:project, :repository) }
  let_it_be(:repository) { project.repository }

  let(:path) { nil }

  describe '#execute' do
    subject { finder.execute }

    # Blob id of files/lfs/lfs_object.iso in the seeded test repository.
    let(:expected_blob_id) { '0c304a93cb8430108629bbbcaa27db3343299bc0' }

    context 'when path has no LFS files' do
      it { is_expected.to eq([]) }
    end

    context 'when path points to LFS file' do
      let(:path) { 'files/lfs/lfs_object.iso' }

      it 'returns LFS blob ids' do
        is_expected.to eq([expected_blob_id])
      end
    end

    context 'when path points to directory with LFS files' do
      let(:path) { 'files/lfs/' }

      it 'returns LFS blob ids' do
        is_expected.to eq([expected_blob_id])
      end
    end

    context 'when repository is empty' do
      # BUG FIX: `repository` is declared with let_it_be above, so overriding
      # only `project` here left the finder running against the seeded
      # repository. Re-define `repository` so this context really exercises
      # an empty repo.
      let(:project) { create(:project, :empty_repo) }
      let(:repository) { project.repository }

      it { is_expected.to eq([]) }
    end
  end
end
# frozen_string_literal: true
require 'spec_helper'
# Verifies that the finder returns a single row with the highest access level
# across every membership avenue (direct project member, group member, group
# share, project share) for the given user only.
RSpec.describe Projects::Members::EffectiveAccessLevelPerUserFinder, '#execute' do
  let_it_be(:group) { create(:group) }
  let_it_be(:project) { create(:project, group: group) }
  let_it_be(:user) { create(:user) }
  # The result set is being converted to json just for the ease of testing.
  subject { described_class.new(project, user).execute.as_json }
  context 'a combination of all possible avenues of membership' do
    # `another_user` gets the same memberships as `user` to prove the finder
    # scopes its result to the requested user alone.
    let_it_be(:another_user) { create(:user) }
    let_it_be(:shared_with_group) { create(:group) }
    before do
      # Project shared with a group at maintainer; the project's own group
      # shared with the same group at reporter.
      create(:project_group_link, :maintainer, project: project, group: shared_with_group)
      create(:group_group_link, :reporter, shared_group: project.group, shared_with_group: shared_with_group)
      shared_with_group.add_maintainer(user)
      shared_with_group.add_maintainer(another_user)
      group.add_guest(user)
      group.add_guest(another_user)
      project.add_developer(user)
      project.add_developer(another_user)
    end
    it 'includes the highest access level from all avenues of memberships for the specific user alone' do
      expect(subject).to eq(
        [{
          'user_id' => user.id,
          'access_level' => Gitlab::Access::MAINTAINER, # From project_group_link
          'id' => nil
        }]
      )
    end
  end
end
import { mount } from '@vue/test-utils'; import { mount } from '@vue/test-utils';
import Vue from 'vue'; import waitForPromises from 'helpers/wait_for_promises';
import GroupFolder from '~/groups/components/group_folder.vue'; import GroupFolder from '~/groups/components/group_folder.vue';
import GroupItem from '~/groups/components/group_item.vue'; import GroupItem from '~/groups/components/group_item.vue';
import ItemActions from '~/groups/components/item_actions.vue'; import ItemActions from '~/groups/components/item_actions.vue';
...@@ -22,8 +22,7 @@ describe('GroupItemComponent', () => { ...@@ -22,8 +22,7 @@ describe('GroupItemComponent', () => {
beforeEach(() => { beforeEach(() => {
wrapper = createComponent(); wrapper = createComponent();
return waitForPromises();
return Vue.nextTick();
}); });
afterEach(() => { afterEach(() => {
......
...@@ -3,16 +3,10 @@ import ResponsiveApp from '~/nav/components/responsive_app.vue'; ...@@ -3,16 +3,10 @@ import ResponsiveApp from '~/nav/components/responsive_app.vue';
import ResponsiveHeader from '~/nav/components/responsive_header.vue'; import ResponsiveHeader from '~/nav/components/responsive_header.vue';
import ResponsiveHome from '~/nav/components/responsive_home.vue'; import ResponsiveHome from '~/nav/components/responsive_home.vue';
import TopNavContainerView from '~/nav/components/top_nav_container_view.vue'; import TopNavContainerView from '~/nav/components/top_nav_container_view.vue';
import eventHub, { EVENT_RESPONSIVE_TOGGLE } from '~/nav/event_hub';
import { resetMenuItemsActive } from '~/nav/utils/reset_menu_items_active'; import { resetMenuItemsActive } from '~/nav/utils/reset_menu_items_active';
import KeepAliveSlots from '~/vue_shared/components/keep_alive_slots.vue'; import KeepAliveSlots from '~/vue_shared/components/keep_alive_slots.vue';
import { TEST_NAV_DATA } from '../mock_data'; import { TEST_NAV_DATA } from '../mock_data';
const HTML_HEADER_CONTENT = '<div class="header-content"></div>';
const HTML_MENU_EXPANDED = '<div class="menu-expanded"></div>';
const HTML_HEADER_WITH_MENU_EXPANDED =
'<div></div><div class="header-content menu-expanded"></div>';
describe('~/nav/components/responsive_app.vue', () => { describe('~/nav/components/responsive_app.vue', () => {
let wrapper; let wrapper;
...@@ -26,13 +20,10 @@ describe('~/nav/components/responsive_app.vue', () => { ...@@ -26,13 +20,10 @@ describe('~/nav/components/responsive_app.vue', () => {
}, },
}); });
}; };
const triggerResponsiveToggle = () => eventHub.$emit(EVENT_RESPONSIVE_TOGGLE);
const findHome = () => wrapper.findComponent(ResponsiveHome); const findHome = () => wrapper.findComponent(ResponsiveHome);
const findMobileOverlay = () => wrapper.find('[data-testid="mobile-overlay"]'); const findMobileOverlay = () => wrapper.find('[data-testid="mobile-overlay"]');
const findSubviewHeader = () => wrapper.findComponent(ResponsiveHeader); const findSubviewHeader = () => wrapper.findComponent(ResponsiveHeader);
const findSubviewContainer = () => wrapper.findComponent(TopNavContainerView); const findSubviewContainer = () => wrapper.findComponent(TopNavContainerView);
const hasBodyResponsiveOpen = () => document.body.classList.contains('top-nav-responsive-open');
const hasMobileOverlayVisible = () => findMobileOverlay().classes('mobile-nav-open'); const hasMobileOverlayVisible = () => findMobileOverlay().classes('mobile-nav-open');
beforeEach(() => { beforeEach(() => {
...@@ -57,23 +48,6 @@ describe('~/nav/components/responsive_app.vue', () => { ...@@ -57,23 +48,6 @@ describe('~/nav/components/responsive_app.vue', () => {
}); });
}); });
it.each`
bodyHtml | expectation
${''} | ${false}
${HTML_HEADER_CONTENT} | ${false}
${HTML_MENU_EXPANDED} | ${false}
${HTML_HEADER_WITH_MENU_EXPANDED} | ${true}
`(
'with responsive toggle event and html set to $bodyHtml, responsive open = $expectation',
({ bodyHtml, expectation }) => {
document.body.innerHTML = bodyHtml;
triggerResponsiveToggle();
expect(hasBodyResponsiveOpen()).toBe(expectation);
},
);
it.each` it.each`
events | expectation events | expectation
${[]} | ${false} ${[]} | ${false}
...@@ -96,17 +70,6 @@ describe('~/nav/components/responsive_app.vue', () => { ...@@ -96,17 +70,6 @@ describe('~/nav/components/responsive_app.vue', () => {
); );
}); });
describe('with menu expanded in body', () => {
beforeEach(() => {
document.body.innerHTML = HTML_HEADER_WITH_MENU_EXPANDED;
createComponent();
});
it('sets the body responsive open', () => {
expect(hasBodyResponsiveOpen()).toBe(true);
});
});
const projectsContainerProps = { const projectsContainerProps = {
containerClass: 'gl-px-3', containerClass: 'gl-px-3',
frequentItemsDropdownType: ResponsiveApp.FREQUENT_ITEMS_PROJECTS.namespace, frequentItemsDropdownType: ResponsiveApp.FREQUENT_ITEMS_PROJECTS.namespace,
...@@ -159,17 +122,4 @@ describe('~/nav/components/responsive_app.vue', () => { ...@@ -159,17 +122,4 @@ describe('~/nav/components/responsive_app.vue', () => {
}); });
}); });
}); });
describe('when destroyed', () => {
beforeEach(() => {
createComponent();
wrapper.destroy();
});
it('responsive toggle event does nothing', () => {
triggerResponsiveToggle();
expect(hasBodyResponsiveOpen()).toBe(false);
});
});
}); });
...@@ -213,20 +213,4 @@ RSpec.describe ExtractsPath do ...@@ -213,20 +213,4 @@ RSpec.describe ExtractsPath do
expect(extract_ref_without_atom('foo.atom')).to eq(nil) expect(extract_ref_without_atom('foo.atom')).to eq(nil)
end end
end end
describe '#lfs_blob_ids' do
let(:tag) { @project.repository.add_tag(@project.owner, 'my-annotated-tag', 'master', 'test tag') }
let(:ref) { tag.target }
let(:params) { { ref: ref, path: 'README.md' } }
before do
@project = create(:project, :repository)
end
it 'handles annotated tags' do
assign_ref_vars
expect(lfs_blob_ids).to eq([])
end
end
end end
...@@ -85,7 +85,7 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Deployments do ...@@ -85,7 +85,7 @@ RSpec.describe ::Gitlab::Ci::Pipeline::Chain::Limit::Deployments do
end end
it 'logs the error' do it 'logs the error' do
expect(Gitlab::ErrorTracking).to receive(:track_exception).with( expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
instance_of(Gitlab::Ci::Limit::LimitExceededError), instance_of(Gitlab::Ci::Limit::LimitExceededError),
project_id: project.id, plan: namespace.actual_plan_name project_id: project.id, plan: namespace.actual_plan_name
) )
......
...@@ -5,14 +5,15 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -5,14 +5,15 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
describe '#observe' do describe '#observe' do
subject { described_class.new } subject { described_class.new }
let(:migration) { 1234 } let(:migration_name) { 'test' }
let(:migration_version) { '12345' }
it 'executes the given block' do it 'executes the given block' do
expect { |b| subject.observe(migration, &b) }.to yield_control expect { |b| subject.observe(version: migration_version, name: migration_name, &b) }.to yield_control
end end
context 'behavior with observers' do context 'behavior with observers' do
subject { described_class.new(observers).observe(migration) {} } subject { described_class.new(observers).observe(version: migration_version, name: migration_name) {} }
let(:observers) { [observer] } let(:observers) { [observer] }
let(:observer) { instance_double('Gitlab::Database::Migrations::Observers::MigrationObserver', before: nil, after: nil, record: nil) } let(:observer) { instance_double('Gitlab::Database::Migrations::Observers::MigrationObserver', before: nil, after: nil, record: nil) }
...@@ -21,7 +22,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -21,7 +22,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
expect(observer).to receive(:before).ordered expect(observer).to receive(:before).ordered
expect(observer).to receive(:after).ordered expect(observer).to receive(:after).ordered
expect(observer).to receive(:record).ordered do |observation| expect(observer).to receive(:record).ordered do |observation|
expect(observation.migration).to eq(migration) expect(observation.version).to eq(migration_version)
end end
subject subject
...@@ -47,7 +48,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -47,7 +48,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
end end
context 'on successful execution' do context 'on successful execution' do
subject { described_class.new.observe(migration) {} } subject { described_class.new.observe(version: migration_version, name: migration_name) {} }
it 'records walltime' do it 'records walltime' do
expect(subject.walltime).not_to be_nil expect(subject.walltime).not_to be_nil
...@@ -58,12 +59,16 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -58,12 +59,16 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
end end
it 'records the migration version' do it 'records the migration version' do
expect(subject.migration).to eq(migration) expect(subject.version).to eq(migration_version)
end
it 'records the migration name' do
expect(subject.name).to eq(migration_name)
end end
end end
context 'upon failure' do context 'upon failure' do
subject { described_class.new.observe(migration) { raise 'something went wrong' } } subject { described_class.new.observe(version: migration_version, name: migration_name) { raise 'something went wrong' } }
it 'raises the exception' do it 'raises the exception' do
expect { subject }.to raise_error(/something went wrong/) expect { subject }.to raise_error(/something went wrong/)
...@@ -73,7 +78,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -73,7 +78,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
subject { instance.observations.first } subject { instance.observations.first }
before do before do
instance.observe(migration) { raise 'something went wrong' } instance.observe(version: migration_version, name: migration_name) { raise 'something went wrong' }
rescue StandardError rescue StandardError
# ignore # ignore
end end
...@@ -89,7 +94,11 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -89,7 +94,11 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
end end
it 'records the migration version' do it 'records the migration version' do
expect(subject.migration).to eq(migration) expect(subject.version).to eq(migration_version)
end
it 'records the migration name' do
expect(subject.name).to eq(migration_name)
end end
end end
end end
...@@ -101,8 +110,8 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do ...@@ -101,8 +110,8 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
let(:migration2) { double('migration2', call: nil) } let(:migration2) { double('migration2', call: nil) }
it 'records observations for all migrations' do it 'records observations for all migrations' do
subject.observe('migration1') {} subject.observe(version: migration_version, name: migration_name) {}
subject.observe('migration2') { raise 'something went wrong' } rescue nil subject.observe(version: migration_version, name: migration_name) { raise 'something went wrong' } rescue nil
expect(subject.observations.size).to eq(2) expect(subject.observations.size).to eq(2)
end end
......
...@@ -4,14 +4,15 @@ require 'spec_helper' ...@@ -4,14 +4,15 @@ require 'spec_helper'
RSpec.describe Gitlab::Database::Migrations::Observers::QueryDetails do RSpec.describe Gitlab::Database::Migrations::Observers::QueryDetails do
subject { described_class.new } subject { described_class.new }
let(:observation) { Gitlab::Database::Migrations::Observation.new(migration) } let(:observation) { Gitlab::Database::Migrations::Observation.new(migration_version, migration_name) }
let(:connection) { ActiveRecord::Base.connection } let(:connection) { ActiveRecord::Base.connection }
let(:query) { "select date_trunc('day', $1::timestamptz) + $2 * (interval '1 hour')" } let(:query) { "select date_trunc('day', $1::timestamptz) + $2 * (interval '1 hour')" }
let(:query_binds) { [Time.current, 3] } let(:query_binds) { [Time.current, 3] }
let(:directory_path) { Dir.mktmpdir } let(:directory_path) { Dir.mktmpdir }
let(:log_file) { "#{directory_path}/#{migration}-query-details.json" } let(:log_file) { "#{directory_path}/#{migration_version}_#{migration_name}-query-details.json" }
let(:query_details) { Gitlab::Json.parse(File.read(log_file)) } let(:query_details) { Gitlab::Json.parse(File.read(log_file)) }
let(:migration) { 20210422152437 } let(:migration_version) { 20210422152437 }
let(:migration_name) { 'test' }
before do before do
stub_const('Gitlab::Database::Migrations::Instrumentation::RESULT_DIR', directory_path) stub_const('Gitlab::Database::Migrations::Instrumentation::RESULT_DIR', directory_path)
......
...@@ -4,12 +4,13 @@ require 'spec_helper' ...@@ -4,12 +4,13 @@ require 'spec_helper'
RSpec.describe Gitlab::Database::Migrations::Observers::QueryLog do RSpec.describe Gitlab::Database::Migrations::Observers::QueryLog do
subject { described_class.new } subject { described_class.new }
let(:observation) { Gitlab::Database::Migrations::Observation.new(migration) } let(:observation) { Gitlab::Database::Migrations::Observation.new(migration_version, migration_name) }
let(:connection) { ActiveRecord::Base.connection } let(:connection) { ActiveRecord::Base.connection }
let(:query) { 'select 1' } let(:query) { 'select 1' }
let(:directory_path) { Dir.mktmpdir } let(:directory_path) { Dir.mktmpdir }
let(:log_file) { "#{directory_path}/current.log" } let(:log_file) { "#{directory_path}/current.log" }
let(:migration) { 20210422152437 } let(:migration_version) { 20210422152437 }
let(:migration_name) { 'test' }
before do before do
stub_const('Gitlab::Database::Migrations::Instrumentation::RESULT_DIR', directory_path) stub_const('Gitlab::Database::Migrations::Instrumentation::RESULT_DIR', directory_path)
...@@ -22,7 +23,7 @@ RSpec.describe Gitlab::Database::Migrations::Observers::QueryLog do ...@@ -22,7 +23,7 @@ RSpec.describe Gitlab::Database::Migrations::Observers::QueryLog do
it 'writes a file with the query log' do it 'writes a file with the query log' do
observe observe
expect(File.read("#{directory_path}/#{migration}.log")).to include(query) expect(File.read("#{directory_path}/#{migration_version}_#{migration_name}.log")).to include(query)
end end
it 'does not change the default logger' do it 'does not change the default logger' do
......
...@@ -32,6 +32,14 @@ RSpec.describe Gitlab::Kubernetes::DefaultNamespace do ...@@ -32,6 +32,14 @@ RSpec.describe Gitlab::Kubernetes::DefaultNamespace do
subject { generator.from_environment_slug(environment.slug) } subject { generator.from_environment_slug(environment.slug) }
shared_examples_for 'handles very long project paths' do
before do
allow(project).to receive(:path).and_return 'x' * 100
end
it { is_expected.to satisfy { |s| s.length <= 63 } }
end
context 'namespace per environment is enabled' do context 'namespace per environment is enabled' do
context 'platform namespace is specified' do context 'platform namespace is specified' do
let(:platform_namespace) { 'platform-namespace' } let(:platform_namespace) { 'platform-namespace' }
...@@ -47,15 +55,12 @@ RSpec.describe Gitlab::Kubernetes::DefaultNamespace do ...@@ -47,15 +55,12 @@ RSpec.describe Gitlab::Kubernetes::DefaultNamespace do
context 'platform namespace is blank' do context 'platform namespace is blank' do
let(:platform_namespace) { nil } let(:platform_namespace) { nil }
let(:mock_namespace) { 'mock-namespace' }
it 'constructs a namespace from the project and environment' do it 'constructs a namespace from the project and environment slug' do
expect(Gitlab::NamespaceSanitizer).to receive(:sanitize) expect(subject).to eq "path-with-capitals-#{project.id}-#{environment.slug}"
.with("#{project.path}-#{project.id}-#{environment.slug}".downcase)
.and_return(mock_namespace)
expect(subject).to eq mock_namespace
end end
it_behaves_like 'handles very long project paths'
end end
end end
...@@ -70,15 +75,12 @@ RSpec.describe Gitlab::Kubernetes::DefaultNamespace do ...@@ -70,15 +75,12 @@ RSpec.describe Gitlab::Kubernetes::DefaultNamespace do
context 'platform namespace is blank' do context 'platform namespace is blank' do
let(:platform_namespace) { nil } let(:platform_namespace) { nil }
let(:mock_namespace) { 'mock-namespace' }
it 'constructs a namespace from the project and environment' do it 'constructs a namespace from just the project' do
expect(Gitlab::NamespaceSanitizer).to receive(:sanitize) expect(subject).to eq "path-with-capitals-#{project.id}"
.with("#{project.path}-#{project.id}".downcase)
.and_return(mock_namespace)
expect(subject).to eq mock_namespace
end end
it_behaves_like 'handles very long project paths'
end end
end end
end end
......
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe AuthorizedProjectUpdate::ProjectRecalculatePerUserService, '#execute' do
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }
let_it_be(:another_user) { create(:user) }
subject(:execute) { described_class.new(project, user).execute }
it 'returns success' do
expect(execute.success?).to eq(true)
end
context 'when there are no changes to be made' do
it 'does not change authorizations' do
expect { execute }.not_to(change { ProjectAuthorization.count })
end
end
context 'when there are changes to be made' do
context 'when addition is required' do
before do
project.add_developer(user)
project.add_developer(another_user)
project.project_authorizations.where(user: [user, another_user]).delete_all
end
it 'adds a new authorization record for the specific user' do
expect { execute }.to(
change { project.project_authorizations.where(user: user).count }
.from(0).to(1)
)
end
it 'does not add a new authorization record for the other user' do
expect { execute }.not_to(
change { project.project_authorizations.where(user: another_user).count }
)
end
it 'adds a new authorization record with the correct access level for the specific user' do
execute
project_authorization = project.project_authorizations.where(
user: user,
access_level: Gitlab::Access::DEVELOPER
)
expect(project_authorization).to exist
end
end
context 'when removal is required' do
before do
create(:project_authorization, user: user, project: project)
create(:project_authorization, user: another_user, project: project)
end
it 'removes the authorization record for the specific user' do
expect { execute }.to(
change { project.project_authorizations.where(user: user).count }
.from(1).to(0)
)
end
it 'does not remove the authorization record for the other user' do
expect { execute }.not_to(
change { project.project_authorizations.where(user: another_user).count }
)
end
end
context 'when an update in access level is required' do
before do
project.add_developer(user)
project.add_developer(another_user)
project.project_authorizations.where(user: [user, another_user]).delete_all
create(:project_authorization, project: project, user: user, access_level: Gitlab::Access::GUEST)
create(:project_authorization, project: project, user: another_user, access_level: Gitlab::Access::GUEST)
end
it 'updates the authorization of the specific user to the correct access level' do
expect { execute }.to(
change { project.project_authorizations.find_by(user: user).access_level }
.from(Gitlab::Access::GUEST).to(Gitlab::Access::DEVELOPER)
)
end
it 'does not update the authorization of the other user to the correct access level' do
expect { execute }.not_to(
change { project.project_authorizations.find_by(user: another_user).access_level }
.from(Gitlab::Access::GUEST)
)
end
end
end
end
...@@ -276,8 +276,8 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do ...@@ -276,8 +276,8 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do
let(:ctx) { double('ctx', migrations: all_migrations, schema_migration: double, get_all_versions: existing_versions) } let(:ctx) { double('ctx', migrations: all_migrations, schema_migration: double, get_all_versions: existing_versions) }
let(:instrumentation) { instance_double(Gitlab::Database::Migrations::Instrumentation, observations: observations) } let(:instrumentation) { instance_double(Gitlab::Database::Migrations::Instrumentation, observations: observations) }
let(:existing_versions) { [1] } let(:existing_versions) { [1] }
let(:all_migrations) { [double('migration1', version: 1), pending_migration] } let(:all_migrations) { [double('migration1', version: 1, name: 'test'), pending_migration] }
let(:pending_migration) { double('migration2', version: 2) } let(:pending_migration) { double('migration2', version: 2, name: 'test') }
let(:filename) { Gitlab::Database::Migrations::Instrumentation::STATS_FILENAME } let(:filename) { Gitlab::Database::Migrations::Instrumentation::STATS_FILENAME }
let(:result_dir) { Dir.mktmpdir } let(:result_dir) { Dir.mktmpdir }
let(:observations) { %w[some data] } let(:observations) { %w[some data] }
...@@ -303,7 +303,7 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do ...@@ -303,7 +303,7 @@ RSpec.describe 'gitlab:db namespace rake task', :silence_stdout do
end end
it 'instruments the pending migration' do it 'instruments the pending migration' do
expect(instrumentation).to receive(:observe).with(2).and_yield expect(instrumentation).to receive(:observe).with(version: 2, name: 'test').and_yield
subject subject
end end
......
...@@ -15,7 +15,6 @@ RSpec.describe 'projects/tree/show' do ...@@ -15,7 +15,6 @@ RSpec.describe 'projects/tree/show' do
before do before do
assign(:project, project) assign(:project, project)
assign(:repository, repository) assign(:repository, repository)
assign(:lfs_blob_ids, [])
allow(view).to receive(:can?).and_return(true) allow(view).to receive(:can?).and_return(true)
allow(view).to receive(:can_collaborate_with_project?).and_return(true) allow(view).to receive(:can_collaborate_with_project?).and_return(true)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment