Commit 9b3edb04 authored by Michelle Gill

Merge branch 'master' into patch-63

parents a1916cc9 0953cc16
...@@ -59,7 +59,7 @@ export default {
    },
    requestRefreshPipelineGraph() {
      // When an action is clicked
-     // (wether in the dropdown or in the main nodes, we refresh the big graph)
+     // (whether in the dropdown or in the main nodes, we refresh the big graph)
      this.mediator
        .refreshPipeline()
        .catch(() => flash(__('An error occurred while making the request.')));
......
# frozen_string_literal: true
-# Module to prepend into finders to specify wether or not the finder requires
+# Module to prepend into finders to specify whether or not the finder requires
# cross project access
#
# This module depends on the finder implementing the following methods:
......
...@@ -23,7 +23,7 @@ module Ci
    scope :active, -> { where(active: true) }
    scope :inactive, -> { where(active: false) }
-   scope :preloaded, -> { preload(:owner, :project) }
+   scope :preloaded, -> { preload(:owner, project: [:route]) }
    accepts_nested_attributes_for :variables, allow_destroy: true
......
...@@ -2,7 +2,7 @@
class PipelineScheduleWorker
  include ApplicationWorker
- include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
+ include CronjobQueue
  feature_category :continuous_integration
  worker_resource_boundary :cpu
...@@ -10,8 +10,10 @@ class PipelineScheduleWorker
  def perform
    Ci::PipelineSchedule.runnable_schedules.preloaded.find_in_batches do |schedules|
      schedules.each do |schedule|
+       with_context(project: schedule.project, user: schedule.owner) do
          Ci::PipelineScheduleService.new(schedule.project, schedule.owner).execute(schedule)
+       end
      end
    end
  end
end
---
title: Revert rename services template to instance migration
merge_request: 24885
author:
type: fixed
# frozen_string_literal: true

class AddEsBulkConfig < ActiveRecord::Migration[6.0]
  # Set this constant to true if this migration requires downtime.
  DOWNTIME = false

  def change
    add_column :application_settings, :elasticsearch_max_bulk_size_mb, :smallint, null: false, default: 10
    add_column :application_settings, :elasticsearch_max_bulk_concurrency, :smallint, null: false, default: 10
  end
end
# frozen_string_literal: true

class RemoveInstanceFromServices < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  def up
    return unless column_exists?(:services, :instance)

    undo_rename_column_concurrently :services, :template, :instance
  end

  def down
    # This migration should not be rolled back because it
    # removes a column that got added in migrations that
    # have been reverted in https://gitlab.com/gitlab-org/gitlab/-/merge_requests/24857
  end
end
...@@ -10,7 +10,7 @@
#
# It's strongly recommended that you check this file into your version control system.
- ActiveRecord::Schema.define(version: 2020_02_07_151640) do
+ ActiveRecord::Schema.define(version: 2020_02_11_152410) do
  # These are extensions that must be enabled in order to support this database
  enable_extension "pg_trgm"
...@@ -344,6 +344,8 @@ ActiveRecord::Schema.define(version: 2020_02_07_151640) do
    t.boolean "updating_name_disabled_for_users", default: false, null: false
    t.integer "instance_administrators_group_id"
    t.integer "elasticsearch_indexed_field_length_limit", default: 0, null: false
+   t.integer "elasticsearch_max_bulk_size_mb", limit: 2, default: 10, null: false
+   t.integer "elasticsearch_max_bulk_concurrency", limit: 2, default: 10, null: false
    t.index ["custom_project_templates_group_id"], name: "index_application_settings_on_custom_project_templates_group_id"
    t.index ["file_template_project_id"], name: "index_application_settings_on_file_template_project_id"
    t.index ["instance_administration_project_id"], name: "index_applicationsettings_on_instance_administration_project_id"
......
...@@ -188,9 +188,9 @@ code readability and test output.
### Better output in tests
When comparing expected and actual values in tests, use
- [testify/require.Equal](https://godoc.org/github.com/stretchr/testify/require#Equal),
- [testify/require.EqualError](https://godoc.org/github.com/stretchr/testify/require#EqualError),
- [testify/require.EqualValues](https://godoc.org/github.com/stretchr/testify/require#EqualValues),
+ [`testify/require.Equal`](https://godoc.org/github.com/stretchr/testify/require#Equal),
+ [`testify/require.EqualError`](https://godoc.org/github.com/stretchr/testify/require#EqualError),
+ [`testify/require.EqualValues`](https://godoc.org/github.com/stretchr/testify/require#EqualValues),
and others to improve readability when comparing structs, errors,
large portions of text, or JSON documents:
......
...@@ -536,7 +536,7 @@ reset before each example, add the `:prometheus` tag to the Rspec test.
### Matchers
Custom matchers should be created to clarify the intent and/or hide the
- complexity of RSpec expectations.They should be placed under
+ complexity of RSpec expectations. They should be placed under
`spec/support/matchers/`. Matchers can be placed in subfolder if they apply to
a certain type of specs only (e.g. features, requests etc.) but shouldn't be if
they apply to multiple type of specs.
......
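For illustration, here is a minimal sketch of such a custom matcher, built with the standard `RSpec::Matchers.define` DSL; the matcher name and the attributes it inspects are hypothetical, not taken from the GitLab test suite:

```ruby
# spec/support/matchers/be_runnable.rb
# Hypothetical example matcher: the :be_runnable name and the predicate it
# wraps are illustrative only.
RSpec::Matchers.define :be_runnable do
  match do |schedule|
    schedule.active? && schedule.next_run_at.past?
  end

  failure_message do |schedule|
    "expected schedule ##{schedule.id} to be runnable, " \
      "but it was #{schedule.active? ? 'not due yet' : 'inactive'}"
  end
end
```

A spec can then read `expect(pipeline_schedule).to be_runnable`, hiding the two-condition check behind an intention-revealing name.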
...@@ -151,6 +151,8 @@ The following Elasticsearch settings are available:
| `AWS Access Key` | The AWS access key. |
| `AWS Secret Access Key` | The AWS secret access key. |
| `Maximum field length` | See [the explanation in instance limits.](../administration/instance_limits.md#maximum-field-length). |
+ | `Maximum bulk request size (MiB)` | Repository indexing uses the Elasticsearch bulk request API. This setting determines the maximum size of an individual bulk request during these operations. |
+ | `Bulk request concurrency` | Each repository indexing operation may submit bulk requests in parallel. This increases indexing performance, but fills the Elasticsearch bulk requests queue faster. |
### Limiting namespaces and projects
......
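As a rough sketch of how the two settings added to the table above flow through the rest of this merge request, the snippet below updates them from a Rails console and reads them back through `elasticsearch_config`; the numbers are arbitrary examples, and the snippet assumes a GitLab EE instance that has run the migration introduced in this diff:

```ruby
# Rails console sketch; the chosen values are arbitrary examples.
settings = Gitlab::CurrentSettings.current_application_settings

settings.update!(
  elasticsearch_max_bulk_size_mb: 20,    # cap each bulk request at roughly 20 MiB
  elasticsearch_max_bulk_concurrency: 5  # up to 5 parallel bulk requests per indexing operation
)

# elasticsearch_config exposes the MiB value converted to bytes
# (see the ApplicationSetting changes further down in this diff).
settings.elasticsearch_config[:max_bulk_size_bytes]   # => 20971520 (20.megabytes)
settings.elasticsearch_config[:max_bulk_concurrency]  # => 5
```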
...@@ -356,6 +356,44 @@ Note the following properties:
![anomaly panel type](img/prometheus_dashboard_column_panel_type.png)
+ ##### Stacked column
+ > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/30583) in GitLab 12.8.
+ To add a stacked column panel type to a dashboard, look at the following sample dashboard file:
+ ```yaml
+ dashboard: 'Dashboard title'
+ priority: 1
+ panel_groups:
+   - group: 'Group Title'
+     priority: 5
+     panels:
+       - type: 'stacked-column'
+         title: "Stacked column"
+         y_label: "y label"
+         x_label: 'x label'
+         metrics:
+           - id: memory_1
+             query_range: 'memory_query'
+             label: "memory query 1"
+             unit: "count"
+             series_name: 'group 1'
+           - id: memory_2
+             query_range: 'memory_query_2'
+             label: "memory query 2"
+             unit: "count"
+             series_name: 'group 2'
+ ```
+ ![stacked column panel type](img/prometheus_dashboard_stacked_column_panel_type_v12_8.png)
+ | Property | Type | Required | Description |
+ | ------ | ------ | ------ | ------ |
+ | `type` | string | yes | Type of panel to be rendered. For stacked column panel types, set to `stacked-column` |
+ | `query_range` | yes | yes | For stacked column panel types, you must use a [range query](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries) |
##### Single Stat
To add a single stat panel type to a dashboard, look at the following sample dashboard file:
......
...@@ -147,6 +147,22 @@ reduce the number of approvals left for all rules that the approver belongs to.
![Approvals premium merge request widget](img/approvals_premium_mr_widget_v12_7.png)
+ ### Scoped to Protected Branch **(PREMIUM)**
+ > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/460) in [GitLab Premium](https://about.gitlab.com/pricing/) 12.8.
+ Approval rules are often only relevant to specific branches, like `master`.
+ When configuring [**Default Approval Rules**](#adding--editing-a-default-approval-rule)
+ these can be scoped to all the protected branches at once by navigating to your project's
+ **Settings**, expanding **Merge request approvals**, and selecting **Any branch** from
+ the **Target branch** dropdown.
+ Alternatively, you can select a very specific protected branch from the **Target branch** dropdown:
+ ![Scoped to Protected Branch](img/scoped_to_protected_branch_v12_8.png)
+ To enable this configuration, see [Code Owner’s approvals for protected branches](../protected_branches.md#protected-branches-approval-by-code-owners-premium).
## Adding or removing an approval
When an [eligible approver](#eligible-approvers) visits an open merge request,
......
...@@ -26,6 +26,8 @@ module EE
      :elasticsearch_aws_region,
      :elasticsearch_aws_secret_access_key,
      :elasticsearch_indexing,
+     :elasticsearch_max_bulk_concurrency,
+     :elasticsearch_max_bulk_size_mb,
      :elasticsearch_replicas,
      :elasticsearch_indexed_field_length_limit,
      :elasticsearch_search,
......
...@@ -34,14 +34,21 @@ module Elastic
    Elasticsearch::Model::Registry.add(self) if self.is_a?(Class)
    if self < ActiveRecord::Base
-     after_commit on: :create do
-       if Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
-         ElasticIndexerWorker.perform_async(:index, self.class.to_s, self.id, self.es_id)
+     after_commit :maintain_elasticsearch_create, on: :create
+     after_commit :maintain_elasticsearch_update, on: :update
+     after_commit :maintain_elasticsearch_destroy, on: :destroy
      end
    end
-     after_commit on: :update do
-       if Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
+   def maintain_elasticsearch_create
+     return unless Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
+     ElasticIndexerWorker.perform_async(:index, self.class.to_s, self.id, self.es_id)
+   end
+   def maintain_elasticsearch_update
+     return unless Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
      ElasticIndexerWorker.perform_async(
        :update,
        self.class.to_s,
...@@ -50,21 +57,14 @@ module Elastic
        changed_fields: self.previous_changes.keys
      )
    end
-   end
-     after_commit on: :destroy do
-       if Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
+   def maintain_elasticsearch_destroy
+     return unless Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
      ElasticIndexerWorker.perform_async(
-       :delete,
-       self.class.to_s,
-       self.id,
-       self.es_id,
-       es_parent: self.es_parent
+       :delete, self.class.to_s, self.id, self.es_id, es_parent: self.es_parent
      )
    end
-   end
-   end
- end
  class_methods do
    def __elasticsearch__
......
...@@ -48,6 +48,14 @@ module EE
                  presence: true,
                  numericality: { only_integer: true, greater_than: 0 }
+       validates :elasticsearch_max_bulk_size_mb,
+                 presence: true,
+                 numericality: { only_integer: true, greater_than: 0 }
+       validates :elasticsearch_max_bulk_concurrency,
+                 presence: true,
+                 numericality: { only_integer: true, greater_than: 0 }
        validates :elasticsearch_url,
                  presence: { message: "can't be blank when indexing is enabled" },
                  if: ->(setting) { setting.elasticsearch_indexing? }
...@@ -90,6 +98,8 @@ module EE
          elasticsearch_replicas: 1,
          elasticsearch_shards: 5,
          elasticsearch_indexed_field_length_limit: 0,
+         elasticsearch_max_bulk_size_bytes: 10.megabytes,
+         elasticsearch_max_bulk_concurrency: 10,
          elasticsearch_url: ENV['ELASTIC_URL'] || 'http://localhost:9200',
          email_additional_text: nil,
          lock_memberships_to_ldap: false,
...@@ -209,7 +219,9 @@ module EE
          aws: elasticsearch_aws,
          aws_access_key: elasticsearch_aws_access_key,
          aws_secret_access_key: elasticsearch_aws_secret_access_key,
-         aws_region: elasticsearch_aws_region
+         aws_region: elasticsearch_aws_region,
+         max_bulk_size_bytes: elasticsearch_max_bulk_size_mb.megabytes,
+         max_bulk_concurrency: elasticsearch_max_bulk_concurrency
        }
      end
......
...@@ -59,7 +59,7 @@ module EE
      validate :custom_project_templates_group_allowed, if: :custom_project_templates_group_id_changed?
      scope :aimed_for_deletion, -> (date) { joins(:deletion_schedule).where('group_deletion_schedules.marked_for_deletion_on <= ?', date) }
-     scope :with_deletion_schedule, -> { preload(:deletion_schedule) }
+     scope :with_deletion_schedule, -> { preload(deletion_schedule: :deleting_user) }
      scope :where_group_links_with_provider, ->(provider) do
        joins(:ldap_group_links).where(ldap_group_links: { provider: provider })
......
...@@ -143,6 +143,7 @@ module EE
      scope :with_repos_templates, -> { where(namespace_id: ::Gitlab::CurrentSettings.current_application_settings.custom_project_templates_group_id) }
      scope :with_groups_level_repos_templates, -> { joins("INNER JOIN namespaces ON projects.namespace_id = namespaces.custom_project_templates_group_id") }
      scope :with_designs, -> { where(id: DesignManagement::Design.select(:project_id)) }
+     scope :with_deleting_user, -> { includes(:deleting_user) }
      delegate :shared_runners_minutes, :shared_runners_seconds, :shared_runners_seconds_last_reset,
        to: :statistics, allow_nil: true
......
...@@ -63,6 +63,20 @@
        .form-text.text-muted
          = _('If any indexed field exceeds this limit it will be truncated to this number of characters and the rest will not be indexed or searchable. This does not apply to repository and wiki indexing. Setting this to 0 means it is unlimited.')
+     .form-group
+       = f.label :elasticsearch_max_bulk_size_mb, _('Maximum bulk request size (MiB)'), class: 'label-bold'
+       = f.number_field :elasticsearch_max_bulk_size_mb, value: @application_setting.elasticsearch_max_bulk_size_mb, class: 'form-control'
+       .form-text.text-muted
+         = _('Maximum size of Elasticsearch bulk indexing requests.')
+         = _('This only applies to repository indexing operations.')
+     .form-group
+       = f.label :elasticsearch_max_bulk_concurrency, _('Bulk request concurrency'), class: 'label-bold'
+       = f.number_field :elasticsearch_max_bulk_concurrency, value: @application_setting.elasticsearch_max_bulk_concurrency, class: 'form-control'
+       .form-text.text-muted
+         = _('Maximum concurrency of Elasticsearch bulk requests per indexing operation.')
+         = _('This only applies to repository indexing operations.')
    .sub-section
      %h4= _('Elasticsearch indexing restrictions')
      .form-group
......
...@@ -2,7 +2,7 @@
class AdjournedGroupDeletionWorker
  include ApplicationWorker
- include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
+ include CronjobQueue
  INTERVAL = 5.minutes.to_i
...@@ -11,14 +11,16 @@ class AdjournedGroupDeletionWorker
  def perform
    deletion_cutoff = Gitlab::CurrentSettings.deletion_adjourned_period.days.ago.to_date
-   Group.aimed_for_deletion(deletion_cutoff)
+   Group.with_route.aimed_for_deletion(deletion_cutoff)
      .with_deletion_schedule
      .find_each(batch_size: 100) # rubocop: disable CodeReuse/ActiveRecord
      .with_index do |group, index|
        deletion_schedule = group.deletion_schedule
        delay = index * INTERVAL
+       with_context(namespace: group, user: deletion_schedule.deleting_user) do
          GroupDestroyWorker.perform_in(delay, group.id, deletion_schedule.user_id)
+       end
      end
  end
end
...@@ -2,7 +2,7 @@
class AdjournedProjectsDeletionCronWorker
  include ApplicationWorker
- include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
+ include CronjobQueue
  INTERVAL = 5.minutes.to_i
...@@ -11,10 +11,12 @@ class AdjournedProjectsDeletionCronWorker
  def perform
    deletion_cutoff = Gitlab::CurrentSettings.deletion_adjourned_period.days.ago.to_date
-   Project.aimed_for_deletion(deletion_cutoff).find_each(batch_size: 100).with_index do |project, index| # rubocop: disable CodeReuse/ActiveRecord
+   Project.with_route.with_deleting_user.aimed_for_deletion(deletion_cutoff).find_each(batch_size: 100).with_index do |project, index| # rubocop: disable CodeReuse/ActiveRecord
      delay = index * INTERVAL
+     with_context(project: project, user: project.deleting_user) do
        AdjournedProjectDeletionWorker.perform_in(delay, project.id)
+     end
    end
  end
end
---
title: Make elasticsearch bulk parameters configurable
merge_request: 24688
author:
type: added
---
title: Add trial field to namespaces API
merge_request: 24666
author:
type: added
...@@ -170,6 +170,9 @@ module EE
        expose :trial_ends_on, if: can_admin_namespace do |namespace, _|
          namespace.trial_ends_on
        end
+       expose :trial, if: can_admin_namespace do |namespace, _|
+         namespace.trial?
+       end
      end
    end
......
...@@ -40,7 +40,7 @@ module EE
        optional :max_seats_used, type: Integer, default: 0, desc: 'The max number of active users detected in the last month'
        optional :plan_code, type: String, desc: 'The code of the purchased plan'
        optional :end_date, type: Date, desc: 'The date when subscription expires'
-       optional :trial, type: Grape::API::Boolean, desc: 'Wether the subscription is trial'
+       optional :trial, type: Grape::API::Boolean, desc: 'Whether the subscription is trial'
        optional :trial_ends_on, type: Date, desc: 'The date when the trial expires'
        optional :trial_starts_on, type: Date, desc: 'The date when the trial starts'
      end
......
...@@ -69,6 +69,8 @@ describe 'Admin updates EE-only settings' do
      fill_in 'Number of Elasticsearch shards', with: '120'
      fill_in 'Number of Elasticsearch replicas', with: '2'
      fill_in 'Maximum field length', with: '100000'
+     fill_in 'Maximum bulk request size (MiB)', with: '17'
+     fill_in 'Bulk request concurrency', with: '23'
      click_button 'Save changes'
    end
...@@ -79,6 +81,8 @@ describe 'Admin updates EE-only settings' do
    expect(current_settings.elasticsearch_shards).to eq(120)
    expect(current_settings.elasticsearch_replicas).to eq(2)
    expect(current_settings.elasticsearch_indexed_field_length_limit).to eq(100000)
+   expect(current_settings.elasticsearch_max_bulk_size_mb).to eq(17)
+   expect(current_settings.elasticsearch_max_bulk_concurrency).to eq(23)
    expect(page).to have_content 'Application settings saved successfully'
  end
end
......
...@@ -47,6 +47,18 @@ describe ApplicationSetting do
    it { is_expected.not_to allow_value(1.1).for(:elasticsearch_indexed_field_length_limit) }
    it { is_expected.not_to allow_value(-1).for(:elasticsearch_indexed_field_length_limit) }
+   it { is_expected.to allow_value(25).for(:elasticsearch_max_bulk_size_mb) }
+   it { is_expected.not_to allow_value(nil).for(:elasticsearch_max_bulk_size_mb) }
+   it { is_expected.not_to allow_value(0).for(:elasticsearch_max_bulk_size_mb) }
+   it { is_expected.not_to allow_value(1.1).for(:elasticsearch_max_bulk_size_mb) }
+   it { is_expected.not_to allow_value(-1).for(:elasticsearch_max_bulk_size_mb) }
+   it { is_expected.to allow_value(2).for(:elasticsearch_max_bulk_concurrency) }
+   it { is_expected.not_to allow_value(nil).for(:elasticsearch_max_bulk_concurrency) }
+   it { is_expected.not_to allow_value(0).for(:elasticsearch_max_bulk_concurrency) }
+   it { is_expected.not_to allow_value(1.1).for(:elasticsearch_max_bulk_concurrency) }
+   it { is_expected.not_to allow_value(-1).for(:elasticsearch_max_bulk_concurrency) }
    it { is_expected.to allow_value(nil).for(:required_instance_ci_template) }
    it { is_expected.not_to allow_value("").for(:required_instance_ci_template) }
    it { is_expected.not_to allow_value(" ").for(:required_instance_ci_template) }
...@@ -208,7 +220,9 @@ describe ApplicationSetting do
        elasticsearch_aws: false,
        elasticsearch_aws_region: 'test-region',
        elasticsearch_aws_access_key: 'test-access-key',
-       elasticsearch_aws_secret_access_key: 'test-secret-access-key'
+       elasticsearch_aws_secret_access_key: 'test-secret-access-key',
+       elasticsearch_max_bulk_size_mb: 67,
+       elasticsearch_max_bulk_concurrency: 8
      )
      expect(setting.elasticsearch_config).to eq(
...@@ -216,7 +230,9 @@ describe ApplicationSetting do
        aws: false,
        aws_region: 'test-region',
        aws_access_key: 'test-access-key',
-       aws_secret_access_key: 'test-secret-access-key'
+       aws_secret_access_key: 'test-secret-access-key',
+       max_bulk_size_bytes: 67.megabytes,
+       max_bulk_concurrency: 8
      )
    end
......
...@@ -22,12 +22,12 @@ describe API::Namespaces do
      expect(group_kind_json_response.keys).to contain_exactly('id', 'kind', 'name', 'path', 'full_path',
                                                               'parent_id', 'members_count_with_descendants',
                                                               'plan', 'shared_runners_minutes_limit',
-                                                              'avatar_url', 'web_url', 'trial_ends_on',
+                                                              'avatar_url', 'web_url', 'trial_ends_on', 'trial',
                                                               'extra_shared_runners_minutes_limit', 'billable_members_count')
      expect(user_kind_json_response.keys).to contain_exactly('id', 'kind', 'name', 'path', 'full_path',
                                                              'parent_id', 'plan', 'shared_runners_minutes_limit',
-                                                             'avatar_url', 'web_url', 'trial_ends_on',
+                                                             'avatar_url', 'web_url', 'trial_ends_on', 'trial',
                                                              'extra_shared_runners_minutes_limit', 'billable_members_count')
    end
  end
...@@ -41,7 +41,7 @@ describe API::Namespaces do
      owned_group_response = json_response.find { |resource| resource['id'] == group1.id }
      expect(owned_group_response.keys).to contain_exactly('id', 'kind', 'name', 'path', 'full_path', 'trial_ends_on',
-                                                          'plan', 'parent_id', 'members_count_with_descendants',
+                                                          'plan', 'parent_id', 'members_count_with_descendants', 'trial',
                                                           'avatar_url', 'web_url', 'billable_members_count')
    end
......
...@@ -128,7 +128,7 @@ module Gitlab
      def load_all_data!(repository)
        return if @data == '' # don't mess with submodule blobs
-       # Even if we return early, recalculate wether this blob is binary in
+       # Even if we return early, recalculate whether this blob is binary in
        # case a blob was initialized as text but the full data isn't
        @binary = nil
......
...@@ -340,7 +340,7 @@ start_gitlab() {
  # Wait for the pids to be planted
  wait_for_pids
- # Finally check the status to tell wether or not GitLab is running
+ # Finally check the status to tell whether or not GitLab is running
  print_status
}
......
...@@ -2999,6 +2999,9 @@ msgstr ""
msgid "Built-in"
msgstr ""
+ msgid "Bulk request concurrency"
+ msgstr ""
msgid "Burndown chart"
msgstr ""
...@@ -11728,9 +11731,15 @@ msgstr ""
msgid "Maximum attachment size (MB)"
msgstr ""
+ msgid "Maximum bulk request size (MiB)"
+ msgstr ""
msgid "Maximum capacity"
msgstr ""
+ msgid "Maximum concurrency of Elasticsearch bulk requests per indexing operation."
+ msgstr ""
msgid "Maximum delay (Minutes)"
msgstr ""
...@@ -11773,6 +11782,9 @@ msgstr ""
msgid "Maximum size limit for each repository."
msgstr ""
+ msgid "Maximum size of Elasticsearch bulk indexing requests."
+ msgstr ""
msgid "Maximum size of individual attachments in comments."
msgstr ""
...@@ -19578,6 +19590,9 @@ msgstr ""
msgid "This namespace has already been taken! Please choose another one."
msgstr ""
+ msgid "This only applies to repository indexing operations."
+ msgstr ""
msgid "This option is only available on GitLab.com"
msgstr ""
......
...@@ -194,7 +194,7 @@ describe ProjectsHelper do
      expect(helper.project_list_cache_key(project).last).to start_with('v')
    end
-   it 'includes wether or not the user can read cross project' do
+   it 'includes whether or not the user can read cross project' do
      expect(helper.project_list_cache_key(project)).to include('cross-project:true')
    end
......
...@@ -80,9 +80,9 @@ describe Ci::PipelineSchedule do
    it 'preloads the associations' do
      subject
-     query = ActiveRecord::QueryRecorder.new { subject.each(&:project) }
-     expect(query.count).to eq(2)
+     query = ActiveRecord::QueryRecorder.new { subject.map(&:project).each(&:route) }
+     expect(query.count).to eq(3)
    end
  end
......