Commit 9b3edb04 authored by Michelle Gill

Merge branch 'master' into patch-63

parents a1916cc9 0953cc16
......@@ -59,7 +59,7 @@ export default {
},
requestRefreshPipelineGraph() {
// When an action is clicked
// (wether in the dropdown or in the main nodes, we refresh the big graph)
// (whether in the dropdown or in the main nodes, we refresh the big graph)
this.mediator
.refreshPipeline()
.catch(() => flash(__('An error occurred while making the request.')));
......
# frozen_string_literal: true
# Module to prepend into finders to specify wether or not the finder requires
# Module to prepend into finders to specify whether or not the finder requires
# cross project access
#
# This module depends on the finder implementing the following methods:
......
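For context, a finder typically opts in along these lines (a hedged sketch; the finder class and the guard condition are illustrative):
```ruby
# Illustrative only: prepend the module, then declare when cross-project
# access is required. The lambda is evaluated per finder instance.
class TodosFinder
  prepend FinderWithCrossProjectAccess

  requires_cross_project_access unless: -> { params[:project_id].present? }

  def execute
    # regular finder logic; the prepended module checks access around it
  end
end
```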
......@@ -23,7 +23,7 @@ module Ci
scope :active, -> { where(active: true) }
scope :inactive, -> { where(active: false) }
scope :preloaded, -> { preload(:owner, :project) }
scope :preloaded, -> { preload(:owner, project: [:route]) }
accepts_nested_attributes_for :variables, allow_destroy: true
......
......@@ -2,7 +2,7 @@
class PipelineScheduleWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
include CronjobQueue
feature_category :continuous_integration
worker_resource_boundary :cpu
......@@ -10,7 +10,9 @@ class PipelineScheduleWorker
def perform
Ci::PipelineSchedule.runnable_schedules.preloaded.find_in_batches do |schedules|
schedules.each do |schedule|
Ci::PipelineScheduleService.new(schedule.project, schedule.owner).execute(schedule)
with_context(project: schedule.project, user: schedule.owner) do
Ci::PipelineScheduleService.new(schedule.project, schedule.owner).execute(schedule)
end
end
end
end
......
---
title: Revert rename services template to instance migration
merge_request: 24885
author:
type: fixed
# frozen_string_literal: true
class AddEsBulkConfig < ActiveRecord::Migration[6.0]
# Set this constant to true if this migration requires downtime.
DOWNTIME = false
def change
add_column :application_settings, :elasticsearch_max_bulk_size_mb, :smallint, null: false, default: 10
add_column :application_settings, :elasticsearch_max_bulk_concurrency, :smallint, null: false, default: 10
end
end
# frozen_string_literal: true
class RemoveInstanceFromServices < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
def up
return unless column_exists?(:services, :instance)
undo_rename_column_concurrently :services, :template, :instance
end
def down
# This migration should not be rolled back because it
# removes a column that got added in migrations that
# have been reverted in https://gitlab.com/gitlab-org/gitlab/-/merge_requests/24857
end
end
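For context, this cleanup pairs with the reverted rename; a hedged sketch of what the original migration presumably looked like (the class name is illustrative):
```ruby
# Illustrative sketch: rename_column_concurrently adds the new column and
# installs triggers that keep both columns in sync, without downtime.
class RenameServicesTemplateToInstance < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  disable_ddl_transaction!

  def up
    rename_column_concurrently :services, :template, :instance
  end

  def down
    undo_rename_column_concurrently :services, :template, :instance
  end
end
```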
......@@ -10,7 +10,7 @@
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 2020_02_07_151640) do
ActiveRecord::Schema.define(version: 2020_02_11_152410) do
# These are extensions that must be enabled in order to support this database
enable_extension "pg_trgm"
......@@ -344,6 +344,8 @@ ActiveRecord::Schema.define(version: 2020_02_07_151640) do
t.boolean "updating_name_disabled_for_users", default: false, null: false
t.integer "instance_administrators_group_id"
t.integer "elasticsearch_indexed_field_length_limit", default: 0, null: false
t.integer "elasticsearch_max_bulk_size_mb", limit: 2, default: 10, null: false
t.integer "elasticsearch_max_bulk_concurrency", limit: 2, default: 10, null: false
t.index ["custom_project_templates_group_id"], name: "index_application_settings_on_custom_project_templates_group_id"
t.index ["file_template_project_id"], name: "index_application_settings_on_file_template_project_id"
t.index ["instance_administration_project_id"], name: "index_applicationsettings_on_instance_administration_project_id"
......
......@@ -171,7 +171,7 @@ keys must be manually replicated to the **secondary** node.
sudo -i
```
1. Edit `/etc/gitlab/gitlab.rb` and add a **unique** name for your node. You will need this in the next steps:
1. Edit `/etc/gitlab/gitlab.rb` and add a **unique** name for your node. You will need this in the next steps:
```ruby
# The unique identifier for the Geo node.
......
......@@ -45,7 +45,7 @@ query.
## Can I `git push` to a **secondary** node?
Yes! Pushing directly to a **secondary** node (for both HTTP and SSH, including Git LFS) was [introduced](https://about.gitlab.com/releases/2018/09/22/gitlab-11-3-released/) in [GitLab Premium](https://about.gitlab.com/pricing/#self-managed) 11.3.
Yes! Pushing directly to a **secondary** node (for both HTTP and SSH, including Git LFS) was [introduced](https://about.gitlab.com/releases/2018/09/22/gitlab-11-3-released/) in [GitLab Premium](https://about.gitlab.com/pricing/#self-managed) 11.3.
## How long does it take to have a commit replicated to a **secondary** node?
......
......@@ -227,7 +227,7 @@ remember to run `sudo gitlab-ctl reconfigure` again before trying the
#### Gitaly
Next we will configure each Gitaly server assigned to Praefect. Configuration for these
Next we will configure each Gitaly server assigned to Praefect. Configuration for these
is the same as a normal standalone Gitaly server, except that we use storage names and
auth tokens from Praefect instead of GitLab.
......
......@@ -16,9 +16,9 @@ source and the target.**
## Target directory is empty: use a tar pipe
If the target directory `/mnt/gitlab/repositories` is empty the
simplest thing to do is to use a tar pipe. This method has low
simplest thing to do is to use a tar pipe. This method has low
overhead and tar is almost always already installed on your system.
However, it is not possible to resume an interrupted tar pipe: if
However, it is not possible to resume an interrupted tar pipe: if
that happens then all data must be copied again.
```shell
......@@ -82,7 +82,7 @@ repository at a time.
In addition to rsync we will use [GNU
Parallel](http://www.gnu.org/software/parallel/). This utility is
not included in GitLab so you need to install it yourself with apt
or yum. Also note that the GitLab scripts we used below were added
or yum. Also note that the GitLab scripts we used below were added
in GitLab 8.1.
** This process does not clean up repositories at the target location that no
......
......@@ -75,7 +75,7 @@ Otherwise, you can set the `GITLAB_UNICORN_MEMORY_MIN` and `GITLAB_UNICORN_MEMOR
This is what a Unicorn worker memory restart looks like in unicorn_stderr.log.
You see that worker 4 (PID 125918) is inspecting itself and decides to exit.
The threshold memory value was 254802235 bytes, about 250MB. With GitLab this
threshold is a random value between 200 and 250 MB. The master process (PID
threshold is a random value between 200 and 250 MB. The master process (PID
117565) then reaps the worker process and spawns a new 'worker 4' with PID
127549.
......
......@@ -166,7 +166,7 @@ GIT_CURL_VERBOSE=1 GIT_TRACE=1 git clone <repository>
# A single project
project = Project.find_by_full_path('PROJECT_PATH')
# All projects in a particular namespace. Can be a username, a group
# All projects in a particular namespace. Can be a username, a group
# ('gitlab-org'), or even include subgroups ('gitlab-org/distribution')
namespace = Namespace.find_by_full_path('NAMESPACE_PATH')
projects = namespace.all_projects
......@@ -997,7 +997,7 @@ gitlab_rails['env'] = {
}
```
Then `gitlab-ctl reconfigure; gitlab-ctl restart sidekiq`. The Sidekiq logs will now include additional data for troubleshooting.
Then `gitlab-ctl reconfigure; gitlab-ctl restart sidekiq`. The Sidekiq logs will now include additional data for troubleshooting.
### Sidekiq kill signals
......
......@@ -133,7 +133,7 @@ sudo !!
### Memory, Disk, & CPU usage
```shell
# disk space info. The '-h' gives the data in human-readable values
# disk space info. The '-h' gives the data in human-readable values
df -h
# size of each file/dir and its contents in the current dir
......@@ -186,7 +186,7 @@ Be aware that strace can have major impacts to system performance when it is run
### The Strace Parser tool
Our [strace-parser tool](https://gitlab.com/wchandler/strace-parser) can be used to
provide a high level summary of the `strace` output. It is similar to `strace -C`,
provide a high level summary of the `strace` output. It is similar to `strace -C`,
but provides much more detailed statistics.
MacOS and Linux binaries [are available](https://gitlab.com/gitlab-com/support/toolbox/strace-parser/-/tags),
......@@ -198,7 +198,7 @@ First run the tool with no arguments other than the strace output file name to g
a summary of the top processes sorted by time spent actively performing tasks. You
can also sort based on total time, # of syscalls made, PID #, and # of child processes
using the `-S` or `--sort` flag. The number of results defaults to 25 processes, but
can be changed using the `-c`/`--count` option. See `--help` for full details.
can be changed using the `-c`/`--count` option. See `--help` for full details.
```shell
$ ./strace-parser strace.txt
......
......@@ -105,7 +105,7 @@ ORDER BY pg_relation_size(indexrelname::regclass) desc;
```
This query outputs a list containing all indexes that are never used and sorts
them by index sizes in descending order. This query can be useful to
them by index sizes in descending order. This query can be useful to
determine if any previously added indexes are useful after all. More information on
the meaning of the various columns can be found at
<https://www.postgresql.org/docs/current/monitoring-stats.html>.
......
......@@ -107,7 +107,7 @@ confidence in their solution will not have been reached.
Before the review, the author is requested to submit comments on the merge
request diff alerting the reviewer to anything important as well as anything
that demands further explanation or attention. Examples of content that may
that demands further explanation or attention. Examples of content that may
warrant a comment could be:
- The addition of a linting rule (Rubocop, JS, etc.)
......@@ -181,8 +181,8 @@ vulnerabilities must be either empty or containing:
Maintainers should **never** dismiss vulnerabilities to "empty" the list,
without duly verifying them.
Note that certain Merge Requests may target a stable branch. These are rare
events. These types of Merge Requests cannot be merged by the Maintainer.
Note that certain Merge Requests may target a stable branch. These are rare
events. These types of Merge Requests cannot be merged by the Maintainer.
Instead these should be sent to the [Release Manager](https://about.gitlab.com/community/release-managers/).
## Best practices
......
......@@ -82,9 +82,9 @@ When you submit code to GitLab, we really want it to get merged, but there will
When maintainers are reading through a merge request they may request guidance from other maintainers. If merge request maintainers conclude that the code should not be merged, our reasons will be fully disclosed. If it has been decided that the code quality is not up to GitLab’s standards, the merge request maintainer will refer the author to our docs and code style guides, and provide some guidance.
Sometimes style guides will be followed but the code will lack structural integrity, or the maintainer will have reservations about the code’s overall quality. When there is a reservation the maintainer will inform the author and provide some guidance. The author may then choose to update the merge request. Once the merge request has been updated and reassigned to the maintainer, they will review the code again. Once the code has been resubmitted any number of times, the maintainer may choose to close the merge request with a summary of why it will not be merged, as well as some guidance. If the merge request is closed the maintainer will be open to discussion as to how to improve the code so it can be approved in the future.
Sometimes style guides will be followed but the code will lack structural integrity, or the maintainer will have reservations about the code’s overall quality. When there is a reservation the maintainer will inform the author and provide some guidance. The author may then choose to update the merge request. Once the merge request has been updated and reassigned to the maintainer, they will review the code again. Once the code has been resubmitted any number of times, the maintainer may choose to close the merge request with a summary of why it will not be merged, as well as some guidance. If the merge request is closed the maintainer will be open to discussion as to how to improve the code so it can be approved in the future.
GitLab will do its best to review community contributions as quickly as possible. Specially appointed developers review community contributions daily. You may take a look at the [team page](https://about.gitlab.com/company/team/) for the merge request coach who specializes in the type of code you have written and mention them in the merge request. For example, if you have written some JavaScript in your code then you should mention the frontend merge request coach. If your code has multiple disciplines you may mention multiple merge request coaches.
GitLab will do its best to review community contributions as quickly as possible. Specially appointed developers review community contributions daily. You may take a look at the [team page](https://about.gitlab.com/company/team/) for the merge request coach who specializes in the type of code you have written and mention them in the merge request. For example, if you have written some JavaScript in your code then you should mention the frontend merge request coach. If your code has multiple disciplines you may mention multiple merge request coaches.
GitLab receives a lot of community contributions, so if your code has not been reviewed within two days (excluding weekend and public holidays) of its initial submission feel free to re-mention the appropriate merge request coach.
......
......@@ -193,7 +193,7 @@ You might get an error such as
This is because you've exceeded the disk space threshold - it thinks you don't have enough disk space left, based on the default 95% threshold.
In addition, the `read_only_allow_delete` setting will be set to `true`. It will block indexing, `forcemerge`, etc.
In addition, the `read_only_allow_delete` setting will be set to `true`. It will block indexing, `forcemerge`, etc.
```
curl "http://localhost:9200/gitlab-development/_settings?pretty"
......
......@@ -85,15 +85,15 @@ browser's developer console while on any page within GitLab.
#### Important Considerations
- **Keep Entry Points Lite:**
Page-specific JavaScript entry points should be as lite as possible. These
Page-specific JavaScript entry points should be as lite as possible. These
files are exempt from unit tests, and should be used primarily for
instantiation and dependency injection of classes and methods that live in
modules outside of the entry point script. Just import, read the DOM,
modules outside of the entry point script. Just import, read the DOM,
instantiate, and nothing else.
- **Entry Points May Be Asynchronous:**
_DO NOT ASSUME_ that the DOM has been fully loaded and available when an
entry point script is run. If you require that some code be run after the
entry point script is run. If you require that some code be run after the
DOM has loaded, you should attach an event handler to the `DOMContentLoaded`
event with:
......@@ -113,7 +113,7 @@ browser's developer console while on any page within GitLab.
with a relative path (e.g. `import initMyWidget from './my_widget';`).
- If a class or module is _used by multiple routes_, place it within a
shared directory at the closest common parent directory for the entry
points that import it. For example, if `my_widget.js` is imported within
points that import it. For example, if `my_widget.js` is imported within
both `pages/widget/show/index.js` and `pages/widget/run/index.js`, then
place the module at `pages/widget/shared/my_widget.js` and import it with
a relative path if possible (e.g. `../shared/my_widget`).
......@@ -122,7 +122,7 @@ browser's developer console while on any page within GitLab.
For GitLab Enterprise Edition, page-specific entry points will override their
Community Edition counterparts with the same name, so if
`ee/app/assets/javascripts/pages/foo/bar/index.js` exists, it will take
precedence over `app/assets/javascripts/pages/foo/bar/index.js`. If you want
precedence over `app/assets/javascripts/pages/foo/bar/index.js`. If you want
to minimize duplicate code, you can import one entry point from the other.
This is not done automatically to allow for flexibility in overriding
functionality.
......@@ -131,7 +131,7 @@ browser's developer console while on any page within GitLab.
For any code that does not need to be run immediately upon page load (e.g.
modals, dropdowns, and other behaviors that can be lazy-loaded), you can split
your module into asynchronous chunks with dynamic import statements. These
your module into asynchronous chunks with dynamic import statements. These
imports return a Promise which will be resolved once the script has loaded:
```javascript
......
......@@ -140,7 +140,7 @@ long we're still performing work.
GitHub has a rate limit of 5 000 API calls per hour. The number of requests
necessary to import a project is largely dominated by the number of unique users
involved in a project (e.g. issue authors). Other data such as issue pages
and comments typically only requires a few dozen requests to import. This is
and comments typically only requires a few dozen requests to import. This is
because we need the Email address of users in order to map them to GitLab users.
We handle this by doing the following:
......
......@@ -188,9 +188,9 @@ code readability and test output.
### Better output in tests
When comparing expected and actual values in tests, use
[testify/require.Equal](https://godoc.org/github.com/stretchr/testify/require#Equal),
[testify/require.EqualError](https://godoc.org/github.com/stretchr/testify/require#EqualError),
[testify/require.EqualValues](https://godoc.org/github.com/stretchr/testify/require#EqualValues),
[`testify/require.Equal`](https://godoc.org/github.com/stretchr/testify/require#Equal),
[`testify/require.EqualError`](https://godoc.org/github.com/stretchr/testify/require#EqualError),
[`testify/require.EqualValues`](https://godoc.org/github.com/stretchr/testify/require#EqualValues),
and others to improve readability when comparing structs, errors,
large portions of text, or JSON documents:
......
......@@ -129,7 +129,7 @@ importer progresses. Here's what to do:
## Multi-destination Logging
GitLab is transitioning from unstructured/plaintext logs to structured/JSON logs. During this transition period some logs will be recorded in multiple formats through multi-destination logging.
GitLab is transitioning from unstructured/plaintext logs to structured/JSON logs. During this transition period some logs will be recorded in multiple formats through multi-destination logging.
### How to use multi-destination logging
......
......@@ -267,7 +267,7 @@ end
Here the call to `disable_statement_timeout` will use the connection local to
the `with_multiple_threads` block, instead of re-using the global connection
pool. This ensures each thread has its own connection object, and won't time
pool. This ensures each thread has its own connection object, and won't time
out when trying to obtain one.
**NOTE:** PostgreSQL has a maximum number of connections that it allows. This
......
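The helper described above can be sketched roughly as follows (a simplified, hypothetical version, not the actual implementation):
```ruby
# Each thread checks out its own connection, so session-level changes such
# as disabling statement_timeout stay local to that thread's connection.
def with_multiple_threads(count)
  threads = Array.new(count) do
    Thread.new do
      ActiveRecord::Base.connection_pool.with_connection do
        yield
      end
    end
  end
  threads.each(&:join)
end
```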
......@@ -26,7 +26,7 @@ by [`Namespaces#with_statistics`](https://gitlab.com/gitlab-org/gitlab/blob/4ab5
Additionally, the pattern that is currently used to update the project statistics
(the callback) doesn't scale adequately. It is currently one of the largest
[database query transactions on production](https://gitlab.com/gitlab-org/gitlab-foss/issues/62488)
that takes the most time overall. We can't add one more query to it as
that takes the most time overall. We can't add one more query to it as
it will increase the transaction's length.
Because of all of the above, we can't apply the same pattern to store
......
......@@ -103,7 +103,7 @@ You also can move around in the callstack with these commands:
## Short commands
When you use `binding.pry` instead of `byebug`, the short commands
like `s`, `n`, `f`, and `c` do not work. To reinstall them, add this
like `s`, `n`, `f`, and `c` do not work. To reinstall them, add this
to `~/.pryrc`:
```ruby
......
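The elided snippet presumably matches the standard aliases from the pry-byebug README:
```ruby
# ~/.pryrc: restore byebug-style stepping shortcuts inside pry sessions.
if defined?(PryByebug)
  Pry.commands.alias_command 'c', 'continue'
  Pry.commands.alias_command 's', 'step'
  Pry.commands.alias_command 'n', 'next'
  Pry.commands.alias_command 'f', 'finish'
end
```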
......@@ -3,7 +3,7 @@
> This doc refers to <https://gitlab.com/gitlab-org/gitlab/blob/master/app/models/concerns/reactive_caching.rb>.
The `ReactiveCaching` concern is used for fetching some data in the background and storing it
in the Rails cache, keeping it up-to-date for as long as it is being requested. If the
in the Rails cache, keeping it up-to-date for as long as it is being requested. If the
data hasn't been requested for `reactive_cache_lifetime`, it will stop being refreshed,
and then be removed.
......
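For orientation, a hedged sketch of the concern's usage contract (`calculate_reactive_cache` and `with_reactive_cache` are the concern's hooks; the class and data here are invented):
```ruby
class Environment < ApplicationRecord
  include ReactiveCaching

  # Stop refreshing once nobody has requested the data for this long.
  self.reactive_cache_lifetime = 10.minutes

  # Runs in a background worker; the return value lands in the Rails cache.
  def calculate_reactive_cache
    { pods: fetch_pods_from_cluster } # illustrative expensive call
  end

  def pods
    # Returns cached data, or nil while the first calculation is pending.
    with_reactive_cache { |data| data[:pods] }
  end
end
```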
......@@ -21,4 +21,4 @@ The more of the following that are true, the more likely you should choose the f
## Consider a façade-first approach
The façade approach is not necessarily a final step. It can (and possibly *should*) be treated as the first step, where later iterations will accomplish the complete rename.
The façade approach is not necessarily a final step. It can (and possibly *should*) be treated as the first step, where later iterations will accomplish the complete rename.
......@@ -165,7 +165,7 @@ and secondaries are set up a bit differently:
For replicas, colocating is advantageous because it reduces network hops
and hence latency. However, for the primary, colocating is
disadvantageous because PgBouncer would become a single point of failure
and cause errors. When a failover occurs, one of two things could
and cause errors. When a failover occurs, one of two things could
happen:
- The primary disappears from the network.
......@@ -212,7 +212,7 @@ Redis process.
#### High availability/Risks
Single-core: Like PgBouncer, a single Redis process can only use one
core. It does not support multi-threading.
core. It does not support multi-threading.
Dumb secondaries: Redis secondaries (aka slaves) don't actually
handle any load. Unlike PostgreSQL secondaries, they don't even serve
......
......@@ -126,7 +126,7 @@ Note that unlike `Gitlab::Popen.popen`, `IO.popen` does not capture standard err
## Avoid user input at the start of path strings
Various methods for opening and reading files in Ruby can be used to read the
standard output of a process instead of a file. The following two commands do
standard output of a process instead of a file. The following two commands do
roughly the same:
```ruby
......@@ -138,7 +138,7 @@ The key is to open a 'file' whose name starts with a `|`.
Affected methods include Kernel#open, File::read, File::open, IO::open and IO::read.
You can protect against this behavior of 'open' and 'read' by ensuring that an
attacker cannot control the start of the filename string you are opening. For
attacker cannot control the start of the filename string you are opening. For
instance, the following is sufficient to protect against accidentally starting
a shell command with `|`:
......
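The elided example is presumably of this shape (a hedged sketch; `params[:filename]` stands in for attacker-influenced input, and this guards only against the leading pipe, not path traversal):
```ruby
filename = params[:filename]
# Anchoring the string means the argument can never begin with '|',
# so 'open'/'read' cannot be tricked into spawning a subprocess.
File.read("/srv/data/#{filename}")
```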
......@@ -536,7 +536,7 @@ reset before each example, add the `:prometheus` tag to the Rspec test.
### Matchers
Custom matchers should be created to clarify the intent and/or hide the
complexity of RSpec expectations.They should be placed under
complexity of RSpec expectations. They should be placed under
`spec/support/matchers/`. Matchers can be placed in a subfolder if they apply to
a certain type of specs only (e.g. features, requests, etc.) but shouldn't be if
they apply to multiple types of specs.
......
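For example, a small matcher under `spec/support/matchers/` might look like this (a hedged sketch; the matcher name and tolerance are illustrative):
```ruby
# spec/support/matchers/be_like_time.rb
RSpec::Matchers.define :be_like_time do |expected|
  match do |actual|
    # Expectation failures inside `match` are swallowed and count as a miss.
    expect(actual).to be_within(1.second).of(expected)
  end

  description do
    "equal #{expected} to within one second"
  end
end
```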
......@@ -27,7 +27,7 @@ Runtime::Browser.visit(:gitlab, Some::Page)
### Clicks
When we perform a click within our tests, we expect something to occur. That something could be a component to now
When we perform a click within our tests, we expect something to occur. That something could be a component to now
appear on the webpage, or the test to navigate away from the page entirely.
Dynamic element validation is instituted when using
......@@ -57,7 +57,7 @@ Simply put, a required element is a visible HTML element that appears on a UI co
#### Application
Requiring elements is very easy. By adding `required: true` as a parameter to an `element`, you've now made it
Requiring elements is very easy. By adding `required: true` as a parameter to an `element`, you've now made it
a requirement that the element appear on the page upon navigation.
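A minimal sketch (the page object and view path are illustrative):
```ruby
module QA
  module Page
    module Main
      class Login < Page::Base
        view 'app/views/devise/sessions/new.html.haml' do
          # Navigation is only considered complete once this element renders.
          element :login_page, required: true
        end
      end
    end
  end
end
```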
## Examples
......
......@@ -152,7 +152,7 @@ Things to note:
- The name of the element and the qa_selector must match and be snake_cased
- If the element appears on the page unconditionally, add `required: true` to the element. See
[Dynamic element validation](dynamic_element_validation.md)
- You may see `.qa-selector` classes in existing Page Objects. We should prefer the [`data-qa-selector`](#data-qa-selector-vs-qa-selector)
- You may see `.qa-selector` classes in existing Page Objects. We should prefer the [`data-qa-selector`](#data-qa-selector-vs-qa-selector)
method of definition over the `.qa-selector` CSS class
### `data-qa-selector` vs `.qa-selector`
......@@ -173,7 +173,7 @@ and we should prefer the `data-qa-selector` method of definition.
A common occurrence in automated testing is selecting a single "one-of-many" element.
In a list of several items, how do you differentiate what you are selecting on?
The most common workaround for this is via text matching. Instead, a better practice is
The most common workaround for this is via text matching. Instead, a better practice is
by matching on that specific element by a unique identifier, rather than by text.
We got around this by adding the `data-qa-*` extensible selection mechanism.
......
......@@ -59,7 +59,7 @@ project = table(:projects).create!(id: 1, name: 'gitlab1', path: 'gitlab1')
#### `migrate!`
Use the `migrate!` helper to run the migration that is under test. It will not only
Use the `migrate!` helper to run the migration that is under test. It will not only
run the migration, but will also bump the schema version in the `schema_migrations`
table. It is necessary because in the `after` hook we trigger the rest of
the migrations, and we need to know where to start. Example:
......
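The elided example presumably resembles the following sketch (the migration class and assertion are illustrative):
```ruby
require 'spec_helper'

describe SomeMigrationUnderTest, :migration do
  let(:projects) { table(:projects) }

  it 'migrates existing rows' do
    project = projects.create!(id: 1, name: 'gitlab1', path: 'gitlab1')

    migrate! # runs the migration under test and bumps schema_migrations

    expect(project.reload.name).to eq('gitlab1')
  end
end
```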
......@@ -151,6 +151,8 @@ The following Elasticsearch settings are available:
| `AWS Access Key` | The AWS access key. |
| `AWS Secret Access Key` | The AWS secret access key. |
| `Maximum field length` | See [the explanation in instance limits](../administration/instance_limits.md#maximum-field-length). |
| `Maximum bulk request size (MiB)` | Repository indexing uses the Elasticsearch bulk request API. This setting determines the maximum size of an individual bulk request during these operations. |
| `Bulk request concurrency` | Each repository indexing operation may submit bulk requests in parallel. This increases indexing performance, but fills the Elasticsearch bulk requests queue faster. |
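Both settings can also be changed outside the UI, for instance from a Rails console (a hedged sketch; the values are illustrative):
```ruby
ApplicationSetting.current.update!(
  elasticsearch_max_bulk_size_mb: 20,
  elasticsearch_max_bulk_concurrency: 12
)
```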
### Limiting namespaces and projects
......
......@@ -356,6 +356,44 @@ Note the following properties:
![anomaly panel type](img/prometheus_dashboard_column_panel_type.png)
##### Stacked column
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/30583) in GitLab 12.8.
To add a stacked column panel type to a dashboard, look at the following sample dashboard file:
```yaml
dashboard: 'Dashboard title'
priority: 1
panel_groups:
- group: 'Group Title'
priority: 5
panels:
- type: 'stacked-column'
title: "Stacked column"
y_label: "y label"
x_label: 'x label'
metrics:
- id: memory_1
query_range: 'memory_query'
label: "memory query 1"
unit: "count"
series_name: 'group 1'
- id: memory_2
query_range: 'memory_query_2'
label: "memory query 2"
unit: "count"
series_name: 'group 2'
```
![stacked column panel type](img/prometheus_dashboard_stacked_column_panel_type_v12_8.png)
| Property | Type | Required | Description |
| ------ | ------ | ------ | ------ |
| `type` | string | yes | Type of panel to be rendered. For stacked column panel types, set to `stacked-column` |
| `query_range` | string | yes | For stacked column panel types, you must use a [range query](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries) |
##### Single Stat
To add a single stat panel type to a dashboard, look at the following sample dashboard file:
......
......@@ -147,6 +147,22 @@ reduce the number of approvals left for all rules that the approver belongs to.
![Approvals premium merge request widget](img/approvals_premium_mr_widget_v12_7.png)
### Scoped to Protected Branch **(PREMIUM)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/460) in [GitLab Premium](https://about.gitlab.com/pricing/) 12.8.
Approval rules are often only relevant to specific branches, like `master`.
When configuring [**Default Approval Rules**](#adding--editing-a-default-approval-rule),
these can be scoped to all the protected branches at once by navigating to your project's
**Settings**, expanding **Merge request approvals**, and selecting **Any branch** from
the **Target branch** dropdown.
Alternatively, you can select a very specific protected branch from the **Target branch** dropdown:
![Scoped to Protected Branch](img/scoped_to_protected_branch_v12_8.png)
To enable this configuration, see [Code Owner’s approvals for protected branches](../protected_branches.md#protected-branches-approval-by-code-owners-premium).
## Adding or removing an approval
When an [eligible approver](#eligible-approvers) visits an open merge request,
......
......@@ -26,6 +26,8 @@ module EE
:elasticsearch_aws_region,
:elasticsearch_aws_secret_access_key,
:elasticsearch_indexing,
:elasticsearch_max_bulk_concurrency,
:elasticsearch_max_bulk_size_mb,
:elasticsearch_replicas,
:elasticsearch_indexed_field_length_limit,
:elasticsearch_search,
......
......@@ -34,38 +34,38 @@ module Elastic
Elasticsearch::Model::Registry.add(self) if self.is_a?(Class)
if self < ActiveRecord::Base
after_commit on: :create do
if Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
ElasticIndexerWorker.perform_async(:index, self.class.to_s, self.id, self.es_id)
end
end
after_commit on: :update do
if Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
ElasticIndexerWorker.perform_async(
:update,
self.class.to_s,
self.id,
self.es_id,
changed_fields: self.previous_changes.keys
)
end
end
after_commit on: :destroy do
if Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
ElasticIndexerWorker.perform_async(
:delete,
self.class.to_s,
self.id,
self.es_id,
es_parent: self.es_parent
)
end
end
after_commit :maintain_elasticsearch_create, on: :create
after_commit :maintain_elasticsearch_update, on: :update
after_commit :maintain_elasticsearch_destroy, on: :destroy
end
end
def maintain_elasticsearch_create
return unless Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
ElasticIndexerWorker.perform_async(:index, self.class.to_s, self.id, self.es_id)
end
def maintain_elasticsearch_update
return unless Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
ElasticIndexerWorker.perform_async(
:update,
self.class.to_s,
self.id,
self.es_id,
changed_fields: self.previous_changes.keys
)
end
def maintain_elasticsearch_destroy
return unless Gitlab::CurrentSettings.elasticsearch_indexing? && self.searchable?
ElasticIndexerWorker.perform_async(
:delete, self.class.to_s, self.id, self.es_id, es_parent: self.es_parent
)
end
class_methods do
def __elasticsearch__
@__elasticsearch__ ||= ::Elastic::MultiVersionClassProxy.new(self)
......
......@@ -48,6 +48,14 @@ module EE
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :elasticsearch_max_bulk_size_mb,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :elasticsearch_max_bulk_concurrency,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :elasticsearch_url,
presence: { message: "can't be blank when indexing is enabled" },
if: ->(setting) { setting.elasticsearch_indexing? }
......@@ -90,6 +98,8 @@ module EE
elasticsearch_replicas: 1,
elasticsearch_shards: 5,
elasticsearch_indexed_field_length_limit: 0,
elasticsearch_max_bulk_size_bytes: 10.megabytes,
elasticsearch_max_bulk_concurrency: 10,
elasticsearch_url: ENV['ELASTIC_URL'] || 'http://localhost:9200',
email_additional_text: nil,
lock_memberships_to_ldap: false,
......@@ -209,7 +219,9 @@ module EE
aws: elasticsearch_aws,
aws_access_key: elasticsearch_aws_access_key,
aws_secret_access_key: elasticsearch_aws_secret_access_key,
aws_region: elasticsearch_aws_region
aws_region: elasticsearch_aws_region,
max_bulk_size_bytes: elasticsearch_max_bulk_size_mb.megabytes,
max_bulk_concurrency: elasticsearch_max_bulk_concurrency
}
end
......
......@@ -59,7 +59,7 @@ module EE
validate :custom_project_templates_group_allowed, if: :custom_project_templates_group_id_changed?
scope :aimed_for_deletion, -> (date) { joins(:deletion_schedule).where('group_deletion_schedules.marked_for_deletion_on <= ?', date) }
scope :with_deletion_schedule, -> { preload(:deletion_schedule) }
scope :with_deletion_schedule, -> { preload(deletion_schedule: :deleting_user) }
scope :where_group_links_with_provider, ->(provider) do
joins(:ldap_group_links).where(ldap_group_links: { provider: provider })
......
......@@ -143,6 +143,7 @@ module EE
scope :with_repos_templates, -> { where(namespace_id: ::Gitlab::CurrentSettings.current_application_settings.custom_project_templates_group_id) }
scope :with_groups_level_repos_templates, -> { joins("INNER JOIN namespaces ON projects.namespace_id = namespaces.custom_project_templates_group_id") }
scope :with_designs, -> { where(id: DesignManagement::Design.select(:project_id)) }
scope :with_deleting_user, -> { includes(:deleting_user) }
delegate :shared_runners_minutes, :shared_runners_seconds, :shared_runners_seconds_last_reset,
to: :statistics, allow_nil: true
......
......@@ -63,6 +63,20 @@
.form-text.text-muted
= _('If any indexed field exceeds this limit it will be truncated to this number of characters and the rest will not be indexed or searchable. This does not apply to repository and wiki indexing. Setting this to 0 means it is unlimited.')
.form-group
= f.label :elasticsearch_max_bulk_size_mb, _('Maximum bulk request size (MiB)'), class: 'label-bold'
= f.number_field :elasticsearch_max_bulk_size_mb, value: @application_setting.elasticsearch_max_bulk_size_mb, class: 'form-control'
.form-text.text-muted
= _('Maximum size of Elasticsearch bulk indexing requests.')
= _('This only applies to repository indexing operations.')
.form-group
= f.label :elasticsearch_max_bulk_concurrency, _('Bulk request concurrency'), class: 'label-bold'
= f.number_field :elasticsearch_max_bulk_concurrency, value: @application_setting.elasticsearch_max_bulk_concurrency, class: 'form-control'
.form-text.text-muted
= _('Maximum concurrency of Elasticsearch bulk requests per indexing operation.')
= _('This only applies to repository indexing operations.')
.sub-section
%h4= _('Elasticsearch indexing restrictions')
.form-group
......
......@@ -2,7 +2,7 @@
class AdjournedGroupDeletionWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
include CronjobQueue
INTERVAL = 5.minutes.to_i
......@@ -11,14 +11,16 @@ class AdjournedGroupDeletionWorker
def perform
deletion_cutoff = Gitlab::CurrentSettings.deletion_adjourned_period.days.ago.to_date
Group.aimed_for_deletion(deletion_cutoff)
Group.with_route.aimed_for_deletion(deletion_cutoff)
.with_deletion_schedule
.find_each(batch_size: 100) # rubocop: disable CodeReuse/ActiveRecord
.with_index do |group, index|
deletion_schedule = group.deletion_schedule
delay = index * INTERVAL
GroupDestroyWorker.perform_in(delay, group.id, deletion_schedule.user_id)
with_context(namespace: group, user: deletion_schedule.deleting_user) do
GroupDestroyWorker.perform_in(delay, group.id, deletion_schedule.user_id)
end
end
end
end
......@@ -2,7 +2,7 @@
class AdjournedProjectsDeletionCronWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
include CronjobQueue
INTERVAL = 5.minutes.to_i
......@@ -11,10 +11,12 @@ class AdjournedProjectsDeletionCronWorker
def perform
deletion_cutoff = Gitlab::CurrentSettings.deletion_adjourned_period.days.ago.to_date
Project.aimed_for_deletion(deletion_cutoff).find_each(batch_size: 100).with_index do |project, index| # rubocop: disable CodeReuse/ActiveRecord
Project.with_route.with_deleting_user.aimed_for_deletion(deletion_cutoff).find_each(batch_size: 100).with_index do |project, index| # rubocop: disable CodeReuse/ActiveRecord
delay = index * INTERVAL
AdjournedProjectDeletionWorker.perform_in(delay, project.id)
with_context(project: project, user: project.deleting_user) do
AdjournedProjectDeletionWorker.perform_in(delay, project.id)
end
end
end
end
---
title: Make elasticsearch bulk parameters configurable
merge_request: 24688
author:
type: added
---
title: Add trial field to namespaces API
merge_request: 24666
author:
type: added
......@@ -170,6 +170,9 @@ module EE
expose :trial_ends_on, if: can_admin_namespace do |namespace, _|
namespace.trial_ends_on
end
expose :trial, if: can_admin_namespace do |namespace, _|
namespace.trial?
end
end
end
......
......@@ -40,7 +40,7 @@ module EE
optional :max_seats_used, type: Integer, default: 0, desc: 'The max number of active users detected in the last month'
optional :plan_code, type: String, desc: 'The code of the purchased plan'
optional :end_date, type: Date, desc: 'The date when subscription expires'
optional :trial, type: Grape::API::Boolean, desc: 'Wether the subscription is trial'
optional :trial, type: Grape::API::Boolean, desc: 'Whether the subscription is trial'
optional :trial_ends_on, type: Date, desc: 'The date when the trial expires'
optional :trial_starts_on, type: Date, desc: 'The date when the trial starts'
end
......
......@@ -69,6 +69,8 @@ describe 'Admin updates EE-only settings' do
fill_in 'Number of Elasticsearch shards', with: '120'
fill_in 'Number of Elasticsearch replicas', with: '2'
fill_in 'Maximum field length', with: '100000'
fill_in 'Maximum bulk request size (MiB)', with: '17'
fill_in 'Bulk request concurrency', with: '23'
click_button 'Save changes'
end
......@@ -79,6 +81,8 @@ describe 'Admin updates EE-only settings' do
expect(current_settings.elasticsearch_shards).to eq(120)
expect(current_settings.elasticsearch_replicas).to eq(2)
expect(current_settings.elasticsearch_indexed_field_length_limit).to eq(100000)
expect(current_settings.elasticsearch_max_bulk_size_mb).to eq(17)
expect(current_settings.elasticsearch_max_bulk_concurrency).to eq(23)
expect(page).to have_content 'Application settings saved successfully'
end
end
......
......@@ -47,6 +47,18 @@ describe ApplicationSetting do
it { is_expected.not_to allow_value(1.1).for(:elasticsearch_indexed_field_length_limit) }
it { is_expected.not_to allow_value(-1).for(:elasticsearch_indexed_field_length_limit) }
it { is_expected.to allow_value(25).for(:elasticsearch_max_bulk_size_mb) }
it { is_expected.not_to allow_value(nil).for(:elasticsearch_max_bulk_size_mb) }
it { is_expected.not_to allow_value(0).for(:elasticsearch_max_bulk_size_mb) }
it { is_expected.not_to allow_value(1.1).for(:elasticsearch_max_bulk_size_mb) }
it { is_expected.not_to allow_value(-1).for(:elasticsearch_max_bulk_size_mb) }
it { is_expected.to allow_value(2).for(:elasticsearch_max_bulk_concurrency) }
it { is_expected.not_to allow_value(nil).for(:elasticsearch_max_bulk_concurrency) }
it { is_expected.not_to allow_value(0).for(:elasticsearch_max_bulk_concurrency) }
it { is_expected.not_to allow_value(1.1).for(:elasticsearch_max_bulk_concurrency) }
it { is_expected.not_to allow_value(-1).for(:elasticsearch_max_bulk_concurrency) }
it { is_expected.to allow_value(nil).for(:required_instance_ci_template) }
it { is_expected.not_to allow_value("").for(:required_instance_ci_template) }
it { is_expected.not_to allow_value(" ").for(:required_instance_ci_template) }
......@@ -208,7 +220,9 @@ describe ApplicationSetting do
elasticsearch_aws: false,
elasticsearch_aws_region: 'test-region',
elasticsearch_aws_access_key: 'test-access-key',
elasticsearch_aws_secret_access_key: 'test-secret-access-key'
elasticsearch_aws_secret_access_key: 'test-secret-access-key',
elasticsearch_max_bulk_size_mb: 67,
elasticsearch_max_bulk_concurrency: 8
)
expect(setting.elasticsearch_config).to eq(
......@@ -216,7 +230,9 @@ describe ApplicationSetting do
aws: false,
aws_region: 'test-region',
aws_access_key: 'test-access-key',
aws_secret_access_key: 'test-secret-access-key'
aws_secret_access_key: 'test-secret-access-key',
max_bulk_size_bytes: 67.megabytes,
max_bulk_concurrency: 8
)
end
......
......@@ -22,12 +22,12 @@ describe API::Namespaces do
expect(group_kind_json_response.keys).to contain_exactly('id', 'kind', 'name', 'path', 'full_path',
'parent_id', 'members_count_with_descendants',
'plan', 'shared_runners_minutes_limit',
'avatar_url', 'web_url', 'trial_ends_on',
'avatar_url', 'web_url', 'trial_ends_on', 'trial',
'extra_shared_runners_minutes_limit', 'billable_members_count')
expect(user_kind_json_response.keys).to contain_exactly('id', 'kind', 'name', 'path', 'full_path',
'parent_id', 'plan', 'shared_runners_minutes_limit',
'avatar_url', 'web_url', 'trial_ends_on',
'avatar_url', 'web_url', 'trial_ends_on', 'trial',
'extra_shared_runners_minutes_limit', 'billable_members_count')
end
end
......@@ -41,7 +41,7 @@ describe API::Namespaces do
owned_group_response = json_response.find { |resource| resource['id'] == group1.id }
expect(owned_group_response.keys).to contain_exactly('id', 'kind', 'name', 'path', 'full_path', 'trial_ends_on',
'plan', 'parent_id', 'members_count_with_descendants',
'plan', 'parent_id', 'members_count_with_descendants', 'trial',
'avatar_url', 'web_url', 'billable_members_count')
end
......
......@@ -128,7 +128,7 @@ module Gitlab
def load_all_data!(repository)
return if @data == '' # don't mess with submodule blobs
# Even if we return early, recalculate wether this blob is binary in
# Even if we return early, recalculate whether this blob is binary in
# case a blob was initialized as text but the full data isn't
@binary = nil
......
......@@ -340,7 +340,7 @@ start_gitlab() {
# Wait for the pids to be planted
wait_for_pids
# Finally check the status to tell wether or not GitLab is running
# Finally check the status to tell whether or not GitLab is running
print_status
}
......
......@@ -2999,6 +2999,9 @@ msgstr ""
msgid "Built-in"
msgstr ""
msgid "Bulk request concurrency"
msgstr ""
msgid "Burndown chart"
msgstr ""
......@@ -11728,9 +11731,15 @@ msgstr ""
msgid "Maximum attachment size (MB)"
msgstr ""
msgid "Maximum bulk request size (MiB)"
msgstr ""
msgid "Maximum capacity"
msgstr ""
msgid "Maximum concurrency of Elasticsearch bulk requests per indexing operation."
msgstr ""
msgid "Maximum delay (Minutes)"
msgstr ""
......@@ -11773,6 +11782,9 @@ msgstr ""
msgid "Maximum size limit for each repository."
msgstr ""
msgid "Maximum size of Elasticsearch bulk indexing requests."
msgstr ""
msgid "Maximum size of individual attachments in comments."
msgstr ""
......@@ -19578,6 +19590,9 @@ msgstr ""
msgid "This namespace has already been taken! Please choose another one."
msgstr ""
msgid "This only applies to repository indexing operations."
msgstr ""
msgid "This option is only available on GitLab.com"
msgstr ""
......
......@@ -194,7 +194,7 @@ describe ProjectsHelper do
expect(helper.project_list_cache_key(project).last).to start_with('v')
end
it 'includes wether or not the user can read cross project' do
it 'includes whether or not the user can read cross project' do
expect(helper.project_list_cache_key(project)).to include('cross-project:true')
end
......
......@@ -80,9 +80,9 @@ describe Ci::PipelineSchedule do
it 'preloads the associations' do
subject
query = ActiveRecord::QueryRecorder.new { subject.each(&:project) }
query = ActiveRecord::QueryRecorder.new { subject.map(&:project).each(&:route) }
expect(query.count).to eq(2)
expect(query.count).to eq(3)
end
end
......