Commit 852f4a85 authored by GitLab Bot's avatar GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 82cd20ac
...@@ -315,7 +315,9 @@ module ApplicationSettingsHelper ...@@ -315,7 +315,9 @@ module ApplicationSettingsHelper
:push_event_hooks_limit, :push_event_hooks_limit,
:push_event_activities_limit, :push_event_activities_limit,
:custom_http_clone_url_root, :custom_http_clone_url_root,
:snippet_size_limit :snippet_size_limit,
:email_restrictions_enabled,
:email_restrictions
] ]
end end
......
...@@ -243,6 +243,8 @@ class ApplicationSetting < ApplicationRecord ...@@ -243,6 +243,8 @@ class ApplicationSetting < ApplicationRecord
validates :snippet_size_limit, numericality: { only_integer: true, greater_than: 0 } validates :snippet_size_limit, numericality: { only_integer: true, greater_than: 0 }
validate :email_restrictions_regex_valid?
SUPPORTED_KEY_TYPES.each do |type| SUPPORTED_KEY_TYPES.each do |type|
validates :"#{type}_key_restriction", presence: true, key_restriction: { type: type } validates :"#{type}_key_restriction", presence: true, key_restriction: { type: type }
end end
...@@ -381,6 +383,14 @@ class ApplicationSetting < ApplicationRecord ...@@ -381,6 +383,14 @@ class ApplicationSetting < ApplicationRecord
def recaptcha_or_login_protection_enabled def recaptcha_or_login_protection_enabled
recaptcha_enabled || login_recaptcha_protection_enabled recaptcha_enabled || login_recaptcha_protection_enabled
end end
# Custom validation: the admin-supplied sign-up email restriction pattern
# must compile as an RE2 (untrusted) regular expression. A blank pattern is
# considered valid (the feature is simply inert). On compile failure an
# error is attached to :email_restrictions instead of raising.
def email_restrictions_regex_valid?
  pattern = email_restrictions
  return if pattern.blank?

  begin
    # Compilation is the check itself; the instance is discarded.
    Gitlab::UntrustedRegexp.new(pattern)
  rescue RegexpError
    errors.add(:email_restrictions, _('is not a valid regular expression'))
  end
end
end end
ApplicationSetting.prepend_if_ee('EE::ApplicationSetting') ApplicationSetting.prepend_if_ee('EE::ApplicationSetting')
...@@ -62,6 +62,8 @@ module ApplicationSettingImplementation ...@@ -62,6 +62,8 @@ module ApplicationSettingImplementation
eks_account_id: nil, eks_account_id: nil,
eks_access_key_id: nil, eks_access_key_id: nil,
eks_secret_access_key: nil, eks_secret_access_key: nil,
email_restrictions_enabled: false,
email_restrictions: nil,
first_day_of_week: 0, first_day_of_week: 0,
gitaly_timeout_default: 55, gitaly_timeout_default: 55,
gitaly_timeout_fast: 10, gitaly_timeout_fast: 10,
......
...@@ -189,6 +189,7 @@ class User < ApplicationRecord ...@@ -189,6 +189,7 @@ class User < ApplicationRecord
validate :owns_public_email, if: :public_email_changed? validate :owns_public_email, if: :public_email_changed?
validate :owns_commit_email, if: :commit_email_changed? validate :owns_commit_email, if: :commit_email_changed?
validate :signup_domain_valid?, on: :create, if: ->(user) { !user.created_by_id } validate :signup_domain_valid?, on: :create, if: ->(user) { !user.created_by_id }
validate :check_email_restrictions, on: :create, if: ->(user) { !user.created_by_id }
validates :theme_id, allow_nil: true, inclusion: { in: Gitlab::Themes.valid_ids, validates :theme_id, allow_nil: true, inclusion: { in: Gitlab::Themes.valid_ids,
message: _("%{placeholder} is not a valid theme") % { placeholder: '%{value}' } } message: _("%{placeholder} is not a valid theme") % { placeholder: '%{value}' } }
...@@ -1751,6 +1752,18 @@ class User < ApplicationRecord ...@@ -1751,6 +1752,18 @@ class User < ApplicationRecord
end end
end end
# Sign-up validation: rejects the user's email address when it matches the
# instance-wide restriction regex. Runs only when both the :email_restrictions
# feature flag and the application setting are enabled; a blank pattern
# disables the check entirely.
def check_email_restrictions
  return unless Feature.enabled?(:email_restrictions)
  return unless Gitlab::CurrentSettings.email_restrictions_enabled?

  pattern = Gitlab::CurrentSettings.email_restrictions
  return if pattern.blank?
  # RE2-backed regexp guards against ReDoS from the admin-supplied pattern.
  return unless Gitlab::UntrustedRegexp.new(pattern).match?(email)

  errors.add(:email, _('is not allowed for sign-up'))
end
def self.unique_internal(scope, username, email_pattern, &block) def self.unique_internal(scope, username, email_pattern, &block)
scope.first || create_unique_internal(scope, username, email_pattern, &block) scope.first || create_unique_internal(scope, username, email_pattern, &block)
end end
......
...@@ -49,6 +49,20 @@ ...@@ -49,6 +49,20 @@
= f.label :domain_blacklist, 'Blacklisted domains for sign-ups', class: 'label-bold' = f.label :domain_blacklist, 'Blacklisted domains for sign-ups', class: 'label-bold'
= f.text_area :domain_blacklist_raw, placeholder: 'domain.com', class: 'form-control', rows: 8 = f.text_area :domain_blacklist_raw, placeholder: 'domain.com', class: 'form-control', rows: 8
.form-text.text-muted Users with e-mail addresses that match these domain(s) will NOT be able to sign-up. Wildcards allowed. Use separate lines for multiple entries. Ex: domain.com, *.domain.com .form-text.text-muted Users with e-mail addresses that match these domain(s) will NOT be able to sign-up. Wildcards allowed. Use separate lines for multiple entries. Ex: domain.com, *.domain.com
- if Feature.enabled?(:email_restrictions)
.form-group
= f.label :email_restrictions_enabled, _('Email restrictions'), class: 'label-bold'
.form-check
= f.check_box :email_restrictions_enabled, class: 'form-check-input'
= f.label :email_restrictions_enabled, class: 'form-check-label' do
= _('Enable email restrictions for sign ups')
.form-group
= f.label :email_restrictions, _('Email restrictions for sign-ups'), class: 'label-bold'
= f.text_area :email_restrictions, class: 'form-control', rows: 4
.form-text.text-muted
- supported_syntax_link_url = 'https://github.com/google/re2/wiki/Syntax'
- supported_syntax_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: supported_syntax_link_url }
= _('Restricts sign-ups for email addresses that match the given regex. See the %{supported_syntax_link_start}supported syntax%{supported_syntax_link_end} for more information.').html_safe % { supported_syntax_link_start: supported_syntax_link_start, supported_syntax_link_end: '</a>'.html_safe }
.form-group .form-group
= f.label :after_sign_up_text, class: 'label-bold' = f.label :after_sign_up_text, class: 'label-bold'
......
---
title: Add deploy tokens instance API endpoint
merge_request: 25066
author:
type: added
---
title: Add restrictions for signup email addresses
merge_request: 25122
author:
type: added
# frozen_string_literal: true
# Adds the two columns backing email-based sign-up restrictions:
# - email_restrictions_enabled: feature toggle, defaults to off for existing installs
# - email_restrictions: the admin-supplied regex pattern (nullable text)
class AddEmailRestrictionsToApplicationSettings < ActiveRecord::Migration[6.0]
  # Both operations are plain column additions; no downtime required.
  DOWNTIME = false

  def up
    add_column :application_settings, :email_restrictions_enabled, :boolean, default: false, null: false
    add_column :application_settings, :email_restrictions, :text, null: true
  end

  def down
    remove_column :application_settings, :email_restrictions
    remove_column :application_settings, :email_restrictions_enabled
  end
end
...@@ -349,6 +349,8 @@ ActiveRecord::Schema.define(version: 2020_02_13_220211) do ...@@ -349,6 +349,8 @@ ActiveRecord::Schema.define(version: 2020_02_13_220211) do
t.boolean "disable_overriding_approvers_per_merge_request", default: false, null: false t.boolean "disable_overriding_approvers_per_merge_request", default: false, null: false
t.boolean "prevent_merge_requests_author_approval", default: false, null: false t.boolean "prevent_merge_requests_author_approval", default: false, null: false
t.boolean "prevent_merge_requests_committers_approval", default: false, null: false t.boolean "prevent_merge_requests_committers_approval", default: false, null: false
t.boolean "email_restrictions_enabled", default: false, null: false
t.text "email_restrictions"
t.index ["custom_project_templates_group_id"], name: "index_application_settings_on_custom_project_templates_group_id" t.index ["custom_project_templates_group_id"], name: "index_application_settings_on_custom_project_templates_group_id"
t.index ["file_template_project_id"], name: "index_application_settings_on_file_template_project_id" t.index ["file_template_project_id"], name: "index_application_settings_on_file_template_project_id"
t.index ["instance_administration_project_id"], name: "index_applicationsettings_on_instance_administration_project_id" t.index ["instance_administration_project_id"], name: "index_applicationsettings_on_instance_administration_project_id"
......
# Deploy Tokens API
## List all deploy tokens
Get a list of all deploy tokens across all projects of the GitLab instance.
>**Note:**
> This endpoint requires admin access.
```
GET /deploy_tokens
```
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/deploy_tokens"
```
Example response:
```json
[
{
"id": 1,
"name": "MyToken",
"username": "gitlab+deploy-token-1",
"expires_at": "2020-02-14T00:00:00.000Z",
"token": "jMRvtPNxrn3crTAGukpZ",
"scopes": [
"read_repository",
"read_registry"
]
}
]
```
...@@ -33,7 +33,7 @@ to original issue and epic. ...@@ -33,7 +33,7 @@ to original issue and epic.
and those maintaining a GitLab setup. and those maintaining a GitLab setup.
Any change submitted can have an impact not only on the application itself but Any change submitted can have an impact not only on the application itself but
also those maintaining it and those keeping it up and running (e.g. production also those maintaining it and those keeping it up and running (for example, production
engineers). As a result you should think carefully about the impact of your engineers). As a result you should think carefully about the impact of your
merge request on not only the application but also on the people keeping it up merge request on not only the application but also on the people keeping it up
and running. and running.
...@@ -85,34 +85,34 @@ the following: ...@@ -85,34 +85,34 @@ the following:
1. Is there something that we can do differently to not process such a 1. Is there something that we can do differently to not process such a
big data set? big data set?
1. Should we build some fail-safe mechanism to contain 1. Should we build some fail-safe mechanism to contain
computational complexity? Usually it is better to degrade computational complexity? Usually it's better to degrade
the service for a single user instead of all users. the service for a single user instead of all users.
## Query plans and database structure ## Query plans and database structure
The query plan can answer the questions whether we need additional The query plan can tell us if we will need additional
indexes, or whether we perform expensive filtering (i.e. using sequential scans). indexes, or expensive filtering (such as using sequential scans).
Each query plan should be run against substantial size of data set. Each query plan should be run against substantial size of data set.
For example if you look for issues with specific conditions, For example, if you look for issues with specific conditions,
you should consider validating the query against you should consider validating a query against
a small number (a few hundred) and a big number (100_000) of issues. a small number (a few hundred) and a big number (100_000) of issues.
See how the query will behave if the result will be a few See how the query will behave if the result will be a few
and a few thousand. and a few thousand.
This is needed as we have users using GitLab for very big projects and This is needed as we have users using GitLab for very big projects and
in a very unconventional way. Even, if it seems that it is unlikely in a very unconventional way. Even if it seems that it's unlikely
that such big data set will be used, it is still plausible that one that such a big data set will be used, it's still plausible that one
of our customers will have the problem with the feature. of our customers will encounter a problem with the feature.
Understanding ahead of time how it is going to behave at scale even if we accept it, Understanding ahead of time how it's going to behave at scale, even if we accept it,
is the desired outcome. We should always have a plan or understanding what it takes is the desired outcome. We should always have a plan or understanding of what it will take
to optimise feature to the magnitude of higher usage patterns. to optimize the feature for higher usage patterns.
Every database structure should be optimised and sometimes even over-described Every database structure should be optimized and sometimes even over-described
to be prepared to be easily extended. The hardest part after some point is in preparation for easy extension. The hardest part after some point is
data migration. Migrating millions of rows will always be troublesome and data migration. Migrating millions of rows will always be troublesome and
can have negative impact on application. can have a negative impact on the application.
To better understand how to get help with the query plan reviews To better understand how to get help with the query plan reviews
read this section on [how to prepare the merge request for a database review](https://docs.gitlab.com/ee/development/database_review.html#how-to-prepare-the-merge-request-for-a-database-review). read this section on [how to prepare the merge request for a database review](https://docs.gitlab.com/ee/development/database_review.html#how-to-prepare-the-merge-request-for-a-database-review).
...@@ -167,14 +167,14 @@ be clearly mentioned in the merge request description. ...@@ -167,14 +167,14 @@ be clearly mentioned in the merge request description.
## Batch process ## Batch process
**Summary:** Iterating a single process to external services (e.g. PostgreSQL, Redis, Object Storage, etc) **Summary:** Iterating a single process to external services (for example, PostgreSQL, Redis, Object Storage)
should be executed in a **batch-style** in order to reduce connection overheads. should be executed in a **batch-style** in order to reduce connection overheads.
For fetching rows from various tables in a batch-style, please see [Eager Loading](#eager-loading) section. For fetching rows from various tables in a batch-style, please see [Eager Loading](#eager-loading) section.
### Example: Delete multiple files from Object Storage ### Example: Delete multiple files from Object Storage
When you delete multiple files from object storage (e.g. GCS), When you delete multiple files from object storage, like GCS,
executing a single REST API call multiple times is a quite expensive executing a single REST API call multiple times is a quite expensive
process. Ideally, this should be done in a batch-style, for example, S3 provides process. Ideally, this should be done in a batch-style, for example, S3 provides
[batch deletion API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html), [batch deletion API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html),
...@@ -187,12 +187,12 @@ in a batch style. ...@@ -187,12 +187,12 @@ in a batch style.
## Timeout ## Timeout
**Summary:** You should set a reasonable timeout when the system invokes HTTP calls **Summary:** You should set a reasonable timeout when the system invokes HTTP calls
to external services (e.g. Kubernetes), and it should be executed in Sidekiq, not to external services (such as Kubernetes), and it should be executed in Sidekiq, not
in Puma/Unicorn threads. in Puma/Unicorn threads.
Often, GitLab needs to communicate with an external service such as Kubernetes Often, GitLab needs to communicate with an external service such as Kubernetes
clusters. In this case, it's hard to estimate when the external service finishes clusters. In this case, it's hard to estimate when the external service finishes
the requested process, for example, if it's a user-owned cluster that is inactive for some reason, the requested process, for example, if it's a user-owned cluster that's inactive for some reason,
GitLab might wait for the response forever ([Example](https://gitlab.com/gitlab-org/gitlab/issues/31475)). GitLab might wait for the response forever ([Example](https://gitlab.com/gitlab-org/gitlab/issues/31475)).
This could result in Puma/Unicorn timeout and should be avoided at all cost. This could result in Puma/Unicorn timeout and should be avoided at all cost.
...@@ -203,7 +203,7 @@ Using [`ReactiveCaching`](https://docs.gitlab.com/ee/development/utilities.html# ...@@ -203,7 +203,7 @@ Using [`ReactiveCaching`](https://docs.gitlab.com/ee/development/utilities.html#
## Keep database transaction minimal ## Keep database transaction minimal
**Summary:** You should avoid accessing to external services (e.g. Gitaly) during database **Summary:** You should avoid accessing to external services like Gitaly during database
transactions, otherwise it leads to severe contention problems transactions, otherwise it leads to severe contention problems
as an open transaction basically blocks the release of a Postgres backend connection. as an open transaction basically blocks the release of a Postgres backend connection.
...@@ -247,14 +247,14 @@ necessary. ...@@ -247,14 +247,14 @@ necessary.
A merge request must not increase the memory usage of GitLab by more than the A merge request must not increase the memory usage of GitLab by more than the
absolute bare minimum required by the code. This means that if you have to parse absolute bare minimum required by the code. This means that if you have to parse
some large document (e.g. an HTML document) it's best to parse it as a stream some large document (for example, an HTML document) it's best to parse it as a stream
whenever possible, instead of loading the entire input into memory. Sometimes whenever possible, instead of loading the entire input into memory. Sometimes
this isn't possible, in that case this should be stated explicitly in the merge this isn't possible, in that case this should be stated explicitly in the merge
request. request.
## Lazy Rendering of UI Elements ## Lazy Rendering of UI Elements
**Summary:** only render UI elements when they're actually needed. **Summary:** only render UI elements when they are actually needed.
Certain UI elements may not always be needed. For example, when hovering over a Certain UI elements may not always be needed. For example, when hovering over a
diff line there's a small icon displayed that can be used to create a new diff line there's a small icon displayed that can be used to create a new
...@@ -284,7 +284,7 @@ data should be cached for a certain time period instead of the duration of the ...@@ -284,7 +284,7 @@ data should be cached for a certain time period instead of the duration of the
transaction. transaction.
For example, say you process multiple snippets of text containing username For example, say you process multiple snippets of text containing username
mentions (e.g. `Hello @alice` and `How are you doing @alice?`). By caching the mentions (for example, `Hello @alice` and `How are you doing @alice?`). By caching the
user objects for every username we can remove the need for running the same user objects for every username we can remove the need for running the same
query for every mention of `@alice`. query for every mention of `@alice`.
...@@ -304,7 +304,7 @@ The main styles of pagination are: ...@@ -304,7 +304,7 @@ The main styles of pagination are:
and the total number of pages. This style is well supported by all components of GitLab. and the total number of pages. This style is well supported by all components of GitLab.
1. Offset-based pagination, but without the count: user goes to a specific page, like 1. 1. Offset-based pagination, but without the count: user goes to a specific page, like 1.
User sees only the next page number, but does not see the total amount of pages. User sees only the next page number, but does not see the total amount of pages.
1. Next page using keyset-based pagination: user can only go to next page, as we do not know how many pages 1. Next page using keyset-based pagination: user can only go to next page, as we don't know how many pages
are available. are available.
1. Infinite scrolling pagination: user scrolls the page and next items are loaded asynchronously. This is ideal, 1. Infinite scrolling pagination: user scrolls the page and next items are loaded asynchronously. This is ideal,
as it has exact same benefits as the previous one. as it has exact same benefits as the previous one.
...@@ -316,20 +316,20 @@ can follow the progress looking at [API: Keyset Pagination ...@@ -316,20 +316,20 @@ can follow the progress looking at [API: Keyset Pagination
Take into consideration the following when choosing a pagination strategy: Take into consideration the following when choosing a pagination strategy:
1. It is very inefficient to calculate amount of objects that pass the filtering, 1. It's very inefficient to calculate amount of objects that pass the filtering,
this operation usually can take seconds, and can time out, this operation usually can take seconds, and can time out,
1. It is very inefficient to get entries for page at higher ordinals, like 1000. 1. It's very inefficient to get entries for page at higher ordinals, like 1000.
The database has to sort and iterate all previous items, and this operation usually The database has to sort and iterate all previous items, and this operation usually
can result in substantial load put on database. can result in substantial load put on database.
## Badge counters ## Badge counters
Counters should always be truncated. It means that we do not want to present Counters should always be truncated. It means that we don't want to present
the exact number over some threshold. The reason for that is for the cases where we want the exact number over some threshold. The reason for that is for the cases where we want
to calculate exact number of items, we effectively need to filter each of them for to calculate exact number of items, we effectively need to filter each of them for
the purpose of knowing the exact number of items matching. the purpose of knowing the exact number of items matching.
From ~UX perspective it is often acceptable to see that you have over 1000+ pipelines, From ~UX perspective it's often acceptable to see that you have over 1000+ pipelines,
instead of that you have 40000+ pipelines, but at a tradeoff of loading page for 2s longer. instead of that you have 40000+ pipelines, but at a tradeoff of loading page for 2s longer.
An example of this pattern is the list of pipelines and jobs. We truncate numbers to `1000+`, An example of this pattern is the list of pipelines and jobs. We truncate numbers to `1000+`,
...@@ -338,7 +338,7 @@ but we show an accurate number of running pipelines, which is the most interesti ...@@ -338,7 +338,7 @@ but we show an accurate number of running pipelines, which is the most interesti
There's a helper method that can be used for that purpose - `NumbersHelper.limited_counter_with_delimiter` - There's a helper method that can be used for that purpose - `NumbersHelper.limited_counter_with_delimiter` -
that accepts an upper limit of counting rows. that accepts an upper limit of counting rows.
In some cases it is desired that badge counters are loaded asynchronously. In some cases it's desired that badge counters are loaded asynchronously.
This can speed up the initial page load and give a better user experience overall. This can speed up the initial page load and give a better user experience overall.
## Application/misuse limits ## Application/misuse limits
...@@ -349,9 +349,9 @@ be performant and usable for the user, but **not limiting**. ...@@ -349,9 +349,9 @@ be performant and usable for the user, but **not limiting**.
**We want the features to be fully usable for the users.** **We want the features to be fully usable for the users.**
**However, we want to ensure that the feature will continue to perform well if used at its limit** **However, we want to ensure that the feature will continue to perform well if used at its limit**
**and it will not cause availability issues.** **and it won't cause availability issues.**
Consider that it is always better to start with some kind of limitation, Consider that it's always better to start with some kind of limitation,
instead of later introducing a breaking change that would result in some instead of later introducing a breaking change that would result in some
workflows breaking. workflows breaking.
...@@ -370,9 +370,9 @@ The intent of quotas could be different: ...@@ -370,9 +370,9 @@ The intent of quotas could be different:
Examples: Examples:
1. Pipeline Schedules: It is very unlikely that user will want to create 1. Pipeline Schedules: It's very unlikely that user will want to create
more than 50 schedules. more than 50 schedules.
In such cases it is rather expected that this is either misuse In such cases it's rather expected that this is either misuse
or abuse of the feature. Lack of the upper limit can result or abuse of the feature. Lack of the upper limit can result
in service degradation as the system will try to process all schedules in service degradation as the system will try to process all schedules
   assigned to the project.    assigned to the project.
......
...@@ -7,16 +7,15 @@ consistent performance of GitLab. ...@@ -7,16 +7,15 @@ consistent performance of GitLab.
The process of solving performance problems is roughly as follows: The process of solving performance problems is roughly as follows:
1. Make sure there's an issue open somewhere (e.g., on the GitLab CE issue 1. Make sure there's an issue open somewhere (for example, on the GitLab CE issue
tracker), create one if there isn't. See [#15607][#15607] for an example. tracker), and create one if there is not. See [#15607][#15607] for an example.
1. Measure the performance of the code in a production environment such as 1. Measure the performance of the code in a production environment such as
GitLab.com (see the [Tooling](#tooling) section below). Performance should be GitLab.com (see the [Tooling](#tooling) section below). Performance should be
measured over a period of _at least_ 24 hours. measured over a period of _at least_ 24 hours.
1. Add your findings based on the measurement period (screenshots of graphs, 1. Add your findings based on the measurement period (screenshots of graphs,
timings, etc) to the issue mentioned in step 1. timings, etc) to the issue mentioned in step 1.
1. Solve the problem. 1. Solve the problem.
1. Create a merge request, assign the "Performance" label and assign it to 1. Create a merge request, assign the "Performance" label and follow the [performance review process](merge_request_performance_guidelines.md).
[@yorickpeterse][yorickpeterse] for reviewing.
1. Once a change has been deployed make sure to _again_ measure for at least 24 1. Once a change has been deployed make sure to _again_ measure for at least 24
hours to see if your changes have any impact on the production environment. hours to see if your changes have any impact on the production environment.
1. Repeat until you're done. 1. Repeat until you're done.
...@@ -44,16 +43,16 @@ GitLab provides built-in tools to help improve performance and availability: ...@@ -44,16 +43,16 @@ GitLab provides built-in tools to help improve performance and availability:
- [QueryRecoder](query_recorder.md) for preventing `N+1` regressions. - [QueryRecoder](query_recorder.md) for preventing `N+1` regressions.
- [Chaos endpoints](chaos_endpoints.md) for testing failure scenarios. Intended mainly for testing availability. - [Chaos endpoints](chaos_endpoints.md) for testing failure scenarios. Intended mainly for testing availability.
GitLab employees can use GitLab.com's performance monitoring systems located at GitLab team members can use [GitLab.com's performance monitoring systems](https://about.gitlab.com/handbook/engineering/monitoring/) located at
<https://dashboards.gitlab.net>, this requires you to log in using your <https://dashboards.gitlab.net>, this requires you to log in using your
`@gitlab.com` Email address. Non-GitLab employees are advised to set up their `@gitlab.com` email address. Non-GitLab team-members are advised to set up their
own InfluxDB + Grafana stack. own InfluxDB and Grafana stack.
## Benchmarks ## Benchmarks
Benchmarks are almost always useless. Benchmarks usually only test small bits of Benchmarks are almost always useless. Benchmarks usually only test small bits of
code in isolation and often only measure the best case scenario. On top of that, code in isolation and often only measure the best case scenario. On top of that,
benchmarks for libraries (e.g., a Gem) tend to be biased in favour of the benchmarks for libraries (such as a Gem) tend to be biased in favour of the
library. After all there's little benefit to an author publishing a benchmark library. After all there's little benefit to an author publishing a benchmark
that shows they perform worse than their competitors. that shows they perform worse than their competitors.
...@@ -68,8 +67,8 @@ When writing benchmarks you should almost always use ...@@ -68,8 +67,8 @@ When writing benchmarks you should almost always use
[benchmark-ips](https://github.com/evanphx/benchmark-ips). Ruby's `Benchmark` [benchmark-ips](https://github.com/evanphx/benchmark-ips). Ruby's `Benchmark`
module that comes with the standard library is rarely useful as it runs either a module that comes with the standard library is rarely useful as it runs either a
single iteration (when using `Benchmark.bm`) or two iterations (when using single iteration (when using `Benchmark.bm`) or two iterations (when using
`Benchmark.bmbm`). Running this few iterations means external factors (e.g. a `Benchmark.bmbm`). Running this few iterations means external factors, such as a
video streaming in the background) can very easily skew the benchmark video streaming in the background, can very easily skew the benchmark
statistics. statistics.
Another problem with the `Benchmark` module is that it displays timings, not Another problem with the `Benchmark` module is that it displays timings, not
...@@ -114,17 +113,18 @@ the behaviour of suspect code in detail. ...@@ -114,17 +113,18 @@ the behaviour of suspect code in detail.
It's important to note that profiling an application *alters its performance*, It's important to note that profiling an application *alters its performance*,
and will generally be done *in an unrepresentative environment*. In particular, and will generally be done *in an unrepresentative environment*. In particular,
a method is not necessarily troublesome just because it is executed many times, a method is not necessarily troublesome just because it's executed many times,
or takes a long time to execute. Profiles are tools you can use to better or takes a long time to execute. Profiles are tools you can use to better
understand what is happening in an application - using that information wisely understand what is happening in an application - using that information wisely
is up to you! is up to you!
Keeping that in mind, to create a profile, identify (or create) a spec that Keeping that in mind, to create a profile, identify (or create) a spec that
exercises the troublesome code path, then run it using the `bin/rspec-stackprof` exercises the troublesome code path, then run it using the `bin/rspec-stackprof`
helper, e.g.: helper, for example:
```shell ```shell
$ LIMIT=10 bin/rspec-stackprof spec/policies/project_policy_spec.rb $ LIMIT=10 bin/rspec-stackprof spec/policies/project_policy_spec.rb
8/8 |====== 100 ======>| Time: 00:00:18 8/8 |====== 100 ======>| Time: 00:00:18
Finished in 18.19 seconds (files took 4.8 seconds to load) Finished in 18.19 seconds (files took 4.8 seconds to load)
...@@ -170,10 +170,11 @@ kcachegrind project_policy_spec.callgrind # Linux ...@@ -170,10 +170,11 @@ kcachegrind project_policy_spec.callgrind # Linux
qcachegrind project_policy_spec.callgrind # Mac qcachegrind project_policy_spec.callgrind # Mac
``` ```
It may be useful to zoom in on a specific method, e.g.: It may be useful to zoom in on a specific method, for example:
```shell ```shell
$ stackprof tmp/project_policy_spec.rb.dump --method warm_asset_cache $ stackprof tmp/project_policy_spec.rb.dump --method warm_asset_cache
TestEnv#warm_asset_cache (/Users/lupine/dev/gitlab.com/gitlab-org/gitlab-development-kit/gitlab/spec/support/test_env.rb:164) TestEnv#warm_asset_cache (/Users/lupine/dev/gitlab.com/gitlab-org/gitlab-development-kit/gitlab/spec/support/test_env.rb:164)
samples: 0 self (0.0%) / 6288 total (36.9%) samples: 0 self (0.0%) / 6288 total (36.9%)
callers: callers:
...@@ -239,6 +240,7 @@ shell: ...@@ -239,6 +240,7 @@ shell:
```shell ```shell
$ rake rspec_profiling:console $ rake rspec_profiling:console
irb(main):001:0> results.count irb(main):001:0> results.count
=> 231 => 231
irb(main):002:0> results.last.attributes.keys irb(main):002:0> results.last.attributes.keys
...@@ -257,9 +259,9 @@ One of the reasons of the increased memory footprint could be Ruby memory fragme ...@@ -257,9 +259,9 @@ One of the reasons of the increased memory footprint could be Ruby memory fragme
To diagnose it, you can visualize Ruby heap as described in [this post by Aaron Patterson](https://tenderlovemaking.com/2017/09/27/visualizing-your-ruby-heap.html). To diagnose it, you can visualize Ruby heap as described in [this post by Aaron Patterson](https://tenderlovemaking.com/2017/09/27/visualizing-your-ruby-heap.html).
To start, you want to dump the heap of the process you are investigating to a JSON file. To start, you want to dump the heap of the process you're investigating to a JSON file.
You need to run the command inside the process you are exploring, you may do that with `rbtrace`. You need to run the command inside the process you're exploring, you may do that with `rbtrace`.
`rbtrace` is already present in GitLab `Gemfile`, you just need to require it. `rbtrace` is already present in GitLab `Gemfile`, you just need to require it.
It could be achieved running webserver or Sidekiq with the environment variable set to `ENABLE_RBTRACE=1`. It could be achieved running webserver or Sidekiq with the environment variable set to `ENABLE_RBTRACE=1`.
...@@ -274,7 +276,7 @@ Having the JSON, you finally could render a picture using the script [provided b ...@@ -274,7 +276,7 @@ Having the JSON, you finally could render a picture using the script [provided b
```shell ```shell
ruby heapviz.rb heap.json ruby heapviz.rb heap.json
``` ```
Fragmented Ruby heap snapshot could look like this: Fragmented Ruby heap snapshot could look like this:
![Ruby heap fragmentation](img/memory_ruby_heap_fragmentation.png) ![Ruby heap fragmentation](img/memory_ruby_heap_fragmentation.png)
...@@ -295,11 +297,11 @@ There is no clear set of steps that you can follow to determine if a certain ...@@ -295,11 +297,11 @@ There is no clear set of steps that you can follow to determine if a certain
piece of code is worth optimizing. The only two things you can do are: piece of code is worth optimizing. The only two things you can do are:
1. Think about what the code does, how it's used, how many times it's called and 1. Think about what the code does, how it's used, how many times it's called and
how much time is spent in it relative to the total execution time (e.g., the how much time is spent in it relative to the total execution time (for example, the
total time spent in a web request). total time spent in a web request).
1. Ask others (preferably in the form of an issue). 1. Ask others (preferably in the form of an issue).
Some examples of changes that aren't really important/worth the effort: Some examples of changes that are not really important/worth the effort:
- Replacing double quotes with single quotes. - Replacing double quotes with single quotes.
- Replacing usage of Array with Set when the list of values is very small. - Replacing usage of Array with Set when the list of values is very small.
...@@ -309,7 +311,7 @@ Some examples of changes that aren't really important/worth the effort: ...@@ -309,7 +311,7 @@ Some examples of changes that aren't really important/worth the effort:
## Slow Operations & Sidekiq ## Slow Operations & Sidekiq
Slow operations (e.g. merging branches) or operations that are prone to errors Slow operations, like merging branches, or operations that are prone to errors
(using external APIs) should be performed in a Sidekiq worker instead of (using external APIs) should be performed in a Sidekiq worker instead of
directly in a web request as much as possible. This has numerous benefits such directly in a web request as much as possible. This has numerous benefits such
as: as:
...@@ -416,7 +418,7 @@ as omitting it may lead to style check failures. ...@@ -416,7 +418,7 @@ as omitting it may lead to style check failures.
## Anti-Patterns ## Anti-Patterns
This is a collection of [anti-patterns][anti-pattern] that should be avoided This is a collection of [anti-patterns][anti-pattern] that should be avoided
unless these changes have a measurable, significant and positive impact on unless these changes have a measurable, significant, and positive impact on
production environments. production environments.
### Moving Allocations to Constants ### Moving Allocations to Constants
...@@ -458,5 +460,4 @@ You may find some useful examples in this snippet: ...@@ -458,5 +460,4 @@ You may find some useful examples in this snippet:
<https://gitlab.com/gitlab-org/gitlab-foss/snippets/33946> <https://gitlab.com/gitlab-org/gitlab-foss/snippets/33946>
[#15607]: https://gitlab.com/gitlab-org/gitlab-foss/issues/15607 [#15607]: https://gitlab.com/gitlab-org/gitlab-foss/issues/15607
[yorickpeterse]: https://gitlab.com/yorickpeterse
[anti-pattern]: https://en.wikipedia.org/wiki/Anti-pattern [anti-pattern]: https://en.wikipedia.org/wiki/Anti-pattern
...@@ -163,19 +163,20 @@ container_scanning: ...@@ -163,19 +163,20 @@ container_scanning:
Container Scanning can be [configured](#overriding-the-container-scanning-template) Container Scanning can be [configured](#overriding-the-container-scanning-template)
using environment variables. using environment variables.
| Environment Variable | Description | Default | | Environment Variable | Description | Default |
| ------ | ------ | ------ | | ------ | ------ | ------ |
| `KLAR_TRACE` | Set to true to enable more verbose output from klar. | `"false"` | | `KLAR_TRACE` | Set to true to enable more verbose output from klar. | `"false"` |
| `DOCKER_USER` | Username for accessing a Docker registry requiring authentication. | `$CI_REGISTRY_USER` | | `DOCKER_USER` | Username for accessing a Docker registry requiring authentication. | `$CI_REGISTRY_USER` |
| `DOCKER_PASSWORD` | Password for accessing a Docker registry requiring authentication. | `$CI_REGISTRY_PASSWORD` | | `DOCKER_PASSWORD` | Password for accessing a Docker registry requiring authentication. | `$CI_REGISTRY_PASSWORD` |
| `CLAIR_OUTPUT` | Severity level threshold. Vulnerabilities with severity level higher than or equal to this threshold will be outputted. Supported levels are `Unknown`, `Negligible`, `Low`, `Medium`, `High`, `Critical` and `Defcon1`. | `Unknown` | | `CLAIR_OUTPUT` | Severity level threshold. Vulnerabilities with severity level higher than or equal to this threshold will be outputted. Supported levels are `Unknown`, `Negligible`, `Low`, `Medium`, `High`, `Critical` and `Defcon1`. | `Unknown` |
| `REGISTRY_INSECURE` | Allow [Klar](https://github.com/optiopay/klar) to access insecure registries (HTTP only). Should only be set to `true` when testing the image locally. | `"false"` | | `REGISTRY_INSECURE` | Allow [Klar](https://github.com/optiopay/klar) to access insecure registries (HTTP only). Should only be set to `true` when testing the image locally. | `"false"` |
| `CLAIR_VULNERABILITIES_DB_URL` | This variable is explicitly set in the [services section](https://gitlab.com/gitlab-org/gitlab/blob/30522ca8b901223ac8c32b633d8d67f340b159c1/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L17-19) of the `Container-Scanning.gitlab-ci.yml` file and defaults to `clair-vulnerabilities-db`. This value represents the address that the [postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) is running on and **shouldn't be changed** unless you're running the image locally as described in the [Running the scanning tool](https://gitlab.com/gitlab-org/security-products/analyzers/klar/#running-the-scanning-tool) section of the [GitLab klar analyzer readme](https://gitlab.com/gitlab-org/security-products/analyzers/klar). | `clair-vulnerabilities-db` | | `CLAIR_VULNERABILITIES_DB_URL` | (**DEPRECATED - use `CLAIR_DB_CONNECTION_STRING` instead**) This variable is explicitly set in the [services section](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) of the `Container-Scanning.gitlab-ci.yml` file and defaults to `clair-vulnerabilities-db`. This value represents the address that the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) is running on and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. | `clair-vulnerabilities-db` |
| `CI_APPLICATION_REPOSITORY` | Docker repository URL for the image to be scanned. | `$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG` | | `CLAIR_DB_CONNECTION_STRING` | This variable represents the [connection string](https://www.postgresql.org/docs/9.3/libpq-connect.html#AEN39692) to the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db) database and **shouldn't be changed** unless you're running the image locally as described in the [Running the standalone Container Scanning Tool](#running-the-standalone-container-scanning-tool) section. The host value for the connection string must match the [alias](https://gitlab.com/gitlab-org/gitlab/-/blob/898c5da43504eba87b749625da50098d345b60d6/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L23) value of the `Container-Scanning.gitlab-ci.yml` template file, which defaults to `clair-vulnerabilities-db`. | `postgresql://postgres:password@clair-vulnerabilities-db:5432/postgres?sslmode=disable&statement_timeout=60000` |
| `CI_APPLICATION_TAG` | Docker repository tag for the image to be scanned. | `$CI_COMMIT_SHA` | | `CI_APPLICATION_REPOSITORY` | Docker repository URL for the image to be scanned. | `$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG` |
| `CLAIR_DB_IMAGE` | The Docker image name and tag for the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes, or to refer to a locally hosted vulnerabilities database for an on-premise air-gapped installation. | `arminc/clair-db:latest` | | `CI_APPLICATION_TAG` | Docker repository tag for the image to be scanned. | `$CI_COMMIT_SHA` |
| `CLAIR_DB_IMAGE_TAG` | (**DEPRECATED - use `CLAIR_DB_IMAGE` instead**) The Docker image tag for the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes. | `latest` | | `CLAIR_DB_IMAGE` | The Docker image name and tag for the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes, or to refer to a locally hosted vulnerabilities database for an on-premise air-gapped installation. | `arminc/clair-db:latest` |
| `DOCKERFILE_PATH` | The path to the `Dockerfile` to be used for generating remediations. By default, the scanner will look for a file named `Dockerfile` in the root directory of the project, so this variable should only be configured if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. | `Dockerfile` | | `CLAIR_DB_IMAGE_TAG` | (**DEPRECATED - use `CLAIR_DB_IMAGE` instead**) The Docker image tag for the [Postgres server hosting the vulnerabilities definitions](https://hub.docker.com/r/arminc/clair-db). It can be useful to override this value with a specific version, for example, to provide a consistent set of vulnerabilities for integration testing purposes. | `latest` |
| `DOCKERFILE_PATH` | The path to the `Dockerfile` to be used for generating remediations. By default, the scanner will look for a file named `Dockerfile` in the root directory of the project, so this variable should only be configured if your `Dockerfile` is in a non-standard location, such as a subdirectory. See [Solutions for vulnerabilities](#solutions-for-vulnerabilities-auto-remediation) for more details. | `Dockerfile` |
## Security Dashboard ## Security Dashboard
...@@ -247,6 +248,40 @@ build_latest_vulnerabilities: ...@@ -247,6 +248,40 @@ build_latest_vulnerabilities:
The above template will work for a GitLab Docker registry running on a local installation, however, if you're using a non-GitLab Docker registry, you'll need to change the `$CI_REGISTRY` value and the `docker login` credentials to match the details of your local registry. The above template will work for a GitLab Docker registry running on a local installation, however, if you're using a non-GitLab Docker registry, you'll need to change the `$CI_REGISTRY` value and the `docker login` credentials to match the details of your local registry.
## Running the standalone Container Scanning Tool
It's possible to run the [GitLab Container Scanning Tool](https://gitlab.com/gitlab-org/security-products/analyzers/klar)
against a Docker container without needing to run it within the context of a CI job. To scan an
image directly, follow these steps:
1. Run [Docker Desktop](https://www.docker.com/products/docker-desktop) or [Docker Machine](https://github.com/docker/machine).
1. Run the latest [prefilled vulnerabilities database](https://cloud.docker.com/repository/docker/arminc/clair-db) Docker image:
```shell
docker run -p 5432:5432 -d --name clair-db arminc/clair-db:latest
```
1. Configure an environment variable to point to your local machine's IP address (or insert your IP address instead of the `LOCAL_MACHINE_IP_ADDRESS` variable in the `CLAIR_DB_CONNECTION_STRING` in the next step):
```shell
export LOCAL_MACHINE_IP_ADDRESS=your.local.ip.address
```
1. Run the analyzer's Docker image, passing the image and tag you want to analyze in the `CI_APPLICATION_REPOSITORY` and `CI_APPLICATION_TAG` environment variables:
```shell
docker run \
--interactive --rm \
--volume "$PWD":/tmp/app \
-e CI_PROJECT_DIR=/tmp/app \
-e CLAIR_DB_CONNECTION_STRING="postgresql://postgres:password@${LOCAL_MACHINE_IP_ADDRESS}:5432/postgres?sslmode=disable&statement_timeout=60000" \
-e CI_APPLICATION_REPOSITORY=registry.gitlab.com/gitlab-org/security-products/dast/webgoat-8.0@sha256 \
-e CI_APPLICATION_TAG=bc09fe2e0721dfaeee79364115aeedf2174cce0947b9ae5fe7c33312ee019a4e \
registry.gitlab.com/gitlab-org/security-products/analyzers/klar
```
The results are stored in `gl-container-scanning-report.json`.
## Reports JSON format ## Reports JSON format
CAUTION: **Caution:** CAUTION: **Caution:**
......
...@@ -307,6 +307,7 @@ DAST can be [configured](#customizing-the-dast-settings) using environment varia ...@@ -307,6 +307,7 @@ DAST can be [configured](#customizing-the-dast-settings) using environment varia
| `DAST_TARGET_AVAILABILITY_TIMEOUT` | no | Time limit in seconds to wait for target availability. Scan is attempted nevertheless if it runs out. Integer. Defaults to `60`. | | `DAST_TARGET_AVAILABILITY_TIMEOUT` | no | Time limit in seconds to wait for target availability. Scan is attempted nevertheless if it runs out. Integer. Defaults to `60`. |
| `DAST_FULL_SCAN_ENABLED` | no | Switches the tool to execute [ZAP Full Scan](https://github.com/zaproxy/zaproxy/wiki/ZAP-Full-Scan) instead of [ZAP Baseline Scan](https://github.com/zaproxy/zaproxy/wiki/ZAP-Baseline-Scan). Boolean. `true`, `True`, or `1` are considered as true value, otherwise false. Defaults to `false`. | | `DAST_FULL_SCAN_ENABLED` | no | Switches the tool to execute [ZAP Full Scan](https://github.com/zaproxy/zaproxy/wiki/ZAP-Full-Scan) instead of [ZAP Baseline Scan](https://github.com/zaproxy/zaproxy/wiki/ZAP-Baseline-Scan). Boolean. `true`, `True`, or `1` are considered as true value, otherwise false. Defaults to `false`. |
| `DAST_FULL_SCAN_DOMAIN_VALIDATION_REQUIRED` | no | Requires [domain validation](#domain-validation) when running DAST full scans. Boolean. `true`, `True`, or `1` are considered as true value, otherwise false. Defaults to `false`. | | `DAST_FULL_SCAN_DOMAIN_VALIDATION_REQUIRED` | no | Requires [domain validation](#domain-validation) when running DAST full scans. Boolean. `true`, `True`, or `1` are considered as true value, otherwise false. Defaults to `false`. |
| `DAST_AUTO_UPDATE_ADDONS` | no | Set to `false` to pin the versions of ZAProxy add-ons to those provided with the DAST image. Defaults to `true`. |
### DAST command-line options ### DAST command-line options
......
...@@ -483,34 +483,29 @@ A `serverless.yml` file is not required when deploying serverless applications. ...@@ -483,34 +483,29 @@ A `serverless.yml` file is not required when deploying serverless applications.
With all the pieces in place, the next time a CI pipeline runs, the Knative application will be deployed. Navigate to With all the pieces in place, the next time a CI pipeline runs, the Knative application will be deployed. Navigate to
**CI/CD > Pipelines** and click the most recent pipeline. **CI/CD > Pipelines** and click the most recent pipeline.
### Obtain the URL for the Knative deployment ### Function details
Go to the **CI/CD > Pipelines** and click on the pipeline that deployed your app. Once all the stages of the pipeline finish, click the **deploy** stage. Go to the **Operations > Serverless** page to see the final URL of your functions.
![deploy stage](img/deploy-stage.png) ![function_details](img/function-list_v12_7.png)
The output will look like this: ### Invocation metrics
```shell On the same page as above, click on one of the function
Running with gitlab-runner 12.1.0-rc1 (6da35412) rows to bring up the function details page.
on prm-com-gitlab-org ae3bfce3
Using Docker executor with image registry.gitlab.com/gitlab-org/gitlabktl:latest ... ![function_details](img/function-details-loaded.png)
Running on runner-ae3bfc-concurrent-0 via runner-ae3bfc ...
Fetching changes... The pod count will give you the number of pods running the serverless function instances on a given cluster.
Authenticating with credentials from job payload (GitLab Registry)
$ /usr/bin/gitlabktl application deploy
Welcome to gitlabktl tool
time="2019-07-15T10:51:07Z" level=info msg="deploying registry credentials"
Creating app-hello function
Waiting for app-hello ready state
Service app-hello URL: http://app-hello.serverless.example.com
Job succeeded
```
The second to last line, labeled **Service domain** contains the URL for the For the Knative function invocations to appear,
deployment. Copy and paste the domain into your browser to see the app live. [Prometheus must be installed](../index.md#installing-applications).
![knative app](img/knative-app.png) Once Prometheus is installed, a message may appear indicating that the metrics data _is
loading or is not available at this time._ It will appear upon the first access of the
page, but should go away after a few seconds. If the message does not disappear, then it
is possible that GitLab is unable to connect to the Prometheus instance running on the
cluster.
## Configuring logging ## Configuring logging
...@@ -559,26 +554,6 @@ Or: ...@@ -559,26 +554,6 @@ Or:
1. Click on **Discover**, then select `filebeat-*` from the dropdown on the left. 1. Click on **Discover**, then select `filebeat-*` from the dropdown on the left.
1. Enter `kubernetes.container.name:"queue-proxy" AND message:/httpRequest/` into the search box. 1. Enter `kubernetes.container.name:"queue-proxy" AND message:/httpRequest/` into the search box.
## Function details
Go to the **Operations > Serverless** page and click on one of the function
rows to bring up the function details page.
![function_details](img/function-details-loaded.png)
The pod count will give you the number of pods running the serverless function instances on a given cluster.
### Prometheus support
For the Knative function invocations to appear,
[Prometheus must be installed](../index.md#installing-applications).
Once Prometheus is installed, a message may appear indicating that the metrics data _is
loading or is not available at this time._ It will appear upon the first access of the
page, but should go away after a few seconds. If the message does not disappear, then it
is possible that GitLab is unable to connect to the Prometheus instance running on the
cluster.
## Enabling TLS for Knative services ## Enabling TLS for Knative services
By default, a GitLab serverless deployment will be served over `http`. In order to serve over `https` you By default, a GitLab serverless deployment will be served over `http`. In order to serve over `https` you
......
...@@ -121,6 +121,7 @@ module API ...@@ -121,6 +121,7 @@ module API
mount ::API::Commits mount ::API::Commits
mount ::API::CommitStatuses mount ::API::CommitStatuses
mount ::API::DeployKeys mount ::API::DeployKeys
mount ::API::DeployTokens
mount ::API::Deployments mount ::API::Deployments
mount ::API::Environments mount ::API::Environments
mount ::API::ErrorTracking mount ::API::ErrorTracking
......
# frozen_string_literal: true

module API
  # REST endpoints for listing deploy tokens.
  #
  # Every route in this class is admin-only: the `before` hook rejects
  # unauthenticated callers (401) and authenticated non-admins (403).
  class DeployTokens < Grape::API
    include PaginationParams

    # Runs before every route; raises unless the current user is an admin.
    before { authenticated_as_admin! }

    desc 'Return all deploy tokens' do
      detail 'This feature was introduced in GitLab 12.9.'
      success Entities::DeployToken
    end
    params do
      use :pagination
    end
    # GET /deploy_tokens
    #
    # Returns a paginated list of every deploy token on the instance,
    # rendered through Entities::DeployToken.
    get 'deploy_tokens' do
      present paginate(DeployToken.all), with: Entities::DeployToken
    end
  end
end
# frozen_string_literal: true

module API
  module Entities
    # JSON representation of a DeployToken record, as returned by the
    # admin-only `GET /deploy_tokens` endpoint.
    #
    # NOTE(review): this exposes the plaintext `token` secret in API
    # responses — confirm that returning the secret here is intentional.
    class DeployToken < Grape::Entity
      expose :id
      expose :name
      expose :username
      expose :expires_at
      expose :token
      expose :scopes
    end
  end
end
...@@ -7071,6 +7071,12 @@ msgstr "" ...@@ -7071,6 +7071,12 @@ msgstr ""
msgid "Email patch" msgid "Email patch"
msgstr "" msgstr ""
msgid "Email restrictions"
msgstr ""
msgid "Email restrictions for sign-ups"
msgstr ""
msgid "Email the pipelines status to a list of recipients." msgid "Email the pipelines status to a list of recipients."
msgstr "" msgstr ""
...@@ -7179,6 +7185,9 @@ msgstr "" ...@@ -7179,6 +7185,9 @@ msgstr ""
msgid "Enable classification control using an external service" msgid "Enable classification control using an external service"
msgstr "" msgstr ""
msgid "Enable email restrictions for sign ups"
msgstr ""
msgid "Enable error tracking" msgid "Enable error tracking"
msgstr "" msgstr ""
...@@ -16377,6 +16386,9 @@ msgstr "" ...@@ -16377,6 +16386,9 @@ msgstr ""
msgid "Restrict membership by email" msgid "Restrict membership by email"
msgstr "" msgstr ""
msgid "Restricts sign-ups for email addresses that match the given regex. See the %{supported_syntax_link_start}supported syntax%{supported_syntax_link_end} for more information."
msgstr ""
msgid "Resume" msgid "Resume"
msgstr "" msgstr ""
...@@ -23097,6 +23109,12 @@ msgstr "" ...@@ -23097,6 +23109,12 @@ msgstr ""
msgid "is not a valid X509 certificate." msgid "is not a valid X509 certificate."
msgstr "" msgstr ""
msgid "is not a valid regular expression"
msgstr ""
msgid "is not allowed for sign-up"
msgstr ""
msgid "is not an email you own" msgid "is not an email you own"
msgstr "" msgstr ""
......
...@@ -633,5 +633,56 @@ describe ApplicationSetting do ...@@ -633,5 +633,56 @@ describe ApplicationSetting do
end end
end end
# Validation of the `email_restrictions` application setting, which holds
# a regular expression used to block matching sign-up email addresses.
describe 'email_restrictions' do
  context 'when email restrictions are enabled' do
    before do
      subject.email_restrictions_enabled = true
    end

    it 'allows empty email restrictions' do
      subject.email_restrictions = ''

      expect(subject).to be_valid
    end

    it 'accepts valid email restrictions regex' do
      subject.email_restrictions = '\+'

      expect(subject).to be_valid
    end

    # A bare `+` is not a valid pattern, so the record must fail validation.
    it 'does not accept invalid email restrictions regex' do
      subject.email_restrictions = '+'

      expect(subject).not_to be_valid
    end

    it 'sets an error when regex is not valid' do
      subject.email_restrictions = '+'

      expect(subject).not_to be_valid
      expect(subject.errors.messages[:email_restrictions].first).to eq(_('is not a valid regular expression'))
    end
  end

  # The regex is validated even while the setting is disabled, so an
  # unparsable pattern can never be persisted.
  context 'when email restrictions are disabled' do
    before do
      subject.email_restrictions_enabled = false
    end

    it 'allows empty email restrictions' do
      subject.email_restrictions = ''

      expect(subject).to be_valid
    end

    it 'invalid regex is not valid' do
      subject.email_restrictions = '+'

      expect(subject).not_to be_valid
    end
  end
end
it_behaves_like 'application settings examples' it_behaves_like 'application settings examples'
end end
...@@ -430,6 +430,73 @@ describe User, :do_not_mock_admin_mode do ...@@ -430,6 +430,73 @@ describe User, :do_not_mock_admin_mode do
end end
end end
# Sign-up email validation against the `email_restrictions` application
# setting: addresses matching the configured regex are rejected.
context 'email restrictions' do
  context 'when email restriction is disabled' do
    before do
      stub_application_setting(email_restrictions_enabled: false)
      stub_application_setting(email_restrictions: '\+')
    end

    # With the setting disabled the regex is ignored entirely.
    it 'does accept email address' do
      user = build(:user, email: 'info+1@test.com')

      expect(user).to be_valid
    end
  end

  context 'when email restrictions is enabled' do
    before do
      stub_application_setting(email_restrictions_enabled: true)
      # Pattern rejects addresses containing `+` or a gitlab.com domain.
      stub_application_setting(email_restrictions: '([\+]|\b(\w*gitlab.com\w*)\b)')
    end

    it 'does not accept email address with + characters' do
      user = build(:user, email: 'info+1@test.com')

      expect(user).not_to be_valid
    end

    it 'does not accept email with a gitlab domain' do
      user = build(:user, email: 'info@gitlab.com')

      expect(user).not_to be_valid
    end

    it 'adds an error message when email is not accepted' do
      user = build(:user, email: 'info@gitlab.com')

      expect(user).not_to be_valid
      expect(user.errors.messages[:email].first).to eq(_('is not allowed for sign-up'))
    end

    it 'does accept a valid email address' do
      user = build(:user, email: 'info@test.com')

      expect(user).to be_valid
    end

    # The `email_restrictions` feature flag gates the check independently
    # of the application setting.
    context 'when feature flag is turned off' do
      before do
        stub_feature_flags(email_restrictions: false)
      end

      it 'does accept the email address' do
        user = build(:user, email: 'info+1@test.com')

        expect(user).to be_valid
      end
    end

    # Users created by another user (e.g. via admin/API) bypass the
    # sign-up restriction.
    context 'when created_by_id is set' do
      it 'does accept the email address' do
        user = build(:user, email: 'info+1@test.com', created_by_id: 1)

        expect(user).to be_valid
      end
    end
  end
end
context 'owns_notification_email' do context 'owns_notification_email' do
it 'accepts temp_oauth_email emails' do it 'accepts temp_oauth_email emails' do
user = build(:user, email: "temp-email-for-oauth@example.com") user = build(:user, email: "temp-email-for-oauth@example.com")
......
# frozen_string_literal: true

require 'spec_helper'

# Request specs for the admin-only deploy tokens API.
describe API::DeployTokens do
  let(:creator) { create(:user) }
  let(:project) { create(:project, creator_id: creator.id) }
  # `let!` so the token exists before the request is issued.
  let!(:deploy_token) { create(:deploy_token, projects: [project]) }

  describe 'GET /deploy_tokens' do
    subject { get api('/deploy_tokens', user) }

    context 'when unauthenticated' do
      let(:user) { nil }

      it 'rejects the response as unauthorized' do
        subject

        expect(response).to have_gitlab_http_status(:unauthorized)
      end
    end

    # Project creator is an ordinary user, so the admin-only endpoint
    # must refuse them.
    context 'when authenticated as non-admin user' do
      let(:user) { creator }

      it 'rejects the response as forbidden' do
        subject

        expect(response).to have_gitlab_http_status(:forbidden)
      end
    end

    context 'when authenticated as admin' do
      let(:user) { create(:admin) }

      it 'returns all deploy tokens' do
        subject

        expect(response).to have_gitlab_http_status(:ok)
        expect(response).to include_pagination_headers
        expect(json_response).to be_an Array
        expect(json_response.first['id']).to eq(deploy_token.id)
      end
    end
  end
end
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment