Commit 56a7627a authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 47d1f417
...@@ -55,8 +55,6 @@ class RegistrationsController < Devise::RegistrationsController
  def welcome
    return redirect_to new_user_registration_path unless current_user
    return redirect_to stored_location_or_dashboard(current_user) if current_user.role.present? && !current_user.setup_for_company.nil?
-   current_user.name = nil if current_user.name == current_user.username
  end

  def update_registration
...
---
title: Updated cluster-applications to v0.7.0
merge_request: 24754
author:
type: changed
...@@ -359,6 +359,7 @@ The following documentation relates to the DevOps **Secure** stage:

| Secure Topics | Description |
|:------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------|
| [Compliance Dashboard](user/application_security/compliance_dashboard/index.md) **(ULTIMATE)** | View the most recent Merge Request activity in a group. |
| [Container Scanning](user/application_security/container_scanning/index.md) **(ULTIMATE)** | Use Clair to scan docker images for known vulnerabilities. |
| [Dependency List](user/application_security/dependency_list/index.md) **(ULTIMATE)** | View your project's dependencies and their known vulnerabilities. |
| [Dependency Scanning](user/application_security/dependency_scanning/index.md) **(ULTIMATE)** | Analyze your dependencies for known vulnerabilities. |
...
...@@ -82,7 +82,7 @@ on the amount of data indexed).
To keep naming of these indexes consistent, please use the following naming
pattern:

```plaintext
index_TABLE_on_COLUMN_trigram
```
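For example, a trigram index on a hypothetical `users.name` column that follows this pattern could be created as follows (assuming the `pg_trgm` extension is enabled; the table and column are illustrative):

```sql
-- The name follows the index_TABLE_on_COLUMN_trigram pattern described above.
-- `users.name` is an illustrative table/column; requires the pg_trgm extension.
CREATE INDEX CONCURRENTLY index_users_on_name_trigram
ON users USING gin (name gin_trgm_ops);
```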
...
...@@ -126,7 +126,7 @@ To resume the test run, press any key.
For example:

```shell
$ bin/rspec spec/features/auto_deploy_spec.rb:34
Running via Spring preloader in process 8999
Run options: include {:locations=>{"./spec/features/auto_deploy_spec.rb"=>[34]}}

...@@ -147,7 +147,7 @@ Note: `live_debug` only works on JavaScript enabled specs.
Run the spec with `CHROME_HEADLESS=0`, e.g.:

```shell
CHROME_HEADLESS=0 bin/rspec some_spec.rb
```

...@@ -242,7 +242,7 @@ This can be achieved by using
[`before_all`](https://test-prof.evilmartians.io/#/before_all) hook
from the [`test-prof` gem](https://rubygems.org/gems/test-prof).

```ruby
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }

...@@ -260,14 +260,14 @@ Note that if you modify an object defined inside a `let_it_be` block,
then you will need to reload the object as needed, or specify the `reload`
option to reload for every example.

```ruby
let_it_be(:project, reload: true) { create(:project) }
```

You can also specify the `refind` option to completely load a
new object.

```ruby
let_it_be(:project, refind: true) { create(:project) }
```

...@@ -411,7 +411,7 @@ cause issues depending on the developer's local network. There are RSpec labels
available in `spec/support/dns.rb` which you can apply to tests if you need to
bypass the DNS stubbing, e.g.:

```ruby
it "really connects to Prometheus", :permit_dns do
```

...
...@@ -230,10 +230,10 @@ Look at a recent `review-deploy` job log, and at the Tiller logs.

```shell
# Identify if node spikes are common or load on specific nodes which may get rebalanced by the Kubernetes scheduler
kubectl top nodes | sort --key 3 --numeric

# Identify pods under heavy CPU load
kubectl top pods | sort --key 2 --numeric
```

### The `logging/user/events/FailedMount` chart is going up

...@@ -251,21 +251,21 @@ Any secrets or config maps older than 5 days are suspect and should be deleted.

**Useful commands:**

```shell
# List secrets and config maps ordered by created date
kubectl get secret,cm --sort-by='{.metadata.creationTimestamp}' | grep 'review-'

# Delete all secrets that are 5 to 9 days old
kubectl get secret --sort-by='{.metadata.creationTimestamp}' | grep '^review-' | grep '[5-9]d$' | cut -d' ' -f1 | xargs kubectl delete secret

# Delete all secrets that are 10 to 99 days old
kubectl get secret --sort-by='{.metadata.creationTimestamp}' | grep '^review-' | grep '[1-9][0-9]d$' | cut -d' ' -f1 | xargs kubectl delete secret

# Delete all config maps that are 5 to 9 days old
kubectl get cm --sort-by='{.metadata.creationTimestamp}' | grep 'review-' | grep -v 'dns-gitlab-review-app' | grep '[5-9]d$' | cut -d' ' -f1 | xargs kubectl delete cm

# Delete all config maps that are 10 to 99 days old
kubectl get cm --sort-by='{.metadata.creationTimestamp}' | grep 'review-' | grep -v 'dns-gitlab-review-app' | grep '[1-9][0-9]d$' | cut -d' ' -f1 | xargs kubectl delete cm
```

### Using K9s

...@@ -294,7 +294,7 @@ This in turn prevented other components of the Review App to properly start
After some digging, we found that new mounts were failing, when being performed
with transient scopes (e.g. pods) of `systemd-mount`:

```plaintext
MountVolume.SetUp failed for volume "dns-gitlab-review-app-external-dns-token-sj5jm" : mount failed: exit status 1
Mounting command: systemd-run
Mounting arguments: --description=Kubernetes transient mount for /var/lib/kubelet/pods/06add1c3-87b4-11e9-80a9-42010a800107/volumes/kubernetes.io~secret/dns-gitlab-review-app-external-dns-token-sj5jm --scope -- mount -t tmpfs tmpfs /var/lib/kubelet/pods/06add1c3-87b4-11e9-80a9-42010a800107/volumes/kubernetes.io~secret/dns-gitlab-review-app-external-dns-token-sj5jm

...@@ -342,7 +342,7 @@ clean up the list of non-`Running` pods.
Following is a command to delete Review Apps based on their last deployment date
(current date was June 6th at the time) with

```shell
helm ls -d | grep "Jun 4" | cut -f1 | xargs helm delete --purge
```

...
...@@ -14,7 +14,7 @@ WHERE visibility_level IN (0, 20);
When running this on GitLab.com, we are presented with the following output:

```sql
Aggregate (cost=922411.76..922411.77 rows=1 width=8)
  ->  Seq Scan on projects (cost=0.00..908044.47 rows=5746914 width=0)
        Filter: (visibility_level = ANY ('{0,20}'::integer[]))

...@@ -35,7 +35,7 @@ WHERE visibility_level IN (0, 20);
This will produce:

```sql
Aggregate (cost=922420.60..922420.61 rows=1 width=8) (actual time=3428.535..3428.535 rows=1 loops=1)
  ->  Seq Scan on projects (cost=0.00..908053.18 rows=5746969 width=0) (actual time=0.041..2987.606 rows=5746940 loops=1)
        Filter: (visibility_level = ANY ('{0,20}'::integer[]))

...@@ -69,7 +69,7 @@ WHERE visibility_level IN (0, 20);
This will then produce:

```sql
Aggregate (cost=922420.60..922420.61 rows=1 width=8) (actual time=3428.535..3428.535 rows=1 loops=1)
  Buffers: shared hit=208846
  ->  Seq Scan on projects (cost=0.00..908053.18 rows=5746969 width=0) (actual time=0.041..2987.606 rows=5746940 loops=1)

...@@ -105,7 +105,7 @@ aggregate(
Nodes are indicated using a `->` followed by the type of node taken. For
example:

```sql
Aggregate (cost=922411.76..922411.77 rows=1 width=8)
  ->  Seq Scan on projects (cost=0.00..908044.47 rows=5746914 width=0)
        Filter: (visibility_level = ANY ('{0,20}'::integer[]))

...@@ -119,7 +119,7 @@ above it.
Nested nodes will look like this:

```sql
Aggregate (cost=176.97..176.98 rows=1 width=8) (actual time=0.252..0.252 rows=1 loops=1)
  Buffers: shared hit=155
  ->  Nested Loop (cost=0.86..176.75 rows=87 width=0) (actual time=0.035..0.249 rows=36 loops=1)

...@@ -142,7 +142,7 @@ Here we first perform two separate "Index Only" scans, followed by performing a
Each node in a plan has a set of associated statistics, such as the cost, the
number of rows produced, the number of loops performed, and more. For example:

```sql
Seq Scan on projects (cost=0.00..908044.47 rows=5746914 width=0)
```

...@@ -157,7 +157,7 @@ influences the costs depends on a variety of settings, such as `seq_page_cost`,
`cpu_tuple_cost`, and various others.

The format of the costs field is as follows:

```sql
STARTUP COST..TOTAL COST
```

...@@ -169,7 +169,7 @@ When using `EXPLAIN ANALYZE`, these statistics will also include the actual time
(in milliseconds) spent, and other runtime statistics (e.g. the actual number of
produced rows):

```sql
Seq Scan on projects (cost=0.00..908053.18 rows=5746969 width=0) (actual time=0.041..2987.606 rows=5746940 loops=1)
```

...@@ -181,7 +181,7 @@ Using `EXPLAIN (ANALYZE, BUFFERS)` will also give us information about the
number of rows removed by a filter, the number of buffers used, and more. For
example:

```sql
Seq Scan on projects (cost=0.00..908053.18 rows=5746969 width=0) (actual time=0.041..2987.606 rows=5746940 loops=1)
  Filter: (visibility_level = ANY ('{0,20}'::integer[]))
  Rows Removed by Filter: 65677
...@@ -245,7 +245,7 @@ Sorts the input rows as specified using an `ORDER BY` statement.
A nested loop will execute its child nodes for every row produced by a node that
precedes it. For example:

```sql
->  Nested Loop (cost=0.86..176.75 rows=87 width=0) (actual time=0.035..0.249 rows=36 loops=1)
      Buffers: shared hit=155
      ->  Index Only Scan using users_pkey on users users_1 (cost=0.43..4.95 rows=87 width=4) (actual time=0.029..0.123 rows=36 loops=1)

...@@ -287,7 +287,7 @@ WHERE twitter != '';
This will produce the following plan:

```sql
Aggregate (cost=845110.21..845110.22 rows=1 width=8) (actual time=1271.157..1271.158 rows=1 loops=1)
  Buffers: shared hit=202662
  ->  Seq Scan on users (cost=0.00..844969.99 rows=56087 width=0) (actual time=0.019..1265.883 rows=51833 loops=1)

...@@ -312,7 +312,7 @@ on the `users` table that we might be able to use. We can obtain this
information by running `\d users` in a `psql` console, then scrolling down to
the `Indexes:` section:

```sql
Indexes:
    "users_pkey" PRIMARY KEY, btree (id)
    "users_confirmation_token_key" UNIQUE CONSTRAINT, btree (confirmation_token)

...@@ -347,7 +347,7 @@ CREATE INDEX CONCURRENTLY twitter_test ON users (twitter);
If we now re-run our query using `EXPLAIN (ANALYZE, BUFFERS)` we get the
following plan:

```sql
Aggregate (cost=61002.82..61002.83 rows=1 width=8) (actual time=297.311..297.312 rows=1 loops=1)
  Buffers: shared hit=51854 dirtied=19
  ->  Index Only Scan using twitter_test on users (cost=0.43..60873.13 rows=51877 width=0) (actual time=279.184..293.532 rows=51833 loops=1)

...@@ -364,7 +364,7 @@ seconds. However, we still use 51,854 buffers, which is about 400 MB of memory.
300 milliseconds is also quite slow for such a simple query. To understand why
this query is still expensive, let's take a look at the following:

```sql
Index Only Scan using twitter_test on users (cost=0.43..60873.13 rows=51877 width=0) (actual time=279.184..293.532 rows=51833 loops=1)
  Filter: ((twitter)::text <> ''::text)
  Rows Removed by Filter: 2487830

...@@ -401,7 +401,7 @@ CREATE INDEX CONCURRENTLY twitter_test ON users (twitter) WHERE twitter != '';
Once created, if we run our query again we will be given the following plan:

```sql
Aggregate (cost=1608.26..1608.27 rows=1 width=8) (actual time=19.821..19.821 rows=1 loops=1)
  Buffers: shared hit=44036
  ->  Index Only Scan using twitter_test on users (cost=0.41..1479.71 rows=51420 width=0) (actual time=0.023..15.514 rows=51833 loops=1)

...@@ -438,7 +438,7 @@ WHERE visibility_level IN (0, 20);
The output of `EXPLAIN (ANALYZE, BUFFERS)` is as follows:

```sql
Aggregate (cost=922420.60..922420.61 rows=1 width=8) (actual time=3428.535..3428.535 rows=1 loops=1)
  Buffers: shared hit=208846
  ->  Seq Scan on projects (cost=0.00..908053.18 rows=5746969 width=0) (actual time=0.041..2987.606 rows=5746940 loops=1)

...@@ -451,7 +451,7 @@ Execution time: 3428.596 ms
Looking at the output we see the following Filter:

```sql
Filter: (visibility_level = ANY ('{0,20}'::integer[]))
Rows Removed by Filter: 65677
```
...@@ -481,7 +481,7 @@ ORDER BY visibility_level ASC;
For GitLab.com this produces:

```sql
 visibility_level | amount
------------------+---------
                0 | 5071325

...@@ -528,7 +528,7 @@ interacted with somehow?
Fortunately, GitLab has an answer for this, and it's a table called
`user_interacted_projects`. This table has the following schema:

```sql
Table "public.user_interacted_projects"
   Column   |  Type   | Modifiers
------------+---------+-----------

...@@ -564,7 +564,7 @@ What we do here is the following:
If we run this query we get the following plan:

```sql
Aggregate (cost=871.03..871.04 rows=1 width=8) (actual time=9.763..9.763 rows=1 loops=1)
  ->  Nested Loop (cost=0.86..870.52 rows=203 width=0) (actual time=1.072..9.748 rows=143 loops=1)
        ->  Index Scan using index_user_interacted_projects_on_user_id on user_interacted_projects (cost=0.43..160.71 rows=205 width=4) (actual time=0.939..2.508 rows=145 loops=1)

...@@ -580,7 +580,7 @@ If we run this query we get the following plan:
Here it only took us just under 10 milliseconds to get the data. We can also see
we're retrieving far fewer projects:

```sql
Index Scan using projects_pkey on projects (cost=0.43..3.45 rows=1 width=4) (actual time=0.049..0.050 rows=1 loops=145)
  Index Cond: (id = user_interacted_projects.project_id)
  Filter: (visibility_level = ANY ('{0,20}'::integer[]))

...@@ -592,14 +592,14 @@ Here we see we perform 145 loops (`loops=145`), with every loop producing 1 row
If we look at the plan we also see our costs are very low:

```sql
Index Scan using projects_pkey on projects (cost=0.43..3.45 rows=1 width=4) (actual time=0.049..0.050 rows=1 loops=145)
```

Here our cost is only 3.45, and it only takes us 0.050 milliseconds to do so.
The next index scan is a bit more expensive:

```sql
Index Scan using index_user_interacted_projects_on_user_id on user_interacted_projects (cost=0.43..160.71 rows=205 width=4) (actual time=0.939..2.508 rows=145 loops=1)
```

...@@ -609,7 +609,7 @@ Here the cost is 160.71 (`cost=0.43..160.71`), taking about 2.5 milliseconds
The most expensive part here is the "Nested Loop" that acts upon the result of
these two index scans:

```sql
Nested Loop (cost=0.86..870.52 rows=203 width=0) (actual time=1.072..9.748 rows=143 loops=1)
```

...@@ -687,13 +687,13 @@ Execution time: 0.113 ms
`/chatops` slash command](chatops_on_gitlabcom.md).

You can use chatops to get a query plan by running the following:

```sql
/chatops run explain SELECT COUNT(*) FROM projects WHERE visibility_level IN (0, 20)
```

Visualising the plan using <https://explain.depesz.com/> is also supported:

```sql
/chatops run explain --visual SELECT COUNT(*) FROM projects WHERE visibility_level IN (0, 20)
```

...@@ -701,7 +701,7 @@ Quoting the query is not necessary.
For more information about the available options, run:

```sql
/chatops run explain --help
```

...@@ -714,31 +714,31 @@ For example, in order to test new index you can do the following:
Create the index:

```sql
exec CREATE INDEX index_projects_marked_for_deletion ON projects (marked_for_deletion_at) WHERE marked_for_deletion_at IS NOT NULL
```

Analyze the table to update its statistics:

```sql
exec ANALYZE projects
```

Get the query plan:

```sql
explain SELECT * FROM projects WHERE marked_for_deletion_at < CURRENT_DATE
```

Once done, you can roll back your changes:

```sql
reset
```

For more information about the available options, run:

```sql
help
```

...
---
type: reference, howto
---
# Compliance Dashboard **(ULTIMATE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/36524) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 12.8.
The Compliance Dashboard lets you see a group's Merge Request activity by providing a high-level
view across all projects in the group. For example, you can see which code was approved for merging
into production.
## Overview
To access the Compliance Dashboard for a group, navigate to **{shield}** **Security & Compliance > Compliance** on the group's menu.
![Compliance Dashboard](img/compliance_dashboard_v12_8.png)
## Use cases
This feature is for people who care about the compliance status of projects within their group.
You can use the dashboard to:
- Get an overview of the latest Merge Request for each project.
- See if Merge Requests were approved and by whom.
## Permissions
- On [GitLab Ultimate](https://about.gitlab.com/pricing/) tier.
- By **Administrators** and **Group Owners**.
...@@ -4,26 +4,22 @@ type: reference, howto

# GitLab Secure **(ULTIMATE)**

GitLab can check your application for security vulnerabilities that may lead to unauthorized access,
data leaks, denial of services, and more. GitLab reports vulnerabilities in the merge request so you
can fix them before merging. The [Security Dashboard](security_dashboard/index.md) provides a
high-level view of vulnerabilities detected in your projects, pipeline, and groups. With the
information provided, you can immediately begin risk analysis and remediation.

<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
For an overview of application security with GitLab, see
[Security Deep Dive](https://www.youtube.com/watch?v=k4vEJnGYy84).

## Security scanning tools

GitLab uses the following tools to scan and report known vulnerabilities found in your project.

| Secure scanning tool | Description |
|:-----------------------------------------------------------------------------|:-----------------------------------------------------------------------|
| [Compliance Dashboard](compliance_dashboard/index.md) **(ULTIMATE)** | View the most recent Merge Request activity in a group. |
| [Container Scanning](container_scanning/index.md) **(ULTIMATE)** | Scan Docker containers for known vulnerabilities. |
| [Dependency List](dependency_list/index.md) **(ULTIMATE)** | View your project's dependencies and their known vulnerabilities. |
| [Dependency Scanning](dependency_scanning/index.md) **(ULTIMATE)** | Analyze your dependencies for known vulnerabilities. |
...@@ -34,26 +30,22 @@ GitLab can scan and report any vulnerabilities found in your project.

## Maintenance and update of the vulnerabilities database

The scanning tools and vulnerabilities database are updated regularly.

| Secure scanning tool | Vulnerabilities database updates |
|:-------------------------------------------------------------|-------------------------------------------|
| [Container Scanning](container_scanning/index.md) | Uses `clair`. The latest `clair-db` version is used for each job by running the [`latest` docker image tag](https://gitlab.com/gitlab-org/gitlab/blob/438a0a56dc0882f22bdd82e700554525f552d91b/lib/gitlab/ci/templates/Security/Container-Scanning.gitlab-ci.yml#L37). The `clair-db` database [is updated daily according to the author](https://github.com/arminc/clair-local-scan#clair-server-or-local). |
| [Dependency Scanning](dependency_scanning/index.md) | Relies on `bundler-audit` (for Rubygems), `retire.js` (for NPM packages), and `gemnasium` (GitLab's own tool for all libraries). Both `bundler-audit` and `retire.js` fetch their vulnerabilities data from GitHub repositories, so vulnerabilities added to `ruby-advisory-db` and `retire.js` are immediately available. The tools themselves are updated once per month if there's a new version. The [Gemnasium DB](https://gitlab.com/gitlab-org/security-products/gemnasium-db) is updated at least once a week. |
| [Dynamic Application Security Testing (DAST)](dast/index.md) | The scanning engine is updated on a periodic basis. See the [version of the underlying tool `zaproxy`](https://gitlab.com/gitlab-org/security-products/dast/blob/master/Dockerfile#L1). The scanning rules are downloaded at scan runtime. |
| [Static Application Security Testing (SAST)](sast/index.md) | Relies exclusively on [the tools GitLab wraps](sast/index.md#supported-languages-and-frameworks). The underlying analyzers are updated at least once per month if a relevant update is available. The vulnerabilities database is updated by the upstream tools. |

Currently, you do not have to update GitLab to benefit from the latest vulnerabilities definitions.
The security tools are released as Docker images. The vendored job definitions to enable them use
the `x-y-stable` image tags that get overridden each time a new release of the tools is pushed. The
Docker images are updated to match the previous GitLab releases, so users automatically get the
latest versions of the scanning tools without having to do anything. There are some known issues
with this approach, however, and there is a
[plan to resolve them](https://gitlab.com/gitlab-org/gitlab/issues/9725).
## Interacting with the vulnerabilities

...@@ -63,14 +55,14 @@ CAUTION: **Warning:**
This feature is currently [Alpha](https://about.gitlab.com/handbook/product/#alpha-beta-ga) and while you can start using it, it may receive important changes in the future.

Each security vulnerability in the merge request report or the
[Security Dashboard](security_dashboard/index.md) is actionable. Click an entry to view detailed
information with several options:

- [Dismiss vulnerability](#dismissing-a-vulnerability): Dismissing a vulnerability styles it in
  strikethrough.
- [Create issue](#creating-an-issue-for-a-vulnerability): Create a new issue with the title and
  description prepopulated with information from the vulnerability report. By default, such issues
  are [confidential](../project/issues/confidential_issues.md).
- [Solution](#solutions-for-vulnerabilities-auto-remediation): For some vulnerabilities,
  a solution is provided for how to fix the vulnerability.

...@@ -88,8 +80,8 @@ If you wish to undo this dismissal, you can click the **Undo dismiss** button.
When dismissing a vulnerability, it's often helpful to provide a reason for doing so.
If you press the comment button next to **Dismiss vulnerability** in the modal,
a text box appears for you to add a comment with your dismissal.
Once added, you can edit or delete it. This allows you to add and update
context for a vulnerability as you learn more over time.

![Dismissed vulnerability comment](img/dismissed_info_v12_3.png)

...@@ -97,16 +89,16 @@ context for a vulnerability as you learn more over time.

### Creating an issue for a vulnerability

You can create an issue for a vulnerability by selecting the **Create issue**
button from within the vulnerability modal, or by using the action buttons to the right of
a vulnerability row in the group security dashboard.

This creates a [confidential issue](../project/issues/confidential_issues.md) in the project the
vulnerability came from, and prepopulates it with some useful information taken from the vulnerability
report. Once the issue is created, you are redirected to it so you can edit, assign, or comment on
it.

Upon returning to the group security dashboard, the vulnerability now has an associated issue next
to the name.

![Linked issue in the group security dashboard](img/issue.png)
...@@ -126,7 +118,7 @@ automatically generates. The following scanners are supported:
Some vulnerabilities can be fixed by applying a patch that is automatically
generated by GitLab. To apply the fix:

1. Click the vulnerability.
1. Download and review the patch file `remediation.patch`.
1. Ensure your local project has the same commit checked out that was used to generate the patch.
1. Run `git apply remediation.patch`.

...@@ -138,13 +130,13 @@ generated by GitLab. To apply the fix:

> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/9224) in [GitLab Ultimate](https://about.gitlab.com/pricing/) 11.9.

In certain cases, GitLab allows you to create a merge request that automatically remediates the
vulnerability. Any vulnerability that has a
[solution](#solutions-for-vulnerabilities-auto-remediation) can have a merge
request created to automatically solve the issue.

If this action is available, the vulnerability modal contains a **Create merge request** button.
Click this button to create a merge request to apply the solution onto the source branch.

![Create merge request from vulnerability](img/create_issue_with_list_hover.png)

...@@ -155,30 +147,29 @@ Clicking on this button will create a merge request to apply the solution onto t
Merge Request Approvals can be configured to require approval from a member of your
security team when a merge request would introduce one of the following security issues:

- A security vulnerability
- A software license compliance violation

This threshold is defined as `high`, `critical`, or `unknown` severity. When any vulnerabilities are
present within a merge request, an approval is required from the `Vulnerability-Check` approver
group.
### Enabling Security Approvals within a project

To enable Security Approvals, a [project approval rule](../project/merge_requests/merge_request_approvals.md#multiple-approval-rules-premium)
must be created with the case-sensitive name `Vulnerability-Check`. This approval group must be set
with the number of approvals required greater than zero.

Once this group is added to your project, the approval rule is enabled for all merge requests.

Any code changes cause the approvals required to reset.

An approval is required when a security report:

- Contains a new vulnerability of `high`, `critical`, or `unknown` severity.
- Is not generated during pipeline execution.

An approval is optional when a security report:

- Contains no new vulnerabilities.
- Contains only new vulnerabilities of `low` or `medium` severity.

...@@ -186,22 +177,22 @@ An approval will be optional when a security report:

### Enabling License Approvals within a project

To enable License Approvals, a [project approval rule](../project/merge_requests/merge_request_approvals.md#multiple-approval-rules-premium)
must be created with the case-sensitive name `License-Check`. This approval group must be set
with the number of approvals required greater than zero.

Once this group is added to your project, the approval rule is enabled for all Merge Requests. To
configure how this rule behaves, you can choose which licenses to `approve` or `blacklist` in the
[project policies for License Compliance](license_compliance/index.md#project-policies-for-license-compliance)
section.

Any code changes cause the approvals required to reset.

An approval is required when a license report:

- Contains a dependency that includes a software license that is `blacklisted`.
- Is not generated during pipeline execution.

An approval is optional when a license report:

- Contains no software license violations.
- Contains only new licenses that are `approved` or unknown.
...@@ -211,7 +202,7 @@ An approval will be optional when a license report:

### Getting error message `sast job: stage parameter should be [some stage name here]`

When including a security job template like [`SAST`](sast/index.md#configuration),
the following error may occur, depending on your GitLab CI/CD configuration:

```plaintext
Found errors in your .gitlab-ci.yml:
...@@ -219,8 +210,7 @@ Found errors in your .gitlab-ci.yml:
* sast job: stage parameter should be unit-tests
```

This error appears when the included job's stage (named `test`) isn't declared in `.gitlab-ci.yml`.

To fix this issue, you can either:

- Add a `test` stage in your `.gitlab-ci.yml` (see the sketch at the end of this section).

...@@ -235,5 +225,4 @@ To fix this issue, you can either:
```

[Learn more on overriding the SAST template](sast/index.md#overriding-the-sast-template).

All the security scanning tools define their stage, so this error can occur with all of them.
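For the first fix, a minimal sketch of a `.gitlab-ci.yml` that declares the `test` stage alongside an included template could look like the following; the template name here is an assumption, so use whichever security template you actually include:

```yaml
# Sketch only: declare the `test` stage so the included SAST jobs have a stage to run in.
include:
  - template: SAST.gitlab-ci.yml

stages:
  - test
```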
...@@ -487,6 +487,7 @@ Supported applications:

- [Sentry](#install-sentry-using-gitlab-ci)
- [GitLab Runner](#install-gitlab-runner-using-gitlab-ci)
- [Cilium](#install-cilium-using-gitlab-ci)
- [JupyterHub](#install-jupyterhub-using-gitlab-ci)

### Usage

...@@ -749,6 +750,47 @@ agent:
  enabled: false
```
### Install JupyterHub using GitLab CI
> [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/merge_requests/40) in GitLab 12.8.
Enable JupyterHub in the `.gitlab/managed-apps/config.yaml` file to install it:
```yaml
jupyterhub:
  installed: true
  gitlabProjectIdWhitelist: []
  gitlabGroupWhitelist: []
```
`gitlabProjectIdWhitelist` restricts GitLab authentication to members of the specified projects, and `gitlabGroupWhitelist` restricts it to members of the specified groups. Specifying an empty array for both allows any user on the GitLab instance to log in.
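For illustration, a hypothetical configuration that restricts sign-in to two projects and one group could look like this (the project IDs and group path are placeholders):

```yaml
# Hypothetical values: replace the project IDs and group path with your own.
jupyterhub:
  installed: true
  gitlabProjectIdWhitelist: [12345, 67890]
  gitlabGroupWhitelist: ["my-group"]
```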
JupyterHub is installed into the `gitlab-managed-apps` namespace of your
cluster.
For JupyterHub to function, you must set up an [OAuth Application](../../integration/oauth_provider.md) with the following values:
- "Redirect URI" to `http://<JupyterHub Host>/hub/oauth_callback`
- "Scope" to `api read_repository write_repository`
In addition, the following variables must be specified using [CI variables](../../ci/variables/README.md); a sketch for generating the random secrets follows the list:
- `JUPYTERHUB_PROXY_SECRET_TOKEN` sets [`proxy.secretToken`](https://zero-to-jupyterhub.readthedocs.io/en/stable/reference.html#proxy-secrettoken). Generate this using `openssl rand -hex 32`.
- `JUPYTERHUB_COOKIE_SECRET` sets [`hub.cookieSecret`](https://zero-to-jupyterhub.readthedocs.io/en/stable/reference.html#hub-cookiesecret). Generate this using `openssl rand -hex 32`.
- `JUPYTERHUB_HOST` is the hostname used for the installation (for example, `jupyter.example.gitlab.com`).
- `JUPYTERHUB_GITLAB_HOST` is the hostname of the GitLab instance used for authentication (for example, `example.gitlab.com`).
- `JUPYTERHUB_AUTH_CRYPTO_KEY` sets [`auth.state.cryptoKey`](https://zero-to-jupyterhub.readthedocs.io/en/stable/reference.html#auth-state-cryptokey). Generate this using `openssl rand -hex 32`.
- `JUPYTERHUB_AUTH_GITLAB_CLIENT_ID` is the "Application ID" of the OAuth Application.
- `JUPYTERHUB_AUTH_GITLAB_CLIENT_SECRET` is the "Secret" of the OAuth Application.
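For example, the three random secrets above can be generated locally with `openssl`; copy each value into the corresponding CI variable:

```shell
# Each command prints a 64-character hex string; use one value per variable.
openssl rand -hex 32  # JUPYTERHUB_PROXY_SECRET_TOKEN
openssl rand -hex 32  # JUPYTERHUB_COOKIE_SECRET
openssl rand -hex 32  # JUPYTERHUB_AUTH_CRYPTO_KEY
```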
By default, JupyterHub is installed using a
[default values file](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/-/blob/master/src/default-data/jupyterhub/values.yaml.gotmpl).
You can customize the installation by defining a
`.gitlab/managed-apps/jupyterhub/values.yaml` file in your cluster management
project. Refer to the
[chart reference](https://zero-to-jupyterhub.readthedocs.io/en/stable/reference.html)
for the available configuration options.
## Upgrading applications

> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/24789) in GitLab 11.8.
...
...@@ -131,6 +131,9 @@ Below are the shared Runners settings.
The full contents of our `config.toml` are:

NOTE: **Note:**
Settings that are not public are shown as `X`.

**Google Cloud Platform**

```toml
...
apply:
  stage: deploy
  image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.7.0"
  environment:
    name: production
  variables:

...@@ -11,6 +11,7 @@ apply:
    SENTRY_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/sentry/values.yaml
    GITLAB_RUNNER_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/gitlab-runner/values.yaml
    CILIUM_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/cilium/values.yaml
    JUPYTERHUB_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/jupyterhub/values.yaml
  script:
    - gitlab-managed-apps /usr/local/share/gitlab-managed-apps/helmfile.yaml
  only:
...
# frozen_string_literal: true

RSpec.shared_examples 'a BulkInsertSafe model' do |klass|
  # Call `.dup` on the class passed in, as a test in this set of examples
  # calls `belongs_to` on the class, thereby adding a new belongs_to
  # relationship to the model that can break remaining specs in the test suite.
  let(:target_class) { klass.dup }

  # We consider all callbacks unsafe for bulk insertions unless we have explicitly
  # whitelisted them (esp. anything related to :save, :create, :commit etc.)
  let(:callback_method_blacklist) do
...
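For context, a hypothetical spec that includes these shared examples could look like the following; the model name is a placeholder:

```ruby
# Hypothetical usage: `MyBulkInsertableModel` stands in for a real ActiveRecord
# model that includes BulkInsertSafe. The shared examples receive the class
# itself, and the `let` above dup's it so that the `belongs_to` added during
# these specs does not leak into the rest of the suite.
RSpec.describe MyBulkInsertableModel, type: :model do
  it_behaves_like 'a BulkInsertSafe model', MyBulkInsertableModel
end
```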