Commit 90a06a20 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 84a0e65a
......@@ -6,9 +6,9 @@
/doc/ @axil @marcia @eread @mikelewis
# Frontend maintainers should see everything in `app/assets/`
app/assets/ @ClemMakesApps @fatihacet @filipa @mikegreiling @timzallmann @kushalpandya @pslaughter @wortschi @ntepluhina @iamphill
*.scss @annabeldunstone @ClemMakesApps @fatihacet @filipa @mikegreiling @timzallmann @kushalpandya @pslaughter @wortschi @ntepluhina @iamphill
/scripts/frontend/ @ClemMakesApps @fatihacet @filipa @mikegreiling @timzallmann @kushalpandya @pslaughter @wortschi @ntepluhina @iamphill
app/assets/ @gitlab-org/maintainers/frontend
*.scss @annabeldunstone @gitlab-org/maintainers/frontend
/scripts/frontend/ @gitlab-org/maintainers/frontend
# Database maintainers should review changes in `db/`
db/ @gitlab-org/maintainers/database
......
......@@ -297,3 +297,6 @@ Graphql/Descriptions:
Include:
- 'app/graphql/**/*'
- 'ee/app/graphql/**/*'
RSpec/AnyInstanceOf:
Enabled: false
......@@ -43,16 +43,7 @@ export default {
'isProjectInvalid',
'projectSelectionLabel',
]),
...mapState([
'apiHost',
'connectError',
'connectSuccessful',
'enabled',
'projects',
'selectedProject',
'settingsLoading',
'token',
]),
...mapState(['enabled', 'projects', 'selectedProject', 'settingsLoading', 'token']),
},
created() {
this.setInitialState({
......@@ -65,15 +56,7 @@ export default {
});
},
methods: {
...mapActions([
'fetchProjects',
'setInitialState',
'updateApiHost',
'updateEnabled',
'updateSelectedProject',
'updateSettings',
'updateToken',
]),
...mapActions(['setInitialState', 'updateEnabled', 'updateSelectedProject', 'updateSettings']),
handleSubmit() {
this.updateSettings();
},
......@@ -95,15 +78,7 @@ export default {
s__('ErrorTracking|Active')
}}</label>
</div>
<error-tracking-form
:api-host="apiHost"
:connect-error="connectError"
:connect-successful="connectSuccessful"
:token="token"
@handle-connect="fetchProjects"
@update-api-host="updateApiHost"
@update-token="updateToken"
/>
<error-tracking-form />
<div class="form-group">
<project-dropdown
:has-projects="hasProjects"
......
<script>
import { mapActions, mapState } from 'vuex';
import { GlButton, GlFormInput } from '@gitlab/ui';
import Icon from '~/vue_shared/components/icon.vue';
export default {
components: { GlButton, GlFormInput, Icon },
props: {
apiHost: {
type: String,
required: true,
},
connectError: {
type: Boolean,
required: true,
},
connectSuccessful: {
type: Boolean,
required: true,
},
token: {
type: String,
required: true,
},
},
computed: {
...mapState(['apiHost', 'connectError', 'connectSuccessful', 'token']),
tokenInputState() {
return this.connectError ? false : null;
},
},
methods: {
...mapActions(['fetchProjects', 'updateApiHost', 'updateToken']),
},
};
</script>
......@@ -41,7 +28,7 @@ export default {
id="error-tracking-api-host"
:value="apiHost"
placeholder="https://mysentryserver.com"
@input="$emit('update-api-host', $event)"
@input="updateApiHost"
/>
<!-- eslint-enable @gitlab/vue-i18n/no-bare-attribute-strings -->
</div>
......@@ -60,15 +47,13 @@ export default {
id="error-tracking-token"
:value="token"
:state="tokenInputState"
@input="$emit('update-token', $event)"
@input="updateToken"
/>
</div>
<div class="col-4 col-md-3 gl-pl-0">
<gl-button
class="js-error-tracking-connect prepend-left-5"
@click="$emit('handle-connect')"
>{{ __('Connect') }}</gl-button
>
<gl-button class="js-error-tracking-connect prepend-left-5" @click="fetchProjects">{{
__('Connect')
}}</gl-button>
<icon
v-show="connectSuccessful"
class="js-error-tracking-connect-success prepend-left-5 text-success align-middle"
......
......@@ -387,6 +387,10 @@ span.idiff {
float: none;
}
.file-actions .ide-edit-button {
z-index: 2;
}
@include media-breakpoint-down(xs) {
display: block;
......
......@@ -5,6 +5,11 @@ class HealthController < ActionController::Base
include RequiresWhitelistedMonitoringClient
CHECKS = [
Gitlab::HealthChecks::MasterCheck
].freeze
ALL_CHECKS = [
*CHECKS,
Gitlab::HealthChecks::DbCheck,
Gitlab::HealthChecks::Redis::RedisCheck,
Gitlab::HealthChecks::Redis::CacheCheck,
......@@ -14,8 +19,9 @@ class HealthController < ActionController::Base
].freeze
def readiness
# readiness check is a collection with all above application-level checks
render_checks(*CHECKS)
# readiness check is a collection of application-level checks
# and optionally all service checks
render_checks(params[:all] ? ALL_CHECKS : CHECKS)
end
def liveness
......@@ -25,7 +31,7 @@ class HealthController < ActionController::Base
private
def render_checks(*checks)
def render_checks(checks = [])
result = Gitlab::HealthChecks::Probes::Collection
.new(*checks)
.execute
......
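For orientation, here is a sketch of how the reworked controller reads once both hunks above are applied. The final `render` line is an assumption (only the `http_status`/`json` accessors appear elsewhere in this commit), not something taken from the diff.

```ruby
# Sketch only — how readiness and render_checks fit together after this change.
def readiness
  # instance-level checks by default; every service check when ?all=1 is passed
  render_checks(params[:all] ? ALL_CHECKS : CHECKS)
end

def render_checks(checks = [])
  result = Gitlab::HealthChecks::Probes::Collection
    .new(*checks)
    .execute

  # Probes::Collection#execute exposes `json` and `http_status` (see the
  # WebExporter spec further down), so a failing probe surfaces as a 503
  # with per-check details in the body. The render call itself is assumed.
  render json: result.json, status: result.http_status
end
```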
......@@ -364,7 +364,7 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
when :error
render json: { status_reason: report_comparison[:status_reason] }, status: :bad_request
else
render json: { status_reason: 'Unknown error' }, status: :internal_server_error
raise "Failed to build comparison response as comparison yielded unknown status '#{report_comparison[:status]}'"
end
end
......
......@@ -47,7 +47,7 @@ module BlobHelper
def edit_blob_button(project = @project, ref = @ref, path = @path, options = {})
return unless blob = readable_blob(options, path, project, ref)
common_classes = "btn js-edit-blob #{options[:extra_class]}"
common_classes = "btn btn-primary js-edit-blob #{options[:extra_class]}"
edit_button_tag(blob,
common_classes,
......@@ -62,7 +62,7 @@ module BlobHelper
return unless blob = readable_blob(options, path, project, ref)
edit_button_tag(blob,
'btn btn-default',
'btn btn-inverted btn-primary ide-edit-button',
_('Web IDE'),
ide_edit_path(project, ref, path, options),
project,
......@@ -108,7 +108,7 @@ module BlobHelper
path,
label: _("Delete"),
action: "delete",
btn_class: "remove",
btn_class: "default",
modal_type: "remove"
)
end
......
......@@ -44,6 +44,7 @@ class GroupPolicy < BasePolicy
rule { public_group }.policy do
enable :read_group
enable :read_package
end
rule { logged_in_viewable }.enable :read_group
......@@ -70,7 +71,10 @@ class GroupPolicy < BasePolicy
rule { has_access }.enable :read_namespace
rule { developer }.enable :admin_milestone
rule { developer }.policy do
enable :admin_milestone
enable :read_package
end
rule { reporter }.policy do
enable :read_container_image
......
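The effect of the `read_package` additions above can be checked from a Rails console; a minimal sketch, assuming the standard `Ability.allowed?` entry point (the group path and member lookup are illustrative only):

```ruby
# Run inside `gitlab-rails console`; the names below are hypothetical.
group = Group.find_by_full_path('my-group')
developer = group.members.find_by(access_level: Gitlab::Access::DEVELOPER)&.user

# After this change, developers get :read_package, and so does anyone
# (including anonymous users) on a public group.
Ability.allowed?(developer, :read_package, group) # => true
Ability.allowed?(nil, :read_package, group)       # => true when the group is public
```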
---
title: Change blob edit view button styling
merge_request: 19566
author:
type: other
......@@ -1032,12 +1032,6 @@ production: &base
# enabled: true
# address: localhost
# port: 8083
# # blackout_seconds:
# # defines an interval to block healthcheck,
# # but continue accepting application requests
# # this allows Load Balancer to notice service
# # being shutdown and not interrupt any of the clients
# blackout_seconds: 10
## Prometheus settings
# Do not modify these settings here. They should be modified in /etc/gitlab/gitlab.rb
......@@ -1049,6 +1043,14 @@ production: &base
# enable: true
# listen_address: 'localhost:9090'
shutdown:
# # blackout_seconds:
# # defines an interval to block healthcheck,
# # but continue accepting application requests
# # this allows Load Balancer to notice service
# # being shutdown and not interrupt any of the clients
# blackout_seconds: 10
#
# 5. Extra customization
# ==========================
......
......@@ -676,7 +676,12 @@ Settings.monitoring['web_exporter'] ||= Settingslogic.new({})
Settings.monitoring.web_exporter['enabled'] ||= false
Settings.monitoring.web_exporter['address'] ||= 'localhost'
Settings.monitoring.web_exporter['port'] ||= 8083
Settings.monitoring.web_exporter['blackout_seconds'] ||= 10
#
# Shutdown settings
#
Settings['shutdown'] ||= Settingslogic.new({})
Settings.shutdown['blackout_seconds'] ||= 10
#
# Testing settings
......
......@@ -70,6 +70,13 @@ if defined?(::Unicorn) || defined?(::Puma)
Gitlab::Metrics::Exporter::WebExporter.instance.start
end
# DEPRECATED: TO BE REMOVED
# This is needed to implement blackout period of `web_exporter`
# https://gitlab.com/gitlab-org/gitlab/issues/35343#note_238479057
Gitlab::Cluster::LifecycleEvents.on_before_blackout_period do
Gitlab::Metrics::Exporter::WebExporter.instance.mark_as_not_running!
end
Gitlab::Cluster::LifecycleEvents.on_before_graceful_shutdown do
# We need to ensure that before we re-exec or shutdown server
# we do stop the exporter
......
......@@ -8,3 +8,15 @@ HealthCheck.setup do |config|
end
end
end
Gitlab::Cluster::LifecycleEvents.on_before_fork do
Gitlab::HealthChecks::MasterCheck.register_master
end
Gitlab::Cluster::LifecycleEvents.on_before_blackout_period do
Gitlab::HealthChecks::MasterCheck.finish_master
end
Gitlab::Cluster::LifecycleEvents.on_worker_start do
Gitlab::HealthChecks::MasterCheck.register_worker
end
......@@ -39,45 +39,62 @@ module Gitlab::Throttle
end
class Rack::Attack
# Order conditions by how expensive they are:
# 1. The most expensive are `req.unauthenticated?` and
# `req.authenticated_user_id`, as they perform an expensive
# DB/Redis query to validate the request
# 2. Slightly less expensive is the need to query DB/Redis
# to unmarshal settings (`Gitlab::Throttle.settings`)
#
# We deliberately skip `/-/health|liveness|readiness`
# from Rack Attack as they need to always be accessible
# by the load balancer, and an additional measure
# (token and whitelisting) is implemented to prevent abuse.
throttle('throttle_unauthenticated', Gitlab::Throttle.unauthenticated_options) do |req|
Gitlab::Throttle.settings.throttle_unauthenticated_enabled &&
req.unauthenticated? &&
!req.should_be_skipped? &&
if !req.should_be_skipped? &&
Gitlab::Throttle.settings.throttle_unauthenticated_enabled &&
req.unauthenticated?
req.ip
end
end
throttle('throttle_authenticated_api', Gitlab::Throttle.authenticated_api_options) do |req|
Gitlab::Throttle.settings.throttle_authenticated_api_enabled &&
req.api_request? &&
if req.api_request? &&
Gitlab::Throttle.settings.throttle_authenticated_api_enabled
req.authenticated_user_id([:api])
end
end
throttle('throttle_authenticated_web', Gitlab::Throttle.authenticated_web_options) do |req|
Gitlab::Throttle.settings.throttle_authenticated_web_enabled &&
req.web_request? &&
if req.web_request? &&
Gitlab::Throttle.settings.throttle_authenticated_web_enabled
req.authenticated_user_id([:api, :rss, :ics])
end
end
throttle('throttle_unauthenticated_protected_paths', Gitlab::Throttle.protected_paths_options) do |req|
Gitlab::Throttle.protected_paths_enabled? &&
req.unauthenticated? &&
!req.should_be_skipped? &&
req.protected_path? &&
if !req.should_be_skipped? &&
req.protected_path? &&
Gitlab::Throttle.protected_paths_enabled? &&
req.unauthenticated?
req.ip
end
end
throttle('throttle_authenticated_protected_paths_api', Gitlab::Throttle.protected_paths_options) do |req|
Gitlab::Throttle.protected_paths_enabled? &&
req.api_request? &&
req.protected_path? &&
if req.api_request? &&
Gitlab::Throttle.protected_paths_enabled? &&
req.protected_path?
req.authenticated_user_id([:api])
end
end
throttle('throttle_authenticated_protected_paths_web', Gitlab::Throttle.protected_paths_options) do |req|
Gitlab::Throttle.protected_paths_enabled? &&
req.web_request? &&
req.protected_path? &&
if req.web_request? &&
Gitlab::Throttle.protected_paths_enabled? &&
req.protected_path?
req.authenticated_user_id([:api, :rss, :ics])
end
end
class Request
......@@ -97,12 +114,16 @@ class Rack::Attack
path =~ %r{^/api/v\d+/internal/}
end
def health_check_request?
path =~ %r{^/-/(health|liveness|readiness)}
end
def should_be_skipped?
api_internal_request?
api_internal_request? || health_check_request?
end
def web_request?
!api_request?
!api_request? && !health_check_request?
end
def protected_path?
......
......@@ -2,7 +2,9 @@
This is the API docs of [GitLab Packages](../administration/packages/index.md).
## List project packages
## List packages
### Within a project
> [Introduced](https://gitlab.com/gitlab-org/gitlab/merge_requests/9259) in GitLab 11.8.
......@@ -42,6 +44,47 @@ Example response:
By default, the `GET` request will return 20 results, since the API is [paginated](README.md#pagination).
### Within a group
> [Introduced](https://gitlab.com/gitlab-org/gitlab/merge_requests/18871) in GitLab 12.5.
Get a list of project packages at the group level.
When accessed without authentication, only packages of public projects are returned.
```
GET /groups/:id/packages
```
| Attribute | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | ID or [URL-encoded path of the group](README.md#namespaced-path-encoding). |
| `exclude_subgroups` | boolean | no | If the parameter is set to `true`, packages from projects in subgroups are not listed. Defaults to `false`. |
```bash
curl --header "PRIVATE-TOKEN: <your_access_token>" https://gitlab.example.com/api/v4/groups/:id/packages?exclude_subgroups=true
```
Example response:
```json
[
{
"id": 1,
"name": "com/mycompany/my-app",
"version": "1.0-SNAPSHOT",
"package_type": "maven"
},
{
"id": 2,
"name": "@foo/bar",
"version": "1.0.3",
"package_type": "npm"
}
]
```
By default, the `GET` request will return 20 results, since the API is [paginated](README.md#pagination).
## Get a project package
> [Introduced](https://gitlab.com/gitlab-org/gitlab/merge_requests/9667) in GitLab 11.9.
......
......@@ -581,6 +581,18 @@ Please check this [rules][eslint-plugin-vue-rules] for more documentation.
<component />
```
#### Component usage within templates
1. Prefer a component's kebab-cased name over other styles when using it in a template
```javascript
// bad
<MyComponent />
// good
<my-component />
```
#### Ordering
1. Tag order in `.vue` file
......
......@@ -2,6 +2,8 @@
To run a specific test with a feature flag enabled you can use the `QA::Runtime::Feature` class to enable and disable feature flags ([via the API](../../../api/features.md)).
Note that administrator authorization is required to change feature flags. `QA::Runtime::Feature` will automatically authenticate as an administrator as long as you provide an appropriate access token via `GITLAB_QA_ADMIN_ACCESS_TOKEN` (recommended), or provide `GITLAB_ADMIN_USERNAME` and `GITLAB_ADMIN_PASSWORD`.
```ruby
context "with feature flag enabled" do
before do
......
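# A fuller sketch of the pattern the truncated example above introduces —
# toggling a flag around a group of examples. The flag name is hypothetical
# and the enable/disable method names are assumptions; check
# qa/qa/runtime/feature.rb for the exact interface.
context 'with feature flag enabled' do
  before do
    Runtime::Feature.enable('my_feature_flag')
  end

  after do
    Runtime::Feature.disable('my_feature_flag')
  end

  it 'works with the flag enabled' do
    # ...
  end
end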
......@@ -501,6 +501,39 @@ it('waits for an event', () => {
});
```
#### Ensuring that tests are isolated
Tests are normally structured around a recurring setup and teardown of the component under test, using the `beforeEach` and `afterEach` hooks.
Example
```javascript
let wrapper;
beforeEach(() => {
wrapper = mount(Component);
});
afterEach(() => {
wrapper.destroy();
});
```
At first glance, this looks like the component is set up before each test and torn down afterwards, providing isolation between tests.
This is, however, not entirely true: the `destroy` method does not remove everything that has been mutated on the `wrapper` object. For functional components, `destroy` only removes the rendered DOM elements from the document.
To ensure that a clean wrapper object and DOM are used in each test, the teardown of the component should instead be performed as follows:
```javascript
afterEach(() => {
wrapper.destroy();
wrapper = null;
});
```
See also the [Vue Test Utils documentation on `destroy`](https://vue-test-utils.vuejs.org/api/wrapper/#destroy).
#### Migrating flaky Karma tests to Jest
Some of our Karma tests are flaky because they access the properties of a shared scope.
......
......@@ -39,7 +39,11 @@ GET http://localhost/-/liveness
## Health
Checks whether the application server is running. It does not verify the database or other services are running.
Checks whether the application server is running. It does not verify that the database or other services are running. This endpoint bypasses the Rails controllers and is implemented as additional middleware (`BasicHealthCheck`) very early in the request processing lifecycle.
```text
GET /-/health
......@@ -59,10 +63,17 @@ GitLab OK
## Readiness
The readiness probe checks whether the GitLab instance is ready to use. It checks the dependent services (Database, Redis, Gitaly etc.) and gives a status for each.
The readiness probe checks whether the GitLab instance is ready to accept traffic via the Rails controllers. By default, the check validates only instance-level checks.
If the `all=1` parameter is specified, the check also validates the dependent services (database, Redis, Gitaly, and so on) and gives a status for each.
```text
GET /-/readiness
GET /-/readiness?all=1
```
Example request:
......@@ -75,37 +86,30 @@ Example response:
```json
{
"db_check":{
"master_check":[{
"status":"failed",
"message": "unexpected Db check result: 0"
},
"redis_check":{
"status":"ok"
},
"cache_check":{
"status":"ok"
},
"queues_check":{
"status":"ok"
},
"shared_state_check":{
"status":"ok"
},
"gitaly_check":{
"status":"ok",
"labels":{
"shard":"default"
}
}
}
"message": "unexpected Master check result: false"
}],
...
}
```
On failure, the endpoint will return a `503` HTTP status code.
This check does hit the database and Redis if authenticated via `token`.
This check is exempt from Rack Attack.
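To exercise both modes from a script, something like the following works (the host and token are placeholders; `token` and `all` are the parameters documented above):

```ruby
# Minimal sketch: probe readiness once with instance checks only and once
# with all service checks. Replace the host and token with real values.
require 'net/http'
require 'json'

base = URI('https://gitlab.example.com/-/readiness')

[{}, { 'all' => '1' }].each do |extra|
  uri = base.dup
  uri.query = URI.encode_www_form({ 'token' => 'HEALTH_CHECK_TOKEN' }.merge(extra))

  response = Net::HTTP.get_response(uri)
  checks = JSON.parse(response.body)

  # 200 means every included check passed; 503 means at least one failed
  puts "#{uri}: HTTP #{response.code} -> #{checks.keys.join(', ')}"
end
```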
## Liveness
DANGER: **Warning:**
In Gitlab [12.4](https://about.gitlab.com/upcoming-releases/) the response body of the Liveness check will change to match the example below.
In GitLab [12.4](https://about.gitlab.com/upcoming-releases/) the response body of the liveness check was changed to match the example below.
The liveness probe checks whether the application server is alive. Unlike the [`health`](#health) check, this check hits the database.
Checks whether the application server is running. This probe is used to verify that the Rails controllers are not deadlocked due to multi-threading.
```text
GET /-/liveness
......@@ -127,7 +131,9 @@ On success, the endpoint will return a `200` HTTP status code, and a response li
}
```
On failure, the endpoint will return a `500` HTTP status code.
On failure, the endpoint will return a `503` HTTP status code.
This check is exempt from Rack Attack.
## Access token (Deprecated)
......
......@@ -111,6 +111,9 @@ This configuration doesn't benefit from the integrated detection step.
SAST has to fetch and spawn each Docker image to establish whether the
custom analyzer can scan the source code.
CAUTION: **Caution:**
Custom analyzers are not spawned automatically when [Docker in Docker](index.md#disabling-docker-in-docker-for-sast) is disabled.
## Analyzers Data
| Property \ Tool | Apex | Bandit | Brakeman | ESLint security | Find Sec Bugs | Flawfinder | Go AST Scanner | NodeJsScan | Php CS Security Audit | Security code Scan (.NET) | TSLint Security | Sobelow |
......
......@@ -192,14 +192,15 @@ SAST can be [configured](#customizing-the-sast-settings) using environment varia
The following are Docker image-related variables.
| Environment variable | Description |
|-------------------------------|--------------------------------------------------------------------------------|
| `SAST_ANALYZER_IMAGES` | Comma separated list of custom images. Default images are still enabled. Read more about [customizing analyzers](analyzers.md). |
| `SAST_ANALYZER_IMAGE_PREFIX` | Override the name of the Docker registry providing the default images (proxy). Read more about [customizing analyzers](analyzers.md). |
| `SAST_ANALYZER_IMAGE_TAG` | Override the Docker tag of the default images. Read more about [customizing analyzers](analyzers.md). |
| `SAST_DEFAULT_ANALYZERS` | Override the names of default images. Read more about [customizing analyzers](analyzers.md). |
| `SAST_DISABLE_DIND` | Disable Docker in Docker and run analyzers [individually](#disabling-docker-in-docker-for-sast). |
| `SAST_PULL_ANALYZER_IMAGES` | Pull the images from the Docker registry (set to 0 to disable). Read more about [customizing analyzers](analyzers.md). |
| Environment variable | Description |
|------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `SAST_ANALYZER_IMAGES` | Comma separated list of custom images. Default images are still enabled. Read more about [customizing analyzers](analyzers.md). Not available when [Docker in Docker is disabled](#disabling-docker-in-docker-for-sast). |
| `SAST_ANALYZER_IMAGE_PREFIX` | Override the name of the Docker registry providing the default images (proxy). Read more about [customizing analyzers](analyzers.md). |
| `SAST_ANALYZER_IMAGE_TAG` | Override the Docker tag of the default images. Read more about [customizing analyzers](analyzers.md). Not available when [Docker in Docker is disabled](#disabling-docker-in-docker-for-sast). |
| `SAST_MAJOR_VERSION` | Override the Docker tag of the default images. Only available when [Docker in Docker is disabled](#disabling-docker-in-docker-for-sast). |
| `SAST_DEFAULT_ANALYZERS` | Override the names of default images. Read more about [customizing analyzers](analyzers.md). |
| `SAST_DISABLE_DIND` | Disable Docker in Docker and run analyzers [individually](#disabling-docker-in-docker-for-sast). |
| `SAST_PULL_ANALYZER_IMAGES` | Pull the images from the Docker registry (set to 0 to disable). Read more about [customizing analyzers](analyzers.md). Not available when [Docker in Docker is disabled](#disabling-docker-in-docker-for-sast). |
#### Vulnerability filters
......@@ -224,6 +225,9 @@ The following variables configure timeouts.
| `SAST_PULL_ANALYZER_IMAGE_TIMEOUT` | 5m | Time limit when pulling the image of an analyzer. Timeouts are parsed using Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". For example, "300ms", "1.5h" or "2h45m". |
| `SAST_RUN_ANALYZER_TIMEOUT` | 20m | Time limit when running an analyzer. Timeouts are parsed using Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". For example, "300ms", "1.5h" or "2h45m".|
NOTE: **Note:**
Timeout variables are not applicable to setups with [Docker in Docker](index.md#disabling-docker-in-docker-for-sast) disabled.
#### Analyzer settings
Some analyzers can be customized with environment variables.
......
......@@ -10,38 +10,39 @@ module Gitlab
#
# We have the following lifecycle events.
#
# - on_master_start:
# - on_before_fork (on master process):
#
# Unicorn/Puma Cluster: This will be called exactly once,
# on startup, before the workers are forked. This is
# called in the PARENT/MASTER process.
#
# Sidekiq/Puma Single: This is called immediately.
# Sidekiq/Puma Single: This is not called.
#
# - on_before_fork:
# - on_master_start (on master process):
#
# Unicorn/Puma Cluster: This will be called exactly once,
# on startup, before the workers are forked. This is
# called in the PARENT/MASTER process.
#
# Sidekiq/Puma Single: This is not called.
# Sidekiq/Puma Single: This is called immediately.
#
# - on_worker_start:
# - on_before_blackout_period (on master process):
#
# Unicorn/Puma Cluster: This is called in the worker process
# exactly once before processing requests.
# Unicorn/Puma Cluster: This will be called before the blackout
# period when performing a graceful shutdown of the master.
# This is called on the `master` process.
#
# Sidekiq/Puma Single: This is called immediately.
# Sidekiq/Puma Single: This is not called.
#
# - on_before_graceful_shutdown:
# - on_before_graceful_shutdown (on master process):
#
# Unicorn/Puma Cluster: This will be called before a graceful
# shutdown of workers starts happening.
# shutdown of workers starts happening, but after the blackout period.
# This is called on the `master` process.
#
# Sidekiq/Puma Single: This is not called.
#
# - on_before_master_restart:
# - on_before_master_restart (on master process):
#
# Unicorn: This will be called before a new master is spun up.
# This is called on forked master before `execve` to become
......@@ -53,6 +54,13 @@ module Gitlab
#
# Sidekiq/Puma Single: This is not called.
#
# - on_worker_start (on worker process):
#
# Unicorn/Puma Cluster: This is called in the worker process
# exactly once before processing requests.
#
# Sidekiq/Puma Single: This is called immediately.
#
# Blocks will be executed in the order in which they are registered.
#
class LifecycleEvents
......@@ -74,6 +82,12 @@ module Gitlab
(@before_fork_hooks ||= []) << block
end
# Read the config/initializers/cluster_events_before_phased_restart.rb
def on_before_blackout_period(&block)
# Defer block execution
(@master_blackout_period ||= []) << block
end
# Read the config/initializers/cluster_events_before_phased_restart.rb
def on_before_graceful_shutdown(&block)
# Defer block execution
......@@ -97,27 +111,24 @@ module Gitlab
# Lifecycle integration methods (called from unicorn.rb, puma.rb, etc.)
#
def do_worker_start
@worker_start_hooks&.each do |block|
block.call
end
call(@worker_start_hooks)
end
def do_before_fork
@before_fork_hooks&.each do |block|
block.call
end
call(@before_fork_hooks)
end
def do_before_graceful_shutdown
@master_graceful_shutdown&.each do |block|
block.call
end
call(@master_blackout_period)
blackout_seconds = ::Settings.shutdown.blackout_seconds.to_i
sleep(blackout_seconds) if blackout_seconds > 0
call(@master_graceful_shutdown)
end
def do_before_master_restart
@master_restart_hooks&.each do |block|
block.call
end
call(@master_restart_hooks)
end
# DEPRECATED
......@@ -132,6 +143,10 @@ module Gitlab
private
def call(hooks)
hooks&.each(&:call)
end
def in_clustered_environment?
# Sidekiq doesn't fork
return false if Sidekiq.server?
......
# frozen_string_literal: true
module Gitlab
module HealthChecks
# This check is registered on master,
# and validated by worker
class MasterCheck
extend SimpleAbstractCheck
class << self
def register_master
# when we fork, we pass the read pipe to the child;
# the child can then detect whether the other end
# of the pipe is still open
@pipe_read, @pipe_write = IO.pipe
end
def finish_master
close_read
close_write
end
def register_worker
# the worker only needs the read end, so close the write end
close_write
end
private
def close_read
@pipe_read&.close
@pipe_read = nil
end
def close_write
@pipe_write&.close
@pipe_write = nil
end
def metric_prefix
'master_check'
end
def successful?(result)
result
end
def check
# the lack of pipe is a legitimate failure of check
return false unless @pipe_read
@pipe_read.read_nonblock(1)
true
rescue IO::EAGAINWaitReadable
# if it is blocked, it means that the pipe is still open
# and there's no data waiting on it
true
rescue EOFError
# the pipe is closed
false
end
end
end
end
end
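The pipe trick above is easier to see in isolation. A standalone sketch (not GitLab code; requires a platform with `fork`):

```ruby
# The master opens a pipe before forking. A worker that keeps only the read
# end can tell whether the master is still alive: while the master holds the
# write end open, a non-blocking read raises EAGAIN; once the master closes
# it (or dies), the read end sees EOF.
read_io, write_io = IO.pipe

pid = fork do
  write_io.close # the worker keeps only the read end

  master_alive =
    begin
      read_io.read_nonblock(1)
      true
    rescue IO::EAGAINWaitReadable
      true  # nothing was written, but the other end is still open
    rescue EOFError
      false # the write end is gone, i.e. the master went away
    end

  exit(master_alive ? 0 : 1)
end

_, status = Process.wait2(pid)
puts "master considered alive: #{status.exitstatus == 0}"

write_io.close
read_io.close
```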
......@@ -20,6 +20,10 @@ module Gitlab
def initialize
super
# DEPRECATED:
# these `readiness_checks` are deprecated,
# as they add no value given how we run the
# application: https://gitlab.com/gitlab-org/gitlab/issues/35343
self.readiness_checks = [
WebExporter::ExporterCheck.new(self),
Gitlab::HealthChecks::PumaCheck,
......@@ -35,6 +39,10 @@ module Gitlab
File.join(Rails.root, 'log', 'web_exporter.log')
end
def mark_as_not_running!
@running = false
end
private
def start_working
......@@ -43,24 +51,9 @@ module Gitlab
end
def stop_working
@running = false
wait_in_blackout_period if server && thread.alive?
mark_as_not_running!
super
end
def wait_in_blackout_period
return unless blackout_seconds > 0
@server.logger.info(
message: 'starting blackout...',
duration_s: blackout_seconds)
sleep(blackout_seconds)
end
def blackout_seconds
settings['blackout_seconds'].to_i
end
end
end
end
......
# frozen_string_literal: true
module RuboCop
module Cop
module RSpec
# This cop checks for `allow_any_instance_of` or `expect_any_instance_of`
# usage in specs.
#
# @example
#
# # bad
# allow_any_instance_of(User).to receive(:invalidate_issue_cache_counts)
#
# # bad
# expect_any_instance_of(User).to receive(:invalidate_issue_cache_counts)
#
# # good
# allow_next_instance_of(User) do |instance|
# allow(instance).to receive(:invalidate_issue_cache_counts)
# end
#
# # good
# expect_next_instance_of(User) do |instance|
# expect(instance).to receive(:invalidate_issue_cache_counts)
# end
#
class AnyInstanceOf < RuboCop::Cop::Cop
MESSAGE_EXPECT = 'Do not use `expect_any_instance_of` method, use `expect_next_instance_of` instead.'
MESSAGE_ALLOW = 'Do not use `allow_any_instance_of` method, use `allow_next_instance_of` instead.'
def_node_search :expect_any_instance_of?, <<~PATTERN
(send (send nil? :expect_any_instance_of ...) ...)
PATTERN
def_node_search :allow_any_instance_of?, <<~PATTERN
(send (send nil? :allow_any_instance_of ...) ...)
PATTERN
def on_send(node)
if expect_any_instance_of?(node)
add_offense(node, location: :expression, message: MESSAGE_EXPECT)
elsif allow_any_instance_of?(node)
add_offense(node, location: :expression, message: MESSAGE_ALLOW)
end
end
def autocorrect(node)
replacement =
if expect_any_instance_of?(node)
replacement_any_instance_of(node, 'expect')
elsif allow_any_instance_of?(node)
replacement_any_instance_of(node, 'allow')
end
lambda do |corrector|
corrector.replace(node.loc.expression, replacement)
end
end
private
def replacement_any_instance_of(node, rspec_prefix)
method_call =
node.receiver.source.sub(
"#{rspec_prefix}_any_instance_of",
"#{rspec_prefix}_next_instance_of")
block = <<~RUBY.chomp
do |instance|
#{rspec_prefix}(instance).#{node.method_name} #{node.children.last.source}
end
RUBY
"#{method_call} #{block}"
end
end
end
end
end
......@@ -32,6 +32,7 @@ require_relative 'cop/migration/timestamps'
require_relative 'cop/migration/update_column_in_batches'
require_relative 'cop/migration/update_large_table'
require_relative 'cop/project_path_helper'
require_relative 'cop/rspec/any_instance_of'
require_relative 'cop/rspec/be_success_matcher'
require_relative 'cop/rspec/env_assignment'
require_relative 'cop/rspec/factories_in_migration_specs'
......
......@@ -35,14 +35,17 @@ gitlab:
gitlab-shell:
resources:
requests:
cpu: 230m
cpu: 500m
memory: 25M
limits:
cpu: 345m
cpu: 750m
memory: 37.5M
maxReplicas: 3
hpa:
targetAverageValue: 130m
deployment:
livenessProbe:
timeoutSeconds: 5
sidekiq:
resources:
requests:
......
......@@ -49,7 +49,9 @@ describe AbuseReportsController do
end
it 'calls notify' do
expect_any_instance_of(AbuseReport).to receive(:notify)
expect_next_instance_of(AbuseReport) do |instance|
expect(instance).to receive(:notify)
end
post :create, params: { abuse_report: attrs }
end
......
......@@ -13,8 +13,9 @@ describe GoogleApi::AuthorizationsController do
before do
sign_in(user)
allow_any_instance_of(GoogleApi::CloudPlatform::Client)
.to receive(:get_token).and_return([token, expires_at])
allow_next_instance_of(GoogleApi::CloudPlatform::Client) do |instance|
allow(instance).to receive(:get_token).and_return([token, expires_at])
end
end
shared_examples_for 'access denied' do
......
# frozen_string_literal: true
require 'spec_helper'
describe HealthController do
include StubENV
let(:token) { Gitlab::CurrentSettings.health_check_access_token }
let(:whitelisted_ip) { '127.0.0.1' }
let(:not_whitelisted_ip) { '127.0.0.2' }
before do
allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip])
stub_storage_settings({}) # Hide the broken storage
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
end
describe '#readiness' do
shared_context 'endpoint responding with readiness data' do
let(:request_params) { {} }
subject { get :readiness, params: request_params }
it 'responds with readiness checks data' do
subject
expect(json_response['db_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['cache_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['queues_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['shared_state_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['gitaly_check']).to contain_exactly(
{ 'status' => 'ok', 'labels' => { 'shard' => 'default' } })
end
it 'responds with readiness checks data when a failure happens' do
allow(Gitlab::HealthChecks::Redis::RedisCheck).to receive(:readiness).and_return(
Gitlab::HealthChecks::Result.new('redis_check', false, "check error"))
subject
expect(json_response['cache_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['redis_check']).to contain_exactly(
{ 'status' => 'failed', 'message' => 'check error' })
expect(response.status).to eq(503)
expect(response.headers['X-GitLab-Custom-Error']).to eq(1)
end
end
context 'accessed from whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it_behaves_like 'endpoint responding with readiness data'
end
context 'accessed from not whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(not_whitelisted_ip)
end
it 'responds with resource not found' do
get :readiness
expect(response.status).to eq(404)
end
context 'accessed with valid token' do
context 'token passed in request header' do
before do
request.headers['TOKEN'] = token
end
it_behaves_like 'endpoint responding with readiness data'
end
end
context 'token passed as URL param' do
it_behaves_like 'endpoint responding with readiness data' do
let(:request_params) { { token: token } }
end
end
end
end
describe '#liveness' do
shared_context 'endpoint responding with liveness data' do
subject { get :liveness }
it 'responds with liveness checks data' do
subject
expect(json_response).to eq('status' => 'ok')
end
end
context 'accessed from whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(whitelisted_ip)
end
it_behaves_like 'endpoint responding with liveness data'
end
context 'accessed from not whitelisted ip' do
before do
allow(Gitlab::RequestContext).to receive(:client_ip).and_return(not_whitelisted_ip)
end
it 'responds with resource not found' do
get :liveness
expect(response.status).to eq(404)
end
context 'accessed with valid token' do
context 'token passed in request header' do
before do
request.headers['TOKEN'] = token
end
it_behaves_like 'endpoint responding with liveness data'
end
context 'token passed as URL param' do
it_behaves_like 'endpoint responding with liveness data' do
subject { get :liveness, params: { token: token } }
end
end
end
end
end
end
......@@ -23,7 +23,9 @@ describe MetricsController do
allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return(metrics_multiproc_dir)
allow(Gitlab::Metrics).to receive(:prometheus_metrics_enabled?).and_return(true)
allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip, whitelisted_ip_range])
allow_any_instance_of(MetricsService).to receive(:metrics_text).and_return("prometheus_counter 1")
allow_next_instance_of(MetricsService) do |instance|
allow(instance).to receive(:metrics_text).and_return("prometheus_counter 1")
end
end
describe '#index' do
......
......@@ -889,23 +889,6 @@ describe Projects::MergeRequestsController do
end
end
context 'when something went wrong on our system' do
let(:report) { {} }
it 'does not send polling interval' do
expect(Gitlab::PollingInterval).not_to receive(:set_header)
subject
end
it 'returns 500 HTTP status' do
subject
expect(response).to have_gitlab_http_status(:internal_server_error)
expect(json_response).to eq({ 'status_reason' => 'Unknown error' })
end
end
context 'when feature flag :ci_expose_arbitrary_artifacts_in_mr is disabled' do
let(:job_options) do
{
......@@ -1063,23 +1046,6 @@ describe Projects::MergeRequestsController do
expect(json_response).to eq({ 'status_reason' => 'Failed to parse test reports' })
end
end
context 'when something went wrong on our system' do
let(:comparison_status) { {} }
it 'does not send polling interval' do
expect(Gitlab::PollingInterval).not_to receive(:set_header)
subject
end
it 'returns 500 HTTP status' do
subject
expect(response).to have_gitlab_http_status(:internal_server_error)
expect(json_response).to eq({ 'status_reason' => 'Unknown error' })
end
end
end
describe 'POST remove_wip' do
......
......@@ -251,7 +251,9 @@ describe SnippetsController do
context 'when the snippet is spam' do
before do
allow_any_instance_of(AkismetService).to receive(:spam?).and_return(true)
allow_next_instance_of(AkismetService) do |instance|
allow(instance).to receive(:spam?).and_return(true)
end
end
context 'when the snippet is private' do
......@@ -323,7 +325,9 @@ describe SnippetsController do
context 'when the snippet is spam' do
before do
allow_any_instance_of(AkismetService).to receive(:spam?).and_return(true)
allow_next_instance_of(AkismetService) do |instance|
allow(instance).to receive(:spam?).and_return(true)
end
end
context 'when the snippet is private' do
......@@ -431,7 +435,9 @@ describe SnippetsController do
let(:snippet) { create(:personal_snippet, :public, author: user) }
before do
allow_any_instance_of(AkismetService).to receive_messages(submit_spam: true)
allow_next_instance_of(AkismetService) do |instance|
allow(instance).to receive_messages(submit_spam: true)
end
stub_application_setting(akismet_enabled: true)
end
......
......@@ -14,7 +14,9 @@ describe 'processing of SAMLResponse in dependencies' do
before do
allow(saml_strategy).to receive(:session).and_return(session_mock)
allow_any_instance_of(OneLogin::RubySaml::Response).to receive(:is_valid?).and_return(true)
allow_next_instance_of(OneLogin::RubySaml::Response) do |instance|
allow(instance).to receive(:is_valid?).and_return(true)
end
saml_strategy.send(:handle_response, mock_saml_response, {}, settings ) { }
end
......
......@@ -2,6 +2,7 @@ import Vuex from 'vuex';
import { createLocalVue, shallowMount } from '@vue/test-utils';
import { GlButton, GlFormInput } from '@gitlab/ui';
import ErrorTrackingForm from '~/error_tracking_settings/components/error_tracking_form.vue';
import createStore from '~/error_tracking_settings/store';
import { defaultProps } from '../mock';
const localVue = createLocalVue();
......@@ -9,15 +10,18 @@ localVue.use(Vuex);
describe('error tracking settings form', () => {
let wrapper;
let store;
function mountComponent() {
wrapper = shallowMount(ErrorTrackingForm, {
localVue,
store,
propsData: defaultProps,
});
}
beforeEach(() => {
store = createStore();
mountComponent();
});
......@@ -61,7 +65,7 @@ describe('error tracking settings form', () => {
describe('after a successful connection', () => {
beforeEach(() => {
wrapper.setProps({ connectSuccessful: true });
store.state.connectSuccessful = true;
});
it('shows the success checkmark', () => {
......@@ -77,7 +81,7 @@ describe('error tracking settings form', () => {
describe('after an unsuccessful connection', () => {
beforeEach(() => {
wrapper.setProps({ connectError: true });
store.state.connectError = true;
});
it('does not show the check mark', () => {
......
......@@ -210,7 +210,9 @@ describe ApplicationHelper do
let(:user) { create(:user, static_object_token: 'hunter1') }
before do
allow_any_instance_of(ApplicationSetting).to receive(:static_objects_external_storage_url).and_return('https://cdn.gitlab.com')
allow_next_instance_of(ApplicationSetting) do |instance|
allow(instance).to receive(:static_objects_external_storage_url).and_return('https://cdn.gitlab.com')
end
allow(helper).to receive(:current_user).and_return(user)
end
......
......@@ -77,7 +77,7 @@ describe Gitlab::Cluster::Mixins::PumaCluster do
mutex = Mutex.new
Gitlab::Cluster::LifecycleEvents.on_before_graceful_shutdown do
Gitlab::Cluster::LifecycleEvents.on_before_blackout_period do
mutex.synchronize do
exit(140)
end
......
......@@ -75,7 +75,7 @@ describe Gitlab::Cluster::Mixins::UnicornHttpServer do
mutex = Mutex.new
Gitlab::Cluster::LifecycleEvents.on_before_graceful_shutdown do
Gitlab::Cluster::LifecycleEvents.on_before_blackout_period do
mutex.synchronize do
exit(140)
end
......
require 'spec_helper'
require_relative './simple_check_shared'
describe Gitlab::HealthChecks::MasterCheck do
let(:result_class) { Gitlab::HealthChecks::Result }
SUCCESS_CODE = 100
FAILURE_CODE = 101
before do
described_class.register_master
end
after do
described_class.finish_master
end
describe '#readiness' do
context 'when master is running' do
it 'worker does return success' do
_, child_status = run_worker
expect(child_status.exitstatus).to eq(SUCCESS_CODE)
end
end
context 'when master finishes early' do
before do
described_class.send(:close_write)
end
it 'worker does return failure' do
_, child_status = run_worker
expect(child_status.exitstatus).to eq(FAILURE_CODE)
end
end
def run_worker
pid = fork do
described_class.register_worker
exit(described_class.readiness.success ? SUCCESS_CODE : FAILURE_CODE)
end
Process.wait2(pid)
end
end
end
......@@ -4,61 +4,41 @@ require 'spec_helper'
describe Gitlab::Metrics::Exporter::WebExporter do
let(:exporter) { described_class.new }
context 'when blackout seconds is used' do
let(:blackout_seconds) { 0 }
let(:readiness_probe) { exporter.send(:readiness_probe).execute }
before do
stub_config(
monitoring: {
web_exporter: {
enabled: true,
port: 0,
address: '127.0.0.1',
blackout_seconds: blackout_seconds
}
let(:readiness_probe) { exporter.send(:readiness_probe).execute }
before do
stub_config(
monitoring: {
web_exporter: {
enabled: true,
port: 0,
address: '127.0.0.1'
}
)
exporter.start
end
after do
exporter.stop
end
}
)
context 'when running server' do
it 'readiness probe returns successful status' do
expect(readiness_probe.http_status).to eq(200)
expect(readiness_probe.json).to include(status: 'ok')
expect(readiness_probe.json).to include('web_exporter' => [{ 'status': 'ok' }])
end
end
context 'when blackout seconds is 10s' do
let(:blackout_seconds) { 10 }
exporter.start
end
it 'readiness probe returns a failure status' do
# during sleep we check the status of readiness probe
expect(exporter).to receive(:sleep).with(10) do
expect(readiness_probe.http_status).to eq(503)
expect(readiness_probe.json).to include(status: 'failed')
expect(readiness_probe.json).to include('web_exporter' => [{ 'status': 'failed' }])
end
after do
exporter.stop
end
exporter.stop
end
context 'when running server' do
it 'readiness probe returns successful status' do
expect(readiness_probe.http_status).to eq(200)
expect(readiness_probe.json).to include(status: 'ok')
expect(readiness_probe.json).to include('web_exporter' => [{ 'status': 'ok' }])
end
end
context 'when blackout is disabled' do
let(:blackout_seconds) { 0 }
it 'readiness probe returns a failure status' do
expect(exporter).not_to receive(:sleep)
describe '#mark_as_not_running!' do
it 'readiness probe returns a failure status' do
exporter.mark_as_not_running!
exporter.stop
end
expect(readiness_probe.http_status).to eq(503)
expect(readiness_probe.json).to include(status: 'failed')
expect(readiness_probe.json).to include('web_exporter' => [{ 'status': 'failed' }])
end
end
end
# frozen_string_literal: true
require 'spec_helper'
describe HealthController do
include StubENV
let(:token) { Gitlab::CurrentSettings.health_check_access_token }
let(:whitelisted_ip) { '1.1.1.1' }
let(:not_whitelisted_ip) { '2.2.2.2' }
let(:params) { {} }
let(:headers) { {} }
before do
allow(Settings.monitoring).to receive(:ip_whitelist).and_return([whitelisted_ip])
stub_storage_settings({}) # Hide the broken storage
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
end
shared_context 'endpoint querying database' do
it 'does query database' do
control_count = ActiveRecord::QueryRecorder.new { subject }.count
expect(control_count).not_to be_zero
end
end
shared_context 'endpoint not querying database' do
it 'does not query database' do
control_count = ActiveRecord::QueryRecorder.new { subject }.count
expect(control_count).to be_zero
end
end
shared_context 'endpoint not found' do
it 'responds with resource not found' do
subject
expect(response.status).to eq(404)
end
end
describe 'GET /-/health' do
subject { get '/-/health', params: params, headers: headers }
shared_context 'endpoint responding with health data' do
it 'responds with health checks data' do
subject
expect(response.status).to eq(200)
expect(response.body).to eq('GitLab OK')
end
end
context 'accessed from whitelisted ip' do
before do
stub_remote_addr(whitelisted_ip)
end
it_behaves_like 'endpoint responding with health data'
it_behaves_like 'endpoint not querying database'
end
context 'accessed from not whitelisted ip' do
before do
stub_remote_addr(not_whitelisted_ip)
end
it_behaves_like 'endpoint not querying database'
it_behaves_like 'endpoint not found'
end
end
describe 'GET /-/readiness' do
subject { get '/-/readiness', params: params, headers: headers }
shared_context 'endpoint responding with readiness data' do
context 'when requesting instance-checks' do
it 'responds with readiness checks data' do
expect(Gitlab::HealthChecks::MasterCheck).to receive(:check) { true }
subject
expect(json_response).to include({ 'status' => 'ok' })
expect(json_response['master_check']).to contain_exactly({ 'status' => 'ok' })
end
it 'responds with readiness checks data when a failure happens' do
expect(Gitlab::HealthChecks::MasterCheck).to receive(:check) { false }
subject
expect(json_response).to include({ 'status' => 'failed' })
expect(json_response['master_check']).to contain_exactly(
{ 'status' => 'failed', 'message' => 'unexpected Master check result: false' })
expect(response.status).to eq(503)
expect(response.headers['X-GitLab-Custom-Error']).to eq(1)
end
end
context 'when requesting all checks' do
before do
params.merge!(all: true)
end
it 'responds with readiness checks data' do
subject
expect(json_response['db_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['cache_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['queues_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['shared_state_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['gitaly_check']).to contain_exactly(
{ 'status' => 'ok', 'labels' => { 'shard' => 'default' } })
end
it 'responds with readiness checks data when a failure happens' do
allow(Gitlab::HealthChecks::Redis::RedisCheck).to receive(:readiness).and_return(
Gitlab::HealthChecks::Result.new('redis_check', false, "check error"))
subject
expect(json_response['cache_check']).to contain_exactly({ 'status' => 'ok' })
expect(json_response['redis_check']).to contain_exactly(
{ 'status' => 'failed', 'message' => 'check error' })
expect(response.status).to eq(503)
expect(response.headers['X-GitLab-Custom-Error']).to eq(1)
end
end
end
context 'accessed from whitelisted ip' do
before do
stub_remote_addr(whitelisted_ip)
end
it_behaves_like 'endpoint not querying database'
it_behaves_like 'endpoint responding with readiness data'
context 'when requesting all checks' do
before do
params.merge!(all: true)
end
it_behaves_like 'endpoint querying database'
end
end
context 'accessed from not whitelisted ip' do
before do
stub_remote_addr(not_whitelisted_ip)
end
it_behaves_like 'endpoint not querying database'
it_behaves_like 'endpoint not found'
end
context 'accessed with valid token' do
context 'token passed in request header' do
let(:headers) { { TOKEN: token } }
it_behaves_like 'endpoint responding with readiness data'
it_behaves_like 'endpoint querying database'
end
context 'token passed as URL param' do
let(:params) { { token: token } }
it_behaves_like 'endpoint responding with readiness data'
it_behaves_like 'endpoint querying database'
end
end
end
describe 'GET /-/liveness' do
subject { get '/-/liveness', params: params, headers: headers }
shared_context 'endpoint responding with liveness data' do
it 'responds with liveness checks data' do
subject
expect(json_response).to eq('status' => 'ok')
end
end
context 'accessed from whitelisted ip' do
before do
stub_remote_addr(whitelisted_ip)
end
it_behaves_like 'endpoint not querying database'
it_behaves_like 'endpoint responding with liveness data'
end
context 'accessed from not whitelisted ip' do
before do
stub_remote_addr(not_whitelisted_ip)
end
it_behaves_like 'endpoint not querying database'
it_behaves_like 'endpoint not found'
context 'accessed with valid token' do
context 'token passed in request header' do
let(:headers) { { TOKEN: token } }
it_behaves_like 'endpoint responding with liveness data'
it_behaves_like 'endpoint querying database'
end
context 'token passed as URL param' do
let(:params) { { token: token } }
it_behaves_like 'endpoint responding with liveness data'
it_behaves_like 'endpoint querying database'
end
end
end
end
def stub_remote_addr(ip)
headers.merge!(REMOTE_ADDR: ip)
end
end
# frozen_string_literal: true
require 'spec_helper'
require_relative '../../../../rubocop/cop/rspec/any_instance_of'
describe RuboCop::Cop::RSpec::AnyInstanceOf do
include CopHelper
subject(:cop) { described_class.new }
context 'when calling allow_any_instance_of' do
let(:source) do
<<~SRC
allow_any_instance_of(User).to receive(:invalidate_issue_cache_counts)
SRC
end
let(:corrected_source) do
<<~SRC
allow_next_instance_of(User) do |instance|
allow(instance).to receive(:invalidate_issue_cache_counts)
end
SRC
end
it 'registers an offence' do
inspect_source(source)
expect(cop.offenses.size).to eq(1)
end
it 'can autocorrect the source' do
expect(autocorrect_source(source)).to eq(corrected_source)
end
end
context 'when calling expect_any_instance_of' do
let(:source) do
<<~SRC
expect_any_instance_of(User).to receive(:invalidate_issue_cache_counts).with(args).and_return(double)
SRC
end
let(:corrected_source) do
<<~SRC
expect_next_instance_of(User) do |instance|
expect(instance).to receive(:invalidate_issue_cache_counts).with(args).and_return(double)
end
SRC
end
it 'registers an offence' do
inspect_source(source)
expect(cop.offenses.size).to eq(1)
end
it 'can autocorrect the source' do
expect(autocorrect_source(source)).to eq(corrected_source)
end
end
end