Commit a5710317 authored by Jose Ivan Vargas

Merge branch 'master' into pawel/prometheus-business-metrics-ee-2273

parents 1f6e9f8d 92be6261
......@@ -107,16 +107,16 @@ gem 'carrierwave', '~> 1.2'
gem 'dropzonejs-rails', '~> 0.7.1'
# for backups
gem 'fog-aws', '~> 1.4'
gem 'fog-aws', '~> 2.0'
gem 'fog-core', '~> 1.44'
gem 'fog-google', '~> 0.5'
gem 'fog-google', '~> 1.3'
gem 'fog-local', '~> 0.3'
gem 'fog-openstack', '~> 0.1'
gem 'fog-rackspace', '~> 0.1.1'
gem 'fog-aliyun', '~> 0.2.0'
# for Google storage
gem 'google-api-client', '~> 0.13.6'
gem 'google-api-client', '~> 0.19'
# for aws storage
gem 'unf', '~> 0.1.4'
......
......@@ -213,7 +213,7 @@ GEM
et-orbi (1.0.3)
tzinfo
eventmachine (1.0.8)
excon (0.57.1)
excon (0.60.0)
execjs (2.6.0)
expression_parser (0.9.0)
factory_bot (4.8.2)
......@@ -255,19 +255,20 @@ GEM
fog-json (~> 1.0)
ipaddress (~> 0.8)
xml-simple (~> 1.1)
fog-aws (1.4.0)
fog-aws (2.0.1)
fog-core (~> 1.38)
fog-json (~> 1.0)
fog-xml (~> 0.1)
ipaddress (~> 0.8)
fog-core (1.44.3)
fog-core (1.45.0)
builder
excon (~> 0.49)
excon (~> 0.58)
formatador (~> 0.2)
fog-google (0.5.3)
fog-google (1.3.0)
fog-core
fog-json
fog-xml
google-api-client (~> 0.19.1)
fog-json (1.0.2)
fog-core (~> 1.0)
multi_json (~> 1.10)
......@@ -358,9 +359,9 @@ GEM
json
multi_json
request_store (>= 1.0)
google-api-client (0.13.6)
google-api-client (0.19.8)
addressable (~> 2.5, >= 2.5.1)
googleauth (~> 0.5)
googleauth (>= 0.5, < 0.7.0)
httpclient (>= 2.8.1, < 3.0)
mime-types (~> 3.0)
representable (~> 3.0)
......@@ -531,7 +532,7 @@ GEM
mini_portile2 (2.3.0)
minitest (5.7.0)
mousetrap-rails (1.4.6)
multi_json (1.12.2)
multi_json (1.13.1)
multi_xml (0.6.0)
multipart-post (2.0.0)
mustermann (1.0.0)
......@@ -1077,9 +1078,9 @@ DEPENDENCIES
flipper-active_record (~> 0.11.0)
flipper-active_support_cache_store (~> 0.11.0)
fog-aliyun (~> 0.2.0)
fog-aws (~> 1.4)
fog-aws (~> 2.0)
fog-core (~> 1.44)
fog-google (~> 0.5)
fog-google (~> 1.3)
fog-local (~> 0.3)
fog-openstack (~> 0.1)
fog-rackspace (~> 0.1.1)
......@@ -1101,7 +1102,7 @@ DEPENDENCIES
gollum-lib (~> 4.2)
gollum-rugged_adapter (~> 0.4.4)
gon (~> 6.1.0)
google-api-client (~> 0.13.6)
google-api-client (~> 0.19)
google-protobuf (= 3.5.1)
gpgme
grape (~> 1.0)
......
<script>
import Flash from '../../../flash';
import AssigneeTitle from './assignee_title';
import Assignees from './assignees.vue';
......@@ -6,11 +7,9 @@ import eventHub from '../../event_hub';
export default {
name: 'SidebarAssignees',
data() {
return {
store: new Store(),
loading: false,
};
components: {
AssigneeTitle,
Assignees,
},
props: {
mediator: {
......@@ -27,9 +26,28 @@ export default {
default: false,
},
},
components: {
AssigneeTitle,
Assignees,
data() {
return {
store: new Store(),
loading: false,
};
},
created() {
this.removeAssignee = this.store.removeAssignee.bind(this.store);
this.addAssignee = this.store.addAssignee.bind(this.store);
this.removeAllAssignees = this.store.removeAllAssignees.bind(this.store);
// Get events from glDropdown
eventHub.$on('sidebar.removeAssignee', this.removeAssignee);
eventHub.$on('sidebar.addAssignee', this.addAssignee);
eventHub.$on('sidebar.removeAllAssignees', this.removeAllAssignees);
eventHub.$on('sidebar.saveAssignees', this.saveAssignees);
},
beforeDestroy() {
eventHub.$off('sidebar.removeAssignee', this.removeAssignee);
eventHub.$off('sidebar.addAssignee', this.addAssignee);
eventHub.$off('sidebar.removeAllAssignees', this.removeAllAssignees);
eventHub.$off('sidebar.saveAssignees', this.saveAssignees);
},
methods: {
assignSelf() {
......@@ -54,39 +72,24 @@ export default {
});
},
},
created() {
this.removeAssignee = this.store.removeAssignee.bind(this.store);
this.addAssignee = this.store.addAssignee.bind(this.store);
this.removeAllAssignees = this.store.removeAllAssignees.bind(this.store);
// Get events from glDropdown
eventHub.$on('sidebar.removeAssignee', this.removeAssignee);
eventHub.$on('sidebar.addAssignee', this.addAssignee);
eventHub.$on('sidebar.removeAllAssignees', this.removeAllAssignees);
eventHub.$on('sidebar.saveAssignees', this.saveAssignees);
},
beforeDestroy() {
eventHub.$off('sidebar.removeAssignee', this.removeAssignee);
eventHub.$off('sidebar.addAssignee', this.addAssignee);
eventHub.$off('sidebar.removeAllAssignees', this.removeAllAssignees);
eventHub.$off('sidebar.saveAssignees', this.saveAssignees);
},
template: `
<div>
<assignee-title
:number-of-assignees="store.assignees.length"
:loading="loading || store.isFetching.assignees"
:editable="store.editable"
:show-toggle="!signedIn"
/>
<assignees
v-if="!store.isFetching.assignees"
class="value"
:root-path="store.rootPath"
:users="store.assignees"
:editable="store.editable"
@assign-self="assignSelf"
/>
</div>
`,
};
</script>
<template>
<div>
<assignee-title
:number-of-assignees="store.assignees.length"
:loading="loading || store.isFetching.assignees"
:editable="store.editable"
:show-toggle="!signedIn"
/>
<assignees
v-if="!store.isFetching.assignees"
class="value"
:root-path="store.rootPath"
:users="store.assignees"
:editable="store.editable"
@assign-self="assignSelf"
/>
</div>
</template>
import Vue from 'vue';
import SidebarTimeTracking from './components/time_tracking/sidebar_time_tracking';
import SidebarAssignees from './components/assignees/sidebar_assignees';
import SidebarAssignees from './components/assignees/sidebar_assignees.vue';
import ConfidentialIssueSidebar from './components/confidential/confidential_issue_sidebar.vue';
import SidebarMoveIssue from './lib/sidebar_move_issue';
import LockIssueSidebar from './components/lock/lock_issue_sidebar.vue';
......
......@@ -574,3 +574,17 @@
}
}
}
// EE-only
.cluster-health-graphs {
.prometheus-state {
.state-svg img {
max-height: 120px;
}
.state-description,
.state-button {
display: none;
}
}
}
......@@ -64,6 +64,22 @@ class Projects::ClustersController < Projects::ApplicationController
end
end
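# GET .../clusters/:id/metrics (JSON). Renders cluster metrics from the
# Prometheus adapter, returns 404 when Prometheus cannot be queried, and
# responds with 204 No Content when the query returns no data.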
def metrics
return render_404 unless prometheus_adapter&.can_query?
respond_to do |format|
format.json do
metrics = prometheus_adapter.query(:cluster) || {}
if metrics.any?
render json: metrics
else
head :no_content
end
end
end
end
private
def cluster
......@@ -71,6 +87,12 @@ class Projects::ClustersController < Projects::ApplicationController
.present(current_user: current_user)
end
def prometheus_adapter
return unless cluster&.application_prometheus&.installed?
cluster.application_prometheus
end
def update_params
if cluster.managed?
params.require(:cluster).permit(
......
......@@ -17,20 +17,23 @@ class Projects::LfsStorageController < Projects::GitHttpClientController
def upload_authorize
set_workhorse_internal_api_content_type
render json: Gitlab::Workhorse.lfs_upload_ok(oid, size)
authorized = LfsObjectUploader.workhorse_authorize
authorized.merge!(LfsOid: oid, LfsSize: size)
render json: authorized
end
def upload_finalize
unless tmp_filename
render_lfs_forbidden
return
end
if store_file(oid, size, tmp_filename)
if store_file!(oid, size)
head 200
else
render plain: 'Unprocessable entity', status: 422
end
rescue ActiveRecord::RecordInvalid
render_400
rescue ObjectStorage::RemoteStoreError
render_lfs_forbidden
end
private
......@@ -51,38 +54,28 @@ class Projects::LfsStorageController < Projects::GitHttpClientController
params[:size].to_i
end
def tmp_filename
name = request.headers['X-Gitlab-Lfs-Tmp']
return if name.include?('/')
return unless oid.present? && name.start_with?(oid)
name
end
def store_file!(oid, size)
object = LfsObject.find_by(oid: oid, size: size)
unless object&.file&.exists?
object = create_file!(oid, size)
end
def store_file(oid, size, tmp_file)
# Define tmp_file_path early because we use it in "ensure"
tmp_file_path = File.join(LfsObjectUploader.workhorse_upload_path, tmp_file)
return unless object
object = LfsObject.find_or_create_by(oid: oid, size: size)
file_exists = object.file.exists? || move_tmp_file_to_storage(object, tmp_file_path)
file_exists && link_to_project(object)
ensure
FileUtils.rm_f(tmp_file_path)
link_to_project!(object)
end
def move_tmp_file_to_storage(object, path)
File.open(path) do |f|
object.file = f
def create_file!(oid, size)
LfsObject.new(oid: oid, size: size).tap do |object|
object.file.store_workhorse_file!(params, :file)
object.save!
end
object.file.store!
object.save
end
def link_to_project(object)
def link_to_project!(object)
if object && !object.projects.exists?(storage_project.id)
object.projects << storage_project
object.save
object.save!
end
end
end
module Emails
module Projects
prepend Emails::EE::Projects
def project_was_moved_email(project_id, user_id, old_path_with_namespace)
@current_user = @user = User.find user_id
@project = Project.find project_id
......@@ -38,13 +40,5 @@ module Emails
reply_to: @message.reply_to,
subject: @message.subject)
end
def mirror_was_hard_failed_email(project_id, user_id)
@project = Project.find(project_id)
user = User.find(user_id)
mail(to: user.notification_email,
subject: subject('Repository mirroring paused'))
end
end
end
......@@ -9,6 +9,12 @@ class LfsObject < ActiveRecord::Base
mount_uploader :file, LfsObjectUploader
before_save :update_file_store
def update_file_store
self.file_store = file.object_store
end
def project_allowed_access?(project)
projects.exists?(project.lfs_storage_project.id)
end
......
......@@ -87,6 +87,15 @@ class ProjectTeam
@masters ||= fetch_members(Gitlab::Access::MASTER)
end
def owners
@owners ||=
if group
group.owners
else
[project.owner]
end
end
def import(source_project, current_user = nil)
target_project = project
......
module Users
class DestroyService
prepend ::EE::Users::DestroyService
attr_accessor :current_user
def initialize(current_user)
......@@ -49,11 +51,7 @@ module Users
::Projects::DestroyService.new(project, current_user, skip_repo: project.legacy_storage?).execute
end
Project.includes(group: :owners).where(mirror_user: user).find_each do |project|
if project.group.present?
project.update(mirror_user: project.group.owners.first)
end
end
yield(user) if block_given?
MigrateToGhostUserService.new(user).execute unless options[:hard_delete]
......
......@@ -2,11 +2,6 @@ class LfsObjectUploader < GitlabUploader
extend Workhorse::UploadPath
include ObjectStorage::Concern
# LfsObject are in `tmp/upload` instead of `tmp/uploads`
def self.workhorse_upload_path
File.join(root, 'tmp/upload')
end
storage_options Gitlab.config.lfs
def filename
......
......@@ -22,6 +22,10 @@
.js-cluster-application-notice
.flash-container
-# EE-specific
- if @cluster.project.feature_available?(:cluster_health)
= render 'health'
%section.settings.no-animate.expanded#cluster-integration
= render 'banner'
= render 'integration_form'
......
......@@ -5,7 +5,7 @@ class DeleteUserWorker
delete_user = User.find(delete_user_id)
current_user = User.find(current_user_id)
Users::DestroyService.new(current_user).execute(delete_user, options.symbolize_keys)
::Users::DestroyService.new(current_user).execute(delete_user, options.symbolize_keys)
rescue Gitlab::Access::AccessDeniedError => e
Rails.logger.warn("User could not be destroyed: #{e}")
end
......
---
title: Move SidebarAssignees vue component
merge_request: 17398
author: George Tsiolis
type: performance
---
title: Upgrade GitLab Workhorse to 4.0.0
merge_request:
author:
type: added
......@@ -794,7 +794,7 @@ test:
provider: AWS # Only AWS supported at the moment
aws_access_key_id: AWS_ACCESS_KEY_ID
aws_secret_access_key: AWS_SECRET_ACCESS_KEY
region: eu-central-1
region: us-east-1
artifacts:
path: tmp/tests/artifacts
enabled: true
......@@ -808,7 +808,7 @@ test:
provider: AWS # Only AWS supported at the moment
aws_access_key_id: AWS_ACCESS_KEY_ID
aws_secret_access_key: AWS_SECRET_ACCESS_KEY
region: eu-central-1
region: us-east-1
uploads:
storage_path: tmp/tests/public
enabled: true
......@@ -818,7 +818,7 @@ test:
provider: AWS # Only AWS supported at the moment
aws_access_key_id: AWS_ACCESS_KEY_ID
aws_secret_access_key: AWS_SECRET_ACCESS_KEY
region: eu-central-1
region: us-east-1
gitlab:
host: localhost
port: 80
......
......@@ -397,6 +397,7 @@ Settings.lfs['storage_path'] = Settings.absolute(Settings.lfs['storage_path'] ||
Settings.lfs['object_store'] ||= Settingslogic.new({})
Settings.lfs['object_store']['enabled'] = false if Settings.lfs['object_store']['enabled'].nil?
Settings.lfs['object_store']['remote_directory'] ||= nil
Settings.lfs['object_store']['direct_upload'] = false if Settings.lfs['object_store']['direct_upload'].nil?
Settings.lfs['object_store']['background_upload'] = true if Settings.lfs['object_store']['background_upload'].nil?
Settings.lfs['object_store']['proxy_download'] = false if Settings.lfs['object_store']['proxy_download'].nil?
# Convert upload connection settings to use string keys, to make Fog happy
......
......@@ -28,16 +28,4 @@ if File.exist?(aws_file)
# when fog_public is false and provider is AWS or Google, defaults to 600
config.fog_authenticated_url_expiration = 1 << 29
end
# Mocking Fog requests, based on: https://github.com/carrierwaveuploader/carrierwave/wiki/How-to%3A-Test-Fog-based-uploaders
if Rails.env.test?
Fog.mock!
connection = ::Fog::Storage.new(
aws_access_key_id: AWS_CONFIG['access_key_id'],
aws_secret_access_key: AWS_CONFIG['secret_access_key'],
provider: 'AWS',
region: AWS_CONFIG['region']
)
connection.directories.create(key: AWS_CONFIG['bucket'])
end
end
- group: Cluster Health
priority: 1
metrics:
- title: "CPU Usage"
y_label: "CPU"
required_metrics: ['container_cpu_usage_seconds_total']
weight: 1
queries:
- query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{id="/"}[15m])) by (job)) without (job)'
label: Usage
unit: "cores"
- query_range: 'sum(kube_node_status_capacity_cpu_cores{kubernetes_namespace="gitlab-managed-apps"})'
label: Capacity
unit: "cores"
- title: "Memory usage"
y_label: "Memory"
required_metrics: ['container_memory_usage_bytes']
weight: 1
queries:
- query_range: 'avg(sum(container_memory_usage_bytes{id="/"}) by (job)) without (job) / 2^30'
label: Usage
unit: "GiB"
- query_range: 'sum(kube_node_status_capacity_memory_bytes{kubernetes_namespace="gitlab-managed-apps"})/2^30'
label: Capacity
unit: "GiB"
\ No newline at end of file
......@@ -245,6 +245,7 @@ constraints(ProjectUrlConstrainer.new) do
member do
get :status, format: :json
get :metrics, format: :json
scope :applications do
post '/:application', to: 'clusters/applications#create', as: :install_applications
......
......@@ -11,7 +11,7 @@
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20180307012445) do
ActiveRecord::Schema.define(version: 20180307164427) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
......
......@@ -63,6 +63,7 @@ For source installations the following settings are nested under `lfs:` and then
|---------|-------------|---------|
| `enabled` | Enable/disable object storage | `false` |
| `remote_directory` | The bucket name where LFS objects will be stored| |
| `direct_upload` | Set to true to enable direct upload of LFS objects without the need for local shared storage. This option may be removed once only a single storage option for all files is supported. | `false` |
| `background_upload` | Set to false to disable automatic upload. Option may be removed once upload is direct to S3 | `true` |
| `proxy_download` | Set to false to disable proxying all files served. Option allows to reduce egress traffic as this allows clients to download directly from remote storage instead of proxying all data | `false` |
| `connection` | Various connection options described below | |
......
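For source installations, the settings above map onto `gitlab.yml` roughly like this sketch (the bucket name mirrors the `lfs-objects` directory used in the specs in this diff; the AWS credential placeholders mirror the test configuration shown above and are illustrative):

    lfs:
      enabled: true
      object_store:
        enabled: true
        remote_directory: lfs-objects   # illustrative bucket name
        direct_upload: true             # the option introduced in this change
        background_upload: false
        proxy_download: false
        connection:
          provider: AWS
          aws_access_key_id: AWS_ACCESS_KEY_ID
          aws_secret_access_key: AWS_SECRET_ACCESS_KEY
          region: us-east-1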
......@@ -40,8 +40,7 @@
createEpic() {
this.creating = true;
this.service.createEpic(this.title)
.then(res => res.json())
.then((data) => {
.then(({ data }) => {
visitUrl(data.web_url);
})
.catch(() => {
......
import Vue from 'vue';
import VueResource from 'vue-resource';
Vue.use(VueResource);
import axios from '~/lib/utils/axios_utils';
export default class NewEpicService {
constructor(endpoint) {
this.endpoint = endpoint;
this.resource = Vue.resource(this.endpoint, {});
}
createEpic(title) {
return this.resource.save({
return axios.post(this.endpoint, {
title,
});
}
......
import Vue from 'vue';
import Dashboard from '~/monitoring/components/dashboard.vue';
export default () => {
const el = document.getElementById('prometheus-graphs');
if (el && el.dataset) {
// eslint-disable-next-line no-new
new Vue({
el,
render(createElement) {
return createElement(Dashboard, {
props: {
...el.dataset,
showLegend: false,
showPanels: false,
forceSmallGraph: true,
},
});
},
});
}
};
import '~/pages/projects/clusters/show';
import initClusterHealth from './cluster_health';
document.addEventListener('DOMContentLoaded', initClusterHealth);
module Emails
module EE
module Projects
def mirror_was_hard_failed_email(project_id, user_id)
@project = Project.find(project_id)
user = User.find(user_id)
mail(to: user.notification_email,
subject: subject('Repository mirroring paused'))
end
def project_mirror_user_changed_email(new_mirror_user_id, deleted_user_name, project_id)
@project = Project.find(project_id)
@deleted_user_name = deleted_user_name
new_mirror_user = User.find(new_mirror_user_id)
mail(to: new_mirror_user.notification_email,
subject: subject('Mirror user changed'))
end
end
end
end
......@@ -61,6 +61,7 @@ class License < ActiveRecord::Base
EEU_FEATURES = EEP_FEATURES + %i[
sast
sast_container
cluster_health
dast
epics
ide
......
......@@ -34,5 +34,9 @@ module EE
mailer.mirror_was_hard_failed_email(project.id, recipient.user.id).deliver_later
end
end
def project_mirror_user_changed(new_mirror_user, deleted_user_name, project)
mailer.project_mirror_user_changed_email(new_mirror_user.id, deleted_user_name, project.id).deliver_later
end
end
end
module EE
module Users
module DestroyService
extend ::Gitlab::Utils::Override
override :execute
def execute(user, options = {})
super(user, options) do |delete_user|
mirror_cleanup(delete_user)
end
end
def mirror_cleanup(user)
user_mirrors = ::Project.where(mirror_user: user)
user_mirrors.find_each do |mirror|
new_mirror_user = first_mirror_owner(user, mirror)
mirror.update_attributes(mirror_user: new_mirror_user)
::NotificationService.new.project_mirror_user_changed(new_mirror_user, user.name, mirror)
end
end
private
def first_mirror_owner(user, mirror)
mirror_owners = mirror.team.owners
mirror_owners -= [user]
mirror_owners.first
end
end
end
end
......@@ -54,6 +54,7 @@ module Geo
def fetch_repository(redownload)
log_info("Trying to fetch #{type}")
clean_up_temporary_repository
update_registry!(started_at: DateTime.now)
if redownload
......@@ -157,17 +158,13 @@ module Geo
registry.public_send("last_#{type}_synced_at") # rubocop:disable GitlabSecurity/PublicSend
end
def random_disk_path(prefix)
random_string = SecureRandom.hex(7)
"#{repository.disk_path}_#{prefix}#{random_string}"
end
def disk_path_temp
@disk_path_temp ||= random_disk_path('')
# We use "@" as it's not allowed to use it in a group or project name
@disk_path_temp ||= "@geo-temporary/#{repository.disk_path}"
end
def deleted_disk_path_temp
@deleted_path ||= "#{repository.disk_path}+failed-geo-sync"
@deleted_path ||= "@failed-geo-sync/#{repository.disk_path}"
end
def build_temporary_repository
......@@ -175,16 +172,17 @@ module Geo
raise Gitlab::Shell::Error, 'Can not create a temporary repository'
end
log_info(
'Created temporary repository',
temp_path: disk_path_temp
)
log_info("Created temporary repository")
repository.clone.tap { |repo| repo.disk_path = disk_path_temp }
end
def clean_up_temporary_repository
gitlab_shell.remove_repository(project.repository_storage_path, disk_path_temp)
exists = gitlab_shell.exists?(project.repository_storage_path, disk_path_temp)
if exists && !gitlab_shell.remove_repository(project.repository_storage_path, disk_path_temp)
raise Gitlab::Shell::Error, "Temporary #{type} can not been removed"
end
end
def set_temp_repository_as_main
......@@ -199,6 +197,9 @@ module Geo
# Remove the deleted path in case it exists, but it may not be there
gitlab_shell.remove_repository(project.repository_storage_path, deleted_disk_path_temp)
# Make sure we have a namespace directory
gitlab_shell.add_namespace(project.repository_storage_path, deleted_disk_path_temp)
if project.repository_exists? && !gitlab_shell.mv_repository(project.repository_storage_path, repository.disk_path, deleted_disk_path_temp)
raise Gitlab::Shell::Error, 'Can not move original repository out of the way'
end
......
......@@ -10,6 +10,9 @@ module ObjectStorage
UnknownStoreError = Class.new(StandardError)
ObjectStorageUnavailable = Class.new(StandardError)
DIRECT_UPLOAD_TIMEOUT = 4.hours
TMP_UPLOAD_PATH = 'tmp/upload'.freeze
module Store
LOCAL = 1
REMOTE = 2
......@@ -124,6 +127,10 @@ module ObjectStorage
object_store_options.enabled
end
def direct_upload_enabled?
object_store_options.direct_upload
end
def background_upload_enabled?
object_store_options.background_upload
end
......@@ -151,6 +158,45 @@ module ObjectStorage
def serialization_column(model_class, mount_point)
model_class.uploader_options.dig(mount_point, :mount_on) || mount_point
end
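# Payload handed to gitlab-workhorse when authorizing an upload: either a
# pre-signed RemoteObject for direct upload, e.g.
#   { RemoteObject: { ID: ..., GetURL: ..., StoreURL: ..., DeleteURL: ... } }
# or, when direct upload is unavailable, a local path under tmp/upload:
#   { TempPath: ... }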
def workhorse_authorize
if options = workhorse_remote_upload_options
{ RemoteObject: options }
else
{ TempPath: workhorse_local_upload_path }
end
end
def workhorse_local_upload_path
File.join(self.root, TMP_UPLOAD_PATH)
end
def workhorse_remote_upload_options
return unless self.object_store_enabled?
return unless self.direct_upload_enabled?
id = [CarrierWave.generate_cache_id, SecureRandom.hex].join('-')
upload_path = File.join(TMP_UPLOAD_PATH, id)
connection = ::Fog::Storage.new(self.object_store_credentials)
expire_at = Time.now + DIRECT_UPLOAD_TIMEOUT
options = { 'Content-Type' => 'application/octet-stream' }
{
ID: id,
GetURL: connection.get_object_https_url(remote_store_path, upload_path, expire_at),
DeleteURL: connection.delete_object_url(remote_store_path, upload_path, expire_at),
StoreURL: connection.put_object_url(remote_store_path, upload_path, expire_at, options)
}
end
end
# Allows the filename to be configured and overwritten
def filename
@filename || super || file&.filename # rubocop:disable Gitlab/ModuleWithInstanceVariables
end
def filename=(filename)
@filename = filename # rubocop:disable Gitlab/ModuleWithInstanceVariables
end
def file_storage?
......@@ -199,10 +245,6 @@ module ObjectStorage
end
end
def filename
super || file&.filename
end
#
# Move the file to another store
#
......@@ -263,6 +305,18 @@ module ObjectStorage
}
end
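# Stores a file described by workhorse-style params: "<identifier>.remote_id"
# references an object already uploaded directly to remote storage, while
# "<identifier>.path" references a file workhorse saved locally.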
def store_workhorse_file!(params, identifier)
filename = params["#{identifier}.name"]
if remote_object_id = params["#{identifier}.remote_id"]
store_remote_file!(remote_object_id, filename)
elsif local_path = params["#{identifier}.path"]
store_local_file!(local_path, filename)
else
raise RemoteStoreError, 'Bad file'
end
end
private
def schedule_background_upload?
......@@ -272,6 +326,38 @@ module ObjectStorage
self.file_storage?
end
def store_remote_file!(remote_object_id, filename)
raise RemoteStoreError, 'Missing filename' unless filename
file_path = File.join(TMP_UPLOAD_PATH, remote_object_id)
file_path = Pathname.new(file_path).cleanpath.to_s
raise RemoteStoreError, 'Bad file path' unless file_path.start_with?(TMP_UPLOAD_PATH + '/')
self.object_store = Store::REMOTE
# TODO:
# This should be changed to make use of `tmp/cache` mechanism
# instead of using custom upload directory,
# using tmp/cache makes this implementation way easier than it is today
CarrierWave::Storage::Fog::File.new(self, storage, file_path).tap do |file|
raise RemoteStoreError, 'Missing file' unless file.exists?
self.filename = filename
self.file = storage.store!(file)
end
end
def store_local_file!(local_path, filename)
raise RemoteStoreError, 'Missing filename' unless filename
root_path = File.realpath(self.class.workhorse_local_upload_path)
file_path = File.realpath(local_path)
raise RemoteStoreError, 'Bad file path' unless file_path.start_with?(root_path)
self.object_store = Store::LOCAL
self.store!(UploadedFile.new(file_path, filename))
end
# this is a hack around CarrierWave. The #migrate method needs to be
# able to force the current file to the migrated file upon success.
def file=(file)
......
%p
The mirror user for #{@project.full_path} has been changed from #{@deleted_user_name} to yourself because their account was deleted.
%p
You can change this setting from the #{link_to("repository settings page", project_settings_repository_path(@project))}.
The mirror user for <%= @project.full_path %> has been changed from <%= @deleted_user_name %> to yourself because their account was deleted.
You can change this setting from the repository settings page at <%= project_settings_repository_url(@project) %>.
%section.settings.no-animate.expanded.cluster-health-graphs#cluster-health
%h4= s_('ClusterIntegration|Kubernetes cluster health')
- if @cluster&.application_prometheus&.installed?
#prometheus-graphs{ data: { "settings-path": edit_project_service_path(@project, 'prometheus'),
"clusters-path": project_clusters_path(@project),
"documentation-path": help_page_path('administration/monitoring/prometheus/index.md'),
"empty-getting-started-svg-path": image_path('illustrations/monitoring/getting_started.svg'),
"empty-loading-svg-path": image_path('illustrations/monitoring/loading.svg'),
"empty-unable-to-connect-svg-path": image_path('illustrations/monitoring/unable_to_connect.svg'),
"metrics-endpoint": metrics_namespace_project_cluster_path( format: :json ),
"project-path": project_path(@project),
"tags-path": project_tags_path(@project) } }
- else
.settings-content
%p= s_("ClusterIntegration|In order to show the health of the cluster, we'll need to provision your cluster with Prometheus to collect the required data.")
%a.btn.btn-default{ href: '#cluster-applications' }
= s_('ClusterIntegration|Install Prometheus')
---
title: 'Fix Geo: Leftover temporary directories from failed clones'
merge_request:
author:
type: fixed
---
title: Mirror owners now get assigned as mirror users when the assigned mirror users delete their accounts
merge_request: 4827
author:
type: fixed
---
title: Query cluster status
merge_request: 4701
author:
type: added
---
title: Fix Epics not getting created in a Group with existing Epics
merge_request: 4865
author:
type: fixed
---
title: Fix Geo Log Cursor not reconnecting after pgbouncer dies
merge_request: !4866
author:
type: fixed
---
title: Add support for direct uploading of LFS artifacts
merge_request:
author:
type: added
class DisableMirroringForProjectsWithInvalidMirrorUsers < ActiveRecord::Migration
DOWNTIME = false
def up
execute <<~SQL
UPDATE projects
SET mirror = FALSE, mirror_user_id = NULL
WHERE mirror = true AND
NOT EXISTS (SELECT 1 FROM users WHERE users.id = projects.mirror_user_id)
SQL
end
def down
end
end
......@@ -34,7 +34,11 @@ module Gitlab
end
def run_once!
LogCursor::Events.fetch_in_batches { |batch| handle_events(batch) }
# Wrap this with the connection to make it possible to reconnect if
# PGbouncer dies: https://github.com/rails/rails/issues/29189
ActiveRecord::Base.connection_pool.with_connection do
LogCursor::Events.fetch_in_batches { |batch| handle_events(batch) }
end
end
def handle_events(batch)
......
module Gitlab
module Prometheus
module Queries
class ClusterQuery < BaseQuery
include QueryAdditionalMetrics
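# Runs the queries defined in cluster_metrics.yml against the
# last 8 hours of Prometheus data.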
def query
AdditionalMetricsParser.load_groups_from_yaml('cluster_metrics.yml')
.map(&query_group(base_query_context(8.hours.ago, Time.now)))
end
end
end
end
end
require 'spec_helper'
describe EE::Gitlab::Ci::Pipeline::Chain::RemoveUnwantedChatJobs do
let(:project) { create(:project) }
let(:project) { create(:project, :repository) }
let(:pipeline) do
build(:ci_pipeline_with_one_job, project: project, ref: 'master')
......
require 'spec_helper'
describe Gitlab::Prometheus::Queries::ClusterQuery do
let(:client) { double('prometheus_client', query_range: nil) }
subject { described_class.new(client) }
around do |example|
Timecop.freeze { example.run }
end
it 'loads cluster metrics from yaml' do
expect(Gitlab::Prometheus::AdditionalMetricsParser).to receive(:load_groups_from_yaml).with('cluster_metrics.yml').and_call_original
subject.query
end
it 'sends queries to prometheus' do
subject.query
expect(client).to have_received(:query_range).with(anything, start: 8.hours.ago, stop: Time.now).at_least(1)
end
end
......@@ -185,4 +185,16 @@ describe EE::NotificationService, :mailer do
end
end
end
context 'mirror user changed' do
it 'sends email' do
mirror_user = create(:user)
project = create(:project, :mirror, mirror_user_id: mirror_user.id)
new_mirror_user = project.team.owners.first
expect(Notify).to receive(:project_mirror_user_changed_email).with(new_mirror_user.id, mirror_user.name, project.id).and_call_original
subject.project_mirror_user_changed(new_mirror_user, mirror_user.name, project)
end
end
end
require 'spec_helper'
describe Users::DestroyService do
context 'when project is a mirror' do
it 'assigns mirror_user to a project owner' do
mirror_user = create(:user)
project = create(:project, :mirror, mirror_user_id: mirror_user.id)
new_mirror_user = project.team.owners.first
expect_any_instance_of(EE::NotificationService).to receive(:project_mirror_user_changed).with(new_mirror_user, mirror_user.name, project)
expect do
described_class.new(mirror_user).execute(mirror_user)
end.to change { project.reload.mirror_user }.from(mirror_user).to(new_mirror_user)
end
end
end
......@@ -129,6 +129,12 @@ describe Geo::RepositorySyncService do
end
context 'tracking database' do
context 'temporary repositories' do
include_examples 'cleans temporary repositories' do
let(:repository) { project.repository }
end
end
it 'creates a new registry if one does not exist' do
expect { subject.execute }.to change(Geo::ProjectRegistry, :count).by(1)
end
......@@ -236,7 +242,7 @@ describe Geo::RepositorySyncService do
expect(subject).to receive(:sync_repository).with(true).and_call_original
expect(subject.gitlab_shell).to receive(:mv_repository).exactly(2).times.and_call_original
expect(subject.gitlab_shell).to receive(:remove_repository).exactly(3).times.and_call_original
expect(subject.gitlab_shell).to receive(:remove_repository).exactly(2).times.and_call_original
subject.execute
......@@ -260,6 +266,20 @@ describe Geo::RepositorySyncService do
subject.execute
end
it 'cleans temporary repo after redownload' do
create(
:geo_project_registry,
project: project,
repository_retry_count: Geo::BaseSyncService::RETRY_BEFORE_REDOWNLOAD - 1,
force_to_redownload_repository: true
)
expect(subject).to receive(:fetch_geo_mirror)
expect(subject).to receive(:clean_up_temporary_repository).twice
subject.execute
end
it 'successfully redownloads the repository even if the retry time exceeds max value' do
timestamp = Time.now.utc
registry = create(
......
......@@ -108,6 +108,12 @@ RSpec.describe Geo::WikiSyncService do
end
context 'tracking database' do
context 'temporary repositories' do
include_examples 'cleans temporary repositories' do
let(:repository) { project.wiki.repository }
end
end
it 'creates a new registry if one does not exist' do
expect { subject.execute }.to change(Geo::ProjectRegistry, :count).by(1)
end
......
......@@ -29,3 +29,21 @@ shared_examples 'geo base sync execution' do
end
end
end
shared_examples 'cleans temporary repositories' do
context 'there is a leftover repository' do
let(:temp_repo_path) { "@geo-temporary/#{repository.disk_path}" }
it 'removes leftover repository' do
gitlab_shell = instance_double('Gitlab::Shell')
allow(subject).to receive(:gitlab_shell).and_return(gitlab_shell)
allow(subject).to receive(:fetch_geo_mirror)
expect(gitlab_shell).to receive(:exists?).and_return(true)
expect(gitlab_shell).to receive(:remove_repository).with(project.repository_storage_path, temp_repo_path)
subject.execute
end
end
end
module StubConfiguration
def stub_object_storage_uploader(config:, uploader:, remote_directory:, enabled: true, licensed: true, background_upload: false)
Fog.mock!
def stub_object_storage_uploader(
config:, uploader:, remote_directory:, enabled: true, licensed: true,
background_upload: false, direct_upload: false
)
allow(config).to receive(:enabled) { enabled }
allow(config).to receive(:background_upload) { background_upload }
allow(config).to receive(:direct_upload) { direct_upload }
stub_licensed_features(object_storage: licensed) unless licensed == :skip
return unless enabled
Fog.mock!
::Fog::Storage.new(uploader.object_store_credentials).tap do |connection|
begin
connection.directories.create(key: remote_directory)
......
......
......@@ -3,7 +3,7 @@ require 'spec_helper'
describe BuildFinishedWorker do
describe '#perform' do
it 'schedules a ChatNotification job for a chat build' do
build = create(:ci_build, pipeline: create(:ci_pipeline, source: :chat))
build = create(:ci_build, :success, pipeline: create(:ci_pipeline, source: :chat))
expect(ChatNotificationWorker)
.to receive(:perform_async)
......@@ -13,7 +13,7 @@ describe BuildFinishedWorker do
end
it 'does not schedule a ChatNotification job for a regular build' do
build = create(:ci_build, pipeline: create(:ci_pipeline))
build = create(:ci_build, :success, pipeline: create(:ci_pipeline))
expect(ChatNotificationWorker)
.not_to receive(:perform_async)
......
......@@ -53,14 +53,6 @@ module Gitlab
params
end
def lfs_upload_ok(oid, size)
{
StoreLFSPath: LfsObjectUploader.workhorse_upload_path,
LfsOid: oid,
LfsSize: size
}
end
def artifact_upload_ok
{ TempPath: JobArtifactUploader.workhorse_upload_path }
end
......
......@@ -155,6 +155,93 @@ describe Projects::ClustersController do
end
end
describe 'GET metrics' do
let(:cluster) { create(:cluster, :provided_by_gcp, projects: [project]) }
describe 'functionality' do
let(:user) { create(:user) }
before do
project.add_master(user)
sign_in(user)
end
context "Can't query Prometheus" do
it 'returns not found' do
go
expect(response).to have_gitlab_http_status(:not_found)
end
end
context 'can query Prometheus' do
let(:prometheus_adapter) { double('prometheus_adapter', can_query?: true, query: nil) }
before do
allow(controller).to receive(:prometheus_adapter).and_return(prometheus_adapter)
end
it 'queries cluster metrics' do
go
expect(prometheus_adapter).to have_received(:query).with(:cluster)
end
context 'when response has content' do
let(:query_response) { { response: nil } }
before do
allow(prometheus_adapter).to receive(:query).and_return(query_response)
end
it 'returns prometheus query response' do
go
expect(response).to have_gitlab_http_status(:ok)
expect(response.body).to eq(query_response.to_json)
end
end
context 'when response has no content' do
let(:query_response) { {} }
before do
allow(prometheus_adapter).to receive(:query).and_return(query_response)
end
it 'returns no content' do
go
expect(response).to have_gitlab_http_status(:no_content)
end
end
end
end
def go
get :metrics, format: :json,
namespace_id: project.namespace,
project_id: project,
id: cluster
end
describe 'security' do
let(:prometheus_adapter) { double('prometheus_adapter', can_query?: true, query: nil) }
before do
allow(controller).to receive(:prometheus_adapter).and_return(prometheus_adapter)
end
it { expect { go }.to be_allowed_for(:admin) }
it { expect { go }.to be_allowed_for(:owner).of(project) }
it { expect { go }.to be_allowed_for(:master).of(project) }
it { expect { go }.to be_denied_for(:developer).of(project) }
it { expect { go }.to be_denied_for(:reporter).of(project) }
it { expect { go }.to be_denied_for(:guest).of(project) }
it { expect { go }.to be_denied_for(:user) }
it { expect { go }.to be_denied_for(:external) }
end
end
describe 'PUT update' do
context 'when cluster is provided by GCP' do
let(:cluster) { create(:cluster, :provided_by_gcp, projects: [project]) }
......
......@@ -7,6 +7,8 @@ describe Projects::Prometheus::MetricsController do
let(:prometheus_adapter) { double('prometheus_adapter', can_query?: true) }
before do
allow(controller).to receive(:project).and_return(project)
project.add_master(user)
sign_in(user)
end
......
import Vue from 'vue';
import _ from 'underscore';
import MockAdapter from 'axios-mock-adapter';
import axios from '~/lib/utils/axios_utils';
import newEpic from 'ee/epics/new_epic/components/new_epic.vue';
import * as urlUtility from '~/lib/utils/url_utility';
import mountComponent from 'spec/helpers/vue_mount_component_helper';
describe('newEpic', () => {
let vm;
const interceptor = (request, next) => {
next(request.respondWith(JSON.stringify({
web_url: gl.TEST_HOST,
}), {
status: 200,
}));
};
let mock;
beforeEach(() => {
Vue.http.interceptors.push(interceptor);
const NewEpic = Vue.extend(newEpic);
mock = new MockAdapter(axios);
mock.onPost(gl.TEST_HOST).reply(200, { web_url: gl.TEST_HOST });
vm = mountComponent(NewEpic, {
endpoint: gl.TEST_HOST,
});
});
afterEach(() => {
Vue.http.interceptors = _.without(Vue.http.interceptors, interceptor);
mock.restore();
vm.$destroy();
});
describe('alignRight', () => {
......
import _ from 'underscore';
import Vue from 'vue';
import SidebarAssignees from '~/sidebar/components/assignees/sidebar_assignees';
import SidebarAssignees from '~/sidebar/components/assignees/sidebar_assignees.vue';
import SidebarMediator from '~/sidebar/sidebar_mediator';
import SidebarService from '~/sidebar/services/sidebar_service';
import SidebarStore from '~/sidebar/stores/sidebar_store';
......
......@@ -278,6 +278,10 @@ describe Backup::Manager do
connection.directories.create(key: Gitlab.config.backup.upload.remote_directory)
end
after do
Fog.unmock!
end
context 'target path' do
it 'uses the tar filename by default' do
expect_any_instance_of(Fog::Collection).to receive(:create)
......
......@@ -1434,6 +1434,23 @@ describe Notify do
end
end
describe 'mirror user changed' do
let(:mirror_user) { create(:user) }
let(:project) { create(:project, :mirror, mirror_user_id: mirror_user.id) }
let(:new_mirror_user) { project.team.owners.first }
subject { described_class.project_mirror_user_changed_email(new_mirror_user.id, mirror_user.name, project.id) }
it_behaves_like 'an email sent from GitLab'
it_behaves_like 'it should not have Gmail Actions links'
it_behaves_like "a user cannot unsubscribe through footer link"
it 'has the correct subject and body' do
is_expected.to have_subject("#{project.name} | Mirror user changed")
is_expected.to have_html_escaped_body_text(project.full_path)
end
end
describe 'admin notification' do
let(:example_site_path) { root_path }
let(:user) { create(:user) }
......
......@@ -4,14 +4,15 @@ describe PrometheusAdapter, :use_clean_rails_memory_store_caching do
include PrometheusHelpers
include ReactiveCachingHelpers
class TestClass
include PrometheusAdapter
end
let(:project) { create(:prometheus_project) }
let(:service) { project.prometheus_service }
let(:described_class) { TestClass }
let(:described_class) do
Class.new do
include PrometheusAdapter
end
end
let(:environment_query) { Gitlab::Prometheus::Queries::EnvironmentQuery }
describe '#query' do
......
......@@ -547,7 +547,7 @@ describe Environment do
let(:project) { create(:prometheus_project) }
subject { environment.additional_metrics }
context 'when the environment has additional metrics' do
context 'when the environment has metrics' do
before do
allow(environment).to receive(:has_metrics?).and_return(true)
end
......
......@@ -990,22 +990,61 @@ describe 'Git LFS API and storage' do
end
context 'and request is sent by gitlab-workhorse to authorize the request' do
before do
put_authorize
shared_examples 'a valid response' do
before do
put_authorize
end
it 'responds with status 200' do
expect(response).to have_gitlab_http_status(200)
end
it 'uses the gitlab-workhorse content type' do
expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
end
end
it 'responds with status 200' do
expect(response).to have_gitlab_http_status(200)
shared_examples 'a local file' do
it_behaves_like 'a valid response' do
it 'responds with status 200, location of lfs store and object details' do
expect(json_response['TempPath']).to eq(LfsObjectUploader.workhorse_local_upload_path)
expect(json_response['RemoteObject']).to be_nil
expect(json_response['LfsOid']).to eq(sample_oid)
expect(json_response['LfsSize']).to eq(sample_size)
end
end
end
it 'uses the gitlab-workhorse content type' do
expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
context 'when using local storage' do
it_behaves_like 'a local file'
end
it 'responds with status 200, location of lfs store and object details' do
expect(json_response['StoreLFSPath']).to eq(LfsObjectUploader.workhorse_upload_path)
expect(json_response['LfsOid']).to eq(sample_oid)
expect(json_response['LfsSize']).to eq(sample_size)
context 'when using remote storage' do
context 'when direct upload is enabled' do
before do
stub_lfs_object_storage(enabled: true, direct_upload: true)
end
it_behaves_like 'a valid response' do
it 'responds with status 200, location of lfs remote store and object details' do
expect(json_response['TempPath']).to be_nil
expect(json_response['RemoteObject']).to have_key('ID')
expect(json_response['RemoteObject']).to have_key('GetURL')
expect(json_response['RemoteObject']).to have_key('StoreURL')
expect(json_response['RemoteObject']).to have_key('DeleteURL')
expect(json_response['LfsOid']).to eq(sample_oid)
expect(json_response['LfsSize']).to eq(sample_size)
end
end
end
context 'when direct upload is disabled' do
before do
stub_lfs_object_storage(enabled: true, direct_upload: false)
end
it_behaves_like 'a local file'
end
end
end
......@@ -1037,14 +1076,70 @@ describe 'Git LFS API and storage' do
end
context 'with object storage enabled' do
before do
stub_lfs_object_storage(background_upload: true)
context 'and direct upload enabled' do
let!(:fog_connection) do
stub_lfs_object_storage(direct_upload: true)
end
['123123', '../../123123'].each do |remote_id|
context "with invalid remote_id: #{remote_id}" do
subject do
put_finalize_with_args('file.remote_id' => remote_id)
end
it 'responds with status 403' do
subject
expect(response).to have_gitlab_http_status(403)
end
end
end
context 'with valid remote_id' do
before do
fog_connection.directories.get('lfs-objects').files.create(
key: 'tmp/upload/12312300',
body: 'content'
)
end
subject do
put_finalize_with_args(
'file.remote_id' => '12312300',
'file.name' => 'name')
end
it 'responds with status 200' do
subject
expect(response).to have_gitlab_http_status(200)
end
it 'links the LfsObject to the project' do
subject
expect(LfsObject.last.projects).to include(project)
end
it 'has a valid file' do
subject
expect(LfsObject.last.file_store).to eq(ObjectStorage::Store::REMOTE)
expect(LfsObject.last.file).to be_exists
end
end
end
it 'schedules migration of file to object storage' do
expect(ObjectStorage::BackgroundMoveWorker).to receive(:perform_async).with('LfsObjectUploader', 'LfsObject', :file, kind_of(Numeric))
context 'and background upload enabled' do
before do
stub_lfs_object_storage(background_upload: true)
end
put_finalize(with_tempfile: true)
it 'schedules migration of file to object storage' do
expect(ObjectStorage::BackgroundMoveWorker).to receive(:perform_async).with('LfsObjectUploader', 'LfsObject', :file, kind_of(Numeric))
put_finalize(with_tempfile: true)
end
end
end
end
......@@ -1064,13 +1159,12 @@ describe 'Git LFS API and storage' do
end
context 'invalid tempfiles' do
it 'rejects slashes in the tempfile name (path traversal)' do
put_finalize('foo/bar')
expect(response).to have_gitlab_http_status(403)
before do
lfs_object.destroy
end
it 'rejects tempfile names that do not start with the oid' do
put_finalize("foo#{sample_oid}")
it 'rejects slashes in the tempfile name (path traversal)' do
put_finalize('../bar', with_tempfile: true)
expect(response).to have_gitlab_http_status(403)
end
end
......@@ -1160,7 +1254,7 @@ describe 'Git LFS API and storage' do
end
it 'with location of lfs store and object details' do
expect(json_response['StoreLFSPath']).to eq(LfsObjectUploader.workhorse_upload_path)
expect(json_response['TempPath']).to eq(LfsObjectUploader.workhorse_local_upload_path)
expect(json_response['LfsOid']).to eq(sample_oid)
expect(json_response['LfsSize']).to eq(sample_size)
end
......@@ -1263,10 +1357,24 @@ describe 'Git LFS API and storage' do
end
def put_finalize(lfs_tmp = lfs_tmp_file, with_tempfile: false)
setup_tempfile(lfs_tmp) if with_tempfile
upload_path = LfsObjectUploader.workhorse_local_upload_path
file_path = upload_path + '/' + lfs_tmp if lfs_tmp
if with_tempfile
FileUtils.mkdir_p(upload_path)
FileUtils.touch(file_path)
end
args = {
'file.path' => file_path,
'file.name' => File.basename(file_path)
}.compact
put "#{project.http_url_to_repo}/gitlab-lfs/objects/#{sample_oid}/#{sample_size}", nil,
headers.merge('X-Gitlab-Lfs-Tmp' => lfs_tmp).compact
put_finalize_with_args(args)
end
def put_finalize_with_args(args)
put "#{project.http_url_to_repo}/gitlab-lfs/objects/#{sample_oid}/#{sample_size}", args, headers
end
def lfs_tmp_file
......@@ -1274,10 +1382,6 @@ describe 'Git LFS API and storage' do
end
def setup_tempfile(lfs_tmp)
upload_path = LfsObjectUploader.workhorse_upload_path
FileUtils.mkdir_p(upload_path)
FileUtils.touch(File.join(upload_path, lfs_tmp))
end
end
......
......@@ -167,24 +167,6 @@ describe Users::DestroyService do
end
end
context "when the user was the mirror_user for a group project" do
let(:group_owner) { create(:user) }
let(:mirror_user) { create(:user) }
let(:group) { create(:group) }
before do
group.add_owner(group_owner)
group.add_master(mirror_user)
end
it 'updates the mirror_user to one of the group owners' do
project = create(:project, namespace_id: group.id, creator: group_owner, mirror_user: mirror_user)
service.execute(mirror_user)
expect(project.reload.mirror_user).to eq group_owner
end
end
describe "user personal's repository removal" do
before do
Sidekiq::Testing.inline! { service.execute(user) }
......
......@@ -27,7 +27,7 @@ describe GitlabUploader do
describe '#file_cache_storage?' do
context 'when file storage is used' do
before do
uploader_class.cache_storage(:file)
expect(uploader_class).to receive(:cache_storage) { CarrierWave::Storage::File }
end
it { is_expected.to be_file_cache_storage }
......@@ -35,7 +35,7 @@ describe GitlabUploader do
context 'when is remote storage' do
before do
uploader_class.cache_storage(:fog)
expect(uploader_class).to receive(:cache_storage) { CarrierWave::Storage::Fog }
end
it { is_expected.not_to be_file_cache_storage }
......
......@@ -5,14 +5,14 @@ describe DeleteUserWorker do
let!(:current_user) { create(:user) }
it "calls the DeleteUserWorker with the params it was given" do
expect_any_instance_of(Users::DestroyService).to receive(:execute)
expect_any_instance_of(EE::Users::DestroyService).to receive(:execute)
.with(user, {})
described_class.new.perform(current_user.id, user.id)
end
it "uses symbolized keys" do
expect_any_instance_of(Users::DestroyService).to receive(:execute)
expect_any_instance_of(EE::Users::DestroyService).to receive(:execute)
.with(user, test: "test")
described_class.new.perform(current_user.id, user.id, "test" => "test")
......