Commit 17f2052a authored by Stan Hu

Use S3 Workhorse client with consolidated object store settings

In GitLab 13.1.0, we added an S3 client to Workhorse
(https://gitlab.com/gitlab-org/gitlab-workhorse/-/merge_requests/466).
Previously this client was only enabled when AWS instance profiles
(use_iam_profile) were in use. We now extend this functionality to
installations where the consolidated object storage settings are
enabled for AWS.

This will fix ETag Mismatch errors with non-AWS S3 providers and pave
the way for supporting encrypted S3 buckets with customer-provided keys.

Relates to https://gitlab.com/gitlab-org/gitlab/-/issues/220288
parent ef2ce0f3
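
The net effect is that Workhorse's S3 client is used when either an instance
profile or the consolidated settings are active, the use_workhorse_s3_client
feature flag is on, and the credentials use a V4 signature. A minimal
standalone sketch of that decision, with a stubbed Feature module standing in
for GitLab's feature-flag check (the stub is illustrative, not GitLab's API):

# Stub standing in for GitLab's Feature flag check (illustrative only).
module Feature
  def self.enabled?(_name, default_enabled: false)
    default_enabled
  end
end

# Mirrors the rewritten DirectUpload#use_workhorse_s3_client? from this commit.
def use_workhorse_s3_client?(credentials, consolidated_settings)
  return false unless Feature.enabled?(:use_workhorse_s3_client, default_enabled: true)
  return false unless credentials.fetch(:use_iam_profile, false) || consolidated_settings
  # The Golang AWS SDK does not support V2 signatures
  return false unless credentials.fetch(:aws_signature_version, 4).to_i >= 4

  true
end

use_workhorse_s3_client?({ use_iam_profile: true }, false)   # => true
use_workhorse_s3_client?({}, true)                           # => true (consolidated settings)
use_workhorse_s3_client?({ aws_signature_version: 2 }, true) # => false (V2 signature)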
@@ -169,6 +169,10 @@ module ObjectStorage
       object_store_options.connection.to_hash.deep_symbolize_keys
     end

+    def consolidated_settings?
+      object_store_options.fetch('consolidated_settings', false)
+    end
+
     def remote_store_path
       object_store_options.remote_directory
     end
@@ -196,7 +200,7 @@ module ObjectStorage
       id = [CarrierWave.generate_cache_id, SecureRandom.hex].join('-')
       upload_path = File.join(TMP_UPLOAD_PATH, id)
       direct_upload = ObjectStorage::DirectUpload.new(self.object_store_credentials, remote_store_path, upload_path,
-        has_length: has_length, maximum_size: maximum_size)
+        has_length: has_length, maximum_size: maximum_size, consolidated_settings: consolidated_settings?)

       direct_upload.to_hash.merge(ID: id)
     end
...
---
title: Enable S3 Workhorse client if consolidated object settings used
merge_request: 35480
author:
type: added
...@@ -210,7 +210,6 @@ production: &base ...@@ -210,7 +210,6 @@ production: &base
## within the types (e.g. artifacts, lfs, etc.). ## within the types (e.g. artifacts, lfs, etc.).
# object_store: # object_store:
# enabled: false # enabled: false
# remote_directory: artifacts # The bucket name
# proxy_download: false # Passthrough all downloads via GitLab instead of using Redirects to Object Storage # proxy_download: false # Passthrough all downloads via GitLab instead of using Redirects to Object Storage
# connection: # connection:
# provider: AWS # Only AWS supported at the moment # provider: AWS # Only AWS supported at the moment
...
@@ -109,6 +109,7 @@ class ObjectStoreSettings
         # Map bucket (external name) -> remote_directory (internal representation)
         target_config['remote_directory'] = target_config.delete('bucket')
+        target_config['consolidated_settings'] = true
         section['object_store'] = target_config
       end
     end
@@ -120,7 +121,7 @@ class ObjectStoreSettings
   # 2. The legacy settings are not defined
   def use_consolidated_settings?
     return false unless settings.dig('object_store', 'enabled')
-    return false unless settings.dig('object_store', 'connection')
+    return false unless settings.dig('object_store', 'connection').present?

     SUPPORTED_TYPES.each do |store|
       # to_h is needed because something strange happens to
@@ -135,7 +136,8 @@ class ObjectStoreSettings
       next unless section

       return false if section.dig('object_store', 'enabled')
-      return false if section.dig('object_store', 'connection')
+      # Omnibus defaults to an empty hash
+      return false if section.dig('object_store', 'connection').present?
     end

     true
...
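
The switch from a plain truthiness check to present? is needed because an
empty hash is truthy in Ruby, and Omnibus emits an empty connection hash
rather than omitting the key entirely. A quick demonstration (present? comes
from ActiveSupport):

require 'active_support/core_ext/object/blank'

section = { 'object_store' => { 'connection' => {} } }

# An empty hash is truthy, so the old check treated it as configured:
!!section.dig('object_store', 'connection')         # => true
# present? correctly reports an empty hash as not configured:
section.dig('object_store', 'connection').present?  # => false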
@@ -23,9 +23,9 @@ module ObjectStorage
     MINIMUM_MULTIPART_SIZE = 5.megabytes

     attr_reader :credentials, :bucket_name, :object_name
-    attr_reader :has_length, :maximum_size
+    attr_reader :has_length, :maximum_size, :consolidated_settings

-    def initialize(credentials, bucket_name, object_name, has_length:, maximum_size: nil)
+    def initialize(credentials, bucket_name, object_name, has_length:, maximum_size: nil, consolidated_settings: false)
       unless has_length
         raise ArgumentError, 'maximum_size has to be specified if length is unknown' unless maximum_size
       end
@@ -35,6 +35,7 @@ module ObjectStorage
       @object_name = object_name
       @has_length = has_length
       @maximum_size = maximum_size
+      @consolidated_settings = consolidated_settings
     end

     def to_hash
@@ -80,10 +81,12 @@ module ObjectStorage
     end

     def use_workhorse_s3_client?
-      Feature.enabled?(:use_workhorse_s3_client, default_enabled: true) &&
-        credentials.fetch(:use_iam_profile, false) &&
-        # The Golang AWS SDK does not support V2 signatures
-        credentials.fetch(:aws_signature_version, 4).to_i >= 4
+      return false unless Feature.enabled?(:use_workhorse_s3_client, default_enabled: true)
+      return false unless credentials.fetch(:use_iam_profile, false) || consolidated_settings
+      # The Golang AWS SDK does not support V2 signatures
+      return false unless credentials.fetch(:aws_signature_version, 4).to_i >= 4
+
+      true
     end

     def provider
...
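
Downstream, the flag surfaces as the UseWorkhorseClient field in the hash
handed to Workhorse, as the spec below verifies. A rough usage sketch, with
placeholder credentials; generating the full hash also presigns upload URLs
via Fog, so running this requires fog-aws and real or mocked credentials:

# Rough usage sketch; credential values are placeholders.
credentials = {
  provider: 'AWS',
  aws_access_key_id: 'AKIA...',
  aws_secret_access_key: 'secret',
  region: 'us-east-1'
}

upload = ObjectStorage::DirectUpload.new(credentials, 'my-bucket', 'tmp/uploads/my-file',
  has_length: true, consolidated_settings: true)

upload.to_hash[:UseWorkhorseClient] # => true, even without use_iam_profile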
@@ -6,6 +6,7 @@ RSpec.describe ObjectStorage::DirectUpload do
   let(:region) { 'us-east-1' }
   let(:path_style) { false }
   let(:use_iam_profile) { false }
+  let(:consolidated_settings) { false }
   let(:credentials) do
     {
       provider: 'AWS',
@@ -23,7 +24,7 @@ RSpec.describe ObjectStorage::DirectUpload do
   let(:object_name) { 'tmp/uploads/my-file' }
   let(:maximum_size) { 1.gigabyte }

-  let(:direct_upload) { described_class.new(credentials, bucket_name, object_name, has_length: has_length, maximum_size: maximum_size) }
+  let(:direct_upload) { described_class.new(credentials, bucket_name, object_name, has_length: has_length, maximum_size: maximum_size, consolidated_settings: consolidated_settings) }

   before do
     Fog.unmock!
@@ -141,6 +142,14 @@ RSpec.describe ObjectStorage::DirectUpload do
         expect(subject[:UseWorkhorseClient]).to eq(use_iam_profile)
       end
     end
+
+    context 'when consolidated settings are used' do
+      let(:consolidated_settings) { true }
+
+      it 'enables the Workhorse client' do
+        expect(subject[:UseWorkhorseClient]).to be true
+      end
+    end
   end

   shared_examples 'a valid Google upload' do
...