Commit cb78be1a authored by Achilleas Pipinellis

Merge branch 'ce-to-ee-2018-09-17' into 'master'

CE upstream - 2018-09-17 12:35 UTC

Closes gitlab-ce#50460, gitlab-ce#50461, #7019, gitlab-qa#323, gitlab-workhorse#181, and gitlab-org/release/framework#13

See merge request gitlab-org/gitlab-ee!7385
parents b535af15 1a7bf1f6
......@@ -312,7 +312,6 @@ gem 'peek-mysql2', '~> 1.1.0', group: :mysql
gem 'peek-pg', '~> 1.3.0', group: :postgres
gem 'peek-rblineprof', '~> 0.2.0'
gem 'peek-redis', '~> 1.2.0'
gem 'peek-sidekiq', '~> 1.0.3'
# Metrics
group :metrics do
......
......@@ -634,10 +634,6 @@ GEM
atomic (>= 1.0.0)
peek
redis
peek-sidekiq (1.0.3)
atomic (>= 1.0.0)
peek
sidekiq
pg (0.18.4)
po_to_json (1.0.1)
json (>= 1.6.0)
......@@ -1150,7 +1146,6 @@ DEPENDENCIES
peek-pg (~> 1.3.0)
peek-rblineprof (~> 0.2.0)
peek-redis (~> 1.2.0)
peek-sidekiq (~> 1.0.3)
pg (~> 0.18.2)
premailer-rails (~> 1.9.7)
prometheus-client-mmap (~> 0.9.4)
......
......@@ -638,10 +638,6 @@ GEM
atomic (>= 1.0.0)
peek
redis
peek-sidekiq (1.0.3)
atomic (>= 1.0.0)
peek
sidekiq
pg (0.18.4)
po_to_json (1.0.1)
json (>= 1.6.0)
......@@ -1159,7 +1155,6 @@ DEPENDENCIES
peek-pg (~> 1.3.0)
peek-rblineprof (~> 0.2.0)
peek-redis (~> 1.2.0)
peek-sidekiq (~> 1.0.3)
pg (~> 0.18.2)
premailer-rails (~> 1.9.7)
prometheus-client-mmap (~> 0.9.4)
......
......@@ -10,6 +10,7 @@ import service from '../services/notes_service';
import loadAwardsHandler from '../../awards_handler';
import sidebarTimeTrackingEventHub from '../../sidebar/event_hub';
import { isInViewport, scrollToElement } from '../../lib/utils/common_utils';
import mrWidgetEventHub from '../../vue_merge_request_widget/event_hub';
let eTagPoll;
......@@ -61,9 +62,11 @@ export const refetchDiscussionById = ({ commit, state }, { path, discussionId })
.catch(() => {});
});
export const deleteNote = ({ commit }, note) =>
export const deleteNote = ({ commit, dispatch }, note) =>
service.deleteNote(note.path).then(() => {
commit(types.DELETE_NOTE, note);
dispatch('updateMergeRequestWidget');
});
export const updateNote = ({ commit }, { endpoint, note }) =>
......@@ -84,20 +87,22 @@ export const replyToDiscussion = ({ commit }, { endpoint, data }) =>
return res;
});
export const createNewNote = ({ commit }, { endpoint, data }) =>
export const createNewNote = ({ commit, dispatch }, { endpoint, data }) =>
service
.createNewNote(endpoint, data)
.then(res => res.json())
.then(res => {
if (!res.errors) {
commit(types.ADD_NEW_NOTE, res);
dispatch('updateMergeRequestWidget');
}
return res;
});
export const removePlaceholderNotes = ({ commit }) => commit(types.REMOVE_PLACEHOLDER_NOTES);
export const toggleResolveNote = ({ commit }, { endpoint, isResolved, discussion }) =>
export const toggleResolveNote = ({ commit, dispatch }, { endpoint, isResolved, discussion }) =>
service
.toggleResolveNote(endpoint, isResolved)
.then(res => res.json())
......@@ -105,6 +110,8 @@ export const toggleResolveNote = ({ commit }, { endpoint, isResolved, discussion
const mutationType = discussion ? types.UPDATE_DISCUSSION : types.UPDATE_NOTE;
commit(mutationType, res);
dispatch('updateMergeRequestWidget');
});
export const closeIssue = ({ commit, dispatch, state }) => {
......@@ -333,5 +340,9 @@ export const fetchDiscussionDiffLines = ({ commit }, discussion) =>
});
});
export const updateMergeRequestWidget = () => {
mrWidgetEventHub.$emit('mr.discussion.updated');
};
// prevent babel-plugin-rewire from generating an invalid default during karma tests
export default () => {};
import {
Vue,
mrWidgetOptions,
} from './dependencies';
import { Vue, mrWidgetOptions } from './dependencies';
import Translate from '../vue_shared/translate';
Vue.use(Translate);
......
......@@ -107,10 +107,14 @@ export default {
created() {
this.initPolling();
this.bindEventHubListeners();
eventHub.$on('mr.discussion.updated', this.checkStatus);
},
mounted() {
this.handleMounted();
},
beforeDestroy() {
eventHub.$off('mr.discussion.updated', this.checkStatus);
},
methods: {
createService(store) {
const endpoints = {
......
......@@ -715,7 +715,8 @@
text-overflow: ellipsis;
.user-status-emoji {
margin: 0 $gl-padding-8 0 $gl-padding-4;
margin-left: $gl-padding-4;
margin-right: 0;
}
}
......
......@@ -11,7 +11,7 @@ class Import::GitlabProjectsController < Import::BaseController
def create
unless file_is_valid?
return redirect_back_or_default(options: { alert: "You need to upload a GitLab project export archive." })
return redirect_back_or_default(options: { alert: "You need to upload a GitLab project export archive (ending in .gz)." })
end
@project = ::Projects::GitlabProjectsImportService.new(current_user, project_params).execute
......@@ -29,7 +29,11 @@ class Import::GitlabProjectsController < Import::BaseController
private
def file_is_valid?
project_params[:file] && project_params[:file].respond_to?(:read)
return false unless project_params[:file] && project_params[:file].respond_to?(:read)
filename = project_params[:file].original_filename
ImportExportUploader::EXTENSION_WHITELIST.include?(File.extname(filename).delete('.'))
end
def verify_gitlab_project_import_enabled
......
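For context on the whitelist check above: `File.extname` returns only the final extension, so a `.tar.gz` archive is matched by its `gz` suffix. A minimal standalone sketch (the whitelist values here are an assumption, not copied from `ImportExportUploader`):

```ruby
# Assumed to approximate ImportExportUploader::EXTENSION_WHITELIST
whitelist = %w[tar.gz tar.bz2 tar gz bz2]

File.extname('project_export.tar.gz')              # => ".gz" (only the last extension)
File.extname('project_export.tar.gz').delete('.')  # => "gz"

whitelist.include?('gz')   # => true  -> import proceeds
whitelist.include?('txt')  # => false -> the "(ending in .gz)" alert above is shown
```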
......@@ -143,7 +143,8 @@ class Projects::ClustersController < Projects::ApplicationController
:gcp_project_id,
:zone,
:num_nodes,
:machine_type
:machine_type,
:legacy_abac
]).merge(
provider_type: :gcp,
platform_type: :kubernetes
......
......@@ -98,7 +98,7 @@ class Projects::PipelinesController < Projects::ApplicationController
render json: StageSerializer
.new(project: @project, current_user: @current_user)
.represent(@stage, details: true)
.represent(@stage, details: true, retried: params[:retried])
end
# TODO: This endpoint is used by mini-pipeline-graph
......
......@@ -198,7 +198,7 @@ class ProjectsController < Projects::ApplicationController
def download_export
if @project.export_file_exists?
send_upload(@project.export_file)
send_upload(@project.export_file, attachment: @project.export_file.filename)
else
redirect_to(
edit_project_path(@project, anchor: 'js-export-project'),
......
......@@ -193,7 +193,7 @@ module IssuablesHelper
output << content_tag(:span, (issuable_first_contribution_icon if issuable.first_contribution?), class: 'has-tooltip', title: _('1st contribution!'))
output << content_tag(:span, (issuable.task_status if issuable.tasks?), id: "task_status", class: "d-none d-sm-none d-md-inline-block")
output << content_tag(:span, (issuable.task_status if issuable.tasks?), id: "task_status", class: "d-none d-sm-none d-md-inline-block prepend-left-8")
output << content_tag(:span, (issuable.task_status_short if issuable.tasks?), id: "task_status_short", class: "d-md-none")
output.join.html_safe
......
......@@ -74,14 +74,21 @@ module MarkupHelper
# the tag contents are truncated without removing the closing tag.
def first_line_in_markdown(object, attribute, max_chars = nil, options = {})
md = markdown_field(object, attribute, options)
return nil unless md.present?
text = truncate_visible(md, max_chars || md.length) if md.present?
tags = %w(a gl-emoji b pre code p span)
tags << 'img' if options[:allow_images]
sanitize(
text = truncate_visible(md, max_chars || md.length)
text = sanitize(
text,
tags: %w(a img gl-emoji b pre code p span),
tags: tags,
attributes: Rails::Html::WhiteListSanitizer.allowed_attributes + ['style', 'data-src', 'data-name', 'data-unicode-version']
)
# since <img> tags are stripped, this can leave empty <a> tags hanging around
# (as our markdown wraps images in links)
options[:allow_images] ? text : strip_empty_link_tags(text).html_safe
end
def markdown(text, context = {})
......@@ -235,6 +242,16 @@ module MarkupHelper
end
end
def strip_empty_link_tags(text)
scrubber = Loofah::Scrubber.new do |node|
node.remove if node.name == 'a' && node.content.blank?
end
# Use `Loofah` directly instead of `sanitize`
# as we still use the `rails-deprecated_sanitizer` gem
Loofah.fragment(text).scrub!(scrubber).to_s
end
def markdown_toolbar_button(options = {})
data = options[:data].merge({ container: 'body' })
content_tag :button,
......
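A quick sketch of what `strip_empty_link_tags` does once `<img>` tags have been sanitized away (standalone Ruby; `.strip.empty?` stands in for Rails' `.blank?`):

```ruby
require 'loofah'

scrubber = Loofah::Scrubber.new do |node|
  # drop anchors whose only content was the stripped image
  node.remove if node.name == 'a' && node.content.strip.empty?
end

html = '<p><a href="/uploads/test.png"></a> some text</p>'
Loofah.fragment(html).scrub!(scrubber).to_s
# => "<p> some text</p>"
```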
......@@ -658,8 +658,31 @@ module Ci
end
end
# Virtual deployment status depending on the environment status.
def deployment_status
return nil unless starts_environment?
if success?
return successful_deployment_status
elsif complete? && !success?
return :failed
end
:creating
end
private
def successful_deployment_status
if success? && last_deployment&.last?
return :last
elsif success? && last_deployment.present?
return :out_of_date
end
:creating
end
def each_test_report
Ci::JobArtifact::TEST_REPORT_FILE_TYPES.each do |file_type|
public_send("job_artifacts_#{file_type}").each_blob do |blob| # rubocop:disable GitlabSecurity/PublicSend
......
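The virtual status above collapses to four symbols (or `nil`). A rough summary plus an invented lookup to show how it reads; an illustration, not the serializer contract:

```ruby
# Build state                                        -> deployment_status
# --------------------------------------------------------------------------
# job does not start an environment                  -> nil
# success, and its deployment is still the latest    -> :last
# success, but a newer deployment exists             -> :out_of_date
# finished without success (failed/canceled/skipped) -> :failed
# anything else (pending, running, no deployment)    -> :creating

build = Ci::Build.find_by(name: 'deploy_to_staging') # hypothetical lookup
build.deployment_status                              # => :creating while the job runs
```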
......@@ -3,6 +3,7 @@
module ProtectedBranchAccess
extend ActiveSupport::Concern
include ProtectedRefAccess
include EE::ProtectedRefAccess # Can't use prepend. It'll override wrongly
included do
......
......@@ -3,6 +3,7 @@
module ProtectedTagAccess
extend ActiveSupport::Concern
include ProtectedRefAccess
include EE::ProtectedRefAccess # Can't use prepend. It'll override wrongly
included do
......
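The "Can't use prepend" comments come down to Ruby's method-lookup order: a prepended module is inserted in front of the including module or class and shadows methods defined there, while a plain `include` sits behind them. A generic illustration with made-up modules (not the GitLab ones):

```ruby
module Ce
  def access_levels
    [:maintainer]
  end
end

module Ee
  def access_levels
    super + [:developer]
  end
end

class IncludedVersion
  include Ce
  include Ee      # Ee lands directly above Ce, below the class itself
end

class PrependedVersion
  include Ce
  prepend Ee      # Ee lands in front of everything the class defines
end

IncludedVersion.ancestors.first(3)  # => [IncludedVersion, Ee, Ce]
PrependedVersion.ancestors.first(3) # => [Ee, PrependedVersion, Ce]
```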
# frozen_string_literal: true
class BuildDetailsEntity < JobEntity
include EnvironmentHelper
include RequestAwareEntity
include CiStatusHelper
expose :coverage, :erased_at, :duration
expose :tag_list, as: :tags
expose :user, using: UserEntity
expose :runner, using: RunnerEntity
expose :pipeline, using: PipelineEntity
expose :deployment_status, if: -> (*) { build.has_environment? } do
expose :deployment_status, as: :status
expose :icon do |build|
ci_label_for_status(build.status)
end
expose :persisted_environment, as: :environment, with: EnvironmentEntity
end
expose :metadata, using: BuildMetadataEntity
expose :artifact, if: -> (*) { can?(current_user, :read_build, build) } do
......
......@@ -19,6 +19,12 @@ class StageEntity < Grape::Entity
latest_statuses
end
expose :retried,
if: -> (_, opts) { opts[:retried] },
with: JobEntity do |stage|
retried_statuses
end
expose :detailed_status, as: :status, with: DetailedStatusEntity
expose :path do |stage|
......@@ -48,9 +54,19 @@ class StageEntity < Grape::Entity
@grouped_statuses ||= stage.statuses.latest_ordered.group_by(&:status)
end
def grouped_retried_statuses
@grouped_retried_statuses ||= stage.statuses.retried_ordered.group_by(&:status)
end
def latest_statuses
HasStatus::ORDERED_STATUSES.map do |ordered_status|
grouped_statuses.fetch(ordered_status, [])
end.flatten
end
def retried_statuses
HasStatus::ORDERED_STATUSES.map do |ordered_status|
grouped_retried_statuses.fetch(ordered_status, [])
end.flatten
end
end
# frozen_string_literal: true
##
# TODO:
# Almost components in this class were copied from app/models/project_services/kubernetes_service.rb
# We should dry up those classes not to repeat the same code.
# Maybe we should have a special facility (e.g. lib/kubernetes_api) to maintain all Kubernetes API caller.
module Ci
class FetchKubernetesTokenService
attr_reader :api_url, :ca_pem, :username, :password
def initialize(api_url, ca_pem, username, password)
@api_url = api_url
@ca_pem = ca_pem
@username = username
@password = password
end
def execute
read_secrets.each do |secret|
name = secret.dig('metadata', 'name')
if /default-token/ =~ name
token_base64 = secret.dig('data', 'token')
return Base64.decode64(token_base64) if token_base64
end
end
nil
end
private
def read_secrets
kubeclient = build_kubeclient!
kubeclient.get_secrets.as_json
rescue Kubeclient::HttpError => err
raise err unless err.error_code == 404
[]
end
def build_kubeclient!(api_path: 'api', api_version: 'v1')
raise "Incomplete settings" unless api_url && username && password
::Kubeclient::Client.new(
join_api_url(api_path),
api_version,
auth_options: { username: username, password: password },
ssl_options: kubeclient_ssl_options,
http_proxy_uri: ENV['http_proxy']
)
end
def join_api_url(api_path)
url = URI.parse(api_url)
prefix = url.path.sub(%r{/+\z}, '')
url.path = [prefix, api_path].join("/")
url.to_s
end
def kubeclient_ssl_options
opts = { verify_ssl: OpenSSL::SSL::VERIFY_PEER }
if ca_pem.present?
opts[:cert_store] = OpenSSL::X509::Store.new
opts[:cert_store].add_cert(OpenSSL::X509::Certificate.new(ca_pem))
end
opts
end
end
end
......@@ -9,17 +9,24 @@ module Clusters
@provider = provider
configure_provider
create_gitlab_service_account!
configure_kubernetes
cluster.save!
rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
provider.make_errored!("Failed to request to CloudPlatform; #{e.message}")
rescue Kubeclient::HttpError => e
provider.make_errored!("Failed to run Kubeclient: #{e.message}")
rescue ActiveRecord::RecordInvalid => e
provider.make_errored!("Failed to configure Google Kubernetes Engine Cluster: #{e.message}")
end
private
def create_gitlab_service_account!
Clusters::Gcp::Kubernetes::CreateServiceAccountService.new(kube_client, rbac: create_rbac_cluster?).execute
end
def configure_provider
provider.endpoint = gke_cluster.endpoint
provider.status_event = :make_created
......@@ -32,15 +39,54 @@ module Clusters
ca_cert: Base64.decode64(gke_cluster.master_auth.cluster_ca_certificate),
username: gke_cluster.master_auth.username,
password: gke_cluster.master_auth.password,
authorization_type: authorization_type,
token: request_kubernetes_token)
end
def request_kubernetes_token
Ci::FetchKubernetesTokenService.new(
Clusters::Gcp::Kubernetes::FetchKubernetesTokenService.new(kube_client).execute
end
def authorization_type
create_rbac_cluster? ? 'rbac' : 'abac'
end
def create_rbac_cluster?
!provider.legacy_abac?
end
def kube_client
@kube_client ||= build_kube_client!(
'https://' + gke_cluster.endpoint,
Base64.decode64(gke_cluster.master_auth.cluster_ca_certificate),
gke_cluster.master_auth.username,
gke_cluster.master_auth.password).execute
gke_cluster.master_auth.password,
api_groups: ['api', 'apis/rbac.authorization.k8s.io']
)
end
def build_kube_client!(api_url, ca_pem, username, password, api_groups: ['api'], api_version: 'v1')
raise "Incomplete settings" unless api_url && username && password
Gitlab::Kubernetes::KubeClient.new(
api_url,
api_groups,
api_version,
auth_options: { username: username, password: password },
ssl_options: kubeclient_ssl_options(ca_pem),
http_proxy_uri: ENV['http_proxy']
)
end
def kubeclient_ssl_options(ca_pem)
opts = { verify_ssl: OpenSSL::SSL::VERIFY_PEER }
if ca_pem.present?
opts[:cert_store] = OpenSSL::X509::Store.new
opts[:cert_store].add_cert(OpenSSL::X509::Certificate.new(ca_pem))
end
opts
end
def gke_cluster
......
# frozen_string_literal: true
module Clusters
module Gcp
module Kubernetes
SERVICE_ACCOUNT_NAME = 'gitlab'
SERVICE_ACCOUNT_NAMESPACE = 'default'
SERVICE_ACCOUNT_TOKEN_NAME = 'gitlab-token'
CLUSTER_ROLE_BINDING_NAME = 'gitlab-admin'
CLUSTER_ROLE_NAME = 'cluster-admin'
end
end
end
# frozen_string_literal: true
module Clusters
module Gcp
module Kubernetes
class CreateServiceAccountService
attr_reader :kubeclient, :rbac
def initialize(kubeclient, rbac:)
@kubeclient = kubeclient
@rbac = rbac
end
def execute
kubeclient.create_service_account(service_account_resource)
kubeclient.create_secret(service_account_token_resource)
kubeclient.create_cluster_role_binding(cluster_role_binding_resource) if rbac
end
private
def service_account_resource
Gitlab::Kubernetes::ServiceAccount.new(service_account_name, service_account_namespace).generate
end
def service_account_token_resource
Gitlab::Kubernetes::ServiceAccountToken.new(
SERVICE_ACCOUNT_TOKEN_NAME, service_account_name, service_account_namespace).generate
end
def cluster_role_binding_resource
subjects = [{ kind: 'ServiceAccount', name: service_account_name, namespace: service_account_namespace }]
Gitlab::Kubernetes::ClusterRoleBinding.new(
CLUSTER_ROLE_BINDING_NAME,
CLUSTER_ROLE_NAME,
subjects
).generate
end
def service_account_name
SERVICE_ACCOUNT_NAME
end
def service_account_namespace
SERVICE_ACCOUNT_NAMESPACE
end
end
end
end
end
# frozen_string_literal: true
module Clusters
module Gcp
module Kubernetes
class FetchKubernetesTokenService
attr_reader :kubeclient
def initialize(kubeclient)
@kubeclient = kubeclient
end
def execute
token_base64 = get_secret&.dig('data', 'token')
Base64.decode64(token_base64) if token_base64
end
private
def get_secret
kubeclient.get_secret(SERVICE_ACCOUNT_TOKEN_NAME, SERVICE_ACCOUNT_NAMESPACE).as_json
rescue Kubeclient::HttpError => err
raise err unless err.error_code == 404
nil
end
end
end
end
end
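Taken together, the two new services above are driven roughly like this (a condensed sketch; building `kube_client` is shown in the FinalizeCreationService changes above):

```ruby
# kube_client: a Gitlab::Kubernetes::KubeClient built with the
# 'api' and 'apis/rbac.authorization.k8s.io' groups (see FinalizeCreationService).
Clusters::Gcp::Kubernetes::CreateServiceAccountService
  .new(kube_client, rbac: true)
  .execute
# creates:
#   ServiceAccount     'gitlab'       in the 'default' namespace
#   Secret             'gitlab-token' annotated for that ServiceAccount
#   ClusterRoleBinding 'gitlab-admin' bound to 'cluster-admin' (only when rbac: true)

Clusters::Gcp::Kubernetes::FetchKubernetesTokenService
  .new(kube_client)
  .execute
# => decoded token from the 'gitlab-token' Secret, or nil if it does not exist yet
```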
......@@ -27,7 +27,9 @@ module Clusters
provider.zone,
provider.cluster.name,
provider.num_nodes,
machine_type: provider.machine_type)
machine_type: provider.machine_type,
legacy_abac: provider.legacy_abac
)
unless operation.status == 'PENDING' || operation.status == 'RUNNING'
return provider.make_errored!("Operation status is unexpected; #{operation.status_message}")
......
......@@ -61,5 +61,15 @@
%p.form-text.text-muted
= s_('ClusterIntegration|Learn more about %{help_link_start_machine_type}machine types%{help_link_end} and %{help_link_start_pricing}pricing%{help_link_end}.').html_safe % { help_link_start_machine_type: help_link_start % { url: machine_type_link_url }, help_link_start_pricing: help_link_start % { url: pricing_link_url }, help_link_end: help_link_end }
- if rbac_clusters_feature_enabled?
.form-group
.form-check
= provider_gcp_field.check_box :legacy_abac, { class: 'form-check-input' }, false, true
= provider_gcp_field.label :legacy_abac, s_('ClusterIntegration|RBAC-enabled cluster (experimental)'), class: 'form-check-label label-bold'
.form-text.text-muted
= s_('ClusterIntegration|Enable this setting if using role-based access control (RBAC).')
= s_('ClusterIntegration|This option will allow you to install applications on RBAC clusters.')
= link_to _('More information'), help_page_path('user/project/clusters/index.md', anchor: 'role-based-access-control-rbac-experimental-support'), target: '_blank'
.form-group
= field.submit s_('ClusterIntegration|Create Kubernetes cluster'), class: 'js-gke-cluster-creation-submit btn btn-success', disabled: true
......@@ -37,5 +37,14 @@
= platform_kubernetes_field.label :namespace, s_('ClusterIntegration|Project namespace (optional, unique)')
= platform_kubernetes_field.text_field :namespace, class: 'form-control', placeholder: s_('ClusterIntegration|Project namespace')
- if rbac_clusters_feature_enabled?
.form-group
.form-check
= platform_kubernetes_field.check_box :authorization_type, { class: 'form-check-input', disabled: true }, 'rbac', 'abac'
= platform_kubernetes_field.label :authorization_type, s_('ClusterIntegration|RBAC-enabled cluster (experimental)'), class: 'form-check-label label-bold'
.form-text.text-muted
= s_('ClusterIntegration|Enable this setting if using role-based access control (RBAC).')
= s_('ClusterIntegration|This option will allow you to install applications on RBAC clusters.')
.form-group
= field.submit s_('ClusterIntegration|Save changes'), class: 'btn btn-success'
......@@ -33,6 +33,7 @@
.form-text.text-muted
= s_('ClusterIntegration|Enable this setting if using role-based access control (RBAC).')
= s_('ClusterIntegration|This option will allow you to install applications on RBAC clusters.')
= link_to _('More information'), help_page_path('user/project/clusters/index.md', anchor: 'role-based-access-control-rbac-experimental-support'), target: '_blank'
.form-group
= field.submit s_('ClusterIntegration|Add Kubernetes cluster'), class: 'btn btn-success'
......@@ -34,4 +34,4 @@
= form.label :due_date, "Due date", class: "col-form-label col-md-2 col-lg-4"
.col-8
.issuable-form-select-holder
= form.text_field :due_date, id: "issuable-due-date", class: "datepicker form-control", placeholder: "Select due date"
= form.text_field :due_date, id: "issuable-due-date", class: "datepicker form-control", placeholder: "Select due date", autocomplete: 'off'
---
title: Send deployment information in job API
merge_request: 21307
author:
type: other
---
title: Support Kubernetes RBAC for GitLab Managed Apps when creating new clusters
merge_request: 21401
author:
type: changed
---
title: Images are no longer displayed in Todo descriptions
merge_request: 21704
author:
type: fixed
---
title: Fix double title in merge request chat messages.
merge_request:
merge_request: 21670
author: Kukovskii Vladimir
type: fixed
---
title: Add retried jobs to pipeline stage
merge_request: 21558
author:
type: other
---
title: Fix import error when archive does not have the correct extension
merge_request: 21765
author:
type: fixed
---
title: Add margin between username and subsequent text in issuable header
merge_request: 21697
author:
type: other
---
title: Fixed merge request widget discussion state not updating after resolving discussions
merge_request: 21705
author:
type: fixed
---
title: Remove sidekiq info from performance bar
merge_request:
author:
type: removed
---
title: Fix object storage uploads not working with AWS v2
merge_request: 21731
author:
type: fixed
---
title: Update GitLab Shell to v8.3.3
merge_request: 21750
author:
type: fixed
......@@ -18,7 +18,6 @@ Peek.into PEEK_DB_VIEW
Peek.into Peek::Views::Gitaly
Peek.into Peek::Views::Rblineprof
Peek.into Peek::Views::Redis
Peek.into Peek::Views::Sidekiq
Peek.into Peek::Views::GC
# rubocop:disable Naming/ClassAndModuleCamelCase
......
......@@ -87,7 +87,7 @@ module.exports = function(config) {
basePath: ROOT_PATH,
browsers: ['ChromeHeadlessCustom'],
client: {
isCI: !!process.env.CI
color: !process.env.CI
},
customLaunchers: {
ChromeHeadlessCustom: {
......
......@@ -26,6 +26,11 @@ def ce_upstream?
gitlab.mr_labels.any? { |label| label == 'CE upstream' }
end
def too_many_changed_lines?(commit)
commit.diff_parent.stats[:total][:files] > 3 &&
lines_changed_in_commit(commit) >= 30
end
def lint_commits(commits)
failures = false
......@@ -102,10 +107,10 @@ def lint_commits(commits)
failures = true
end
if !details && lines_changed_in_commit(commit) >= 20
if !details && too_many_changed_lines?(commit)
fail_commit(
commit,
'Commits that change more than 20 lines ' \
'Commits that change 30 or more lines in more than three files ' \
'must describe these changes in the commit body'
)
......@@ -170,7 +175,7 @@ def lint_commits(commits)
Use:
_("Hello %{subject}) % { subject: 'world' }
_("Hello %{subject}") % { subject: 'world' }
This is an example of a bad commit message:
......
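Restating the tightened `too_many_changed_lines?` rule as a small standalone predicate (illustrative only; the real check reads its numbers from `commit.diff_parent.stats`):

```ruby
# A commit without a body now fails the lint only when BOTH thresholds are hit.
def needs_body?(files_changed, lines_changed)
  files_changed > 3 && lines_changed >= 30
end

needs_body?(4, 30)   # => true   (more than three files and 30+ lines)
needs_body?(2, 500)  # => false  (large, but confined to two files)
needs_body?(10, 29)  # => false  (many files, but fewer than 30 lines)
```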
# frozen_string_literal: true
class AddLegacyAbacToClusterProvidersGcp < ActiveRecord::Migration
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
add_column_with_default(:cluster_providers_gcp, :legacy_abac, :boolean, default: true)
end
def down
remove_column(:cluster_providers_gcp, :legacy_abac)
end
end
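The `true` default keeps every existing integration on ABAC; only clusters created after this change can opt out. A sketch of the expected state right after the migration (model name taken from the controller specs further down):

```ruby
# Every pre-existing GCP provider row is backfilled with legacy_abac: true,
# so clusters created before this migration keep their ABAC behaviour.
Clusters::Providers::Gcp.where(legacy_abac: true).count ==
  Clusters::Providers::Gcp.count
# => true immediately after the migration runs
```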
......@@ -11,7 +11,7 @@
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20180906101639) do
ActiveRecord::Schema.define(version: 20180907015926) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
......@@ -738,6 +738,7 @@ ActiveRecord::Schema.define(version: 20180906101639) do
t.string "endpoint"
t.text "encrypted_access_token"
t.string "encrypted_access_token_iv"
t.boolean "legacy_abac", default: true, null: false
end
add_index "cluster_providers_gcp", ["cluster_id"], name: "index_cluster_providers_gcp_on_cluster_id", unique: true, using: :btree
......
......@@ -3,6 +3,8 @@
Automate GitLab via a simple and powerful API. All definitions can be found
under [`/lib/api`](https://gitlab.com/gitlab-org/gitlab-ce/tree/master/lib/api).
The main GitLab API is a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) API. Therefore, documentation in this section assumes knowledge of REST concepts.
## Resources
Documentation for various API resources can be found separately in the
......@@ -85,8 +87,8 @@ Going forward, we will start on moving to
controller-specific endpoints. GraphQL has a number of benefits:
1. We avoid having to maintain two different APIs.
2. Callers of the API can request only what they need.
3. It is versioned by default.
1. Callers of the API can request only what they need.
1. It is versioned by default.
It will co-exist with the current v4 REST API. If we have a v5 API, this should
be a compatibility layer on top of GraphQL.
......@@ -147,8 +149,9 @@ There are three ways to authenticate with the GitLab API:
1. [Session cookie](#session-cookie)
For admins who want to authenticate with the API as a specific user, or who want to build applications or scripts that do so, two options are available:
1. [Impersonation tokens](#impersonation-tokens)
2. [Sudo](#sudo)
1. [Sudo](#sudo)
If authentication information is invalid or omitted, an error message will be
returned with status code `401`:
......@@ -227,7 +230,8 @@ Impersonation tokens are used exactly like regular personal access tokens, and c
### Sudo
> Needs admin permissions.
NOTE: **Note:**
Only available to [administrators](../user/permissions.md).
All API requests support performing an API call as if you were another user,
provided you are authenticated as an administrator with an OAuth or Personal Access Token that has the `sudo` scope.
......@@ -453,28 +457,23 @@ curl --request POST --header "PRIVATE-TOKEN: ********************" \
## `id` vs `iid`
When you work with the API, you may notice two similar fields in API entities:
`id` and `iid`. The main difference between them is scope.
Some resources have two similarly-named fields. For example, [issues](issues.md), [merge requests](merge_requests.md), and [project milestones](merge_requests.md). The fields are:
For example, an issue might have `id: 46` and `iid: 5`.
- `id`: ID that is unique across all projects.
- `iid`: additional, internal ID that is unique in the scope of a single project.
| Parameter | Description |
| --------- | ----------- |
| `id` | Is unique across all issues and is used for any API call |
| `iid` | Is unique only in scope of a single project. When you browse issues or merge requests with the Web UI, you see the `iid` |
NOTE: **Note:**
The `iid` is displayed in the web UI.
That means that if you want to get an issue via the API you should use the `id`:
If a resource has the `iid` field and the `id` field, the `iid` field is usually used instead of `id` to fetch the resource.
```
GET /projects/42/issues/:id
```
For example, suppose a project with `id: 42` has an issue with `id: 46` and `iid: 5`. In this case:
On the other hand, if you want to create a link to a web page you should use
the `iid`:
- A valid API call to retrieve the issue is `GET /projects/42/issues/5`
- An invalid API call to retrieve the issue is `GET /projects/42/issues/46`.
```
GET /projects/42/issues/:iid
```
NOTE: **Note:**
Not all resources with the `iid` field are fetched by `iid`. For guidance on which field to use, see the documentation for the specific resource.
## Data validation and error reporting
......
......@@ -19,7 +19,7 @@ Parameters:
| --------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) owned by the authenticated user |
| `iids[]` | Array[integer] | optional | Return only the milestones having the given `iid` |
| `state` | string | optional | Return only `active` or `closed` milestones` |
| `state` | string | optional | Return only `active` or `closed` milestones |
| `search` | string | optional | Return only milestones with a title or description matching the provided string |
```bash
......
......@@ -266,7 +266,7 @@ GET /users/:id?with_custom_attributes=true
## User creation
Creates a new user. Note only administrators can create new users. Either `password` or `reset_password` should be specified (`reset_password` takes priority).
Creates a new user. Note only administrators can create new users. Either `password` or `reset_password` should be specified (`reset_password` takes priority). If `reset_password` is `false`, then `password` is required.
```
POST /users
......@@ -509,7 +509,7 @@ PUT /user/status
When both parameters `emoji` and `message` are empty, the status will be cleared.
```bash
curl --request PUT --header "PRIVATE-TOKEN: 9koXpg98eAheJpvBs5tK" --data "emoji=coffee" --data "emoji=I crave coffee" https://gitlab.example.com/api/v4/user/status
curl --request PUT --header "PRIVATE-TOKEN: 9koXpg98eAheJpvBs5tK" --data "emoji=coffee" --data "message=I crave coffee" https://gitlab.example.com/api/v4/user/status
```
Example responses
......
......@@ -78,16 +78,18 @@ A job is defined by a list of parameters that define the job behavior.
### `extends`
> Introduced in GitLab 11.3
> Introduced in GitLab 11.3.
`extends` defines an entry name that a job, that uses `extends` is going to
`extends` defines an entry name that a job that uses `extends` is going to
inherit from.
`extends` in an alternative to using [YAML anchors](#anchors) that is a little
more flexible and readable.
It is an alternative to using [YAML anchors](#anchors) and is a little
more flexible and readable:
```yaml
.tests:
script: rake test
stage: test
only:
refs:
- branches
......@@ -95,16 +97,15 @@ more flexible and readable.
rspec:
extends: .tests
script: rake rspec
stage: test
only:
variables:
- $RSPEC
```
In the example above the `rspec` job is going to inherit from `.tests`
template. GitLab will perform a reverse deep merge, what means that it will
merge `rspec` contents into `.tests` recursively, and it is going to result in
following configuration of the `rspec` job:
In the example above, the `rspec` job is going to inherit from the `.tests`
template job. GitLab will perform a reverse deep merge, which means that it will
merge the `rspec` contents into `.tests` recursively, and this is going to result in
the following `rspec` job:
```yaml
rspec:
......@@ -117,13 +118,12 @@ rspec:
- $RSPEC
```
`.tests` in this example is a [hidden key](#hidden-keys-jobs), but it is
`.tests` in this example is a [hidden key](#hidden-keys-jobs), but it's
possible to inherit from regular jobs as well.
`extends` supports multi-level inheritance, however it is not recommended to
use more than three levels of inheritance. Maximum nesting level supported is
10 levels.
use more than three levels. The maximum nesting level that is supported is 10.
The following example has two levels of inheritance:
```yaml
.tests:
......
......@@ -31,7 +31,7 @@ only and does not directly affect the way that any regular user or
administrator would interact with GitLab.
NOTE: **Note:**
When refactoring documentation in needed, it should be submitted it in its own MR.
When refactoring documentation, it should be submitted in its own MR.
**Do not** join new features' MRs with refactoring existing docs, as they might have
different priorities.
......
......@@ -125,3 +125,6 @@ pwd
```
clear
```
### Sample Git taskflow
If you are completely new to Git, looking through some [sample taskflows](https://rogerdudler.github.io/git-guide/) will help you understand best practices for using these commands as you work.
......@@ -19,6 +19,7 @@ large deployments. It offers a number of benefits:
- No requirement for shared storage to scale
- Containers do not need `root` permissions
- Automatic SSL with Let's Encrypt
- An unprivileged GitLab Runner
- and plenty more.
Learn more about the [GitLab chart](gitlab_chart.md).
......
......@@ -128,8 +128,81 @@ applications running on the cluster.
When GitLab creates the cluster, it enables and uses the legacy
[Attribute-based access control (ABAC)](https://kubernetes.io/docs/admin/authorization/abac/).
The newer [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)
authorization will be supported in a
[future release](https://gitlab.com/gitlab-org/gitlab-ce/issues/29398).
authorization is [experimental](#role-based-access-control-rbac).
### Role-based access control (RBAC) **[CORE ONLY]**
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/21401) in GitLab 11.4.
CAUTION: **Warning:**
The RBAC authorization is experimental. To enable it you need access to the
server where GitLab is installed.
The support for RBAC-enabled clusters is hidden behind a feature flag. Once
the feature flag is enabled, GitLab will create the necessary service accounts
and privileges in order to install and run [GitLab managed applications](#installing-applications).
To enable the feature flag:
1. SSH into the server where GitLab is installed.
1. Enter the Rails console:
**For Omnibus GitLab**
```sh
sudo gitlab-rails console
```
**For installations from source**
```sh
sudo -u git -H bundle exec rails console
```
1. Enable the RBAC authorization:
```ruby
Feature.enable('rbac_clusters')
```
If you are creating a [new GKE cluster via
GitLab](#adding-and-creating-a-new-gke-cluster-via-gitlab), you will be
asked if you would like to create an RBAC-enabled cluster. Enabling this
setting will create a `gitlab` service account which will be used by
GitLab to manage the newly created cluster. To enable this, this service
account will have the `cluster-admin` privilege.
If you are [adding an existing Kubernetes
cluster](#adding-an-existing-kubernetes-cluster), you will be asked if
the cluster you are adding is a RBAC-enabled cluster. Ensure the
token of the account has administrator privileges for the cluster.
In both cases above, when you install Helm Tiller into your cluster, an
RBAC-enabled cluster will create a `tiller` service account, with `cluster-admin`
privileges in the `gitlab-managed-apps` namespace. This service account will be
added to the installed Helm Tiller and will be used by Helm to install and run
[GitLab managed applications](#installing-applications).
The table below summarizes which resources will be created in a
RBAC-enabled cluster :
| Name | Kind | Details | Created when |
| --- | --- | --- | --- |
| `gitlab` | `ServiceAccount` | `default` namespace | Creating a new GKE Cluster |
| `gitlab-admin` | `ClusterRoleBinding` | `cluster-admin` roleRef | Creating a new GKE Cluster |
| `gitlab-token` | `Secret` | Token for `gitlab` ServiceAccount | Creating a new GKE Cluster |
| `tiller` | `ServiceAccount` | `gitlab-managed-apps` namespace | Installing Helm Tiller |
| `tiller-admin` | `ClusterRoleBinding` | `cluster-admin` roleRef | Installing Helm Tiller |
Helm Tiller will also create additional service accounts and other RBAC
resources for each installed application. Consult the documentation for the
Helm charts for each application for details.
NOTE: **Note:**
Auto DevOps will not successfully complete in a cluster that only has RBAC
authorization enabled. RBAC support for Auto DevOps is planned in a
[future release](https://gitlab.com/gitlab-org/gitlab-ce/issues/44597).
### Security of GitLab Runners
......@@ -162,13 +235,13 @@ with Tiller already installed, you should be careful as GitLab cannot
detect it. By installing it via the applications will result into having it
twice, which can lead to confusion during deployments.
| Application | GitLab version | Description |
| ----------- | :------------: | ----------- |
| [Helm Tiller](https://docs.helm.sh/) | 10.2+ | Helm is a package manager for Kubernetes and is required to install all the other applications. It is installed in its own pod inside the cluster which can run the `helm` CLI in a safe environment. |
| [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) | 10.2+ | Ingress can provide load balancing, SSL termination, and name-based virtual hosting. It acts as a web proxy for your applications and is useful if you want to use [Auto DevOps] or deploy your own web apps. |
| [Prometheus](https://prometheus.io/docs/introduction/overview/) | 10.4+ | Prometheus is an open-source monitoring and alerting system useful to supervise your deployed applications. |
| [GitLab Runner](https://docs.gitlab.com/runner/) | 10.6+ | GitLab Runner is the open source project that is used to run your jobs and send the results back to GitLab. It is used in conjunction with [GitLab CI/CD](https://about.gitlab.com/features/gitlab-ci-cd/), the open-source continuous integration service included with GitLab that coordinates the jobs. When installing the GitLab Runner via the applications, it will run in **privileged mode** by default. Make sure you read the [security implications](#security-implications) before doing so. |
| [JupyterHub](http://jupyter.org/) | 11.0+ | [JupyterHub](https://jupyterhub.readthedocs.io/en/stable/) is a multi-user service for managing notebooks across a team. [Jupyter Notebooks](https://jupyter-notebook.readthedocs.io/en/latest/) provide a web-based interactive programming environment used for data analysis, visualization, and machine learning. We use [this](https://gitlab.com/gitlab-org/jupyterhub-user-image/blob/master/Dockerfile) custom Jupyter image that installs additional useful packages on top of the base Jupyter. **Note**: Authentication will be enabled for any user of the GitLab server via OAuth2. HTTPS will be supported in a future release. |
| Application | GitLab version | Description | Helm Chart |
| ----------- | :------------: | ----------- | --------------- |
| [Helm Tiller](https://docs.helm.sh/) | 10.2+ | Helm is a package manager for Kubernetes and is required to install all the other applications. It is installed in its own pod inside the cluster which can run the `helm` CLI in a safe environment. | n/a |
| [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) | 10.2+ | Ingress can provide load balancing, SSL termination, and name-based virtual hosting. It acts as a web proxy for your applications and is useful if you want to use [Auto DevOps] or deploy your own web apps. | [stable/nginx-ingress](https://github.com/helm/charts/tree/master/stable/nginx-ingress) |
| [Prometheus](https://prometheus.io/docs/introduction/overview/) | 10.4+ | Prometheus is an open-source monitoring and alerting system useful to supervise your deployed applications. | [stable/prometheus](https://github.com/helm/charts/tree/master/stable/prometheus) |
| [GitLab Runner](https://docs.gitlab.com/runner/) | 10.6+ | GitLab Runner is the open source project that is used to run your jobs and send the results back to GitLab. It is used in conjunction with [GitLab CI/CD](https://about.gitlab.com/features/gitlab-ci-cd/), the open-source continuous integration service included with GitLab that coordinates the jobs. When installing the GitLab Runner via the applications, it will run in **privileged mode** by default. Make sure you read the [security implications](#security-implications) before doing so. | [runner/gitlab-runner](https://gitlab.com/charts/gitlab-runner) |
| [JupyterHub](http://jupyter.org/) | 11.0+ | [JupyterHub](https://jupyterhub.readthedocs.io/en/stable/) is a multi-user service for managing notebooks across a team. [Jupyter Notebooks](https://jupyter-notebook.readthedocs.io/en/latest/) provide a web-based interactive programming environment used for data analysis, visualization, and machine learning. We use [this](https://gitlab.com/gitlab-org/jupyterhub-user-image/blob/master/Dockerfile) custom Jupyter image that installs additional useful packages on top of the base Jupyter. **Note**: Authentication will be enabled for any user of the GitLab server via OAuth2. HTTPS will be supported in a future release. | [jupyter/jupyterhub](https://jupyterhub.github.io/helm-chart/) |
## Getting the external IP address
......
......@@ -25,12 +25,14 @@ module Gitlab
:get_config_map,
:get_namespace,
:get_pod,
:get_secret,
:get_service,
:get_service_account,
:delete_pod,
:create_config_map,
:create_namespace,
:create_pod,
:create_secret,
:create_service_account,
:update_config_map,
:update_service_account,
......
# frozen_string_literal: true
module Gitlab
module Kubernetes
class ServiceAccountToken
attr_reader :name, :service_account_name, :namespace_name
def initialize(name, service_account_name, namespace_name)
@name = name
@service_account_name = service_account_name
@namespace_name = namespace_name
end
def generate
::Kubeclient::Resource.new(metadata: metadata, type: service_acount_token_type)
end
private
# as per https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#to-create-additional-api-tokens
def service_acount_token_type
'kubernetes.io/service-account-token'
end
def metadata
{
name: name,
namespace: namespace_name,
annotations: {
"kubernetes.io/service-account.name": service_account_name
}
}
end
end
end
end
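Roughly what `ServiceAccountToken#generate` yields for the names used elsewhere in this MR; `to_h` is used here for readability, while the actual return value is a `Kubeclient::Resource`:

```ruby
Gitlab::Kubernetes::ServiceAccountToken
  .new('gitlab-token', 'gitlab', 'default')
  .generate
  .to_h
# => {
#      metadata: {
#        name: "gitlab-token",
#        namespace: "default",
#        annotations: { "kubernetes.io/service-account.name": "gitlab" }
#      },
#      type: "kubernetes.io/service-account-token"
#    }
```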
......@@ -50,7 +50,7 @@ module GoogleApi
service.get_zone_cluster(project_id, zone, cluster_id, options: user_agent_header)
end
def projects_zones_clusters_create(project_id, zone, cluster_name, cluster_size, machine_type:)
def projects_zones_clusters_create(project_id, zone, cluster_name, cluster_size, machine_type:, legacy_abac:)
service = Google::Apis::ContainerV1::ContainerService.new
service.authorization = access_token
......@@ -63,7 +63,7 @@ module GoogleApi
"machine_type": machine_type
},
"legacy_abac": {
"enabled": true
"enabled": legacy_abac
}
}
}
......
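The new keyword threaded through the GKE create call above, with illustrative argument values borrowed from the specs and views in this MR (`client` construction omitted):

```ruby
# client: a GoogleApi::CloudPlatform::Client holding a valid access token
client.projects_zones_clusters_create(
  'gcp-project-12345',           # project_id
  'us-central1-a',               # zone
  'new-cluster',                 # cluster_name
  1,                             # cluster_size (num_nodes)
  machine_type: 'n1-standard-2',
  legacy_abac: false             # false => legacy ABAC disabled, i.e. an RBAC cluster
)
```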
......@@ -89,7 +89,7 @@ module ObjectStorage
method: 'PUT',
bucket_name: bucket_name,
object_name: object_name,
query: { uploadId: upload_id, partNumber: part_number },
query: { 'uploadId' => upload_id, 'partNumber' => part_number },
headers: upload_options
}, expire_at)
end
......@@ -100,7 +100,7 @@ module ObjectStorage
method: 'POST',
bucket_name: bucket_name,
object_name: object_name,
query: { uploadId: upload_id },
query: { 'uploadId' => upload_id },
headers: { 'Content-Type' => 'application/xml' }
}, expire_at)
end
......@@ -111,7 +111,7 @@ module ObjectStorage
method: 'DELETE',
bucket_name: bucket_name,
object_name: object_name,
query: { uploadId: upload_id }
query: { 'uploadId' => upload_id }
}, expire_at)
end
......
......@@ -21,6 +21,7 @@ module QA
# Disable code_quality check in Auto DevOps pipeline as it takes
# too long and times out the test
Factory::Resource::SecretVariable.fabricate! do |resource|
resource.project = project
resource.key = 'CODE_QUALITY_DISABLED'
resource.value = '1'
end
......
......@@ -3,7 +3,7 @@ require 'spec_helper'
describe Import::GitlabProjectsController do
set(:namespace) { create(:namespace) }
set(:user) { namespace.owner }
let(:file) { fixture_file_upload('spec/fixtures/doc_sample.txt', 'text/plain') }
let(:file) { fixture_file_upload('spec/fixtures/project_export.tar.gz', 'text/plain') }
before do
sign_in(user)
......
......@@ -170,12 +170,14 @@ describe Projects::ClustersController do
end
describe 'POST create for new cluster' do
let(:legacy_abac_param) { 'true' }
let(:params) do
{
cluster: {
name: 'new-cluster',
provider_gcp_attributes: {
gcp_project_id: 'gcp-project-12345'
gcp_project_id: 'gcp-project-12345',
legacy_abac: legacy_abac_param
}
}
}
......@@ -201,6 +203,18 @@ describe Projects::ClustersController do
expect(response).to redirect_to(project_cluster_path(project, project.clusters.first))
expect(project.clusters.first).to be_gcp
expect(project.clusters.first).to be_kubernetes
expect(project.clusters.first.provider_gcp).to be_legacy_abac
end
context 'when legacy_abac param is false' do
let(:legacy_abac_param) { 'false' }
it 'creates a new cluster with legacy_abac_disabled' do
expect(ClusterProvisionWorker).to receive(:perform_async)
expect { go }.to change { Clusters::Cluster.count }
.and change { Clusters::Providers::Gcp.count }
expect(project.clusters.first.provider_gcp).not_to be_legacy_abac
end
end
end
......
......@@ -86,7 +86,7 @@ describe Projects::JobsController, :clean_gitlab_redis_shared_state do
def create_job(name, status)
pipeline = create(:ci_pipeline, project: project)
create(:ci_build, :tags, :triggered, :artifacts,
pipeline: pipeline, name: name, status: status)
pipeline: pipeline, name: name, status: status)
end
end
......@@ -206,6 +206,29 @@ describe Projects::JobsController, :clean_gitlab_redis_shared_state do
expect(json_response['status']['illustration']).to have_key('title')
end
end
context 'with no deployment' do
let(:job) { create(:ci_build, :success, pipeline: pipeline) }
it 'does not exposes the deployment information' do
expect(response).to have_gitlab_http_status(:ok)
expect(json_response['deployment_status']).to be_nil
end
end
context 'with deployment' do
let(:merge_request) { create(:merge_request, source_project: project) }
let(:environment) { create(:environment, project: project, name: 'staging', state: :available) }
let(:job) { create(:ci_build, :success, environment: environment.name, pipeline: pipeline) }
it 'exposes the deployment information' do
expect(response).to have_gitlab_http_status(:ok)
expect(json_response).to match_schema('job/job_details')
expect(json_response['deployment_status']["status"]).to eq 'creating'
expect(json_response['deployment_status']["icon"]).to eq 'passed'
expect(json_response['deployment_status']["environment"]).not_to be_nil
end
end
end
context 'when requesting JSON job is triggered' do
......
......@@ -193,14 +193,34 @@ describe Projects::PipelinesController do
context 'when accessing existing stage' do
before do
create(:ci_build, :retried, :failed, pipeline: pipeline, stage: 'build')
create(:ci_build, pipeline: pipeline, stage: 'build')
end
context 'without retried' do
before do
get_stage('build')
end
get_stage('build')
it 'returns pipeline jobs without the retried builds' do
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('pipeline_stage')
expect(json_response['latest_statuses'].length).to eq 1
expect(json_response).not_to have_key('retried')
end
end
it 'returns html source for stage dropdown' do
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('pipeline_stage')
context 'with retried' do
before do
get_stage('build', retried: true)
end
it 'returns pipelines jobs with the retried builds' do
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('pipeline_stage')
expect(json_response['latest_statuses'].length).to eq 1
expect(json_response['retried'].length).to eq 1
end
end
end
......@@ -214,12 +234,13 @@ describe Projects::PipelinesController do
end
end
def get_stage(name)
get :stage, namespace_id: project.namespace,
project_id: project,
id: pipeline.id,
stage: name,
format: :json
def get_stage(name, params = {})
get :stage, **params.merge(
namespace_id: project.namespace,
project_id: project,
id: pipeline.id,
stage: name,
format: :json)
end
end
......
......@@ -33,6 +33,32 @@ describe 'Gcp Cluster', :js do
context 'when user filled form with valid parameters' do
subject { click_button 'Create Kubernetes cluster' }
shared_examples 'valid cluster gcp form' do
it 'users sees a form with the GCP token' do
expect(page).to have_selector(:css, 'form[data-token="token"]')
end
it 'user sees a cluster details page and creation status' do
subject
expect(page).to have_content('Kubernetes cluster is being created on Google Kubernetes Engine...')
Clusters::Cluster.last.provider.make_created!
expect(page).to have_content('Kubernetes cluster was successfully created on Google Kubernetes Engine')
end
it 'user sees a error if something wrong during creation' do
subject
expect(page).to have_content('Kubernetes cluster is being created on Google Kubernetes Engine...')
Clusters::Cluster.last.provider.make_errored!('Something wrong!')
expect(page).to have_content('Something wrong!')
end
end
before do
allow_any_instance_of(GoogleApi::CloudPlatform::Client)
.to receive(:projects_zones_clusters_create) do
......@@ -56,28 +82,16 @@ describe 'Gcp Cluster', :js do
fill_in 'cluster[provider_gcp_attributes][machine_type]', with: 'n1-standard-2'
end
it 'users sees a form with the GCP token' do
expect(page).to have_selector(:css, 'form[data-token="token"]')
end
it 'user sees a cluster details page and creation status' do
subject
expect(page).to have_content('Kubernetes cluster is being created on Google Kubernetes Engine...')
Clusters::Cluster.last.provider.make_created!
expect(page).to have_content('Kubernetes cluster was successfully created on Google Kubernetes Engine')
end
it 'user sees a error if something wrong during creation' do
subject
it_behaves_like 'valid cluster gcp form'
expect(page).to have_content('Kubernetes cluster is being created on Google Kubernetes Engine...')
context 'rbac_clusters feature flag is enabled' do
before do
stub_feature_flags(rbac_clusters: true)
Clusters::Cluster.last.provider.make_errored!('Something wrong!')
check 'cluster_provider_gcp_attributes_legacy_abac'
end
expect(page).to have_content('Something wrong!')
it_behaves_like 'valid cluster gcp form'
end
end
......
......@@ -21,42 +21,43 @@ describe 'User Cluster', :js do
end
context 'when user filled form with valid parameters' do
shared_examples 'valid cluster user form' do
it 'user sees a cluster details page' do
subject
expect(page).to have_content('Kubernetes cluster integration')
expect(page.find_field('cluster[name]').value).to eq('dev-cluster')
expect(page.find_field('cluster[platform_kubernetes_attributes][api_url]').value)
.to have_content('http://example.com')
expect(page.find_field('cluster[platform_kubernetes_attributes][token]').value)
.to have_content('my-token')
end
end
before do
fill_in 'cluster_name', with: 'dev-cluster'
fill_in 'cluster_platform_kubernetes_attributes_api_url', with: 'http://example.com'
fill_in 'cluster_platform_kubernetes_attributes_token', with: 'my-token'
click_button 'Add Kubernetes cluster'
end
it 'user sees a cluster details page' do
expect(page).to have_content('Kubernetes cluster integration')
expect(page.find_field('cluster[name]').value).to eq('dev-cluster')
expect(page.find_field('cluster[platform_kubernetes_attributes][api_url]').value)
.to have_content('http://example.com')
expect(page.find_field('cluster[platform_kubernetes_attributes][token]').value)
.to have_content('my-token')
end
end
subject { click_button 'Add Kubernetes cluster' }
context 'rbac_clusters feature flag is enabled' do
before do
stub_feature_flags(rbac_clusters: true)
it_behaves_like 'valid cluster user form'
fill_in 'cluster_name', with: 'dev-cluster'
fill_in 'cluster_platform_kubernetes_attributes_api_url', with: 'http://example.com'
fill_in 'cluster_platform_kubernetes_attributes_token', with: 'my-token'
check 'cluster_platform_kubernetes_attributes_authorization_type'
click_button 'Add Kubernetes cluster'
end
context 'rbac_clusters feature flag is enabled' do
before do
stub_feature_flags(rbac_clusters: true)
check 'cluster_platform_kubernetes_attributes_authorization_type'
end
it_behaves_like 'valid cluster user form'
it 'user sees a cluster details page' do
expect(page).to have_content('Kubernetes cluster integration')
expect(page.find_field('cluster[name]').value).to eq('dev-cluster')
expect(page.find_field('cluster[platform_kubernetes_attributes][api_url]').value)
.to have_content('http://example.com')
expect(page.find_field('cluster[platform_kubernetes_attributes][token]').value)
.to have_content('my-token')
expect(page.find_field('cluster[platform_kubernetes_attributes][authorization_type]', disabled: true)).to be_checked
it 'user sees a cluster details page with RBAC enabled' do
subject
expect(page.find_field('cluster[platform_kubernetes_attributes][authorization_type]', disabled: true)).to be_checked
end
end
end
......
......@@ -49,6 +49,8 @@ describe 'Import/Export - project export integration test', :js do
expect(file_permissions(project.export_path)).to eq(0700)
expect(project.export_file.path).to include('tar.gz')
in_directory_with_expanded_export(project) do |exit_status, tmpdir|
expect(exit_status).to eq(0)
......
{
"additionalProperties": false,
"properties": {
"created_at": {
"type": "string"
},
"id": {
"type": "integer"
},
"iid": {
"type": "integer"
},
"last?": {
"type": "boolean"
},
"ref": {
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
},
"sha": {
"type": "string"
},
"tag": {
"type": "boolean"
}
"type": "object",
"required": [
"sha",
"created_at",
"iid",
"tag",
"last?",
"ref",
"id"
],
"properties": {
"created_at": { "type": "string" },
"id": { "type": "integer" },
"iid": { "type": "integer" },
"last?": { "type": "boolean" },
"ref": {
"type": "object",
"required": [
"name"
],
"properties": {
"name": { "type": "string" }
},
"additionalProperties": false
},
"required": [
"sha",
"created_at",
"iid",
"tag",
"last?",
"ref",
"id"
],
"type": "object"
"sha": { "type": "string" },
"tag": { "type": "boolean" }
},
"additionalProperties": false
}
{
"type": "object",
"allOf": [
{ "$ref": "../public_api/v4/commit/basic.json" },
{
"type": "object",
"required": [
"author_gravatar_url",
"commit_url",
"commit_path",
"author"
],
"properties": {
"author_gravatar_url": { "type": "string" },
"commit_url": { "type": "string" },
"commit_path": { "type": "string" },
"author": {
"oneOf": [
{ "type": "null" },
{ "type": "user.json" }
]
}
},
"additionalProperties": false
}
]
}
......@@ -5,13 +5,19 @@
"state",
"avatar_url",
"web_url",
"path"
"path",
"name",
"username"
],
"properties": {
"id": { "type": "integer" },
"state": { "type": "string" },
"avatar_url": { "type": "string" },
"web_url": { "type": "string" },
"path": { "type": "string" }
}
"path": { "type": "string" },
"name": { "type": "string" },
"username": { "type": "string" },
"status_tooltip_html": { "$ref": "../types/nullable_string.json" }
},
"additionalProperties": false
}
{
"type": "object",
"required": [
"id",
"name",
"state",
"external_url",
"environment_type",
"has_stop_action",
"environment_path",
"stop_path",
"folder_path",
"created_at",
"updated_at",
"can_stop"
],
"properties": {
"id": { "type": "integer" },
"name": { "type": "string" },
"state": { "type": "string" },
"external_url": { "$ref": "types/nullable_string.json" },
"environment_type": { "$ref": "types/nullable_string.json" },
"has_stop_action": { "type": "boolean" },
"environment_path": { "type": "string" },
"stop_path": { "type": "string" },
"folder_path": { "type": "string" },
"created_at": { "type": "string", "format": "date-time" },
"updated_at": { "type": "string", "format": "date-time" },
"can_stop": { "type": "boolean" },
"last_deployment": {
"oneOf": [
{ "type": "null" },
{ "$ref": "deployment.json" }
]
}
},
"additionalProperties": false
}
{
"type": "object",
"required": [
"status",
"icon",
"environment"
],
"properties": {
"status": {
"oneOf": [
{
"type": "string",
"enum": [
"last",
"creating",
"failed",
"out_of_date"
]
},
{ "type": "null" }
]
},
"icon": { "type": "string" },
"environment": { "$ref": "../environment.json" }
},
"additionalProperties": false
}
......@@ -25,7 +25,9 @@
"playable": { "type": "boolean" },
"created_at": { "type": "string" },
"updated_at": { "type": "string" },
"status": { "$ref": "../status/ci_detailed_status.json" }
"status": { "$ref": "../status/ci_detailed_status.json" },
"callout_message": { "type": "string" },
"recoverable": { "type": "boolean" }
},
"additionalProperties": true
}
......@@ -6,6 +6,7 @@
"properties": {
"artifact": { "$ref": "artifact.json" },
"terminal_path": { "type": "string" },
"trigger": { "$ref": "trigger.json" }
"trigger": { "$ref": "trigger.json" },
"deployment_status": { "$ref": "deployment_status.json" }
}
}
......@@ -16,6 +16,11 @@
"items": { "$ref": "job/job.json" },
"optional": true
},
"retried": {
"type": "array",
"items": { "$ref": "job/job.json" },
"optional": true
},
"status": { "$ref": "status/ci_detailed_status.json" },
"path": { "type": "string" },
"dropdown_path": { "type": "string" }
......
{
"oneOf": [
{ "type": "null" },
{ "type": "string" }
]
}
......@@ -339,11 +339,25 @@ describe MarkupHelper do
expect(first_line_in_markdown(object, attribute, 150, project: project)).to eq(expected)
end
it 'preserves data-src for lazy images' do
object = create_object("![ImageTest](/uploads/test.png)")
image_url = "data-src=\".*/uploads/test.png\""
context 'when images are allowed' do
it 'preserves data-src for lazy images' do
object = create_object("![ImageTest](/uploads/test.png)")
image_url = "data-src=\".*/uploads/test.png\""
text = first_line_in_markdown(object, attribute, 150, project: project, allow_images: true)
expect(text).to match(image_url)
expect(text).to match('<a')
end
end
expect(first_line_in_markdown(object, attribute, 150, project: project)).to match(image_url)
context 'when images are not allowed' do
it 'removes any images' do
object = create_object("![ImageTest](/uploads/test.png)")
text = first_line_in_markdown(object, attribute, 150, project: project)
expect(text).not_to match('<img')
expect(text).not_to match('<a')
end
end
context 'labels formatting' do
......
......@@ -3,6 +3,7 @@ import _ from 'underscore';
import { headersInterceptor } from 'spec/helpers/vue_resource_helper';
import * as actions from '~/notes/stores/actions';
import createStore from '~/notes/stores';
import mrWidgetEventHub from '~/vue_merge_request_widget/event_hub';
import testAction from '../../helpers/vuex_action_helper';
import { resetStore } from '../helpers';
import {
......@@ -317,4 +318,195 @@ describe('Actions Notes Store', () => {
);
});
});
describe('deleteNote', () => {
const interceptor = (request, next) => {
next(
request.respondWith(JSON.stringify({}), {
status: 200,
}),
);
};
beforeEach(() => {
Vue.http.interceptors.push(interceptor);
});
afterEach(() => {
Vue.http.interceptors = _.without(Vue.http.interceptors, interceptor);
});
it('commits DELETE_NOTE and dispatches updateMergeRequestWidget', done => {
const note = { path: `${gl.TEST_HOST}`, id: 1 };
testAction(
actions.deleteNote,
note,
store.state,
[
{
type: 'DELETE_NOTE',
payload: note,
},
],
[
{
type: 'updateMergeRequestWidget',
},
],
done,
);
});
});
describe('createNewNote', () => {
describe('success', () => {
const res = {
id: 1,
valid: true,
};
const interceptor = (request, next) => {
next(
request.respondWith(JSON.stringify(res), {
status: 200,
}),
);
};
beforeEach(() => {
Vue.http.interceptors.push(interceptor);
});
afterEach(() => {
Vue.http.interceptors = _.without(Vue.http.interceptors, interceptor);
});
it('commits ADD_NEW_NOTE and dispatches updateMergeRequestWidget', done => {
testAction(
actions.createNewNote,
{ endpoint: `${gl.TEST_HOST}`, data: {} },
store.state,
[
{
type: 'ADD_NEW_NOTE',
payload: res,
},
],
[
{
type: 'updateMergeRequestWidget',
},
],
done,
);
});
});
describe('error', () => {
const res = {
errors: ['error'],
};
const interceptor = (request, next) => {
next(
request.respondWith(JSON.stringify(res), {
status: 200,
}),
);
};
beforeEach(() => {
Vue.http.interceptors.push(interceptor);
});
afterEach(() => {
Vue.http.interceptors = _.without(Vue.http.interceptors, interceptor);
});
it('does not commit ADD_NEW_NOTE or dispatch updateMergeRequestWidget', done => {
testAction(
actions.createNewNote,
{ endpoint: `${gl.TEST_HOST}`, data: {} },
store.state,
[],
[],
done,
);
});
});
});
describe('toggleResolveNote', () => {
const res = {
resolved: true,
};
const interceptor = (request, next) => {
next(
request.respondWith(JSON.stringify(res), {
status: 200,
}),
);
};
beforeEach(() => {
Vue.http.interceptors.push(interceptor);
});
afterEach(() => {
Vue.http.interceptors = _.without(Vue.http.interceptors, interceptor);
});
describe('as note', () => {
it('commits UPDATE_NOTE and dispatches updateMergeRequestWidget', done => {
testAction(
actions.toggleResolveNote,
{ endpoint: `${gl.TEST_HOST}`, isResolved: true, discussion: false },
store.state,
[
{
type: 'UPDATE_NOTE',
payload: res,
},
],
[
{
type: 'updateMergeRequestWidget',
},
],
done,
);
});
});
describe('as discussion', () => {
it('commits UPDATE_DISCUSSION and dispatches updateMergeRequestWidget', done => {
testAction(
actions.toggleResolveNote,
{ endpoint: `${gl.TEST_HOST}`, isResolved: true, discussion: true },
store.state,
[
{
type: 'UPDATE_DISCUSSION',
payload: res,
},
],
[
{
type: 'updateMergeRequestWidget',
},
],
done,
);
});
});
});
describe('updateMergeRequestWidget', () => {
it('calls mrWidget checkStatus', () => {
spyOn(mrWidgetEventHub, '$emit');
actions.updateMergeRequestWidget();
expect(mrWidgetEventHub.$emit).toHaveBeenCalledWith('mr.discussion.updated');
});
});
});
/* eslint-disable
jasmine/no-global-setup, jasmine/no-unsafe-spy, no-underscore-dangle, no-console
*/
/* global __karma__ */
import $ from 'jquery';
import 'vendor/jasmine-jquery';
......@@ -42,8 +41,8 @@ jasmine.getJSONFixtures().fixturesPath = FIXTURES_PATH;
beforeAll(() => {
jasmine.addMatchers(
jasmineDiff(jasmine, {
colors: !__karma__.config.isCi,
inline: !__karma__.config.isCi,
colors: window.__karma__.config.color,
inline: window.__karma__.config.color,
}),
);
jasmine.addMatchers(customMatchers);
......
......@@ -116,12 +116,14 @@ describe Gitlab::Kubernetes::KubeClient do
:get_config_map,
:get_pod,
:get_namespace,
:get_secret,
:get_service,
:get_service_account,
:delete_pod,
:create_config_map,
:create_namespace,
:create_pod,
:create_secret,
:create_service_account,
:update_config_map,
:update_service_account
......
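The secret methods added to the list above are delegated like the existing ones; a minimal sketch of exercising them, with the client built the way the service specs later in this diff do (URL and credentials are placeholders):

# Sketch only: Gitlab::Kubernetes::KubeClient delegates secret calls to the
# underlying Kubeclient::Client for the core ('api') group.
kubeclient = Gitlab::Kubernetes::KubeClient.new(
  'https://kubernetes.example.com',
  ['api', 'apis/rbac.authorization.k8s.io'],
  auth_options: { username: 'admin', password: 'password' }
)

secret = kubeclient.get_secret('gitlab-token', 'default') # GET /api/v1/namespaces/default/secrets/gitlab-token
token  = Base64.decode64(secret.data['token']) if secret&.data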
# frozen_string_literal: true
require 'spec_helper'
describe Gitlab::Kubernetes::ServiceAccountToken do
let(:name) { 'token-name' }
let(:service_account_name) { 'a_service_account' }
let(:namespace_name) { 'a_namespace' }
let(:service_account_token) { described_class.new(name, service_account_name, namespace_name) }
it { expect(service_account_token.name).to eq(name) }
it { expect(service_account_token.service_account_name).to eq(service_account_name) }
it { expect(service_account_token.namespace_name).to eq(namespace_name) }
describe '#generate' do
let(:resource) do
::Kubeclient::Resource.new(
metadata: {
name: name,
namespace: namespace_name,
annotations: {
'kubernetes.io/service-account.name': service_account_name
}
},
type: 'kubernetes.io/service-account-token'
)
end
subject { service_account_token.generate }
it 'should build a Kubeclient Resource' do
is_expected.to eq(resource)
end
end
end
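Per the spec above, #generate only builds the Kubeclient::Resource; a hedged sketch of how a caller could then persist it (the names mirror the CreateServiceAccountService spec further down, and `kubeclient` is a Gitlab::Kubernetes::KubeClient as in the sketch above):

# Build the service-account-token secret resource and create it in the cluster.
token_resource = Gitlab::Kubernetes::ServiceAccountToken
  .new('gitlab-token', 'gitlab', 'default')
  .generate

token_resource.metadata.name # => 'gitlab-token'
token_resource.type          # => 'kubernetes.io/service-account-token'

kubeclient.create_secret(token_resource) # POST /api/v1/namespaces/default/secrets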
......@@ -66,25 +66,30 @@ describe GoogleApi::CloudPlatform::Client do
describe '#projects_zones_clusters_create' do
subject do
client.projects_zones_clusters_create(
spy, spy, cluster_name, cluster_size, machine_type: machine_type)
project_id, zone, cluster_name, cluster_size, machine_type: machine_type, legacy_abac: legacy_abac)
end
let(:project_id) { 'project-123' }
let(:zone) { 'us-central1-a' }
let(:cluster_name) { 'test-cluster' }
let(:cluster_size) { 1 }
let(:machine_type) { 'n1-standard-2' }
let(:legacy_abac) { true }
let(:create_cluster_request_body) { double('Google::Apis::ContainerV1::CreateClusterRequest') }
let(:operation) { double }
before do
allow_any_instance_of(Google::Apis::ContainerV1::ContainerService)
.to receive(:create_cluster).with(any_args, options: user_agent_options)
.to receive(:create_cluster).with(any_args)
.and_return(operation)
end
it { is_expected.to eq(operation) }
it 'sets corresponded parameters' do
expect_any_instance_of(Google::Apis::ContainerV1::CreateClusterRequest)
.to receive(:initialize).with(
expect_any_instance_of(Google::Apis::ContainerV1::ContainerService)
.to receive(:create_cluster).with(project_id, zone, create_cluster_request_body, options: user_agent_options)
expect(Google::Apis::ContainerV1::CreateClusterRequest)
.to receive(:new).with(
{
"cluster": {
"name": cluster_name,
......@@ -96,9 +101,35 @@ describe GoogleApi::CloudPlatform::Client do
"enabled": true
}
}
} )
} ).and_return(create_cluster_request_body)
expect(subject).to eq operation
end
context 'create without legacy_abac' do
let(:legacy_abac) { false }
it 'sets corresponded parameters' do
expect_any_instance_of(Google::Apis::ContainerV1::ContainerService)
.to receive(:create_cluster).with(project_id, zone, create_cluster_request_body, options: user_agent_options)
expect(Google::Apis::ContainerV1::CreateClusterRequest)
.to receive(:new).with(
{
"cluster": {
"name": cluster_name,
"initial_node_count": cluster_size,
"node_config": {
"machine_type": machine_type
},
"legacy_abac": {
"enabled": false
}
}
} ).and_return(create_cluster_request_body)
subject
expect(subject).to eq operation
end
end
end
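The updated subject above now passes an explicit project and zone plus the new legacy_abac flag; the call shape in isolation (client construction is not shown in this hunk, so treat `client` as given):

# Mirrors the spec subject: create a GKE cluster, opting out of legacy ABAC.
operation = client.projects_zones_clusters_create(
  'project-123',                 # GCP project id
  'us-central1-a',               # zone
  'test-cluster',                # cluster name
  1,                             # initial node count
  machine_type: 'n1-standard-2',
  legacy_abac: false             # request an RBAC-only cluster
)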
......
......@@ -83,6 +83,16 @@ describe ObjectStorage::DirectUpload do
expect(subject[:MultipartUpload][:AbortURL]).to start_with(storage_url)
expect(subject[:MultipartUpload][:AbortURL]).to include('uploadId=myUpload')
end
it 'uses only strings in query parameters' do
expect(direct_upload.send(:connection)).to receive(:signed_url).at_least(:once) do |params|
if params[:query]
expect(params[:query].keys.all? { |key| key.is_a?(String) }).to be_truthy
end
end
subject
end
end
shared_examples 'a valid upload without multipart data' do
......
......@@ -2983,4 +2983,46 @@ describe Ci::Build do
end
end
end
describe '#deployment_status' do
context 'when build is a last deployment' do
let(:build) { create(:ci_build, :success, environment: 'production') }
let(:environment) { create(:environment, name: 'production', project: build.project) }
let!(:deployment) { create(:deployment, environment: environment, project: environment.project, deployable: build) }
it { expect(build.deployment_status).to eq(:last) }
end
context 'when there is a newer build with deployment' do
let(:build) { create(:ci_build, :success, environment: 'production') }
let(:environment) { create(:environment, name: 'production', project: build.project) }
let!(:deployment) { create(:deployment, environment: environment, project: environment.project, deployable: build) }
let!(:last_deployment) { create(:deployment, environment: environment, project: environment.project) }
it { expect(build.deployment_status).to eq(:out_of_date) }
end
context 'when build with deployment has failed' do
let(:build) { create(:ci_build, :failed, environment: 'production') }
let(:environment) { create(:environment, name: 'production', project: build.project) }
let!(:deployment) { create(:deployment, environment: environment, project: environment.project, deployable: build) }
it { expect(build.deployment_status).to eq(:failed) }
end
context 'when build with deployment is running' do
let(:build) { create(:ci_build, environment: 'production') }
let(:environment) { create(:environment, name: 'production', project: build.project) }
let!(:deployment) { create(:deployment, environment: environment, project: environment.project, deployable: build) }
it { expect(build.deployment_status).to eq(:creating) }
end
context 'when build is successful but deployment is not ready yet' do
let(:build) { create(:ci_build, :success, environment: 'production') }
let(:environment) { create(:environment, name: 'production', project: build.project) }
it { expect(build.deployment_status).to eq(:creating) }
end
end
end
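A hypothetical consumer of #deployment_status, only to show that the returned symbols line up with the "status" enum in the deployment_status schema earlier in this diff; no such method exists in the merge request itself.

# Hypothetical mapping from Ci::Build#deployment_status to a human-readable message.
def deployment_status_text(build)
  case build.deployment_status
  when :last        then 'This job is the latest deployment of its environment'
  when :out_of_date then 'A newer deployment for this environment exists'
  when :failed      then 'The deployment of this job failed'
  when :creating    then 'The deployment of this job is still being created'
  end
end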
......@@ -74,6 +74,24 @@ describe Clusters::Providers::Gcp do
end
end
describe '#legacy_abac?' do
let(:gcp) { build(:cluster_provider_gcp) }
subject { gcp }
it 'should default to true' do
is_expected.to be_legacy_abac
end
context 'legacy_abac is set to false' do
let(:gcp) { build(:cluster_provider_gcp, legacy_abac: false) }
it 'is false' do
is_expected.not_to be_legacy_abac
end
end
end
describe '#state_machine' do
context 'when any => [:created]' do
let(:gcp) { build(:cluster_provider_gcp, :creating) }
......
require 'spec_helper'
describe Ci::FetchKubernetesTokenService do
describe '#execute' do
subject { described_class.new(api_url, ca_pem, username, password).execute }
let(:api_url) { 'http://111.111.111.111' }
let(:ca_pem) { '' }
let(:username) { 'admin' }
let(:password) { 'xxx' }
context 'when params correct' do
let(:token) { 'xxx.token.xxx' }
let(:secrets_json) do
[
{
'metadata': {
name: metadata_name
},
'data': {
'token': Base64.encode64(token)
}
}
]
end
before do
allow_any_instance_of(Kubeclient::Client)
.to receive(:get_secrets).and_return(secrets_json)
end
context 'when default-token exists' do
let(:metadata_name) { 'default-token-123' }
it { is_expected.to eq(token) }
end
context 'when default-token does not exist' do
let(:metadata_name) { 'another-token-123' }
it { is_expected.to be_nil }
end
end
context 'when api_url is nil' do
let(:api_url) { nil }
it { expect { subject }.to raise_error("Incomplete settings") }
end
context 'when username is nil' do
let(:username) { nil }
it { expect { subject }.to raise_error("Incomplete settings") }
end
context 'when password is nil' do
let(:password) { nil }
it { expect { subject }.to raise_error("Incomplete settings") }
end
end
end
......@@ -12,9 +12,11 @@ describe Clusters::Gcp::FinalizeCreationService do
let(:zone) { provider.zone }
let(:cluster_name) { cluster.name }
subject { described_class.new.execute(provider) }
shared_examples 'success' do
it 'configures provider and kubernetes' do
described_class.new.execute(provider)
subject
expect(provider).to be_created
end
......@@ -22,7 +24,7 @@ describe Clusters::Gcp::FinalizeCreationService do
shared_examples 'error' do
it 'sets an error to provider object' do
described_class.new.execute(provider)
subject
expect(provider.reload).to be_errored
end
......@@ -33,6 +35,7 @@ describe Clusters::Gcp::FinalizeCreationService do
let(:api_url) { 'https://' + endpoint }
let(:username) { 'sample-username' }
let(:password) { 'sample-password' }
let(:secret_name) { 'gitlab-token' }
before do
stub_cloud_platform_get_zone_cluster(
......@@ -43,60 +46,102 @@ describe Clusters::Gcp::FinalizeCreationService do
password: password
}
)
stub_kubeclient_discover(api_url)
end
context 'when suceeded to fetch kuberenetes token' do
let(:token) { 'sample-token' }
context 'service account and token created' do
before do
stub_kubeclient_get_secrets(
api_url,
{
token: Base64.encode64(token)
} )
stub_kubeclient_discover(api_url)
stub_kubeclient_create_service_account(api_url)
stub_kubeclient_create_secret(api_url)
end
it_behaves_like 'success'
shared_context 'kubernetes token successfully fetched' do
let(:token) { 'sample-token' }
before do
stub_kubeclient_get_secret(
api_url,
{
metadata_name: secret_name,
token: Base64.encode64(token)
} )
end
end
context 'provider legacy_abac is enabled' do
include_context 'kubernetes token successfully fetched'
it_behaves_like 'success'
it 'has corresponded data' do
described_class.new.execute(provider)
cluster.reload
provider.reload
platform.reload
it 'properly configures database models' do
subject
expect(provider.endpoint).to eq(endpoint)
expect(platform.api_url).to eq(api_url)
expect(platform.ca_cert).to eq(Base64.decode64(load_sample_cert))
expect(platform.username).to eq(username)
expect(platform.password).to eq(password)
expect(platform.token).to eq(token)
cluster.reload
expect(provider.endpoint).to eq(endpoint)
expect(platform.api_url).to eq(api_url)
expect(platform.ca_cert).to eq(Base64.decode64(load_sample_cert))
expect(platform.username).to eq(username)
expect(platform.password).to eq(password)
expect(platform).to be_abac
expect(platform.authorization_type).to eq('abac')
expect(platform.token).to eq(token)
end
end
end
context 'when default-token is not found' do
before do
stub_kubeclient_get_secrets(api_url, metadata_name: 'aaaa')
context 'provider legacy_abac is disabled' do
before do
provider.legacy_abac = false
end
include_context 'kubernetes token successfully fetched'
context 'cluster role binding created' do
before do
stub_kubeclient_create_cluster_role_binding(api_url)
end
it_behaves_like 'success'
it 'properly configures database models' do
subject
cluster.reload
expect(provider.endpoint).to eq(endpoint)
expect(platform.api_url).to eq(api_url)
expect(platform.ca_cert).to eq(Base64.decode64(load_sample_cert))
expect(platform.username).to eq(username)
expect(platform.password).to eq(password)
expect(platform).to be_rbac
expect(platform.token).to eq(token)
end
end
end
it_behaves_like 'error'
end
context 'when token is empty' do
before do
stub_kubeclient_get_secret(api_url, token: '', metadata_name: secret_name)
end
context 'when token is empty' do
before do
stub_kubeclient_get_secrets(api_url, token: '')
it_behaves_like 'error'
end
it_behaves_like 'error'
end
context 'when failed to fetch kubernetes token' do
before do
stub_kubeclient_get_secret_error(api_url, secret_name)
end
context 'when failed to fetch kuberenetes token' do
before do
stub_kubeclient_get_secrets_error(api_url)
it_behaves_like 'error'
end
it_behaves_like 'error'
context 'when service account fails to create' do
before do
stub_kubeclient_create_service_account_error(api_url)
end
it_behaves_like 'error'
end
end
end
......
# frozen_string_literal: true
require 'spec_helper'
describe Clusters::Gcp::Kubernetes::CreateServiceAccountService do
include KubernetesHelpers
let(:service) { described_class.new(kubeclient, rbac: rbac) }
describe '#execute' do
let(:rbac) { false }
let(:api_url) { 'http://111.111.111.111' }
let(:username) { 'admin' }
let(:password) { 'xxx' }
let(:kubeclient) do
Gitlab::Kubernetes::KubeClient.new(
api_url,
['api', 'apis/rbac.authorization.k8s.io'],
auth_options: { username: username, password: password }
)
end
subject { service.execute }
context 'when params are correct' do
before do
stub_kubeclient_discover(api_url)
stub_kubeclient_create_service_account(api_url)
stub_kubeclient_create_secret(api_url)
end
shared_examples 'creates service account and token' do
it 'creates a kubernetes service account' do
subject
expect(WebMock).to have_requested(:post, api_url + '/api/v1/namespaces/default/serviceaccounts').with(
body: hash_including(
kind: 'ServiceAccount',
metadata: { name: 'gitlab', namespace: 'default' }
)
)
end
it 'creates a kubernetes secret of type ServiceAccountToken' do
subject
expect(WebMock).to have_requested(:post, api_url + '/api/v1/namespaces/default/secrets').with(
body: hash_including(
kind: 'Secret',
metadata: {
name: 'gitlab-token',
namespace: 'default',
annotations: {
'kubernetes.io/service-account.name': 'gitlab'
}
},
type: 'kubernetes.io/service-account-token'
)
)
end
end
context 'abac enabled cluster' do
it_behaves_like 'creates service account and token'
end
context 'rbac enabled cluster' do
let(:rbac) { true }
before do
stub_kubeclient_create_cluster_role_binding(api_url)
end
it_behaves_like 'creates service account and token'
it 'creates a kubernetes cluster role binding' do
subject
expect(WebMock).to have_requested(:post, api_url + '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings').with(
body: hash_including(
kind: 'ClusterRoleBinding',
metadata: { name: 'gitlab-admin' },
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: 'cluster-admin'
},
subjects: [{ kind: 'ServiceAccount', namespace: 'default', name: 'gitlab' }]
)
)
end
end
end
end
end
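A usage sketch of the service exercised above; `kubeclient` stands for a Gitlab::Kubernetes::KubeClient built as in the spec's let block, and the resource names come from the WebMock expectations.

# Creates the 'gitlab' service account and the 'gitlab-token' secret in the
# 'default' namespace; with rbac: true it also creates the 'gitlab-admin'
# cluster role binding bound to the cluster-admin role.
Clusters::Gcp::Kubernetes::CreateServiceAccountService.new(kubeclient, rbac: true).execute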
# frozen_string_literal: true
require 'fast_spec_helper'
describe Clusters::Gcp::Kubernetes::FetchKubernetesTokenService do
describe '#execute' do
let(:api_url) { 'http://111.111.111.111' }
let(:username) { 'admin' }
let(:password) { 'xxx' }
let(:kubeclient) do
Gitlab::Kubernetes::KubeClient.new(
api_url,
['api', 'apis/rbac.authorization.k8s.io'],
auth_options: { username: username, password: password }
)
end
subject { described_class.new(kubeclient).execute }
context 'when params correct' do
let(:decoded_token) { 'xxx.token.xxx' }
let(:token) { Base64.encode64(decoded_token) }
let(:secret_json) do
{
'metadata': {
name: 'gitlab-token'
},
'data': {
'token': token
}
}
end
before do
allow_any_instance_of(Kubeclient::Client)
.to receive(:get_secret).and_return(secret_json)
end
context 'when gitlab-token exists' do
let(:metadata_name) { 'gitlab-token' }
it { is_expected.to eq(decoded_token) }
end
context 'when gitlab-token does not exist' do
let(:secret_json) { {} }
it { is_expected.to be_nil }
end
context 'when token is nil' do
let(:token) { nil }
it { is_expected.to be_nil }
end
end
end
end
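And its counterpart from the spec above, again as a sketch with `kubeclient` as before:

# Reads the 'gitlab-token' secret and returns the Base64-decoded token,
# or nil when the secret or its token data is missing.
token = Clusters::Gcp::Kubernetes::FetchKubernetesTokenService.new(kubeclient).execute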
......@@ -44,31 +44,49 @@ module KubernetesHelpers
WebMock.stub_request(:get, deployments_url).to_return(response || kube_deployments_response)
end
def stub_kubeclient_get_secrets(api_url, **options)
WebMock.stub_request(:get, api_url + '/api/v1/secrets')
.to_return(kube_response(kube_v1_secrets_body(options)))
def stub_kubeclient_get_secret(api_url, namespace: 'default', **options)
options[:metadata_name] ||= "default-token-1"
WebMock.stub_request(:get, api_url + "/api/v1/namespaces/#{namespace}/secrets/#{options[:metadata_name]}")
.to_return(kube_response(kube_v1_secret_body(options)))
end
def stub_kubeclient_get_secrets_error(api_url)
WebMock.stub_request(:get, api_url + '/api/v1/secrets')
def stub_kubeclient_get_secret_error(api_url, name, namespace: 'default')
WebMock.stub_request(:get, api_url + "/api/v1/namespaces/#{namespace}/secrets/#{name}")
.to_return(status: [404, "Internal Server Error"])
end
def kube_v1_secrets_body(**options)
def stub_kubeclient_create_service_account(api_url, namespace: 'default')
WebMock.stub_request(:post, api_url + "/api/v1/namespaces/#{namespace}/serviceaccounts")
.to_return(kube_response({}))
end
def stub_kubeclient_create_service_account_error(api_url, namespace: 'default')
WebMock.stub_request(:post, api_url + "/api/v1/namespaces/#{namespace}/serviceaccounts")
.to_return(status: [500, "Internal Server Error"])
end
def stub_kubeclient_create_secret(api_url, namespace: 'default')
WebMock.stub_request(:post, api_url + "/api/v1/namespaces/#{namespace}/secrets")
.to_return(kube_response({}))
end
def stub_kubeclient_create_cluster_role_binding(api_url)
WebMock.stub_request(:post, api_url + '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings')
.to_return(kube_response({}))
end
def kube_v1_secret_body(**options)
{
"kind" => "SecretList",
"apiVersion": "v1",
"items" => [
{
"metadata": {
"name": options[:metadata_name] || "default-token-1",
"namespace": "kube-system"
},
"data": {
"token": options[:token] || Base64.encode64('token-sample-123')
}
}
]
"metadata": {
"name": options[:metadata_name] || "default-token-1",
"namespace": "kube-system"
},
"data": {
"token": options[:token] || Base64.encode64('token-sample-123')
}
}
end
......@@ -79,6 +97,7 @@ module KubernetesHelpers
{ "name" => "pods", "namespaced" => true, "kind" => "Pod" },
{ "name" => "deployments", "namespaced" => true, "kind" => "Deployment" },
{ "name" => "secrets", "namespaced" => true, "kind" => "Secret" },
{ "name" => "serviceaccounts", "namespaced" => true, "kind" => "ServiceAccount" },
{ "name" => "services", "namespaced" => true, "kind" => "Service" }
]
}
......@@ -91,6 +110,7 @@ module KubernetesHelpers
{ "name" => "pods", "namespaced" => true, "kind" => "Pod" },
{ "name" => "deployments", "namespaced" => true, "kind" => "Deployment" },
{ "name" => "secrets", "namespaced" => true, "kind" => "Secret" },
{ "name" => "serviceaccounts", "namespaced" => true, "kind" => "ServiceAccount" },
{ "name" => "services", "namespaced" => true, "kind" => "Service" }
]
}
......
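An illustrative spec snippet tying the new stub helpers above to the service specs earlier in this diff; the URL, credentials, and token value are placeholders.

require 'spec_helper'

describe 'fetching the GitLab service account token' do
  include KubernetesHelpers

  let(:api_url) { 'https://kubernetes.example.com' }
  let(:kubeclient) do
    Gitlab::Kubernetes::KubeClient.new(
      api_url,
      ['api', 'apis/rbac.authorization.k8s.io'],
      auth_options: { username: 'admin', password: 'password' }
    )
  end

  before do
    stub_kubeclient_discover(api_url)
    # Stubs GET /api/v1/namespaces/default/secrets/gitlab-token with a kube_v1_secret_body payload.
    stub_kubeclient_get_secret(api_url, metadata_name: 'gitlab-token', token: Base64.encode64('sample-token'))
  end

  it 'returns the decoded token' do
    expect(Clusters::Gcp::Kubernetes::FetchKubernetesTokenService.new(kubeclient).execute).to eq('sample-token')
  end
end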
......@@ -3,6 +3,7 @@ require 'active_support/hash_with_indifferent_access'
require 'active_support/dependencies'
require_dependency 'gitlab'
require_dependency Gitlab.root.join('ee/spec/support/helpers/ee/stub_configuration')
module StubConfiguration
......
......@@ -7,7 +7,8 @@ shared_context 'valid cluster create params' do
gcp_project_id: 'gcp-project',
zone: 'us-central1-a',
num_nodes: 1,
machine_type: 'machine_type-a'
machine_type: 'machine_type-a',
legacy_abac: 'true'
}
}
end
......@@ -29,6 +30,10 @@ shared_context 'invalid cluster create params' do
end
shared_examples 'create cluster service success' do
before do
stub_feature_flags(rbac_clusters: false)
end
it 'creates a cluster object and performs a worker' do
expect(ClusterProvisionWorker).to receive(:perform_async)
......@@ -44,6 +49,7 @@ shared_examples 'create cluster service success' do
expect(subject.provider.num_nodes).to eq(1)
expect(subject.provider.machine_type).to eq('machine_type-a')
expect(subject.provider.access_token).to eq(access_token)
expect(subject.provider).to be_legacy_abac
expect(subject.platform).to be_nil
end
end
......