Commit f9b48766 authored by Kushal Pandya's avatar Kushal Pandya

Merge branch 'add-option-to-remove-legacy-tiller-server' into 'master'

Add option to remove legacy Tiller server

See merge request gitlab-org/gitlab!47457
parents 9e5c284c f74bc6b5
...@@ -52,6 +52,7 @@ export default class Clusters { ...@@ -52,6 +52,7 @@ export default class Clusters {
clusterStatus, clusterStatus,
clusterStatusReason, clusterStatusReason,
helpPath, helpPath,
helmHelpPath,
ingressHelpPath, ingressHelpPath,
ingressDnsHelpPath, ingressDnsHelpPath,
ingressModSecurityHelpPath, ingressModSecurityHelpPath,
...@@ -68,8 +69,9 @@ export default class Clusters { ...@@ -68,8 +69,9 @@ export default class Clusters {
this.clusterBannerDismissedKey = `cluster_${this.clusterId}_banner_dismissed`; this.clusterBannerDismissedKey = `cluster_${this.clusterId}_banner_dismissed`;
this.store = new ClustersStore(); this.store = new ClustersStore();
this.store.setHelpPaths( this.store.setHelpPaths({
helpPath, helpPath,
helmHelpPath,
ingressHelpPath, ingressHelpPath,
ingressDnsHelpPath, ingressDnsHelpPath,
ingressModSecurityHelpPath, ingressModSecurityHelpPath,
...@@ -78,7 +80,7 @@ export default class Clusters { ...@@ -78,7 +80,7 @@ export default class Clusters {
deployBoardsHelpPath, deployBoardsHelpPath,
cloudRunHelpPath, cloudRunHelpPath,
ciliumHelpPath, ciliumHelpPath,
); });
this.store.setManagePrometheusPath(managePrometheusPath); this.store.setManagePrometheusPath(managePrometheusPath);
this.store.updateStatus(clusterStatus); this.store.updateStatus(clusterStatus);
this.store.updateStatusReason(clusterStatusReason); this.store.updateStatusReason(clusterStatusReason);
...@@ -162,6 +164,7 @@ export default class Clusters { ...@@ -162,6 +164,7 @@ export default class Clusters {
type, type,
applications: this.state.applications, applications: this.state.applications,
helpPath: this.state.helpPath, helpPath: this.state.helpPath,
helmHelpPath: this.state.helmHelpPath,
ingressHelpPath: this.state.ingressHelpPath, ingressHelpPath: this.state.ingressHelpPath,
managePrometheusPath: this.state.managePrometheusPath, managePrometheusPath: this.state.managePrometheusPath,
ingressDnsHelpPath: this.state.ingressDnsHelpPath, ingressDnsHelpPath: this.state.ingressDnsHelpPath,
......
<script> <script>
import { GlLoadingIcon, GlSprintf, GlLink } from '@gitlab/ui'; import { GlLoadingIcon, GlSprintf, GlLink } from '@gitlab/ui';
import gitlabLogo from 'images/cluster_app_logos/gitlab.png'; import gitlabLogo from 'images/cluster_app_logos/gitlab.png';
import helmLogo from 'images/cluster_app_logos/helm.png';
import jupyterhubLogo from 'images/cluster_app_logos/jupyterhub.png'; import jupyterhubLogo from 'images/cluster_app_logos/jupyterhub.png';
import kubernetesLogo from 'images/cluster_app_logos/kubernetes.png'; import kubernetesLogo from 'images/cluster_app_logos/kubernetes.png';
import certManagerLogo from 'images/cluster_app_logos/cert_manager.png'; import certManagerLogo from 'images/cluster_app_logos/cert_manager.png';
...@@ -46,6 +47,11 @@ export default { ...@@ -46,6 +47,11 @@ export default {
required: false, required: false,
default: '', default: '',
}, },
helmHelpPath: {
type: String,
required: false,
default: '',
},
ingressHelpPath: { ingressHelpPath: {
type: String, type: String,
required: false, required: false,
...@@ -150,6 +156,7 @@ export default { ...@@ -150,6 +156,7 @@ export default {
}, },
logos: { logos: {
gitlabLogo, gitlabLogo,
helmLogo,
jupyterhubLogo, jupyterhubLogo,
kubernetesLogo, kubernetesLogo,
certManagerLogo, certManagerLogo,
...@@ -172,6 +179,35 @@ export default { ...@@ -172,6 +179,35 @@ export default {
</p> </p>
<div class="cluster-application-list gl-mt-3"> <div class="cluster-application-list gl-mt-3">
<application-row
v-if="applications.helm.installed || applications.helm.uninstalling"
id="helm"
:logo-url="$options.logos.helmLogo"
:title="applications.helm.title"
:status="applications.helm.status"
:status-reason="applications.helm.statusReason"
:request-status="applications.helm.requestStatus"
:request-reason="applications.helm.requestReason"
:installed="applications.helm.installed"
:install-failed="applications.helm.installFailed"
:uninstallable="applications.helm.uninstallable"
:uninstall-successful="applications.helm.uninstallSuccessful"
:uninstall-failed="applications.helm.uninstallFailed"
title-link="https://v2.helm.sh/"
>
<template #description>
<p>
{{
s__(`ClusterIntegration|Can be safely removed. Prior to GitLab
13.2, GitLab used a remote Tiller server to manage the
applications. GitLab no longer uses this server.
Uninstalling this server will not affect your other
applications. This row will disappear afterwards.`)
}}
<gl-link :href="helmHelpPath">{{ __('More information') }}</gl-link>
</p>
</template>
</application-row>
<application-row <application-row
:id="ingressId" :id="ingressId"
:logo-url="$options.logos.kubernetesLogo" :logo-url="$options.logos.kubernetesLogo"
......
...@@ -16,7 +16,7 @@ import { ...@@ -16,7 +16,7 @@ import {
const CUSTOM_APP_WARNING_TEXT = { const CUSTOM_APP_WARNING_TEXT = {
[HELM]: sprintf( [HELM]: sprintf(
s__( s__(
'ClusterIntegration|The associated Tiller pod, the %{gitlabManagedAppsNamespace} namespace, and all of its resources will be deleted and cannot be restored.', 'ClusterIntegration|The associated Tiller pod will be deleted and cannot be restored. Your other applications will remain unaffected.',
), ),
{ {
gitlabManagedAppsNamespace: '<code>gitlab-managed-apps</code>', gitlabManagedAppsNamespace: '<code>gitlab-managed-apps</code>',
......
...@@ -193,6 +193,12 @@ const applicationStateMachine = { ...@@ -193,6 +193,12 @@ const applicationStateMachine = {
uninstallSuccessful: true, uninstallSuccessful: true,
}, },
}, },
[NOT_INSTALLABLE]: {
target: NOT_INSTALLABLE,
effects: {
uninstallSuccessful: true,
},
},
[UNINSTALL_ERRORED]: { [UNINSTALL_ERRORED]: {
target: INSTALLED, target: INSTALLED,
effects: { effects: {
......
...@@ -36,6 +36,7 @@ export default class ClusterStore { ...@@ -36,6 +36,7 @@ export default class ClusterStore {
constructor() { constructor() {
this.state = { this.state = {
helpPath: null, helpPath: null,
helmHelpPath: null,
ingressHelpPath: null, ingressHelpPath: null,
environmentsHelpPath: null, environmentsHelpPath: null,
clustersHelpPath: null, clustersHelpPath: null,
...@@ -49,7 +50,7 @@ export default class ClusterStore { ...@@ -49,7 +50,7 @@ export default class ClusterStore {
applications: { applications: {
helm: { helm: {
...applicationInitialState, ...applicationInitialState,
title: s__('ClusterIntegration|Helm Tiller'), title: s__('ClusterIntegration|Legacy Helm Tiller server'),
}, },
ingress: { ingress: {
...applicationInitialState, ...applicationInitialState,
...@@ -126,26 +127,10 @@ export default class ClusterStore { ...@@ -126,26 +127,10 @@ export default class ClusterStore {
}; };
} }
setHelpPaths( setHelpPaths(helpPaths) {
helpPath, Object.assign(this.state, {
ingressHelpPath, ...helpPaths,
ingressDnsHelpPath, });
ingressModSecurityHelpPath,
environmentsHelpPath,
clustersHelpPath,
deployBoardsHelpPath,
cloudRunHelpPath,
ciliumHelpPath,
) {
this.state.helpPath = helpPath;
this.state.ingressHelpPath = ingressHelpPath;
this.state.ingressDnsHelpPath = ingressDnsHelpPath;
this.state.ingressModSecurityHelpPath = ingressModSecurityHelpPath;
this.state.environmentsHelpPath = environmentsHelpPath;
this.state.clustersHelpPath = clustersHelpPath;
this.state.deployBoardsHelpPath = deployBoardsHelpPath;
this.state.cloudRunHelpPath = cloudRunHelpPath;
this.state.ciliumHelpPath = ciliumHelpPath;
} }
setManagePrometheusPath(managePrometheusPath) { setManagePrometheusPath(managePrometheusPath) {
......
...@@ -4,8 +4,8 @@ require 'openssl' ...@@ -4,8 +4,8 @@ require 'openssl'
module Clusters module Clusters
module Applications module Applications
# DEPRECATED: This model represents the Helm 2 Tiller server, and is no longer being actively used. # DEPRECATED: This model represents the Helm 2 Tiller server.
# It is being kept around for a potential cleanup of the unused Tiller server. # It is being kept around to enable the cleanup of the unused Tiller server.
class Helm < ApplicationRecord class Helm < ApplicationRecord
self.table_name = 'clusters_applications_helm' self.table_name = 'clusters_applications_helm'
...@@ -27,29 +27,11 @@ module Clusters ...@@ -27,29 +27,11 @@ module Clusters
end end
def set_initial_status def set_initial_status
return unless not_installable? # The legacy Tiller server is not installable, which is the initial status of every app
self.status = status_states[:installable] if cluster&.platform_kubernetes_active?
end
# It can only be uninstalled if there are no other applications installed
# or with intermittent installation statuses in the database.
def allowed_to_uninstall?
strong_memoize(:allowed_to_uninstall) do
applications = nil
Clusters::Cluster::APPLICATIONS.each do |application_name, klass|
next if application_name == 'helm'
extra_apps = Clusters::Applications::Helm.where('EXISTS (?)', klass.select(1).where(cluster_id: cluster_id))
applications = applications ? applications.or(extra_apps) : extra_apps
end
!applications.exists?
end
end end
# DEPRECATED: This command is only for development and testing purposes, to simulate
# a Helm 2 cluster with an existing Tiller server.
def install_command def install_command
Gitlab::Kubernetes::Helm::V2::InitCommand.new( Gitlab::Kubernetes::Helm::V2::InitCommand.new(
name: name, name: name,
...@@ -70,13 +52,6 @@ module Clusters ...@@ -70,13 +52,6 @@ module Clusters
ca_key.present? && ca_cert.present? ca_key.present? && ca_cert.present?
end end
def post_uninstall
cluster.kubeclient.delete_namespace(Gitlab::Kubernetes::Helm::NAMESPACE)
rescue Kubeclient::ResourceNotFoundError
# we actually don't care if the namespace is not present
# since we want to delete it anyway.
end
private private
def files def files
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
provider_type: @cluster.provider_type, provider_type: @cluster.provider_type,
pre_installed_knative: @cluster.knative_pre_installed? ? 'true': 'false', pre_installed_knative: @cluster.knative_pre_installed? ? 'true': 'false',
help_path: help_page_path('user/project/clusters/index.md', anchor: 'installing-applications'), help_path: help_page_path('user/project/clusters/index.md', anchor: 'installing-applications'),
helm_help_path: help_page_path('user/clusters/applications.md', anchor: 'helm'),
ingress_help_path: help_page_path('user/project/clusters/index.md', anchor: 'getting-the-external-endpoint'), ingress_help_path: help_page_path('user/project/clusters/index.md', anchor: 'getting-the-external-endpoint'),
ingress_dns_help_path: help_page_path('user/clusters/applications.md', anchor: 'pointing-your-dns-at-the-external-endpoint'), ingress_dns_help_path: help_page_path('user/clusters/applications.md', anchor: 'pointing-your-dns-at-the-external-endpoint'),
ingress_mod_security_help_path: help_page_path('user/clusters/applications.md', anchor: 'web-application-firewall-modsecurity'), ingress_mod_security_help_path: help_page_path('user/clusters/applications.md', anchor: 'web-application-firewall-modsecurity'),
......
---
title: Add option to uninstall the legacy Tiller server for clusters added before GitLab 13.2
merge_request: 47457
author:
type: changed
...@@ -65,6 +65,7 @@ supported by GitLab before installing any of the applications. ...@@ -65,6 +65,7 @@ supported by GitLab before installing any of the applications.
> - Introduced in GitLab 11.6 for group-level clusters. > - Introduced in GitLab 11.6 for group-level clusters.
> - [Uses a local Tiller](https://gitlab.com/gitlab-org/gitlab/-/issues/209736) in GitLab 13.2 and later. > - [Uses a local Tiller](https://gitlab.com/gitlab-org/gitlab/-/issues/209736) in GitLab 13.2 and later.
> - [Uses Helm 3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46267) for clusters created with GitLab 13.6 and later. > - [Uses Helm 3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46267) for clusters created with GitLab 13.6 and later.
> - [Offers legacy Tiller removal](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47457) in GitLab 13.7 and later.
[Helm](https://helm.sh/docs/) is a package manager for Kubernetes and is [Helm](https://helm.sh/docs/) is a package manager for Kubernetes and is
used to install the GitLab-managed apps. GitLab runs each `helm` command used to install the GitLab-managed apps. GitLab runs each `helm` command
...@@ -72,12 +73,12 @@ in a pod within the `gitlab-managed-apps` namespace inside the cluster. ...@@ -72,12 +73,12 @@ in a pod within the `gitlab-managed-apps` namespace inside the cluster.
- For clusters created on GitLab 13.6 and newer, GitLab uses Helm 3 to manage - For clusters created on GitLab 13.6 and newer, GitLab uses Helm 3 to manage
applications. applications.
- For clusters created on versions of GitLab prior to 13.6, GitLab uses - For clusters created on versions of GitLab prior to 13.6, GitLab uses Helm 2
Helm 2 with a local [Tiller](https://v2.helm.sh/docs/glossary/#tiller) server. with a local [Tiller](https://v2.helm.sh/docs/glossary/#tiller) server. Prior
Prior to [GitLab 13.2](https://gitlab.com/gitlab-org/gitlab/-/issues/209736), to [GitLab 13.2](https://gitlab.com/gitlab-org/gitlab/-/issues/209736), GitLab
GitLab used an in-cluster Tiller server in the `gitlab-managed-apps` used an in-cluster Tiller server in the `gitlab-managed-apps` namespace. You
namespace. You can safely remove this server after upgrading to GitLab 13.2 can safely uninstall the server from GitLab's application page if you have
or newer. previously installed it. This will not affect your other applications.
GitLab's Helm integration does not support installing applications behind a proxy, GitLab's Helm integration does not support installing applications behind a proxy,
but a [workaround](../../topics/autodevops/index.md#install-applications-behind-a-proxy) but a [workaround](../../topics/autodevops/index.md#install-applications-behind-a-proxy)
......
...@@ -22,17 +22,6 @@ module Gitlab ...@@ -22,17 +22,6 @@ module Gitlab
def repository_update_command def repository_update_command
'helm repo update' 'helm repo update'
end end
def optional_tls_flags
return [] unless files.key?(:'ca.pem')
[
'--tls',
'--tls-ca-cert', "#{files_dir}/ca.pem",
'--tls-cert', "#{files_dir}/cert.pem",
'--tls-key', "#{files_dir}/key.pem"
]
end
end end
end end
end end
......
...@@ -9,9 +9,8 @@ module Gitlab ...@@ -9,9 +9,8 @@ module Gitlab
def generate_script def generate_script
super + [ super + [
reset_helm_command, init_command,
delete_tiller_replicaset, reset_helm_command
delete_tiller_clusterrolebinding
].join("\n") ].join("\n")
end end
...@@ -21,27 +20,8 @@ module Gitlab ...@@ -21,27 +20,8 @@ module Gitlab
private private
# This method can be delete once we upgrade Helm to > 12.13.0
# https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/27096#note_159695900
#
# Tracking this method to be removed here:
# https://gitlab.com/gitlab-org/gitlab-foss/issues/52791#note_199374155
def delete_tiller_replicaset
delete_args = %w[replicaset -n gitlab-managed-apps -l name=tiller]
Gitlab::Kubernetes::KubectlCmd.delete(*delete_args)
end
def delete_tiller_clusterrolebinding
delete_args = %w[clusterrolebinding tiller-admin]
Gitlab::Kubernetes::KubectlCmd.delete(*delete_args)
end
def reset_helm_command def reset_helm_command
command = %w[helm reset] + optional_tls_flags 'helm reset --force'
command.shelljoin
end end
end end
end end
......
...@@ -5842,6 +5842,9 @@ msgstr "" ...@@ -5842,6 +5842,9 @@ msgstr ""
msgid "ClusterIntegration|CA Certificate" msgid "ClusterIntegration|CA Certificate"
msgstr "" msgstr ""
msgid "ClusterIntegration|Can be safely removed. Prior to GitLab 13.2, GitLab used a remote Tiller server to manage the applications. GitLab no longer uses this server. Uninstalling this server will not affect your other applications. This row will disappear afterwards."
msgstr ""
msgid "ClusterIntegration|Cert-Manager" msgid "ClusterIntegration|Cert-Manager"
msgstr "" msgstr ""
...@@ -6100,9 +6103,6 @@ msgstr "" ...@@ -6100,9 +6103,6 @@ msgstr ""
msgid "ClusterIntegration|HTTP Error" msgid "ClusterIntegration|HTTP Error"
msgstr "" msgstr ""
msgid "ClusterIntegration|Helm Tiller"
msgstr ""
msgid "ClusterIntegration|Helm release failed to install" msgid "ClusterIntegration|Helm release failed to install"
msgstr "" msgstr ""
...@@ -6205,6 +6205,9 @@ msgstr "" ...@@ -6205,6 +6205,9 @@ msgstr ""
msgid "ClusterIntegration|Learn more about instance Kubernetes clusters" msgid "ClusterIntegration|Learn more about instance Kubernetes clusters"
msgstr "" msgstr ""
msgid "ClusterIntegration|Legacy Helm Tiller server"
msgstr ""
msgid "ClusterIntegration|Loading IAM Roles" msgid "ClusterIntegration|Loading IAM Roles"
msgstr "" msgstr ""
...@@ -6538,7 +6541,7 @@ msgstr "" ...@@ -6538,7 +6541,7 @@ msgstr ""
msgid "ClusterIntegration|The associated IP and all deployed services will be deleted and cannot be restored. Uninstalling Knative will also remove Istio from your cluster. This will not effect any other applications." msgid "ClusterIntegration|The associated IP and all deployed services will be deleted and cannot be restored. Uninstalling Knative will also remove Istio from your cluster. This will not effect any other applications."
msgstr "" msgstr ""
msgid "ClusterIntegration|The associated Tiller pod, the %{gitlabManagedAppsNamespace} namespace, and all of its resources will be deleted and cannot be restored." msgid "ClusterIntegration|The associated Tiller pod will be deleted and cannot be restored. Your other applications will remain unaffected."
msgstr "" msgstr ""
msgid "ClusterIntegration|The associated load balancer and IP will be deleted and cannot be restored." msgid "ClusterIntegration|The associated load balancer and IP will be deleted and cannot be restored."
......
...@@ -22,7 +22,7 @@ RSpec.describe Admin::Clusters::ApplicationsController do ...@@ -22,7 +22,7 @@ RSpec.describe Admin::Clusters::ApplicationsController do
post :create, params: params post :create, params: params
end end
let(:application) { 'helm' } let(:application) { 'ingress' }
let(:params) { { application: application, id: cluster.id } } let(:params) { { application: application, id: cluster.id } }
describe 'functionality' do describe 'functionality' do
...@@ -37,7 +37,7 @@ RSpec.describe Admin::Clusters::ApplicationsController do ...@@ -37,7 +37,7 @@ RSpec.describe Admin::Clusters::ApplicationsController do
expect { subject }.to change { current_application.count } expect { subject }.to change { current_application.count }
expect(response).to have_gitlab_http_status(:no_content) expect(response).to have_gitlab_http_status(:no_content)
expect(cluster.application_helm).to be_scheduled expect(cluster.application_ingress).to be_scheduled
end end
context 'when cluster do not exists' do context 'when cluster do not exists' do
...@@ -61,7 +61,7 @@ RSpec.describe Admin::Clusters::ApplicationsController do ...@@ -61,7 +61,7 @@ RSpec.describe Admin::Clusters::ApplicationsController do
context 'when application is already installing' do context 'when application is already installing' do
before do before do
create(:clusters_applications_helm, :installing, cluster: cluster) create(:clusters_applications_ingress, :installing, cluster: cluster)
end end
it 'returns 400' do it 'returns 400' do
......
...@@ -28,7 +28,7 @@ RSpec.describe Groups::Clusters::ApplicationsController do ...@@ -28,7 +28,7 @@ RSpec.describe Groups::Clusters::ApplicationsController do
post :create, params: params.merge(group_id: group) post :create, params: params.merge(group_id: group)
end end
let(:application) { 'helm' } let(:application) { 'ingress' }
let(:params) { { application: application, id: cluster.id } } let(:params) { { application: application, id: cluster.id } }
describe 'functionality' do describe 'functionality' do
...@@ -44,7 +44,7 @@ RSpec.describe Groups::Clusters::ApplicationsController do ...@@ -44,7 +44,7 @@ RSpec.describe Groups::Clusters::ApplicationsController do
expect { subject }.to change { current_application.count } expect { subject }.to change { current_application.count }
expect(response).to have_gitlab_http_status(:no_content) expect(response).to have_gitlab_http_status(:no_content)
expect(cluster.application_helm).to be_scheduled expect(cluster.application_ingress).to be_scheduled
end end
context 'when cluster do not exists' do context 'when cluster do not exists' do
...@@ -68,7 +68,7 @@ RSpec.describe Groups::Clusters::ApplicationsController do ...@@ -68,7 +68,7 @@ RSpec.describe Groups::Clusters::ApplicationsController do
context 'when application is already installing' do context 'when application is already installing' do
before do before do
create(:clusters_applications_helm, :installing, cluster: cluster) create(:clusters_applications_ingress, :installing, cluster: cluster)
end end
it 'returns 400' do it 'returns 400' do
......
...@@ -32,7 +32,7 @@ RSpec.describe Projects::Clusters::ApplicationsController do ...@@ -32,7 +32,7 @@ RSpec.describe Projects::Clusters::ApplicationsController do
let(:cluster) { create(:cluster, :project, :provided_by_gcp) } let(:cluster) { create(:cluster, :project, :provided_by_gcp) }
let(:project) { cluster.project } let(:project) { cluster.project }
let(:application) { 'helm' } let(:application) { 'ingress' }
let(:params) { { application: application, id: cluster.id } } let(:params) { { application: application, id: cluster.id } }
describe 'functionality' do describe 'functionality' do
...@@ -48,7 +48,7 @@ RSpec.describe Projects::Clusters::ApplicationsController do ...@@ -48,7 +48,7 @@ RSpec.describe Projects::Clusters::ApplicationsController do
expect { subject }.to change { current_application.count } expect { subject }.to change { current_application.count }
expect(response).to have_gitlab_http_status(:no_content) expect(response).to have_gitlab_http_status(:no_content)
expect(cluster.application_helm).to be_scheduled expect(cluster.application_ingress).to be_scheduled
end end
context 'when cluster do not exists' do context 'when cluster do not exists' do
...@@ -72,7 +72,7 @@ RSpec.describe Projects::Clusters::ApplicationsController do ...@@ -72,7 +72,7 @@ RSpec.describe Projects::Clusters::ApplicationsController do
context 'when application is already installing' do context 'when application is already installing' do
before do before do
create(:clusters_applications_helm, :installing, cluster: cluster) create(:clusters_applications_ingress, :installing, cluster: cluster)
end end
it 'returns 400' do it 'returns 400' do
......
...@@ -50,6 +50,7 @@ describe('Clusters Store', () => { ...@@ -50,6 +50,7 @@ describe('Clusters Store', () => {
expect(store.state).toEqual({ expect(store.state).toEqual({
helpPath: null, helpPath: null,
helmHelpPath: null,
ingressHelpPath: null, ingressHelpPath: null,
environmentsHelpPath: null, environmentsHelpPath: null,
clustersHelpPath: null, clustersHelpPath: null,
...@@ -62,7 +63,7 @@ describe('Clusters Store', () => { ...@@ -62,7 +63,7 @@ describe('Clusters Store', () => {
rbac: false, rbac: false,
applications: { applications: {
helm: { helm: {
title: 'Helm Tiller', title: 'Legacy Helm Tiller server',
status: mockResponseData.applications[0].status, status: mockResponseData.applications[0].status,
statusReason: mockResponseData.applications[0].status_reason, statusReason: mockResponseData.applications[0].status_reason,
requestReason: null, requestReason: null,
......
...@@ -12,32 +12,14 @@ RSpec.describe Gitlab::Kubernetes::Helm::V2::ResetCommand do ...@@ -12,32 +12,14 @@ RSpec.describe Gitlab::Kubernetes::Helm::V2::ResetCommand do
it_behaves_like 'helm command generator' do it_behaves_like 'helm command generator' do
let(:commands) do let(:commands) do
<<~EOS <<~EOS
helm reset export HELM_HOST="localhost:44134"
kubectl delete replicaset -n gitlab-managed-apps -l name\\=tiller tiller -listen ${HELM_HOST} -alsologtostderr &
kubectl delete clusterrolebinding tiller-admin helm init --client-only
helm reset --force
EOS EOS
end end
end end
context 'when there is a ca.pem file' do
let(:files) { { 'ca.pem': 'some file content' } }
it_behaves_like 'helm command generator' do
let(:commands) do
<<~EOS1.squish + "\n" + <<~EOS2
helm reset
--tls
--tls-ca-cert /data/helm/helm/config/ca.pem
--tls-cert /data/helm/helm/config/cert.pem
--tls-key /data/helm/helm/config/key.pem
EOS1
kubectl delete replicaset -n gitlab-managed-apps -l name\\=tiller
kubectl delete clusterrolebinding tiller-admin
EOS2
end
end
end
describe '#pod_name' do describe '#pod_name' do
subject { reset_command.pod_name } subject { reset_command.pod_name }
......
...@@ -19,35 +19,9 @@ RSpec.describe Clusters::Applications::Helm do ...@@ -19,35 +19,9 @@ RSpec.describe Clusters::Applications::Helm do
end end
describe '#can_uninstall?' do describe '#can_uninstall?' do
context "with other existing applications" do subject(:application) { build(:clusters_applications_helm).can_uninstall? }
Clusters::Cluster::APPLICATIONS.keys.each do |application_name|
next if application_name == 'helm'
it "is false when #{application_name} is installed" do it { is_expected.to eq true }
cluster_application = create("clusters_applications_#{application_name}".to_sym)
helm = cluster_application.cluster.application_helm
expect(helm.allowed_to_uninstall?).to be_falsy
end
end
it 'executes a single query only' do
cluster_application = create(:clusters_applications_ingress)
helm = cluster_application.cluster.application_helm
query_count = ActiveRecord::QueryRecorder.new { helm.allowed_to_uninstall? }.count
expect(query_count).to eq(1)
end
end
context "without other existing applications" do
subject { helm.can_uninstall? }
let(:helm) { create(:clusters_applications_helm) }
it { is_expected.to be_truthy }
end
end end
describe '#issue_client_cert' do describe '#issue_client_cert' do
...@@ -135,14 +109,4 @@ RSpec.describe Clusters::Applications::Helm do ...@@ -135,14 +109,4 @@ RSpec.describe Clusters::Applications::Helm do
end end
end end
end end
describe '#post_uninstall' do
let(:helm) { create(:clusters_applications_helm, :installed) }
it do
expect(helm.cluster.kubeclient).to receive(:delete_namespace).with('gitlab-managed-apps')
helm.post_uninstall
end
end
end end
...@@ -7,7 +7,7 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -7,7 +7,7 @@ RSpec.describe Clusters::Applications::CreateService do
let(:cluster) { create(:cluster, :project, :provided_by_gcp) } let(:cluster) { create(:cluster, :project, :provided_by_gcp) }
let(:user) { create(:user) } let(:user) { create(:user) }
let(:params) { { application: 'helm' } } let(:params) { { application: 'ingress' } }
let(:service) { described_class.new(cluster, user, params) } let(:service) { described_class.new(cluster, user, params) }
describe '#execute' do describe '#execute' do
...@@ -23,16 +23,16 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -23,16 +23,16 @@ RSpec.describe Clusters::Applications::CreateService do
subject subject
cluster.reload cluster.reload
end.to change(cluster, :application_helm) end.to change(cluster, :application_ingress)
end end
context 'application already installed' do context 'application already installed' do
let!(:application) { create(:clusters_applications_helm, :installed, cluster: cluster) } let!(:application) { create(:clusters_applications_ingress, :installed, cluster: cluster) }
it 'does not create a new application' do it 'does not create a new application' do
expect do expect do
subject subject
end.not_to change(Clusters::Applications::Helm, :count) end.not_to change(Clusters::Applications::Ingress, :count)
end end
it 'schedules an upgrade for the application' do it 'schedules an upgrade for the application' do
...@@ -43,10 +43,6 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -43,10 +43,6 @@ RSpec.describe Clusters::Applications::CreateService do
end end
context 'known applications' do context 'known applications' do
before do
create(:clusters_applications_helm, :installed, cluster: cluster)
end
context 'ingress application' do context 'ingress application' do
let(:params) do let(:params) do
{ {
...@@ -215,19 +211,17 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -215,19 +211,17 @@ RSpec.describe Clusters::Applications::CreateService do
using RSpec::Parameterized::TableSyntax using RSpec::Parameterized::TableSyntax
where(:application, :association, :allowed, :pre_create_helm, :pre_create_ingress) do where(:application, :association, :allowed, :pre_create_ingress) do
'helm' | :application_helm | true | false | false 'ingress' | :application_ingress | true | false
'ingress' | :application_ingress | true | true | false 'runner' | :application_runner | true | false
'runner' | :application_runner | true | true | false 'prometheus' | :application_prometheus | true | false
'prometheus' | :application_prometheus | true | true | false 'jupyter' | :application_jupyter | true | true
'jupyter' | :application_jupyter | true | true | true
end end
with_them do with_them do
before do before do
klass = "Clusters::Applications::#{application.titleize}" klass = "Clusters::Applications::#{application.titleize}"
allow_any_instance_of(klass.constantize).to receive(:make_scheduled!).and_call_original allow_any_instance_of(klass.constantize).to receive(:make_scheduled!).and_call_original
create(:clusters_applications_helm, :installed, cluster: cluster) if pre_create_helm
create(:clusters_applications_ingress, :installed, cluster: cluster, external_hostname: 'example.com') if pre_create_ingress create(:clusters_applications_ingress, :installed, cluster: cluster, external_hostname: 'example.com') if pre_create_ingress
end end
...@@ -252,7 +246,7 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -252,7 +246,7 @@ RSpec.describe Clusters::Applications::CreateService do
it 'makes the application scheduled' do it 'makes the application scheduled' do
expect do expect do
subject subject
end.to change { Clusters::Applications::Helm.with_status(:scheduled).count }.by(1) end.to change { Clusters::Applications::Ingress.with_status(:scheduled).count }.by(1)
end end
it 'schedules an install via worker' do it 'schedules an install via worker' do
...@@ -266,7 +260,7 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -266,7 +260,7 @@ RSpec.describe Clusters::Applications::CreateService do
end end
context 'when application is associated with a cluster' do context 'when application is associated with a cluster' do
let(:application) { create(:clusters_applications_helm, :installable, cluster: cluster) } let(:application) { create(:clusters_applications_ingress, :installable, cluster: cluster) }
let(:worker_arguments) { [application.name, application.id] } let(:worker_arguments) { [application.name, application.id] }
it_behaves_like 'installable applications' it_behaves_like 'installable applications'
...@@ -280,7 +274,7 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -280,7 +274,7 @@ RSpec.describe Clusters::Applications::CreateService do
end end
context 'when installation is already in progress' do context 'when installation is already in progress' do
let!(:application) { create(:clusters_applications_helm, :installing, cluster: cluster) } let!(:application) { create(:clusters_applications_ingress, :installing, cluster: cluster) }
it 'raises an exception' do it 'raises an exception' do
expect { subject } expect { subject }
...@@ -295,7 +289,7 @@ RSpec.describe Clusters::Applications::CreateService do ...@@ -295,7 +289,7 @@ RSpec.describe Clusters::Applications::CreateService do
context 'when application is installed' do context 'when application is installed' do
%i(installed updated).each do |status| %i(installed updated).each do |status|
let(:application) { create(:clusters_applications_helm, status, cluster: cluster) } let(:application) { create(:clusters_applications_ingress, status, cluster: cluster) }
it 'schedules an upgrade via worker' do it 'schedules an upgrade via worker' do
expect(ClusterUpgradeAppWorker) expect(ClusterUpgradeAppWorker)
......
...@@ -67,7 +67,8 @@ RSpec.describe Clusters::Cleanup::AppService do ...@@ -67,7 +67,8 @@ RSpec.describe Clusters::Cleanup::AppService do
it 'only uninstalls apps that are not dependencies for other installed apps' do it 'only uninstalls apps that are not dependencies for other installed apps' do
expect(Clusters::Applications::UninstallWorker) expect(Clusters::Applications::UninstallWorker)
.not_to receive(:perform_async).with(helm.name, helm.id) .to receive(:perform_async).with(helm.name, helm.id)
.and_call_original
expect(Clusters::Applications::UninstallWorker) expect(Clusters::Applications::UninstallWorker)
.not_to receive(:perform_async).with(ingress.name, ingress.id) .not_to receive(:perform_async).with(ingress.name, ingress.id)
...@@ -85,7 +86,7 @@ RSpec.describe Clusters::Cleanup::AppService do ...@@ -85,7 +86,7 @@ RSpec.describe Clusters::Cleanup::AppService do
it 'logs application uninstalls and next execution' do it 'logs application uninstalls and next execution' do
expect(logger).to receive(:info) expect(logger).to receive(:info)
.with(log_meta.merge(event: :uninstalling_app, application: kind_of(String))).twice .with(log_meta.merge(event: :uninstalling_app, application: kind_of(String))).exactly(3).times
expect(logger).to receive(:info) expect(logger).to receive(:info)
.with(log_meta.merge(event: :scheduling_execution, next_execution: 1)) .with(log_meta.merge(event: :scheduling_execution, next_execution: 1))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment