Commit 355df111 authored by Shinya Maeda

Introduce AutoDeleteCronWorker for Environments

This commit adds a cron worker for
environments that are marked for deletion.
It also cleans up the unused environment refs.

Changelog: added
parent 99f91ccc
......@@ -78,6 +78,7 @@ class Environment < ApplicationRecord
  scope :for_name, -> (name) { where(name: name) }
  scope :preload_cluster, -> { preload(last_deployment: :cluster) }
  scope :auto_stoppable, -> (limit) { available.where('auto_stop_at < ?', Time.zone.now).limit(limit) }
  scope :auto_deletable, -> (limit) { stopped.where('auto_delete_at < ?', Time.zone.now).limit(limit) }

  ##
  # Search environments which have names like the given query.
......
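
For orientation, a rough sketch of what the new scope returns (illustrative only; the SQL
below is an approximation and assumes the existing "stopped" state scope on Environment):

  # Only environments that are already stopped and whose auto_delete_at
  # timestamp is in the past qualify, capped at the given limit.
  Environment.auto_deletable(100)
  # SELECT "environments".* FROM "environments"
  # WHERE "environments"."state" = 'stopped'
  #   AND (auto_delete_at < '2021-04-01 00:00:00')
  # LIMIT 100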
......@@ -256,6 +256,15 @@
  :weight: 1
  :idempotent: true
  :tags: []
- :name: cronjob:environments_auto_delete_cron
  :worker_name: Environments::AutoDeleteCronWorker
  :feature_category: :continuous_delivery
  :has_external_dependencies:
  :urgency: :low
  :resource_boundary: :unknown
  :weight: 1
  :idempotent: true
  :tags: []
- :name: cronjob:environments_auto_stop_cron
  :worker_name: Environments::AutoStopCronWorker
  :feature_category: :continuous_delivery
......
# frozen_string_literal: true

module Environments
  class AutoDeleteCronWorker
    include ApplicationWorker
    include ::Gitlab::LoopHelpers
    include CronjobQueue # rubocop:disable Scalability/CronWorkerContext

    data_consistency :always

    feature_category :continuous_delivery
    deduplicate :until_executed, including_scheduled: true
    idempotent!

    LOOP_TIMEOUT = 45.minutes
    LOOP_LIMIT = 1000
    BATCH_SIZE = 100

    def perform
      loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
        destroy_in_batch
      end
    end

    private

    def destroy_in_batch
      environments = Environment.auto_deletable(BATCH_SIZE)

      return false if environments.empty?

      environments.each(&:destroy)
    end
  end
end
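
A simplified sketch of how the batching loop behaves (this approximates
Gitlab::LoopHelpers#loop_until rather than reproducing it):

  # Rough control flow for #perform: keep destroying batches of BATCH_SIZE
  # until no auto-deletable environments remain (destroy_in_batch returns
  # false), LOOP_LIMIT iterations have run, or LOOP_TIMEOUT has elapsed.
  started_at = Time.current

  LOOP_LIMIT.times do
    break if Time.current - started_at > LOOP_TIMEOUT
    break unless destroy_in_batch
  end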
......@@ -470,6 +470,9 @@ production: &base
    # Stop expired environments
    environments_auto_stop_cron_worker:
      cron: "24 * * * *"
    # Delete stopped environments
    environments_auto_delete_cron_worker:
      cron: "34 * * * *"
    # Periodically run 'git fsck' on all repositories. If started more than
    # once per hour you will have concurrent 'git fsck' jobs.
    repository_check_worker:
......
......@@ -444,6 +444,9 @@ Settings.cron_jobs['ci_schedule_delete_objects_worker']['job_class'] = 'Ci::Sche
Settings.cron_jobs['environments_auto_stop_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['environments_auto_stop_cron_worker']['cron'] ||= '24 * * * *'
Settings.cron_jobs['environments_auto_stop_cron_worker']['job_class'] = 'Environments::AutoStopCronWorker'
Settings.cron_jobs['environments_auto_delete_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['environments_auto_delete_cron_worker']['cron'] ||= '34 * * * *'
Settings.cron_jobs['environments_auto_delete_cron_worker']['job_class'] = 'Environments::AutoDeleteCronWorker'
Settings.cron_jobs['repository_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['repository_check_worker']['cron'] ||= '20 * * * *'
Settings.cron_jobs['repository_check_worker']['job_class'] = 'RepositoryCheck::DispatchWorker'
......
# frozen_string_literal: true

class CreateIndexOnEnvironmentsAutoDeleteAt < ActiveRecord::Migration[6.1]
  include Gitlab::Database::MigrationHelpers

  disable_ddl_transaction!

  INDEX_NAME = 'index_environments_on_state_and_auto_delete_at'

  def up
    add_concurrent_index :environments,
      %i[auto_delete_at],
      where: "auto_delete_at IS NOT NULL AND state = 'stopped'",
      name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :environments, INDEX_NAME
  end
end
b64ba2a9ee42497aa9f60ca76f4925076cb77e73fd79bb9b10362cd48d11252b
\ No newline at end of file
......@@ -23677,6 +23677,8 @@ CREATE INDEX index_environments_on_project_id_and_tier ON environments USING btr
CREATE INDEX index_environments_on_project_id_state_environment_type ON environments USING btree (project_id, state, environment_type);
CREATE INDEX index_environments_on_state_and_auto_delete_at ON environments USING btree (auto_delete_at) WHERE ((auto_delete_at IS NOT NULL) AND ((state)::text = 'stopped'::text));
CREATE INDEX index_environments_on_state_and_auto_stop_at ON environments USING btree (state, auto_stop_at) WHERE ((auto_stop_at IS NOT NULL) AND ((state)::text = 'available'::text));
CREATE UNIQUE INDEX index_epic_board_list_preferences_on_user_and_list ON boards_epic_list_user_preferences USING btree (user_id, epic_list_id);
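
The partial index above is shaped to serve the worker's query: it only covers rows where
state = 'stopped' and auto_delete_at is set, which is exactly the population read by the
auto_deletable scope. An illustrative way to check this from a Rails console (output
abbreviated; with only a handful of rows the planner may still prefer a sequential scan):

  Environment.auto_deletable(100).explain
  # => Limit
  #      -> Index Scan using index_environments_on_state_and_auto_delete_at on environments
  #           Index Cond: (auto_delete_at < '...')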
......@@ -75,6 +75,11 @@ FactoryBot.define do
      auto_stop_at { 1.day.ago }
    end

    trait :auto_deletable do
      state { :stopped }
      auto_delete_at { 1.day.ago }
    end

    trait :will_auto_stop do
      auto_stop_at { 1.day.from_now }
    end
......
......@@ -215,6 +215,24 @@ RSpec.describe Environment, :use_clean_rails_memory_store_caching do
    end
  end

  describe '.auto_deletable' do
    subject { described_class.auto_deletable(limit) }

    let(:limit) { 100 }

    context 'when environment is auto-deletable' do
      let!(:environment) { create(:environment, :auto_deletable) }

      it { is_expected.to eq([environment]) }
    end

    context 'when environment is not auto-deletable' do
      let!(:environment) { create(:environment) }

      it { is_expected.to be_empty }
    end
  end

  describe '.stop_actions' do
    subject { environments.stop_actions }
......
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Environments::AutoDeleteCronWorker do
  include CreateEnvironmentsHelpers

  let(:worker) { described_class.new }

  describe '#perform' do
    subject { worker.perform }

    let_it_be(:project) { create(:project, :repository) }
    let!(:environment) { create(:environment, :auto_deletable, project: project) }

    it 'deletes the environment' do
      expect { subject }.to change { Environment.count }.by(-1)
    end

    context 'when environment is not stopped' do
      let!(:environment) { create(:environment, :available, auto_delete_at: 1.day.ago, project: project) }

      it 'does not delete the environment' do
        expect { subject }.not_to change { Environment.count }
      end
    end

    context 'when auto_delete_at is null' do
      let!(:environment) { create(:environment, :stopped, auto_delete_at: nil, project: project) }

      it 'does not delete the environment' do
        expect { subject }.not_to change { Environment.count }
      end
    end

    context 'with multiple deletable environments' do
      let!(:other_environment) { create(:environment, :auto_deletable, project: project) }

      it 'deletes all deletable environments' do
        expect { subject }.to change { Environment.count }.by(-2)
      end

      context 'when loop reached loop limit' do
        before do
          stub_const("#{described_class}::LOOP_LIMIT", 1)
          stub_const("#{described_class}::BATCH_SIZE", 1)
        end

        it 'deletes only one deletable environment' do
          expect { subject }.to change { Environment.count }.by(-1)
        end
      end

      context 'when batch size is less than the number of environments' do
        before do
          stub_const("#{described_class}::BATCH_SIZE", 1)
        end

        it 'deletes all deletable environments' do
          expect { subject }.to change { Environment.count }.by(-2)
        end
      end
    end

    context 'with multiple deployments' do
      it 'deletes the deployment records and refs' do
        deployment_1 = create(:deployment, environment: environment, project: project)
        deployment_2 = create(:deployment, environment: environment, project: project)
        deployment_1.create_ref
        deployment_2.create_ref

        expect(project.repository.commit(deployment_1.ref_path)).to be_present
        expect(project.repository.commit(deployment_2.ref_path)).to be_present

        expect { subject }.to change { Deployment.count }.by(-2)

        expect(project.repository.commit(deployment_1.ref_path)).not_to be_present
        expect(project.repository.commit(deployment_2.ref_path)).not_to be_present
      end
    end

    context 'when loop reached timeout' do
      before do
        stub_const("#{described_class}::LOOP_TIMEOUT", 0.seconds)
        stub_const("#{described_class}::LOOP_LIMIT", 100_000)

        allow_next_instance_of(described_class) do |worker|
          allow(worker).to receive(:destroy_in_batch) { true }
        end
      end

      it 'does not delete the environment' do
        expect { subject }.not_to change { Environment.count }
      end
    end

    context 'with idempotent flag' do
      include_examples 'an idempotent worker' do
        it 'deletes the environment' do
          expect { subject }.to change { Environment.count }.by(-1)
        end
      end
    end
  end
end