Commit 2eb5b757 authored by Robert Speicher

Merge branch '299178-follow-up-from-cached-sidebar-issues-count-in-group-sidebar' into 'master'

Cache merge requests count in group sidebar [RUN ALL RSPEC] [RUN AS-IF-FOSS]

See merge request gitlab-org/gitlab!55971
parents 0317fdc9 1795a4fb
@@ -67,7 +67,7 @@ module GroupsHelper
def group_open_issues_count(group)
if Feature.enabled?(:cached_sidebar_open_issues_count, group, default_enabled: :yaml)
cached_open_group_issues_count(group)
cached_issuables_count(group, type: :issues)
else
number_with_delimiter(group_issues_count(state: 'opened'))
end
@@ -80,18 +80,11 @@ module GroupsHelper
.count
end
def cached_open_group_issues_count(group)
count_service = Groups::OpenIssuesCountService
issues_count = count_service.new(group, current_user).count
if issues_count > count_service::CACHED_COUNT_THRESHOLD
ActiveSupport::NumberHelper
.number_to_human(
issues_count,
units: { thousand: 'k', million: 'm' }, precision: 1, significant: false, format: '%n%u'
)
def group_open_merge_requests_count(group)
if Feature.enabled?(:cached_sidebar_merge_requests_count, group, default_enabled: :yaml)
cached_issuables_count(@group, type: :merge_requests)
else
number_with_delimiter(issues_count)
number_with_delimiter(group_merge_requests_count(state: 'opened'))
end
end
@@ -102,6 +95,14 @@ module GroupsHelper
.count
end
def cached_issuables_count(group, type: nil)
count_service = issuables_count_service_class(type)
return unless count_service.present?
issuables_count = count_service.new(group, current_user).count
format_issuables_count(count_service, issuables_count)
end
def group_dependency_proxy_url(group)
# The namespace path can include uppercase letters, which
# Docker doesn't allow. The proxy expects it to be downcased.
@@ -306,6 +307,26 @@ module GroupsHelper
def ancestor_locked_and_has_been_overridden(group)
s_("GroupSettings|This setting is applied on %{ancestor_group} and has been overridden on this subgroup.").html_safe % { ancestor_group: ancestor_group(group) }
end
def issuables_count_service_class(type)
if type == :issues
Groups::OpenIssuesCountService
elsif type == :merge_requests
Groups::MergeRequestsCountService
end
end
def format_issuables_count(count_service, count)
if count > count_service::CACHED_COUNT_THRESHOLD
ActiveSupport::NumberHelper
.number_to_human(
count,
units: { thousand: 'k', million: 'm' }, precision: 1, significant: false, format: '%n%u'
)
else
number_with_delimiter(count)
end
end
end
GroupsHelper.prepend_if_ee('EE::GroupsHelper')
# frozen_string_literal: true
module Groups
class CountService < BaseCountService
include Gitlab::Utils::StrongMemoize
VERSION = 1
CACHED_COUNT_THRESHOLD = 1000
EXPIRATION_TIME = 24.hours
attr_reader :group, :user
def initialize(group, user = nil)
@group = group
@user = user
end
def count
cached_count = Rails.cache.read(cache_key)
return cached_count unless cached_count.blank?
refreshed_count = uncached_count
update_cache_for_key(cache_key) { refreshed_count } if refreshed_count > CACHED_COUNT_THRESHOLD
refreshed_count
end
def cache_key
['groups', "#{issuable_key}_count_service", VERSION, group.id, cache_key_name]
end
private
def relation_for_count
raise NotImplementedError
end
def cache_options
super.merge({ expires_in: EXPIRATION_TIME })
end
def cache_key_name
raise NotImplementedError, 'cache_key_name must be implemented and return a String'
end
def issuable_key
raise NotImplementedError, 'issuable_key must be implemented and return a String'
end
end
end
# frozen_string_literal: true
module Groups
# Service class for counting and caching the number of open merge requests of a group.
class MergeRequestsCountService < Groups::CountService
private
def cache_key_name
'open_merge_requests_count'
end
def relation_for_count
MergeRequestsFinder
.new(user, group_id: group.id, state: 'opened', non_archived: true, include_subgroups: true)
.execute
end
def issuable_key
'open_merge_requests'
end
end
end
@@ -2,47 +2,12 @@
module Groups
# Service class for counting and caching the number of open issues of a group.
class OpenIssuesCountService < BaseCountService
include Gitlab::Utils::StrongMemoize
VERSION = 1
class OpenIssuesCountService < Groups::CountService
PUBLIC_COUNT_KEY = 'group_public_open_issues_count'
TOTAL_COUNT_KEY = 'group_total_open_issues_count'
CACHED_COUNT_THRESHOLD = 1000
EXPIRATION_TIME = 24.hours
attr_reader :group, :user
def initialize(group, user = nil)
@group = group
@user = user
end
# Reads count value from cache and return it if present.
# If empty or expired, #uncached_count will calculate the issues count for the group and
# compare it with the threshold. If it is greater, it will be written to the cache and returned.
# If below, it will be returned without being cached.
# This results in only caching large counts and calculating the rest with every call to maintain
# accuracy.
def count
cached_count = Rails.cache.read(cache_key)
return cached_count unless cached_count.blank?
refreshed_count = uncached_count
update_cache_for_key(cache_key) { refreshed_count } if refreshed_count > CACHED_COUNT_THRESHOLD
refreshed_count
end
def cache_key(key = nil)
['groups', 'open_issues_count_service', VERSION, group.id, cache_key_name]
end
private
def cache_options
super.merge({ expires_in: EXPIRATION_TIME })
end
def cache_key_name
public_only? ? PUBLIC_COUNT_KEY : TOTAL_COUNT_KEY
end
@@ -60,5 +25,9 @@ module Groups
def relation_for_count
IssuesFinder.new(user, group_id: group.id, state: 'opened', non_archived: true, include_subgroups: true, public_only: public_only?).execute
end
def issuable_key
'open_issues'
end
end
end
@@ -2,7 +2,7 @@
- page_title _("Merge Requests")
- if group_merge_requests_count(state: 'all') == 0
- if @merge_requests&.size == 0
= render 'shared/empty_states/merge_requests', project_select_button: true
- else
.top-area
- issues_count = group_open_issues_count(@group)
- merge_requests_count = group_merge_requests_count(state: 'opened')
- merge_requests_count = group_open_merge_requests_count(@group)
- aside_title = @group.subgroup? ? _('Subgroup navigation') : _('Group navigation')
- overview_title = @group.subgroup? ? _('Subgroup overview') : _('Group overview')
@@ -92,13 +92,13 @@
= sprite_icon('git-merge')
%span.nav-item-name
= _('Merge Requests')
%span.badge.badge-pill.count= number_with_delimiter(merge_requests_count)
%span.badge.badge-pill.count= merge_requests_count
%ul.sidebar-sub-level-items.is-fly-out-only
= nav_link(path: 'groups#merge_requests', html_options: { class: "fly-out-top-item" } ) do
= link_to merge_requests_group_path(@group) do
%strong.fly-out-top-item-name
= _('Merge Requests')
%span.badge.badge-pill.count.merge_counter.js-merge-counter.fly-out-badge= number_with_delimiter(merge_requests_count)
%span.badge.badge-pill.count.merge_counter.js-merge-counter.fly-out-badge= merge_requests_count
= render_if_exists "layouts/nav/ee/security_link" # EE-specific
---
title: Cache open merge requests count in group sidebar
merge_request: 55971
author:
type: performance
---
name: cached_sidebar_merge_requests_count
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55971
rollout_issue_url:
milestone: '13.11'
type: development
group: group::product planning
default_enabled: true
@@ -28,6 +28,39 @@ You can [search and filter the results](../../search/index.md#filtering-issue-an
![Group Issues list view](img/group_merge_requests_list_view.png)
## Cached merge request count
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/299542) in GitLab 13.11.
> - It's [deployed behind a feature flag](../../feature_flags.md), enabled by default.
> - It's enabled on GitLab.com.
> - It's recommended for production use.
> - For GitLab self-managed instances, GitLab administrators can opt to [disable it](#enable-or-disable-cached-merge-request-count).
WARNING:
This feature might not be available to you. Check the **version history** note above for details.
In a group, the sidebar displays the total count of open merge requests. This value is cached if it's higher
than 1000. The cached value is rounded to thousands (or millions) and updated every 24 hours.
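For illustration, the rounding applied to cached values corresponds to the `ActiveSupport::NumberHelper` call in `GroupsHelper#format_issuables_count` above. A minimal standalone sketch (the example count is arbitrary):

```ruby
require 'active_support/number_helper'

count = 12_560 # example count above the 1000 threshold

ActiveSupport::NumberHelper.number_to_human(
  count,
  units: { thousand: 'k', million: 'm' }, precision: 1, significant: false, format: '%n%u'
)
# => "12.6k" (counts at or below the threshold are displayed exactly instead)
```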
### Enable or disable cached merge request count **(FREE SELF)**
Cached merge request count in the left sidebar is under development but ready for production use. It is
deployed behind a feature flag that is **enabled by default**.
[GitLab administrators with access to the GitLab Rails console](../../../administration/feature_flags.md)
can disable it.
To disable it:
```ruby
Feature.disable(:cached_sidebar_merge_requests_count)
```
To enable it:
```ruby
Feature.enable(:cached_sidebar_merge_requests_count)
```
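The helper checks the flag with the group as the flag actor, so, assuming the standard GitLab feature-flag actor API, it can presumably also be toggled for a single group rather than instance-wide. A hypothetical example with a placeholder group path:

```ruby
# Hypothetical: enable the cached count for one group only.
group = Group.find_by_full_path('my-group/my-subgroup')
Feature.enable(:cached_sidebar_merge_requests_count, group)
```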
## Semi-linear history merge requests
A merge commit is created for every merge, but the branch is only merged if
@@ -481,45 +481,68 @@ RSpec.describe GroupsHelper do
end
end
describe '#cached_open_group_issues_count' do
describe '#cached_issuables_count' do
let_it_be(:current_user) { create(:user) }
let_it_be(:group) { create(:group, name: 'group') }
let_it_be(:count_service) { Groups::OpenIssuesCountService }
subject { helper.cached_issuables_count(group, type: type) }
before do
allow(helper).to receive(:current_user) { current_user }
allow(count_service).to receive(:new).and_call_original
end
it 'returns all digits for count value under 1000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(999)
shared_examples 'caching issuables count' do
it 'calls the correct service class' do
subject
expect(count_service).to have_received(:new).with(group, current_user)
end
expect(helper.cached_open_group_issues_count(group)).to eq('999')
end
it 'returns all digits for count value under 1000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(999)
end
it 'returns truncated digits for count value over 1000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(2300)
expect(subject).to eq('999')
end
expect(helper.cached_open_group_issues_count(group)).to eq('2.3k')
end
it 'returns truncated digits for count value over 1000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(2300)
end
it 'returns truncated digits for count value over 10000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(12560)
expect(subject).to eq('2.3k')
end
expect(helper.cached_open_group_issues_count(group)).to eq('12.6k')
end
it 'returns truncated digits for count value over 10000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(12560)
end
it 'returns truncated digits for count value over 100000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(112560)
expect(subject).to eq('12.6k')
end
it 'returns truncated digits for count value over 100000' do
allow_next_instance_of(count_service) do |service|
allow(service).to receive(:count).and_return(112560)
end
expect(subject).to eq('112.6k')
end
end
context 'with issue type' do
let(:type) { :issues }
let(:count_service) { Groups::OpenIssuesCountService }
it_behaves_like 'caching issuables count'
end
context 'with merge request type' do
let(:type) { :merge_requests }
let(:count_service) { Groups::MergeRequestsCountService }
expect(helper.cached_open_group_issues_count(group)).to eq('112.6k')
it_behaves_like 'caching issuables count'
end
end
end
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Groups::MergeRequestsCountService, :use_clean_rails_memory_store_caching do
let_it_be(:user) { create(:user) }
let_it_be(:group) { create(:group, :public) }
let_it_be(:project) { create(:project, :repository, namespace: group) }
let_it_be(:merge_request) { create(:merge_request, source_project: project, target_project: project) }
subject { described_class.new(group, user) }
describe '#relation_for_count' do
before do
group.add_reporter(user)
allow(MergeRequestsFinder).to receive(:new).and_call_original
end
it 'uses the MergeRequestsFinder to scope merge requests' do
expect(MergeRequestsFinder)
.to receive(:new)
.with(user, group_id: group.id, state: 'opened', non_archived: true, include_subgroups: true)
subject.count
end
end
it_behaves_like 'a counter caching service with threshold'
end
@@ -54,53 +54,7 @@ RSpec.describe Groups::OpenIssuesCountService, :use_clean_rails_memory_store_cac
end
end
context 'with different cache values' do
let(:public_count_key) { subject.cache_key(described_class::PUBLIC_COUNT_KEY) }
let(:under_threshold) { described_class::CACHED_COUNT_THRESHOLD - 1 }
let(:over_threshold) { described_class::CACHED_COUNT_THRESHOLD + 1 }
context 'when cache is empty' do
before do
Rails.cache.delete(public_count_key)
end
it 'refreshes cache if value over threshold' do
allow(subject).to receive(:uncached_count).and_return(over_threshold)
expect(subject.count).to eq(over_threshold)
expect(Rails.cache.read(public_count_key)).to eq(over_threshold)
end
it 'does not refresh cache if value under threshold' do
allow(subject).to receive(:uncached_count).and_return(under_threshold)
expect(subject.count).to eq(under_threshold)
expect(Rails.cache.read(public_count_key)).to be_nil
end
end
context 'when cached count is under the threshold value' do
before do
Rails.cache.write(public_count_key, under_threshold)
end
it 'does not refresh cache' do
expect(Rails.cache).not_to receive(:write)
expect(subject.count).to eq(under_threshold)
end
end
context 'when cached count is over the threshold value' do
before do
Rails.cache.write(public_count_key, over_threshold)
end
it 'does not refresh cache' do
expect(Rails.cache).not_to receive(:write)
expect(subject.count).to eq(over_threshold)
end
end
end
it_behaves_like 'a counter caching service with threshold'
end
end
end
# frozen_string_literal: true
# The calling spec should use `:use_clean_rails_memory_store_caching`
# when including this shared example. E.g.:
#
# describe MyCountService, :use_clean_rails_memory_store_caching do
# it_behaves_like 'a counter caching service with threshold'
# end
RSpec.shared_examples 'a counter caching service with threshold' do
let(:cache_key) { subject.cache_key }
let(:under_threshold) { described_class::CACHED_COUNT_THRESHOLD - 1 }
let(:over_threshold) { described_class::CACHED_COUNT_THRESHOLD + 1 }
context 'when cache is empty' do
before do
Rails.cache.delete(cache_key)
end
it 'refreshes cache if value over threshold' do
allow(subject).to receive(:uncached_count).and_return(over_threshold)
expect(subject.count).to eq(over_threshold)
expect(Rails.cache.read(cache_key)).to eq(over_threshold)
end
it 'does not refresh cache if value under threshold' do
allow(subject).to receive(:uncached_count).and_return(under_threshold)
expect(subject.count).to eq(under_threshold)
expect(Rails.cache.read(cache_key)).to be_nil
end
end
context 'when cached count is under the threshold value' do
before do
Rails.cache.write(cache_key, under_threshold)
end
it 'does not refresh cache' do
expect(Rails.cache).not_to receive(:write)
expect(subject.count).to eq(under_threshold)
end
end
context 'when cached count is over the threshold value' do
before do
Rails.cache.write(cache_key, over_threshold)
end
it 'does not refresh cache' do
expect(Rails.cache).not_to receive(:write)
expect(subject.count).to eq(over_threshold)
end
end
end