Commit 5cf2def4 authored by GitLab Bot's avatar GitLab Bot

Automatic merge of gitlab-org/gitlab master

parents 8c1e88aa 7b1d0dfb
...@@ -164,7 +164,7 @@ gem 'wikicloth', '0.8.1' ...@@ -164,7 +164,7 @@ gem 'wikicloth', '0.8.1'
gem 'asciidoctor', '~> 2.0.10' gem 'asciidoctor', '~> 2.0.10'
gem 'asciidoctor-include-ext', '~> 0.3.1', require: false gem 'asciidoctor-include-ext', '~> 0.3.1', require: false
gem 'asciidoctor-plantuml', '~> 0.0.12' gem 'asciidoctor-plantuml', '~> 0.0.12'
gem 'asciidoctor-kroki', '~> 0.4.0', require: false gem 'asciidoctor-kroki', '~> 0.5.0', require: false
gem 'rouge', '~> 3.26.0' gem 'rouge', '~> 3.26.0'
gem 'truncato', '~> 0.7.11' gem 'truncato', '~> 0.7.11'
gem 'bootstrap_form', '~> 4.2.0' gem 'bootstrap_form', '~> 4.2.0'
......
...@@ -92,10 +92,10 @@ GEM ...@@ -92,10 +92,10 @@ GEM
faraday_middleware (~> 1.0) faraday_middleware (~> 1.0)
faraday_middleware-multi_json (~> 0.0) faraday_middleware-multi_json (~> 0.0)
oauth2 (~> 1.4) oauth2 (~> 1.4)
asciidoctor (2.0.12) asciidoctor (2.0.15)
asciidoctor-include-ext (0.3.1) asciidoctor-include-ext (0.3.1)
asciidoctor (>= 1.5.6, < 3.0.0) asciidoctor (>= 1.5.6, < 3.0.0)
asciidoctor-kroki (0.4.0) asciidoctor-kroki (0.5.0)
asciidoctor (~> 2.0) asciidoctor (~> 2.0)
asciidoctor-plantuml (0.0.12) asciidoctor-plantuml (0.0.12)
asciidoctor (>= 1.5.6, < 3.0.0) asciidoctor (>= 1.5.6, < 3.0.0)
...@@ -1409,7 +1409,7 @@ DEPENDENCIES ...@@ -1409,7 +1409,7 @@ DEPENDENCIES
asana (~> 0.10.3) asana (~> 0.10.3)
asciidoctor (~> 2.0.10) asciidoctor (~> 2.0.10)
asciidoctor-include-ext (~> 0.3.1) asciidoctor-include-ext (~> 0.3.1)
asciidoctor-kroki (~> 0.4.0) asciidoctor-kroki (~> 0.5.0)
asciidoctor-plantuml (~> 0.0.12) asciidoctor-plantuml (~> 0.0.12)
atlassian-jwt (~> 0.2.0) atlassian-jwt (~> 0.2.0)
attr_encrypted (~> 3.1.0) attr_encrypted (~> 3.1.0)
......
# frozen_string_literal: true # frozen_string_literal: true
module IntegrationsHelper module IntegrationsHelper
def integration_event_description(event) def integration_event_description(integration, event)
case event case integration
when "push", "push_events" when Integrations::Jira
s_("ProjectService|Trigger event for pushes to the repository.") jira_integration_event_description(event)
when "tag_push", "tag_push_events" when Integrations::Teamcity
s_("ProjectService|Trigger event for new tags pushed to the repository.") teamcity_integration_event_description(event)
when "note", "note_events" else
s_("ProjectService|Trigger event for new comments.") default_integration_event_description(event)
when "confidential_note", "confidential_note_events"
s_("ProjectService|Trigger event for new comments on confidential issues.")
when "issue", "issue_events"
s_("ProjectService|Trigger event when an issue is created, updated, or closed.")
when "confidential_issue", "confidential_issue_events"
s_("ProjectService|Trigger event when a confidential issue is created, updated, or closed.")
when "merge_request", "merge_request_events"
s_("ProjectService|Trigger event when a merge request is created, updated, or merged.")
when "pipeline", "pipeline_events"
s_("ProjectService|Trigger event when a pipeline status changes.")
when "wiki_page", "wiki_page_events"
s_("ProjectService|Trigger event when a wiki page is created or updated.")
when "commit", "commit_events"
s_("ProjectService|Trigger event when a commit is created or updated.")
when "deployment"
s_("ProjectService|Trigger event when a deployment starts or finishes.")
when "alert"
s_("ProjectService|Trigger event when a new, unique alert is recorded.")
end end
end end
...@@ -144,6 +126,53 @@ module IntegrationsHelper ...@@ -144,6 +126,53 @@ module IntegrationsHelper
private private
# Jira-specific description for a trigger event name.
#
# Accepts both the bare event name ("merge_request") and the
# "<name>_events" property form. Returns nil for events without
# Jira-specific copy, so callers can fall back to the default text.
def jira_integration_event_description(event)
  if %w[merge_request merge_request_events].include?(event)
    s_("JiraService|Jira comments are created when an issue is referenced in a merge request.")
  elsif %w[commit commit_events].include?(event)
    s_("JiraService|Jira comments are created when an issue is referenced in a commit.")
  end
end
# TeamCity-specific description for a trigger event name.
#
# Accepts both the bare event name and the "<name>_events" property
# form. Returns nil for events without TeamCity-specific copy.
def teamcity_integration_event_description(event)
  if %w[push push_events].include?(event)
    s_('TeamcityIntegration|Trigger TeamCity CI after every push to the repository, except branch delete')
  elsif %w[merge_request merge_request_events].include?(event)
    s_('TeamcityIntegration|Trigger TeamCity CI after a merge request has been created or updated')
  end
end
# Default (integration-agnostic) description for a trigger event name.
#
# Event names are accepted both in bare form ("push") and in the
# "<name>_events" form used by integration properties ("push_events");
# "deployment" and "alert" only exist in bare form. Returns nil for
# unknown events.
def default_integration_event_description(event)
  case event
  when "push", "push_events"
    s_("ProjectService|Trigger event for pushes to the repository.")
  when "tag_push", "tag_push_events"
    s_("ProjectService|Trigger event for new tags pushed to the repository.")
  when "note", "note_events"
    s_("ProjectService|Trigger event for new comments.")
  when "confidential_note", "confidential_note_events"
    s_("ProjectService|Trigger event for new comments on confidential issues.")
  when "issue", "issue_events"
    s_("ProjectService|Trigger event when an issue is created, updated, or closed.")
  when "confidential_issue", "confidential_issue_events"
    s_("ProjectService|Trigger event when a confidential issue is created, updated, or closed.")
  when "merge_request", "merge_request_events"
    s_("ProjectService|Trigger event when a merge request is created, updated, or merged.")
  when "pipeline", "pipeline_events"
    s_("ProjectService|Trigger event when a pipeline status changes.")
  when "wiki_page", "wiki_page_events"
    s_("ProjectService|Trigger event when a wiki page is created or updated.")
  when "commit", "commit_events"
    s_("ProjectService|Trigger event when a commit is created or updated.")
  when "deployment"
    s_("ProjectService|Trigger event when a deployment starts or finishes.")
  when "alert"
    s_("ProjectService|Trigger event when a new, unique alert is recorded.")
  end
end
def trigger_events_for_integration(integration) def trigger_events_for_integration(integration)
ServiceEventSerializer.new(service: integration).represent(integration.configurable_events).to_json ServiceEventSerializer.new(service: integration).represent(integration.configurable_events).to_json
end end
......
...@@ -172,10 +172,6 @@ class Integration < ApplicationRecord ...@@ -172,10 +172,6 @@ class Integration < ApplicationRecord
'push' 'push'
end end
def self.event_description(event)
IntegrationsHelper.integration_event_description(event)
end
def self.find_or_create_templates def self.find_or_create_templates
create_nonexistent_templates create_nonexistent_templates
for_template for_template
......
...@@ -577,15 +577,6 @@ module Integrations ...@@ -577,15 +577,6 @@ module Integrations
data_fields.deployment_server! data_fields.deployment_server!
end end
end end
def self.event_description(event)
case event
when "merge_request", "merge_request_events"
s_("JiraService|Jira comments are created when an issue is referenced in a merge request.")
when "commit", "commit_events"
s_("JiraService|Jira comments are created when an issue is referenced in a commit.")
end
end
end end
end end
......
...@@ -29,15 +29,6 @@ module Integrations ...@@ -29,15 +29,6 @@ module Integrations
def supported_events def supported_events
%w(push merge_request) %w(push merge_request)
end end
def event_description(event)
case event
when 'push', 'push_events'
'TeamCity CI will be triggered after every push to the repository except branch delete'
when 'merge_request', 'merge_request_events'
'TeamCity CI will be triggered after a merge request has been created or updated'
end
end
end end
def compose_service_hook def compose_service_hook
......
...@@ -14,7 +14,7 @@ class ServiceEventEntity < Grape::Entity ...@@ -14,7 +14,7 @@ class ServiceEventEntity < Grape::Entity
end end
expose :description do |event| expose :description do |event|
IntegrationsHelper.integration_event_description(event) IntegrationsHelper.integration_event_description(integration, event)
end end
expose :field, if: -> (_, _) { event_field } do expose :field, if: -> (_, _) { event_field } do
......
# frozen_string_literal: true

module ServicePing
  # Builds the raw service ping (usage data) payload, or an empty hash
  # when this instance is not allowed to report usage data.
  class BuildPayloadService
    def execute
      allowed_to_report? ? raw_payload : {}
    end

    private

    # Reporting requires product intelligence to be enabled AND the
    # single user (if any) to not still require usage-stats consent.
    def allowed_to_report?
      return false unless product_intelligence_enabled?

      !User.single_user&.requires_usage_stats_consent?
    end

    def product_intelligence_enabled?
      ::Gitlab::CurrentSettings.usage_ping_enabled?
    end

    # Memoized so repeated calls within one service instance do not
    # recompute the (expensive) usage data.
    def raw_payload
      @raw_payload ||= ::Gitlab::UsageData.data(force_refresh: true)
    end
  end
end

ServicePing::BuildPayloadService.prepend_mod_with('ServicePing::BuildPayloadService')
# frozen_string_literal: true

# Ensure that locked attributes can not be changed using a counter.
# TODO: this can be removed once `asciidoctor` gem is > 2.0.12
# and https://github.com/asciidoctor/asciidoctor/issues/3939 is merged
module Asciidoctor
  module DocumentPatch
    def counter(name, seed = nil)
      # Nested documents delegate counter handling to the parent document.
      return @parent_document.counter(name, seed) if @parent_document # rubocop: disable Gitlab/ModuleWithInstanceVariables

      # Only fall through to the stock counter behavior when the
      # attribute is not locked; locked attributes are left untouched.
      super unless attribute_locked? name
    end
  end
end

class Asciidoctor::Document
  prepend Asciidoctor::DocumentPatch
end
...@@ -237,3 +237,17 @@ For installations from source: ...@@ -237,3 +237,17 @@ For installations from source:
```shell ```shell
RAILS_ENV=production sudo -u git -H bundle exec rake gitlab:packages:migrate RAILS_ENV=production sudo -u git -H bundle exec rake gitlab:packages:migrate
``` ```
You can optionally track progress and verify that all packages migrated successfully.
From the [PostgreSQL console](https://docs.gitlab.com/omnibus/settings/database.html#connecting-to-the-bundled-postgresql-database)
(`sudo gitlab-psql -d gitlabhq_production` for Omnibus GitLab), verify that `objectstg` below (where
`file_store=2`) has the count of all packages:
```sql
gitlabhq_production=# SELECT count(*) AS total, sum(case when file_store = '1' then 1 else 0 end) AS filesystem, sum(case when file_store = '2' then 1 else 0 end) AS objectstg FROM packages_package_files;
total | filesystem | objectstg
------+------------+-----------
34 | 0 | 34
```
...@@ -93,6 +93,7 @@ are very appreciative of the work done by translators and proofreaders! ...@@ -93,6 +93,7 @@ are very appreciative of the work done by translators and proofreaders!
- Portuguese, Brazilian - Portuguese, Brazilian
- Paulo George Gomes Bezerra - [GitLab](https://gitlab.com/paulobezerra), [CrowdIn](https://crowdin.com/profile/paulogomes.rep) - Paulo George Gomes Bezerra - [GitLab](https://gitlab.com/paulobezerra), [CrowdIn](https://crowdin.com/profile/paulogomes.rep)
- André Gama - [GitLab](https://gitlab.com/andregamma), [CrowdIn](https://crowdin.com/profile/ToeOficial) - André Gama - [GitLab](https://gitlab.com/andregamma), [CrowdIn](https://crowdin.com/profile/ToeOficial)
- Eduardo Addad de Oliveira - [GitLab](https://gitlab.com/eduardoaddad), [CrowdIn](https://crowdin.com/profile/eduardoaddad)
- Romanian - Romanian
- Proofreaders needed. - Proofreaders needed.
- Russian - Russian
......
...@@ -134,6 +134,9 @@ If you are not using the GitHub integration, you can still perform an authorizat ...@@ -134,6 +134,9 @@ If you are not using the GitHub integration, you can still perform an authorizat
1. Hit the **List Your GitHub Repositories** button and wait while GitLab reads your repositories' information. 1. Hit the **List Your GitHub Repositories** button and wait while GitLab reads your repositories' information.
Once done, you'll be taken to the importer page to select the repositories to import. Once done, you'll be taken to the importer page to select the repositories to import.
To use a newer personal access token in imports after previously performing these steps, sign out of
your GitLab account and sign in again, or revoke the older personal access token in GitHub.
### Select which repositories to import ### Select which repositories to import
After you have authorized access to your GitHub repositories, you are redirected to the GitHub importer page and After you have authorized access to your GitHub repositories, you are redirected to the GitHub importer page and
......
# frozen_string_literal: true

module EE
  module ServicePing
    # EE extension of ServicePing::BuildPayloadService: when a license is
    # present, the raw usage-data payload is filtered down to only the
    # metric data categories this instance is permitted to report.
    module BuildPayloadService
      extend ::Gitlab::Utils::Override

      # Metric data categories as declared in metric definitions.
      STANDARD_CATEGORY = 'Standard'
      SUBSCRIPTION_CATEGORY = 'Subscription'
      OPTIONAL_CATEGORY = 'Optional'
      OPERATIONAL_CATEGORY = 'Operational'

      override :execute
      def execute
        # Unlicensed instances keep the CE behavior (all-or-nothing payload).
        return super unless ::License.current.present?

        filtered_usage_data(super)
      end

      private

      override :product_intelligence_enabled?
      def product_intelligence_enabled?
        ::License.current&.usage_ping? || super
      end

      # Recursively prunes the nested-hash payload in place: leaf metrics
      # are kept only when their data category is permitted; subtrees are
      # kept when they still contain at least one permitted metric.
      def filtered_usage_data(payload = raw_payload, parents = [])
        payload.keep_if do |label, node|
          if leaf?(node)
            permitted_categories.include?(metric_category(label, parents))
          else
            filtered_usage_data(node, parents.dup << label)
          end
        end
      end

      def permitted_categories
        @permitted_categories ||= collect_permitted_categories
      end

      # Standard and Subscription are always reportable for licensed
      # instances; Optional/Operational depend on settings and license.
      def collect_permitted_categories
        categories = [STANDARD_CATEGORY, SUBSCRIPTION_CATEGORY]
        categories << OPTIONAL_CATEGORY if ::Gitlab::CurrentSettings.usage_ping_enabled?
        categories << OPERATIONAL_CATEGORY if ::License.current.usage_ping?
        categories
      end

      # Looks up the data category for a metric by its dotted key path;
      # metrics without a definition default to the Optional category.
      def metric_category(key, parent_keys)
        key_path = parent_keys.dup.append(key).join('.')
        metric_definitions[key_path]&.attributes&.fetch(:data_category, OPTIONAL_CATEGORY)
      end

      def metric_definitions
        @metric_definitions ||= ::Gitlab::Usage::MetricDefinition.definitions
      end

      # A leaf is any non-Hash node, i.e. an actual metric value.
      def leaf?(node)
        !node.is_a?(Hash)
      end
    end
  end
end
# frozen_string_literal: true

require 'spec_helper'

# EE behavior of the service ping payload builder: with a license present,
# the payload is filtered per metric data category rather than all-or-nothing.
RSpec.describe ServicePing::BuildPayloadService do
  describe '#execute' do
    subject(:service_ping_payload) { described_class.new.execute }

    include_context 'stubbed service ping metrics definitions' do
      let(:subscription_metrics) do
        [
          metric_attributes('license_md5', "Subscription")
        ]
      end
    end

    before do
      allow(User).to receive(:single_user).and_return(double(:user, requires_usage_stats_consent?: false))
    end

    context 'when the GitLab instance has a license' do
      # License.current.present? == true
      context 'when the instance consented to submit optional product intelligence data' do
        before do
          # Gitlab::CurrentSettings.usage_ping_enabled? == true
          stub_config_setting(usage_ping_enabled: true)
        end

        context 'when the instance subscribes to the free TAM service' do
          before do
            # License.current.usage_ping? == true
            create_current_license(usage_ping_required_metrics_enabled: true)
          end

          it_behaves_like 'complete service ping payload'
        end

        context 'when the instance does NOT subscribe to the free TAM service' do
          before do
            # License.current.usage_ping? == false
            create_current_license(usage_ping_required_metrics_enabled: false)
          end

          it_behaves_like 'service ping payload with all expected metrics' do
            let(:expected_metrics) { standard_metrics + subscription_metrics + optional_metrics }
          end

          it_behaves_like 'service ping payload without restricted metrics' do
            let(:restricted_metrics) { operational_metrics }
          end
        end
      end

      context 'when the instance did NOT consent to submit optional product intelligence data' do
        before do
          # Gitlab::CurrentSettings.usage_ping_enabled? == false
          stub_config_setting(usage_ping_enabled: false)
        end

        context 'when the instance subscribes to the free TAM service' do
          before do
            # License.current.usage_ping? == true
            create_current_license(usage_ping_required_metrics_enabled: true)
          end

          it_behaves_like 'service ping payload with all expected metrics' do
            let(:expected_metrics) { standard_metrics + subscription_metrics + operational_metrics }
          end

          it_behaves_like 'service ping payload without restricted metrics' do
            let(:restricted_metrics) { optional_metrics }
          end
        end

        context 'when the instance does NOT subscribe to the free TAM service' do
          before do
            # License.current.usage_ping? == false
            create_current_license(usage_ping_required_metrics_enabled: false)
          end

          it 'returns empty service ping payload' do
            expect(service_ping_payload).to eq({})
          end
        end
      end
    end
  end
end
...@@ -21,6 +21,14 @@ RSpec.configure do |config| ...@@ -21,6 +21,14 @@ RSpec.configure do |config|
TestLicense.init TestLicense.init
end end
config.before(:context, :without_license) do
License.destroy_all # rubocop: disable Cop/DestroyAll
end
config.after(:context, :without_license) do
TestLicense.init
end
config.around(:each, :geo_tracking_db) do |example| config.around(:each, :geo_tracking_db) do |example|
example.run if Gitlab::Geo.geo_database_configured? example.run if Gitlab::Geo.geo_database_configured?
end end
......
...@@ -23,14 +23,14 @@ module API ...@@ -23,14 +23,14 @@ module API
INTEGRATIONS = integrations.freeze INTEGRATIONS = integrations.freeze
integration_classes.each do |service| integration_classes.each do |integration|
event_names = service.try(:event_names) || next event_names = integration.try(:event_names) || next
event_names.each do |event_name| event_names.each do |event_name|
INTEGRATIONS[service.to_param.tr("_", "-")] << { INTEGRATIONS[integration.to_param.tr("_", "-")] << {
required: false, required: false,
name: event_name.to_sym, name: event_name.to_sym,
type: String, type: String,
desc: service.event_description(event_name) desc: IntegrationsHelper.integration_event_description(integration, event_name)
} }
end end
end end
......
...@@ -44,6 +44,52 @@ module Gitlab ...@@ -44,6 +44,52 @@ module Gitlab
# TODO: Switch to individual job interval (prereq: https://gitlab.com/gitlab-org/gitlab/-/issues/328801) # TODO: Switch to individual job interval (prereq: https://gitlab.com/gitlab-org/gitlab/-/issues/328801)
duration.to_f / batched_migration.interval duration.to_f / batched_migration.interval
end end
# Splits a failed batched-migration job in half and requeues both halves.
#
# The current record is shrunk to the lower half of its id range (batch
# size halved, max_value lowered to the computed midpoint, attempts /
# timestamps / metrics reset to a pristine pending state) and a duplicate
# record is created for the upper half. Runs under a row lock to avoid
# racing with the migration runner.
#
# Raises a RuntimeError when the job is not failed, or when the batch
# size can no longer be halved.
def split_and_retry!
  with_lock do
    raise 'Only failed jobs can be split' unless failed?

    new_batch_size = batch_size / 2

    raise 'Job cannot be split further' if new_batch_size < 1

    # Ask the migration's batching strategy where the halved batch would end.
    batching_strategy = batched_migration.batch_class.new
    next_batch_bounds = batching_strategy.next_batch(
      batched_migration.table_name,
      batched_migration.column_name,
      batch_min_value: min_value,
      batch_size: new_batch_size
    )
    midpoint = next_batch_bounds.last

    # We don't want the midpoint to go over the existing max_value because
    # those IDs would already be in the next batched migration job.
    # This could happen when a lot of records in the current batch are deleted.
    #
    # In this case, we just lower the batch size so that future calls to this
    # method could eventually split the job if it continues to fail.
    if midpoint >= max_value
      update!(batch_size: new_batch_size, status: :pending)
    else
      old_max_value = max_value

      # Shrink this record to the lower half and reset its run state.
      update!(
        batch_size: new_batch_size,
        max_value: midpoint,
        attempts: 0,
        status: :pending,
        started_at: nil,
        finished_at: nil,
        metrics: {}
      )

      # Create the upper-half job as a copy of this (already reset) record.
      new_record = dup
      new_record.min_value = midpoint.next
      new_record.max_value = old_max_value
      new_record.save!
    end
  end
end
end end
end end
end end
......
...@@ -16,14 +16,20 @@ module Gitlab ...@@ -16,14 +16,20 @@ module Gitlab
# end # end
class << self class << self
def start(&block) def start(&block)
return @metric_start&.call unless block_given?
@metric_start = block @metric_start = block
end end
def finish(&block) def finish(&block)
return @metric_finish&.call unless block_given?
@metric_finish = block @metric_finish = block
end end
def relation(&block) def relation(&block)
return @metric_relation&.call unless block_given?
@metric_relation = block @metric_relation = block
end end
...@@ -32,15 +38,21 @@ module Gitlab ...@@ -32,15 +38,21 @@ module Gitlab
@column = column @column = column
end end
attr_reader :metric_operation, :metric_relation, :metric_start, :metric_finish, :column def cache_start_and_finish_as(cache_key)
@cache_key = cache_key
end
attr_reader :metric_operation, :metric_relation, :metric_start, :metric_finish, :column, :cache_key
end end
def value def value
start, finish = get_or_cache_batch_ids
method(self.class.metric_operation) method(self.class.metric_operation)
.call(relation, .call(relation,
self.class.column, self.class.column,
start: self.class.metric_start&.call, start: start,
finish: self.class.metric_finish&.call) finish: finish)
end end
def to_sql def to_sql
...@@ -73,6 +85,22 @@ module Gitlab ...@@ -73,6 +85,22 @@ module Gitlab
raise "Unknown time frame: #{time_frame} for DatabaseMetric" raise "Unknown time frame: #{time_frame} for DatabaseMetric"
end end
end end
# Returns the [start, finish] id bounds used to batch the metric query.
#
# When the metric class declared `cache_start_and_finish_as`, the bounds
# are computed once and cached for a day under that key, so repeated
# metric computations reuse the same batch ids; otherwise the start/finish
# blocks are evaluated on every call.
def get_or_cache_batch_ids
  return [self.class.start, self.class.finish] unless self.class.cache_key.present?

  key_name = "metric_instrumentation/#{self.class.cache_key}"

  start = Gitlab::Cache.fetch_once("#{key_name}_minimum_id", expires_in: 1.day) do
    self.class.start
  end

  finish = Gitlab::Cache.fetch_once("#{key_name}_maximum_id", expires_in: 1.day) do
    self.class.finish
  end

  [start, finish]
end
end end
end end
end end
......
...@@ -31905,6 +31905,12 @@ msgstr "" ...@@ -31905,6 +31905,12 @@ msgstr ""
msgid "Team domain" msgid "Team domain"
msgstr "" msgstr ""
msgid "TeamcityIntegration|Trigger TeamCity CI after a merge request has been created or updated"
msgstr ""
msgid "TeamcityIntegration|Trigger TeamCity CI after every push to the repository, except branch delete"
msgstr ""
msgid "Telephone number" msgid "Telephone number"
msgstr "" msgstr ""
......
...@@ -3,6 +3,22 @@ ...@@ -3,6 +3,22 @@
require 'spec_helper' require 'spec_helper'
RSpec.describe IntegrationsHelper do RSpec.describe IntegrationsHelper do
describe '#integration_event_description' do
subject(:description) { helper.integration_event_description(integration, 'merge_request_events') }
context 'when integration is Jira' do
let(:integration) { Integrations::Jira.new }
it { is_expected.to include('Jira') }
end
context 'when integration is Team City' do
let(:integration) { Integrations::Teamcity.new }
it { is_expected.to include('TeamCity') }
end
end
describe '#integration_form_data' do describe '#integration_form_data' do
let(:fields) do let(:fields) do
[ [
......
...@@ -124,4 +124,73 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedJob, type: :model d ...@@ -124,4 +124,73 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedJob, type: :model d
end end
end end
end end
# Covers BatchedJob#split_and_retry!: the happy-path split, the guard
# clauses, and the midpoint-overflow fallback.
describe '#split_and_retry!' do
  let!(:job) { create(:batched_background_migration_job, batch_size: 10, min_value: 6, max_value: 15, status: :failed) }

  it 'splits the job into two and marks them as pending' do
    allow_next_instance_of(Gitlab::BackgroundMigration::BatchingStrategies::PrimaryKeyBatchingStrategy) do |batch_class|
      allow(batch_class).to receive(:next_batch).with(anything, anything, batch_min_value: 6, batch_size: 5).and_return([6, 10])
    end

    expect { job.split_and_retry! }.to change { described_class.count }.by(1)

    # The original record keeps the lower half of the id range.
    expect(job).to have_attributes(
      min_value: 6,
      max_value: 10,
      batch_size: 5,
      status: 'pending',
      attempts: 0,
      started_at: nil,
      finished_at: nil,
      metrics: {}
    )

    # The duplicated record covers the upper half of the id range.
    new_job = described_class.last
    expect(new_job).to have_attributes(
      batched_background_migration_id: job.batched_background_migration_id,
      min_value: 11,
      max_value: 15,
      batch_size: 5,
      status: 'pending',
      attempts: 0,
      started_at: nil,
      finished_at: nil,
      metrics: {}
    )
    expect(new_job.created_at).not_to eq(job.created_at)
  end

  context 'when job is not failed' do
    let!(:job) { create(:batched_background_migration_job, status: :succeeded) }

    it 'raises an exception' do
      expect { job.split_and_retry! }.to raise_error 'Only failed jobs can be split'
    end
  end

  context 'when batch size is already 1' do
    let!(:job) { create(:batched_background_migration_job, batch_size: 1, status: :failed) }

    it 'raises an exception' do
      expect { job.split_and_retry! }.to raise_error 'Job cannot be split further'
    end
  end

  context 'when computed midpoint is larger than the max value of the batch' do
    before do
      allow_next_instance_of(Gitlab::BackgroundMigration::BatchingStrategies::PrimaryKeyBatchingStrategy) do |batch_class|
        allow(batch_class).to receive(:next_batch).with(anything, anything, batch_min_value: 6, batch_size: 5).and_return([6, 16])
      end
    end

    # No new record: the job only shrinks its batch size and is requeued.
    it 'lowers the batch size and marks the job as pending' do
      expect { job.split_and_retry! }.not_to change { described_class.count }

      expect(job.batch_size).to eq(5)
      expect(job.status).to eq('pending')
    end
  end
end
end end
...@@ -6,7 +6,7 @@ RSpec.describe Gitlab::Kroki do ...@@ -6,7 +6,7 @@ RSpec.describe Gitlab::Kroki do
describe '.formats' do describe '.formats' do
def default_formats def default_formats
%w[bytefield c4plantuml ditaa erd graphviz nomnoml plantuml svgbob umlet vega vegalite wavedrom].freeze %w[bytefield c4plantuml ditaa erd graphviz nomnoml pikchr plantuml svgbob umlet vega vegalite wavedrom].freeze
end end
subject { described_class.formats(Gitlab::CurrentSettings) } subject { described_class.formats(Gitlab::CurrentSettings) }
......
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Usage::Metrics::Instrumentations::DatabaseMetric do
  # A metric class counting all issues, batched between the min and max
  # issue ids supplied by the start/finish blocks.
  subject do
    described_class.tap do |m|
      m.relation { Issue }
      m.operation :count
      m.start { m.relation.minimum(:id) }
      m.finish { m.relation.maximum(:id) }
    end.new(time_frame: 'all')
  end

  describe '#value' do
    let_it_be(:issue_1) { create(:issue) }
    let_it_be(:issue_2) { create(:issue) }
    let_it_be(:issue_3) { create(:issue) }
    let_it_be(:issues) { Issue.all }

    before do
      allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(false)
    end

    it 'calculates a correct result' do
      expect(subject.value).to eq(3)
    end

    # Without cache_start_and_finish_as, bounds must not touch the cache.
    it 'does not cache the result of start and finish', :request_store, :use_clean_rails_redis_caching do
      expect(Gitlab::Cache).not_to receive(:fetch_once)
      expect(subject).to receive(:count).with(any_args, hash_including(start: issues.min_by(&:id).id, finish: issues.max_by(&:id).id)).and_call_original

      subject.value

      expect(Rails.cache.read('metric_instrumentation/special_issue_count_minimum_id')).to eq(nil)
      expect(Rails.cache.read('metric_instrumentation/special_issue_count_maximum_id')).to eq(nil)
    end

    context 'with start and finish not called' do
      subject do
        described_class.tap do |m|
          m.relation { Issue }
          m.operation :count
        end.new(time_frame: 'all')
      end

      it 'calculates a correct result' do
        expect(subject.value).to eq(3)
      end
    end

    context 'with cache_start_and_finish_as called' do
      subject do
        described_class.tap do |m|
          m.relation { Issue }
          m.operation :count
          m.start { m.relation.minimum(:id) }
          m.finish { m.relation.maximum(:id) }
          m.cache_start_and_finish_as :special_issue_count
        end.new(time_frame: 'all')
      end

      # Bounds are fetched through Gitlab::Cache and persisted under the
      # "metric_instrumentation/<cache_key>_{minimum,maximum}_id" keys.
      it 'caches using the key name passed', :request_store, :use_clean_rails_redis_caching do
        expect(Gitlab::Cache).to receive(:fetch_once).with('metric_instrumentation/special_issue_count_minimum_id', any_args).and_call_original
        expect(Gitlab::Cache).to receive(:fetch_once).with('metric_instrumentation/special_issue_count_maximum_id', any_args).and_call_original
        expect(subject).to receive(:count).with(any_args, hash_including(start: issues.min_by(&:id).id, finish: issues.max_by(&:id).id)).and_call_original

        subject.value

        expect(Rails.cache.read('metric_instrumentation/special_issue_count_minimum_id')).to eq(issues.min_by(&:id).id)
        expect(Rails.cache.read('metric_instrumentation/special_issue_count_maximum_id')).to eq(issues.max_by(&:id).id)
      end
    end
  end
end
# frozen_string_literal: true

require 'spec_helper'

# FOSS behavior of the service ping payload builder (run :without_license):
# the payload is all-or-nothing based on the usage ping setting and consent.
RSpec.describe ServicePing::BuildPayloadService do
  describe '#execute', :without_license do
    subject(:service_ping_payload) { described_class.new.execute }

    include_context 'stubbed service ping metrics definitions' do
      let(:subscription_metrics) do
        [
          metric_attributes('active_user_count', "Subscription")
        ]
      end
    end

    context 'when usage_ping_enabled setting is false' do
      before do
        # Gitlab::CurrentSettings.usage_ping_enabled? == false
        stub_config_setting(usage_ping_enabled: false)
      end

      it 'returns empty service ping payload' do
        expect(service_ping_payload).to eq({})
      end
    end

    context 'when usage_ping_enabled setting is true' do
      before do
        # Gitlab::CurrentSettings.usage_ping_enabled? == true
        stub_config_setting(usage_ping_enabled: true)
      end

      it_behaves_like 'complete service ping payload'

      # A single user who has not given usage-stats consent blocks reporting.
      context 'with require stats consent enabled' do
        before do
          allow(User).to receive(:single_user).and_return(double(:user, requires_usage_stats_consent?: true))
        end

        it 'returns empty service ping payload' do
          expect(service_ping_payload).to eq({})
        end
      end
    end
  end
end
# frozen_string_literal: true

# Matches when a (nested hash) service ping payload contains a metric at
# the given dotted key path, e.g. `have_usage_metric('counts.issues')`.
# Keys are stringified first, so symbol- and string-keyed payloads both match.
RSpec::Matchers.define :have_usage_metric do |key_path|
  match do |payload|
    payload = payload.deep_stringify_keys
    # Walk the payload one path segment at a time; `break false`
    # short-circuits as soon as a segment is missing.
    key_path.split('.').each do |part|
      break false unless payload&.key?(part)

      payload = payload[part]
    end
  end

  failure_message do
    "Payload does not contain metric with key path: '#{key_path}'"
  end

  failure_message_when_negated do
    "Payload contains restricted metric with key path: '#{key_path}'"
  end
end
# frozen_string_literal: true

# Stubs Gitlab::Usage::MetricDefinition.definitions with a small fixed set
# of metrics spanning the data categories, so service ping specs can assert
# category-based filtering without the real metric definition catalog.
# Host specs must supply `subscription_metrics` via include_context's block.
RSpec.shared_context 'stubbed service ping metrics definitions' do
  include UsageDataHelpers

  let(:metrics_definitions) { standard_metrics + subscription_metrics + operational_metrics + optional_metrics }

  let(:standard_metrics) do
    [
      metric_attributes('uuid', "Standard")
    ]
  end

  let(:operational_metrics) do
    [
      metric_attributes('counts.merge_requests', "Operational"),
      metric_attributes('counts.todos', "Operational")
    ]
  end

  let(:optional_metrics) do
    [
      metric_attributes('counts.boards', "Optional"),
      # A metric with no data_category exercises the Optional default.
      metric_attributes('gitaly.filesystems', '').except('data_category')
    ]
  end

  before do
    stub_usage_data_connections
    stub_object_store_settings

    allow(Gitlab::Usage::MetricDefinition).to(
      receive(:definitions)
        .and_return(metrics_definitions.to_h { |definition| [definition['key_path'], Gitlab::Usage::MetricDefinition.new('', definition.symbolize_keys)] })
    )
  end

  # Builds a minimal metric definition hash for the given key path/category.
  def metric_attributes(key_path, category)
    {
      'key_path' => key_path,
      'data_category' => category
    }
  end
end
# frozen_string_literal: true

# Asserts the payload contains every stubbed metric from all four data
# categories (standard, subscription, operational, optional).
RSpec.shared_examples 'complete service ping payload' do
  it_behaves_like 'service ping payload with all expected metrics' do
    let(:expected_metrics) do
      standard_metrics + subscription_metrics + operational_metrics + optional_metrics
    end
  end
end
# frozen_string_literal: true

# Asserts that every metric in `expected_metrics` (provided by the host
# spec) is present in the payload under its key path.
RSpec.shared_examples 'service ping payload with all expected metrics' do
  specify do
    aggregate_failures do
      expected_metrics.each do |metric|
        is_expected.to have_usage_metric metric['key_path']
      end
    end
  end
end
# frozen_string_literal: true

# Asserts that no metric in `restricted_metrics` (provided by the host
# spec) appears in the payload.
RSpec.shared_examples 'service ping payload without restricted metrics' do
  specify do
    aggregate_failures do
      restricted_metrics.each do |metric|
        is_expected.not_to have_usage_metric metric['key_path']
      end
    end
  end
end
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment