Commit c9687bdf authored by GitLab Bot's avatar GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 3f3e4bcc
...@@ -122,9 +122,14 @@ export default { ...@@ -122,9 +122,14 @@ export default {
this.$store.subscribeAction({ this.$store.subscribeAction({
after: this.handleVuexActionDispatch, after: this.handleVuexActionDispatch,
}); });
document.addEventListener('click', this.handleDocumentClick);
},
beforeDestroy() {
document.removeEventListener('click', this.handleDocumentClick);
}, },
methods: { methods: {
...mapActions(['setInitialState']), ...mapActions(['setInitialState', 'toggleDropdownContents']),
/** /**
* This method differentiates between * This method differentiates between
* dispatched actions and calls necessary method. * dispatched actions and calls necessary method.
...@@ -138,6 +143,22 @@ export default { ...@@ -138,6 +143,22 @@ export default {
this.handleDropdownClose(state.labels.filter(label => label.touched)); this.handleDropdownClose(state.labels.filter(label => label.touched));
} }
}, },
/**
 * Document-wide click handler (registered in `mounted`, removed in
 * `beforeDestroy`): closes the dropdown when the user clicks anywhere
 * outside of it while the dropdown is visible.
 */
handleDocumentClick({ target }) {
if (
this.showDropdownButton &&
this.showDropdownContents &&
// Ignore clicks on the sidebar's own toggle button, and clicks that
// land inside the collapsed button or the dropdown contents.
!target?.classList.contains('js-sidebar-dropdown-toggle') &&
!this.$refs.dropdownButtonCollapsed?.$el.contains(target) &&
!this.$refs.dropdownContents?.$el.contains(target)
) {
this.toggleDropdownContents();
}
},
handleDropdownClose(labels) { handleDropdownClose(labels) {
// Only emit label updates if there are any labels to update // Only emit label updates if there are any labels to update
// on UI. // on UI.
...@@ -156,6 +177,7 @@ export default { ...@@ -156,6 +177,7 @@ export default {
<div v-if="!dropdownOnly"> <div v-if="!dropdownOnly">
<dropdown-value-collapsed <dropdown-value-collapsed
v-if="allowLabelCreate" v-if="allowLabelCreate"
ref="dropdownButtonCollapsed"
:labels="selectedLabels" :labels="selectedLabels"
@onValueClick="handleCollapsedValueClick" @onValueClick="handleCollapsedValueClick"
/> />
...@@ -167,7 +189,7 @@ export default { ...@@ -167,7 +189,7 @@ export default {
<slot></slot> <slot></slot>
</dropdown-value> </dropdown-value>
<dropdown-button v-show="showDropdownButton" /> <dropdown-button v-show="showDropdownButton" />
<dropdown-contents v-if="showDropdownButton && showDropdownContents" /> <dropdown-contents v-if="showDropdownButton && showDropdownContents" ref="dropdownContents" />
</div> </div>
</div> </div>
</template> </template>
...@@ -413,6 +413,7 @@ img.emoji { ...@@ -413,6 +413,7 @@ img.emoji {
.prepend-left-20 { margin-left: 20px; } .prepend-left-20 { margin-left: 20px; }
.prepend-left-32 { margin-left: 32px; } .prepend-left-32 { margin-left: 32px; }
.prepend-left-64 { margin-left: 64px; } .prepend-left-64 { margin-left: 64px; }
.append-right-2 { margin-right: 2px; }
.append-right-4 { margin-right: 4px; } .append-right-4 { margin-right: 4px; }
.append-right-5 { margin-right: 5px; } .append-right-5 { margin-right: 5px; }
.append-right-8 { margin-right: 8px; } .append-right-8 { margin-right: 8px; }
...@@ -424,6 +425,7 @@ img.emoji { ...@@ -424,6 +425,7 @@ img.emoji {
.append-right-48 { margin-right: 48px; } .append-right-48 { margin-right: 48px; }
.prepend-right-32 { margin-right: 32px; } .prepend-right-32 { margin-right: 32px; }
.append-bottom-0 { margin-bottom: 0; } .append-bottom-0 { margin-bottom: 0; }
.append-bottom-2 { margin-bottom: 2px; }
.append-bottom-4 { margin-bottom: $gl-padding-4; } .append-bottom-4 { margin-bottom: $gl-padding-4; }
.append-bottom-5 { margin-bottom: 5px; } .append-bottom-5 { margin-bottom: 5px; }
.append-bottom-8 { margin-bottom: $grid-size; } .append-bottom-8 { margin-bottom: $grid-size; }
......
...@@ -33,7 +33,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController ...@@ -33,7 +33,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController
# rubocop: disable CodeReuse/ActiveRecord # rubocop: disable CodeReuse/ActiveRecord
def starred def starred
@projects = load_projects(params.merge(starred: true)) @projects = load_projects(params.merge(starred: true))
.includes(:forked_from_project, :tags).page(params[:page]) .includes(:forked_from_project, :tags)
@groups = [] @groups = []
...@@ -51,7 +51,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController ...@@ -51,7 +51,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController
private private
def projects def projects
@projects ||= load_projects(params.merge(non_public: true)).page(params[:page]) @projects ||= load_projects(params.merge(non_public: true))
end end
def render_projects def render_projects
...@@ -73,6 +73,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController ...@@ -73,6 +73,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController
.execute .execute
.includes(:route, :creator, :group, namespace: [:route, :owner]) .includes(:route, :creator, :group, namespace: [:route, :owner])
.preload(:project_feature) .preload(:project_feature)
.page(finder_params[:page])
prepare_projects_for_rendering(projects) prepare_projects_for_rendering(projects)
end end
......
...@@ -67,6 +67,7 @@ class Issue < ApplicationRecord ...@@ -67,6 +67,7 @@ class Issue < ApplicationRecord
scope :order_due_date_desc, -> { reorder(::Gitlab::Database.nulls_last_order('due_date', 'DESC')) } scope :order_due_date_desc, -> { reorder(::Gitlab::Database.nulls_last_order('due_date', 'DESC')) }
scope :order_closest_future_date, -> { reorder(Arel.sql('CASE WHEN issues.due_date >= CURRENT_DATE THEN 0 ELSE 1 END ASC, ABS(CURRENT_DATE - issues.due_date) ASC')) } scope :order_closest_future_date, -> { reorder(Arel.sql('CASE WHEN issues.due_date >= CURRENT_DATE THEN 0 ELSE 1 END ASC, ABS(CURRENT_DATE - issues.due_date) ASC')) }
scope :order_relative_position_asc, -> { reorder(::Gitlab::Database.nulls_last_order('relative_position', 'ASC')) } scope :order_relative_position_asc, -> { reorder(::Gitlab::Database.nulls_last_order('relative_position', 'ASC')) }
scope :order_closed_date_desc, -> { reorder(closed_at: :desc) }
scope :preload_associated_models, -> { preload(:labels, project: :namespace) } scope :preload_associated_models, -> { preload(:labels, project: :namespace) }
scope :with_api_entity_associations, -> { preload(:timelogs, :assignees, :author, :notes, :labels, project: [:route, { namespace: :route }] ) } scope :with_api_entity_associations, -> { preload(:timelogs, :assignees, :author, :notes, :labels, project: [:route, { namespace: :route }] ) }
......
...@@ -32,9 +32,12 @@ class Service < ApplicationRecord ...@@ -32,9 +32,12 @@ class Service < ApplicationRecord
belongs_to :project, inverse_of: :services belongs_to :project, inverse_of: :services
has_one :service_hook has_one :service_hook
validates :project_id, presence: true, unless: -> { template? } validates :project_id, presence: true, unless: -> { template? || instance? }
validates :project_id, absence: true, if: -> { instance? }
validates :type, presence: true validates :type, presence: true
validates :template, uniqueness: { scope: :type }, if: -> { template? } validates :template, uniqueness: { scope: :type }, if: -> { template? }
validates :instance, uniqueness: { scope: :type }, if: -> { instance? }
validate :validate_is_instance_or_template
scope :visible, -> { where.not(type: 'GitlabIssueTrackerService') } scope :visible, -> { where.not(type: 'GitlabIssueTrackerService') }
scope :issue_trackers, -> { where(category: 'issue_tracker') } scope :issue_trackers, -> { where(category: 'issue_tracker') }
...@@ -326,6 +329,10 @@ class Service < ApplicationRecord ...@@ -326,6 +329,10 @@ class Service < ApplicationRecord
private private
# Custom validation: a service record may be a service template or an
# instance-level integration, but never both at once.
def validate_is_instance_or_template
  return unless template? && instance?

  errors.add(:template, 'The service should be a service template or instance-level integration')
end
def cache_project_has_external_issue_tracker def cache_project_has_external_issue_tracker
if project && !project.destroyed? if project && !project.destroyed?
project.cache_has_external_issue_tracker project.cache_has_external_issue_tracker
......
...@@ -10,6 +10,8 @@ module Boards ...@@ -10,6 +10,8 @@ module Boards
end end
def execute def execute
return fetch_issues.order_closed_date_desc if list&.closed?
fetch_issues.order_by_position_and_priority(with_cte: can_attempt_search_optimization?) fetch_issues.order_by_position_and_priority(with_cte: can_attempt_search_optimization?)
end end
......
...@@ -138,7 +138,9 @@ module Metrics ...@@ -138,7 +138,9 @@ module Metrics
end end
# Identifies the name of the datasource for a dashboard # Identifies the name of the datasource for a dashboard
# based on the panelId query parameter found in the url # based on the panelId query parameter found in the url.
#
# If no panel is specified, defaults to the first valid panel.
class DatasourceNameParser class DatasourceNameParser
def initialize(grafana_url, grafana_dashboard) def initialize(grafana_url, grafana_dashboard)
@grafana_url, @grafana_dashboard = grafana_url, grafana_dashboard @grafana_url, @grafana_dashboard = grafana_url, grafana_dashboard
...@@ -146,15 +148,29 @@ module Metrics ...@@ -146,15 +148,29 @@ module Metrics
def parse def parse
@grafana_dashboard[:dashboard][:panels] @grafana_dashboard[:dashboard][:panels]
.find { |panel| panel[:id].to_s == query_params[:panelId] } .find { |panel| panel_id ? matching_panel?(panel) : valid_panel?(panel) }
.try(:[], :datasource) .try(:[], :datasource)
end end
private private
# The `panelId` query parameter from the Grafana url.
# Returns nil when the url did not specify a panel, in which case the
# caller falls back to the first valid panel.
def panel_id
query_params[:panelId]
end
def query_params def query_params
Gitlab::Metrics::Dashboard::Url.parse_query(@grafana_url) Gitlab::Metrics::Dashboard::Url.parse_query(@grafana_url)
end end
# True when the given dashboard panel is the one referenced by the
# url's `panelId` query parameter.
def matching_panel?(panel)
  panel_id == panel[:id].to_s
end
# Whether the given panel can be embedded at all — delegates to
# ::Grafana::Validator to check the dashboard/panel/query-params
# combination. Used as the fallback when no `panelId` is specified.
def valid_panel?(panel)
::Grafana::Validator
.new(@grafana_dashboard, nil, panel, query_params)
.valid?
end
end end
end end
end end
---
title: Sort closed issues on issue boards using time of closing
merge_request: 23442
author: briankabiro
type: changed
---
title: Add instance column to services table
merge_request: 25714
author:
type: other
---
title: Put System Metrics chart group first in default dashboard
merge_request: 26355
author:
type: other
---
title: Optimize Project counters with repository enabled counter
merge_request: 26698
author:
type: performance
---
title: Fix missing RSS feed events
merge_request: 19524
author:
type: fixed
---
title: Default to first valid panel in unspecified Grafana embeds
merge_request: 21932
author:
type: changed
---
title: Update cluster-applications image to v0.11 with a runner bugfix, updated cert-manager,
and vault as a new app
merge_request: 26842
author:
type: changed
dashboard: 'Environment metrics' dashboard: 'Environment metrics'
priority: 1 priority: 1
panel_groups: panel_groups:
- group: System metrics (Kubernetes)
priority: 15
panels:
- title: "Memory Usage (Total)"
type: "area-chart"
y_label: "Total Memory Used (GB)"
weight: 4
metrics:
- id: system_metrics_kubernetes_container_memory_total
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) /1024/1024/1024'
label: Total (GB)
unit: GB
- title: "Core Usage (Total)"
type: "area-chart"
y_label: "Total Cores"
weight: 3
metrics:
- id: system_metrics_kubernetes_container_cores_total
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job)'
label: Total (cores)
unit: "cores"
- title: "Memory Usage (Pod average)"
type: "line-chart"
y_label: "Memory Used per Pod (MB)"
weight: 2
metrics:
- id: system_metrics_kubernetes_container_memory_average
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average (MB)
unit: MB
- title: "Canary: Memory Usage (Pod Average)"
type: "line-chart"
y_label: "Memory Used per Pod (MB)"
weight: 2
metrics:
- id: system_metrics_kubernetes_container_memory_average_canary
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average (MB)
unit: MB
track: canary
- title: "Core Usage (Pod Average)"
type: "line-chart"
y_label: "Cores per Pod"
weight: 1
metrics:
- id: system_metrics_kubernetes_container_core_usage
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average (cores)
unit: "cores"
- title: "Canary: Core Usage (Pod Average)"
type: "line-chart"
y_label: "Cores per Pod"
weight: 1
metrics:
- id: system_metrics_kubernetes_container_core_usage_canary
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average (cores)
unit: "cores"
track: canary
- title: "Knative function invocations"
type: "area-chart"
y_label: "Invocations"
weight: 1
metrics:
- id: system_metrics_knative_function_invocation_count
query_range: 'sum(ceil(rate(istio_requests_total{destination_service_namespace="%{kube_namespace}", destination_service=~"%{function_name}.*"}[1m])*60))'
label: invocations / minute
unit: requests
# NGINX Ingress metrics for pre-0.16.0 versions # NGINX Ingress metrics for pre-0.16.0 versions
- group: Response metrics (NGINX Ingress VTS) - group: Response metrics (NGINX Ingress VTS)
priority: 10 priority: 10
...@@ -150,79 +218,3 @@ panel_groups: ...@@ -150,79 +218,3 @@ panel_groups:
query_range: 'sum(rate(nginx_server_requests{code="5xx", %{environment_filter}}[2m]))' query_range: 'sum(rate(nginx_server_requests{code="5xx", %{environment_filter}}[2m]))'
label: HTTP Errors label: HTTP Errors
unit: "errors / sec" unit: "errors / sec"
- group: System metrics (Kubernetes)
priority: 5
panels:
- title: "Memory Usage (Total)"
type: "area-chart"
y_label: "Total Memory Used (GB)"
y_axis:
format: "gibibytes"
weight: 4
metrics:
- id: system_metrics_kubernetes_container_memory_total
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) /1024/1024/1024'
label: Total (GB)
unit: GB
- title: "Core Usage (Total)"
type: "area-chart"
y_label: "Total Cores"
weight: 3
metrics:
- id: system_metrics_kubernetes_container_cores_total
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job)'
label: Total (cores)
unit: "cores"
- title: "Memory Usage (Pod average)"
type: "line-chart"
y_label: "Memory Used per Pod (MB)"
y_axis:
format: "mebibytes"
weight: 2
metrics:
- id: system_metrics_kubernetes_container_memory_average
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average (MB)
unit: MB
- title: "Canary: Memory Usage (Pod Average)"
type: "line-chart"
y_label: "Memory Used per Pod (MB)"
y_axis:
format: "mebibytes"
weight: 2
metrics:
- id: system_metrics_kubernetes_container_memory_average_canary
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average (MB)
unit: MB
track: canary
- title: "Core Usage (Pod Average)"
type: "line-chart"
y_label: "Cores per Pod"
weight: 1
metrics:
- id: system_metrics_kubernetes_container_core_usage
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average (cores)
unit: "cores"
- title: "Canary: Core Usage (Pod Average)"
type: "line-chart"
y_label: "Cores per Pod"
weight: 1
metrics:
- id: system_metrics_kubernetes_container_core_usage_canary
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average (cores)
unit: "cores"
track: canary
- title: "Knative function invocations"
type: "area-chart"
y_label: "Invocations"
y_axis:
precision: 0
weight: 1
metrics:
- id: system_metrics_knative_function_invocation_count
query_range: 'sum(ceil(rate(istio_requests_total{destination_service_namespace="%{kube_namespace}", destination_service=~"%{function_name}.*"}[1m])*60))'
label: invocations / minute
unit: requests
# frozen_string_literal: true

# Adds a partial index on project_features.project_id covering only rows
# where repository_access_level = 20 (enabled), to speed up counting
# projects with the repository feature enabled.
class AddIndexOnProjectIdAndRepositoryAccessLevelToProjectFeatures < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
# Concurrent index creation requires no downtime.
DOWNTIME = false
INDEX_NAME = 'index_project_features_on_project_id_ral_20'
# add_concurrent_index must run outside of a transaction block.
disable_ddl_transaction!
def up
add_concurrent_index :project_features, :project_id, where: 'repository_access_level = 20', name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :project_features, INDEX_NAME
end
end
# frozen_string_literal: true

# Adds a NOT NULL `instance` boolean column (default: false) to the
# services table, used to mark instance-level integrations.
class AddInstanceToServices < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
# add_column_with_default backfills existing rows and must run outside
# of a transaction block.
disable_ddl_transaction!
def up
add_column_with_default(:services, :instance, :boolean, default: false)
end
def down
remove_column(:services, :instance)
end
end
# frozen_string_literal: true

# Enforces at most one instance-level integration per service type via a
# partial unique index on (type, instance) where instance IS TRUE.
class AddIndexToServiceUniqueInstancePerType < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
# add_concurrent_index must run outside of a transaction block.
disable_ddl_transaction!
def up
add_concurrent_index(:services, [:type, :instance], unique: true, where: 'instance IS TRUE')
end
def down
remove_concurrent_index(:services, [:type, :instance])
end
end
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
# #
# It's strongly recommended that you check this file into your version control system. # It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 2020_03_09_195710) do ActiveRecord::Schema.define(version: 2020_03_10_135823) do
# These are extensions that must be enabled in order to support this database # These are extensions that must be enabled in order to support this database
enable_extension "pg_trgm" enable_extension "pg_trgm"
...@@ -3265,6 +3265,7 @@ ActiveRecord::Schema.define(version: 2020_03_09_195710) do ...@@ -3265,6 +3265,7 @@ ActiveRecord::Schema.define(version: 2020_03_09_195710) do
t.integer "forking_access_level" t.integer "forking_access_level"
t.index ["project_id"], name: "index_project_features_on_project_id", unique: true t.index ["project_id"], name: "index_project_features_on_project_id", unique: true
t.index ["project_id"], name: "index_project_features_on_project_id_bal_20", where: "(builds_access_level = 20)" t.index ["project_id"], name: "index_project_features_on_project_id_bal_20", where: "(builds_access_level = 20)"
t.index ["project_id"], name: "index_project_features_on_project_id_ral_20", where: "(repository_access_level = 20)"
end end
create_table "project_group_links", id: :serial, force: :cascade do |t| create_table "project_group_links", id: :serial, force: :cascade do |t|
...@@ -3939,8 +3940,10 @@ ActiveRecord::Schema.define(version: 2020_03_09_195710) do ...@@ -3939,8 +3940,10 @@ ActiveRecord::Schema.define(version: 2020_03_09_195710) do
t.string "description", limit: 500 t.string "description", limit: 500
t.boolean "comment_on_event_enabled", default: true, null: false t.boolean "comment_on_event_enabled", default: true, null: false
t.boolean "template", default: false t.boolean "template", default: false
t.boolean "instance", default: false, null: false
t.index ["project_id"], name: "index_services_on_project_id" t.index ["project_id"], name: "index_services_on_project_id"
t.index ["template"], name: "index_services_on_template" t.index ["template"], name: "index_services_on_template"
t.index ["type", "instance"], name: "index_services_on_type_and_instance", unique: true, where: "(instance IS TRUE)"
t.index ["type", "template"], name: "index_services_on_type_and_template", unique: true, where: "(template IS TRUE)" t.index ["type", "template"], name: "index_services_on_type_and_template", unique: true, where: "(template IS TRUE)"
t.index ["type"], name: "index_services_on_type" t.index ["type"], name: "index_services_on_type"
end end
......
...@@ -2023,6 +2023,11 @@ type Epic implements Noteable { ...@@ -2023,6 +2023,11 @@ type Epic implements Noteable {
""" """
hasIssues: Boolean! hasIssues: Boolean!
"""
Current health status of the epic
"""
healthStatus: EpicHealthStatus
""" """
ID of the epic ID of the epic
""" """
...@@ -2349,6 +2354,26 @@ type EpicEdge { ...@@ -2349,6 +2354,26 @@ type EpicEdge {
node: Epic node: Epic
} }
"""
Health status of child issues
"""
type EpicHealthStatus {
"""
Number of issues at risk
"""
issuesAtRisk: Int
"""
Number of issues that need attention
"""
issuesNeedingAttention: Int
"""
Number of issues on track
"""
issuesOnTrack: Int
}
""" """
Relationship between an epic and an issue Relationship between an epic and an issue
""" """
......
...@@ -5186,6 +5186,20 @@ ...@@ -5186,6 +5186,20 @@
"isDeprecated": false, "isDeprecated": false,
"deprecationReason": null "deprecationReason": null
}, },
{
"name": "healthStatus",
"description": "Current health status of the epic",
"args": [
],
"type": {
"kind": "OBJECT",
"name": "EpicHealthStatus",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{ {
"name": "id", "name": "id",
"description": "ID of the epic", "description": "ID of the epic",
...@@ -13084,6 +13098,61 @@ ...@@ -13084,6 +13098,61 @@
"enumValues": null, "enumValues": null,
"possibleTypes": null "possibleTypes": null
}, },
{
"kind": "OBJECT",
"name": "EpicHealthStatus",
"description": "Health status of child issues",
"fields": [
{
"name": "issuesAtRisk",
"description": "Number of issues at risk",
"args": [
],
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "issuesNeedingAttention",
"description": "Number of issues that need attention",
"args": [
],
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
},
{
"name": "issuesOnTrack",
"description": "Number of issues on track",
"args": [
],
"type": {
"kind": "SCALAR",
"name": "Int",
"ofType": null
},
"isDeprecated": false,
"deprecationReason": null
}
],
"inputFields": null,
"interfaces": [
],
"enumValues": null,
"possibleTypes": null
},
{ {
"kind": "OBJECT", "kind": "OBJECT",
"name": "TimelogConnection", "name": "TimelogConnection",
......
...@@ -327,6 +327,7 @@ Represents an epic. ...@@ -327,6 +327,7 @@ Represents an epic.
| `group` | Group! | Group to which the epic belongs | | `group` | Group! | Group to which the epic belongs |
| `hasChildren` | Boolean! | Indicates if the epic has children | | `hasChildren` | Boolean! | Indicates if the epic has children |
| `hasIssues` | Boolean! | Indicates if the epic has direct issues | | `hasIssues` | Boolean! | Indicates if the epic has direct issues |
| `healthStatus` | EpicHealthStatus | Current health status of the epic |
| `id` | ID! | ID of the epic | | `id` | ID! | ID of the epic |
| `iid` | ID! | Internal ID of the epic | | `iid` | ID! | Internal ID of the epic |
| `parent` | Epic | Parent epic of the epic | | `parent` | Epic | Parent epic of the epic |
...@@ -377,6 +378,16 @@ Total weight of open and closed descendant issues ...@@ -377,6 +378,16 @@ Total weight of open and closed descendant issues
| `closedIssues` | Int | Total weight of completed (closed) issues in this epic, including epic descendants | | `closedIssues` | Int | Total weight of completed (closed) issues in this epic, including epic descendants |
| `openedIssues` | Int | Total weight of opened issues in this epic, including epic descendants | | `openedIssues` | Int | Total weight of opened issues in this epic, including epic descendants |
## EpicHealthStatus
Health status of child issues
| Name | Type | Description |
| --- | ---- | ---------- |
| `issuesAtRisk` | Int | Number of issues at risk |
| `issuesNeedingAttention` | Int | Number of issues that need attention |
| `issuesOnTrack` | Int | Number of issues on track |
## EpicIssue ## EpicIssue
Relationship between an epic and an issue Relationship between an epic and an issue
......
...@@ -261,15 +261,6 @@ Do not include the same information in multiple places. [Link to a SSOT instead. ...@@ -261,15 +261,6 @@ Do not include the same information in multiple places. [Link to a SSOT instead.
Some features are also objects. For example, "GitLab's Merge Requests support X" and Some features are also objects. For example, "GitLab's Merge Requests support X" and
"Create a new merge request for Z." "Create a new merge request for Z."
- Use common contractions when it helps create a friendly and informal tone, especially in tutorials and [UIs](https://design.gitlab.com/content/punctuation/#contractions).
- Do use contractions like: _it's_, _can't_, _wouldn't_, _you're_, _you've_, _haven't_, _don't_, _we're_, _that's_, and _won't_. Contractions in instructional documentation such as tutorials can help create a friendly and informal tone.
- Avoid less common contractions such as: _he'd_, _it'll_, _should've_, and _there'd_.
- Do not use contractions in reference documentation. Examples:
- You cannot set a limit higher than 1000.
- For `parameter1`, the default is 10.
- Do not use contractions with a proper noun and a verb, such as _GitLab's creating X_.
- Avoid using contractions when you need to emphasize a negative, such as "Do **not** install X with Y."
- Avoid use of the future tense: - Avoid use of the future tense:
- Instead of "after you execute this command, GitLab will display the result", use "after you execute this command, GitLab displays the result". - Instead of "after you execute this command, GitLab will display the result", use "after you execute this command, GitLab displays the result".
- Only use the future tense to convey when the action or result will actually occur at a future time. - Only use the future tense to convey when the action or result will actually occur at a future time.
...@@ -286,6 +277,58 @@ as even native users of English might misunderstand them. ...@@ -286,6 +277,58 @@ as even native users of English might misunderstand them.
- Instead of "e.g.", use "for example," "such as," "for instance," or "like." - Instead of "e.g.", use "for example," "such as," "for instance," or "like."
- Instead of "etc.", either use "and so on" or consider editing it out, since it can be vague. - Instead of "etc.", either use "and so on" or consider editing it out, since it can be vague.
### Contractions
- Use common contractions when it helps create a friendly and informal tone, especially in tutorials, instructional documentation, and [UIs](https://design.gitlab.com/content/punctuation/#contractions).
| Do | Don't |
|----------|-----------|
| it's | it is |
| can't | cannot |
| wouldn't | would not |
| you're | you are |
| you've | you have |
| haven't | have not |
| don't | do not |
| we're | we are |
| that's | that is |
| won't | will not |
- Avoid less common contractions:
| Do | Don't |
|--------------|-------------|
| he would | he'd |
| it will | it'll |
| should have | should've |
| there would | there'd |
- Do not use contractions with a proper noun and a verb. For example:
| Do | Don't |
|----------------------|---------------------|
| GitLab is creating X | GitLab's creating X |
- Do not use contractions when you need to emphasize a negative. For example:
| Do | Don't |
|-----------------------------|----------------------------|
| Do **not** install X with Y | **Don't** install X with Y |
- Do not use contractions in reference documentation. For example:
| Do | Don't |
|------------------------------------------|----------------------------|
| Do **not** set a limit greater than 1000 | **Don't** set a limit greater than 1000 |
| For `parameter1`, the default is 10 | For `parameter1`, the default's 10 |
- Avoid contractions in error messages. For example:
| Do | Don't |
|------------------------------------------|----------------------------|
| Requests to localhost are not allowed | Requests to localhost aren't allowed |
| Specified URL cannot be used | Specified URL can't be used |
## Text ## Text
- [Write in Markdown](#markdown). - [Write in Markdown](#markdown).
......
...@@ -182,7 +182,7 @@ If your epic contains one or more [child epics](#multi-level-child-epics-ultimat ...@@ -182,7 +182,7 @@ If your epic contains one or more [child epics](#multi-level-child-epics-ultimat
have a [start or due date](#start-date-and-due-date), a have a [start or due date](#start-date-and-due-date), a
[roadmap](../roadmap/index.md) view of the child epics is listed under the parent epic. [roadmap](../roadmap/index.md) view of the child epics is listed under the parent epic.
![Child epics roadmap](img/epic_view_roadmap_v12.3.png) ![Child epics roadmap](img/epic_view_roadmap_v12_9.png)
## Reordering issues and child epics ## Reordering issues and child epics
......
...@@ -10,7 +10,12 @@ An Epic within a group containing **Start date** and/or **Due date** ...@@ -10,7 +10,12 @@ An Epic within a group containing **Start date** and/or **Due date**
can be visualized in a form of a timeline (e.g. a Gantt chart). The Epics Roadmap page can be visualized in a form of a timeline (e.g. a Gantt chart). The Epics Roadmap page
shows such a visualization for all the epics which are under a group and/or its subgroups. shows such a visualization for all the epics which are under a group and/or its subgroups.
![roadmap view](img/roadmap_view.png) > [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/5164) in GitLab 12.9.
On the epic bars, you can see their title, progress, and completed weight percentage.
When you hover over an epic bar, a popover appears with its description, start and due dates, and weight completed.
![roadmap view](img/roadmap_view_v12_9.png)
A dropdown allows you to show only open or closed epics. By default, all epics are shown. A dropdown allows you to show only open or closed epics. By default, all epics are shown.
...@@ -68,11 +73,7 @@ the timeline header represent the days of the week. ...@@ -68,11 +73,7 @@ the timeline header represent the days of the week.
## Timeline bar for an epic ## Timeline bar for an epic
The timeline bar indicates the approximate position of an epic based on its start The timeline bar indicates the approximate position of an epic based on its start and due date.
and due date. If an epic doesn't have a due date, the timeline bar fades
away towards the future. Similarly, if an epic doesn't have a start date, the
timeline bar becomes more visible as it approaches the epic's due date on the
timeline.
<!-- ## Troubleshooting <!-- ## Troubleshooting
......
...@@ -820,7 +820,7 @@ Prerequisites for embedding from a Grafana instance: ...@@ -820,7 +820,7 @@ Prerequisites for embedding from a Grafana instance:
![Grafana Metric Panel](img/grafana_panel_v12_5.png) ![Grafana Metric Panel](img/grafana_panel_v12_5.png)
1. In the upper-left corner of the page, select a specific value for each variable required for the queries in the chart. 1. In the upper-left corner of the page, select a specific value for each variable required for the queries in the chart.
![Select Query Variables](img/select_query_variables_v12_5.png) ![Select Query Variables](img/select_query_variables_v12_5.png)
1. In Grafana, click on a panel's title, then click **Share** to open the panel's sharing dialog to the **Link** tab. 1. In Grafana, click on a panel's title, then click **Share** to open the panel's sharing dialog to the **Link** tab. If you click the _dashboard's_ share panel instead, GitLab will attempt to embed the first supported panel on the dashboard (if available).
1. If your Prometheus queries use Grafana's custom template variables, ensure that "Template variables" option is toggled to **On**. Of Grafana global template variables, only `$__interval`, `$__from`, and `$__to` are currently supported. Toggle **On** the "Current time range" option to specify the time range of the chart. Otherwise, the default range will be the last 8 hours. 1. If your Prometheus queries use Grafana's custom template variables, ensure that "Template variables" option is toggled to **On**. Of Grafana global template variables, only `$__interval`, `$__from`, and `$__to` are currently supported. Toggle **On** the "Current time range" option to specify the time range of the chart. Otherwise, the default range will be the last 8 hours.
![Grafana Sharing Dialog](img/grafana_sharing_dialog_v12_5.png) ![Grafana Sharing Dialog](img/grafana_sharing_dialog_v12_5.png)
1. Click **Copy** to copy the URL to the clipboard. 1. Click **Copy** to copy the URL to the clipboard.
......
...@@ -17,8 +17,6 @@ module Banzai ...@@ -17,8 +17,6 @@ module Banzai
def embed_params(node) def embed_params(node)
query_params = Gitlab::Metrics::Dashboard::Url.parse_query(node['href']) query_params = Gitlab::Metrics::Dashboard::Url.parse_query(node['href'])
return unless query_params.include?(:panelId)
time_window = Grafana::TimeWindow.new(query_params[:from], query_params[:to]) time_window = Grafana::TimeWindow.new(query_params[:from], query_params[:to])
url = url_with_window(node['href'], query_params, time_window.in_milliseconds) url = url_with_window(node['href'], query_params, time_window.in_milliseconds)
......
apply: apply:
stage: deploy stage: deploy
image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.9.0" image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.11.0"
environment: environment:
name: production name: production
variables: variables:
...@@ -15,6 +15,7 @@ apply: ...@@ -15,6 +15,7 @@ apply:
JUPYTERHUB_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/jupyterhub/values.yaml JUPYTERHUB_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/jupyterhub/values.yaml
PROMETHEUS_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/prometheus/values.yaml PROMETHEUS_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/prometheus/values.yaml
ELASTIC_STACK_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/elastic-stack/values.yaml ELASTIC_STACK_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/elastic-stack/values.yaml
VAULT_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/vault/values.yaml
script: script:
- gitlab-managed-apps /usr/local/share/gitlab-managed-apps/helmfile.yaml - gitlab-managed-apps /usr/local/share/gitlab-managed-apps/helmfile.yaml
only: only:
......
...@@ -248,6 +248,7 @@ excluded_attributes: ...@@ -248,6 +248,7 @@ excluded_attributes:
- :token_encrypted - :token_encrypted
services: services:
- :template - :template
- :instance
error_tracking_setting: error_tracking_setting:
- :encrypted_token - :encrypted_token
- :encrypted_token_iv - :encrypted_token_iv
......
...@@ -13,12 +13,7 @@ module Gitlab ...@@ -13,12 +13,7 @@ module Gitlab
# Reformats the specified panel in the Gitlab # Reformats the specified panel in the Gitlab
# dashboard-yml format # dashboard-yml format
def transform! def transform!
InputFormatValidator.new( validate_input!
grafana_dashboard,
datasource,
panel,
query_params
).validate!
new_dashboard = formatted_dashboard new_dashboard = formatted_dashboard
...@@ -28,6 +23,17 @@ module Gitlab ...@@ -28,6 +23,17 @@ module Gitlab
private private
def validate_input!
::Grafana::Validator.new(
grafana_dashboard,
datasource,
panel,
query_params
).validate!
rescue ::Grafana::Validator::Error => e
raise ::Gitlab::Metrics::Dashboard::Errors::DashboardProcessingError, e.message
end
def formatted_dashboard def formatted_dashboard
{ panel_groups: [{ panels: [formatted_panel] }] } { panel_groups: [{ panels: [formatted_panel] }] }
end end
...@@ -56,11 +62,25 @@ module Gitlab ...@@ -56,11 +62,25 @@ module Gitlab
def panel def panel
strong_memoize(:panel) do strong_memoize(:panel) do
grafana_dashboard[:dashboard][:panels].find do |panel| grafana_dashboard[:dashboard][:panels].find do |panel|
panel[:id].to_s == query_params[:panelId] query_params[:panelId] ? matching_panel?(panel) : valid_panel?(panel)
end end
end end
end end
# Determines whether a given panel is the one
# specified by the linked grafana url
def matching_panel?(panel)
panel[:id].to_s == query_params[:panelId]
end
# Determines whether any given panel has the potential
# to return valid results from grafana/prometheus
def valid_panel?(panel)
::Grafana::Validator
.new(grafana_dashboard, datasource, panel, query_params)
.valid?
end
# Grafana url query parameters. Includes information # Grafana url query parameters. Includes information
# on which panel to select and time range. # on which panel to select and time range.
def query_params def query_params
...@@ -141,83 +161,6 @@ module Gitlab ...@@ -141,83 +161,6 @@ module Gitlab
params[:grafana_url] params[:grafana_url]
end end
end end
# Validates the inputs used when converting a Grafana panel into the
# GitLab dashboard format. Each failed check raises a
# DashboardProcessingError via #raise_error.
class InputFormatValidator
# Provides the DashboardProcessingError class used by #raise_error.
include ::Gitlab::Metrics::Dashboard::Errors
attr_reader :grafana_dashboard, :datasource, :panel, :query_params
# Grafana global template variables that cannot be expanded into
# valid Prometheus queries.
UNSUPPORTED_GRAFANA_GLOBAL_VARS = %w(
$__interval_ms
$__timeFilter
$__name
$timeFilter
$interval
).freeze
# @param grafana_dashboard [Hash] dashboard definition from the Grafana API
# @param datasource [Hash] datasource definition from the Grafana API
# @param panel [Hash] the panel selected for embedding
# @param query_params [Hash] query parameters parsed from the Grafana url
def initialize(grafana_dashboard, datasource, panel, query_params)
@grafana_dashboard = grafana_dashboard
@datasource = datasource
@panel = panel
@query_params = query_params
end
# Runs every check; raises DashboardProcessingError on the first
# failing validation, in the order listed below.
def validate!
validate_query_params!
validate_datasource!
validate_panel_type!
validate_variable_definitions!
validate_global_variables!
end
private
# Only Prometheus datasources proxied through Grafana are supported.
def validate_datasource!
return if datasource[:access] == 'proxy' && datasource[:type] == 'prometheus'
raise_error 'Only Prometheus datasources with proxy access in Grafana are supported.'
end
# panelId selects which panel to embed; from/to define the time window.
def validate_query_params!
return if [:panelId, :from, :to].all? { |param| query_params.include?(param) }
raise_error 'Grafana query parameters must include panelId, from, and to.'
end
# Only line graphs can be embedded.
def validate_panel_type!
return if panel[:type] == 'graph' && panel[:lines]
raise_error 'Panel type must be a line graph.'
end
# Every templated variable must be supplied via the query parameters
# (as "var-<name>") or the Prometheus query cannot be built.
def validate_variable_definitions!
return unless grafana_dashboard[:dashboard][:templating]
return if grafana_dashboard[:dashboard][:templating][:list].all? do |variable|
query_params[:"var-#{variable[:name]}"].present?
end
raise_error 'All Grafana variables must be defined in the query parameters.'
end
# NOTE(review): this error message appears truncated — it does not say
# which variables are unsupported (UNSUPPORTED_GRAFANA_GLOBAL_VARS is
# not interpolated into the string).
def validate_global_variables!
return unless panel_contains_unsupported_vars?
raise_error 'Prometheus must not include'
end
# True when any target expression references an unsupported
# Grafana global variable.
def panel_contains_unsupported_vars?
panel[:targets].any? do |target|
UNSUPPORTED_GRAFANA_GLOBAL_VARS.any? do |variable|
target[:expr].include?(variable)
end
end
end
def raise_error(message)
raise DashboardProcessingError.new(message)
end
end
end end
end end
end end
......
# frozen_string_literal: true

# Checks whether resources fetched from Grafana are in a shape
# GitLab can process. Some of the restrictions below are technical
# requirements, others are deliberate simplifications of what we
# choose to support.
module Grafana
  class Validator
    Error = Class.new(StandardError)

    # Grafana global template variables we cannot translate into
    # valid Prometheus queries.
    UNSUPPORTED_GRAFANA_GLOBAL_VARS = %w[
      $__interval_ms
      $__timeFilter
      $__name
      $timeFilter
      $interval
    ].freeze

    attr_reader :grafana_dashboard, :datasource, :panel, :query_params

    # @param grafana_dashboard [Hash] dashboard definition from the Grafana API
    # @param datasource [Hash, nil] datasource definition; datasource checks
    #   are skipped when nil, which avoids a secondary call to the Grafana API
    # @param panel [Hash, nil] the panel selected for embedding
    # @param query_params [Hash] query parameters from the Grafana url
    def initialize(grafana_dashboard, datasource, panel, query_params)
      @grafana_dashboard = grafana_dashboard
      @datasource = datasource
      @panel = panel
      @query_params = query_params
    end

    # Runs all checks in order, raising Validator::Error on the
    # first failure.
    def validate!
      validate_query_params!
      validate_panel_type!
      validate_variable_definitions!
      validate_global_variables!
      validate_datasource! if datasource
    end

    # Boolean form of #validate!; never raises.
    def valid?
      validate!
      true
    rescue ::Grafana::Validator::Error
      false
    end

    private

    # Time-window defaults for missing params are applied upstream;
    # see Banzai::Filter::InlineGrafanaMetricsFilter.
    def validate_query_params!
      window_present = %i[from to].all? { |key| query_params.include?(key) }

      fail_validation! 'Grafana query parameters must include from and to.' unless window_present
    end

    # We may choose to support other panel types in future.
    def validate_panel_type!
      line_graph = panel && panel[:type] == 'graph' && panel[:lines]

      fail_validation! 'Panel type must be a line graph.' unless line_graph
    end

    # Every templated variable must be supplied via the query
    # parameters (as "var-<name>") to build a valid Prometheus query.
    def validate_variable_definitions!
      templating = grafana_dashboard[:dashboard][:templating]
      return unless templating

      all_defined = templating[:list].all? do |variable|
        query_params[:"var-#{variable[:name]}"].present?
      end

      fail_validation! 'All Grafana variables must be defined in the query parameters.' unless all_defined
    end

    # We may choose to support further Grafana variables in future.
    def validate_global_variables!
      return unless panel_contains_unsupported_vars?

      fail_validation! "Prometheus must not include #{UNSUPPORTED_GRAFANA_GLOBAL_VARS}"
    end

    # We may choose to support additional datasources in future.
    def validate_datasource!
      proxied_prometheus = datasource[:access] == 'proxy' && datasource[:type] == 'prometheus'

      fail_validation! 'Only Prometheus datasources with proxy access in Grafana are supported.' unless proxied_prometheus
    end

    # True when any target expression references an unsupported
    # Grafana global variable.
    def panel_contains_unsupported_vars?
      panel[:targets].any? do |target|
        UNSUPPORTED_GRAFANA_GLOBAL_VARS.any? { |variable| target[:expr].include?(variable) }
      end
    end

    def fail_validation!(message)
      raise Validator::Error, message
    end
  end
end
...@@ -220,6 +220,9 @@ msgstr "" ...@@ -220,6 +220,9 @@ msgstr ""
msgid "%{commit_author_link} authored %{commit_timeago}" msgid "%{commit_author_link} authored %{commit_timeago}"
msgstr "" msgstr ""
msgid "%{completedWeight} of %{totalWeight} weight completed"
msgstr ""
msgid "%{cores} cores" msgid "%{cores} cores"
msgstr "" msgstr ""
...@@ -600,6 +603,9 @@ msgid_plural "- Users" ...@@ -600,6 +603,9 @@ msgid_plural "- Users"
msgstr[0] "" msgstr[0] ""
msgstr[1] "" msgstr[1] ""
msgid "- of - weight completed"
msgstr ""
msgid "- show less" msgid "- show less"
msgstr "" msgstr ""
...@@ -7795,6 +7801,9 @@ msgstr "" ...@@ -7795,6 +7801,9 @@ msgstr ""
msgid "Epics|An error occurred while saving the %{epicDateType} date" msgid "Epics|An error occurred while saving the %{epicDateType} date"
msgstr "" msgstr ""
msgid "Epics|An error occurred while updating labels."
msgstr ""
msgid "Epics|Are you sure you want to remove %{bStart}%{targetIssueTitle}%{bEnd} from %{bStart}%{parentEpicTitle}%{bEnd}?" msgid "Epics|Are you sure you want to remove %{bStart}%{targetIssueTitle}%{bEnd} from %{bStart}%{parentEpicTitle}%{bEnd}?"
msgstr "" msgstr ""
...@@ -9925,10 +9934,13 @@ msgstr "" ...@@ -9925,10 +9934,13 @@ msgstr ""
msgid "Group: %{name}" msgid "Group: %{name}"
msgstr "" msgstr ""
msgid "GroupRoadmap|%{startDateInWords} &ndash; %{endDateInWords}" msgid "GroupRoadmap|%{dateWord} – No end date"
msgstr ""
msgid "GroupRoadmap|%{startDateInWords} – %{endDateInWords}"
msgstr "" msgstr ""
msgid "GroupRoadmap|From %{dateWord}" msgid "GroupRoadmap|No start date – %{dateWord}"
msgstr "" msgstr ""
msgid "GroupRoadmap|Something went wrong while fetching epics" msgid "GroupRoadmap|Something went wrong while fetching epics"
...@@ -9949,9 +9961,6 @@ msgstr "" ...@@ -9949,9 +9961,6 @@ msgstr ""
msgid "GroupRoadmap|To widen your search, change or remove filters; from %{startDate} to %{endDate}." msgid "GroupRoadmap|To widen your search, change or remove filters; from %{startDate} to %{endDate}."
msgstr "" msgstr ""
msgid "GroupRoadmap|Until %{dateWord}"
msgstr ""
msgid "GroupSAML|Certificate fingerprint" msgid "GroupSAML|Certificate fingerprint"
msgstr "" msgstr ""
......
...@@ -57,6 +57,18 @@ describe Boards::IssuesController do ...@@ -57,6 +57,18 @@ describe Boards::IssuesController do
expect(development.issues.map(&:relative_position)).not_to include(nil) expect(development.issues.map(&:relative_position)).not_to include(nil)
end end
# Closed lists should surface the most recently closed issues first.
it 'returns issues by closed_at in descending order in closed list' do
create(:closed_issue, project: project, title: 'New Issue 1', closed_at: 1.day.ago)
create(:closed_issue, project: project, title: 'New Issue 2', closed_at: 1.week.ago)
list_issues user: user, board: board, list: board.lists.last.id
expect(response).to have_gitlab_http_status(:ok)
expect(json_response['issues'].length).to eq(2)
# 1.day.ago is more recent than 1.week.ago, so Issue 1 comes first.
expect(json_response['issues'][0]['title']).to eq('New Issue 1')
expect(json_response['issues'][1]['title']).to eq('New Issue 2')
end
it 'avoids N+1 database queries' do it 'avoids N+1 database queries' do
create(:labeled_issue, project: project, labels: [development]) create(:labeled_issue, project: project, labels: [development])
control_count = ActiveRecord::QueryRecorder.new { list_issues(user: user, board: board, list: list2) }.count control_count = ActiveRecord::QueryRecorder.new { list_issues(user: user, board: board, list: list2) }.count
......
...@@ -86,58 +86,11 @@ describe Dashboard::ProjectsController do ...@@ -86,58 +86,11 @@ describe Dashboard::ProjectsController do
end end
describe 'GET /starred.json' do describe 'GET /starred.json' do
subject { get :starred, format: :json }
let(:projects) { create_list(:project, 2, creator: user) }
before do before do
allow(Kaminari.config).to receive(:default_per_page).and_return(1) get :starred, format: :json
projects.each do |project|
project.add_developer(user)
create(:users_star_project, project_id: project.id, user_id: user.id)
end
end
it 'returns success' do
subject
expect(response).to have_gitlab_http_status(:ok)
end end
it 'paginates the records' do it { is_expected.to respond_with(:success) }
subject
expect(assigns(:projects).count).to eq(1)
end
end
end
context 'atom requests' do
let(:user) { create(:user) }
before do
sign_in(user)
end
describe '#index' do
context 'project pagination' do
let(:projects) { create_list(:project, 2, creator: user) }
before do
allow(Kaminari.config).to receive(:default_per_page).and_return(1)
projects.each do |project|
project.add_developer(user)
end
end
it 'does not paginate projects, even if page number is passed' do
get :index, format: :atom
expect(assigns(:events).count).to eq(2)
end
end
end end
end end
end end
...@@ -4,6 +4,11 @@ FactoryBot.define do ...@@ -4,6 +4,11 @@ FactoryBot.define do
factory :service do factory :service do
project project
type { 'Service' } type { 'Service' }
trait :instance do
project { nil }
instance { true }
end
end end
factory :custom_issue_tracker_service, class: 'CustomIssueTrackerService' do factory :custom_issue_tracker_service, class: 'CustomIssueTrackerService' do
......
...@@ -47,6 +47,31 @@ describe 'Issue Boards', :js do ...@@ -47,6 +47,31 @@ describe 'Issue Boards', :js do
end end
end end
# Feature coverage: cards in the closed list are ordered by closed_at
# descending (most recently closed first).
context 'closed issues' do
let!(:issue7) { create(:closed_issue, project: project, title: 'Closed issue 1', closed_at: 1.day.ago) }
let!(:issue8) { create(:closed_issue, project: project, title: 'Closed issue 2', closed_at: 1.week.ago) }
let!(:issue9) { create(:closed_issue, project: project, title: 'Closed issue 3', closed_at: 2.weeks.ago) }
before do
visit project_board_path(project, board)
wait_for_requests
expect(page).to have_selector('.board', count: 3)
end
it 'orders issues by closed_at' do
wait_for_requests
# The closed list is the third board rendered on the page.
page.within(find('.board:nth-child(3)')) do
first, second, third = all('.board-card').to_a
expect(first).to have_content(issue7.title)
expect(second).to have_content(issue8.title)
expect(third).to have_content(issue9.title)
end
end
end
context 'ordering in list' do context 'ordering in list' do
before do before do
visit project_board_path(project, board) visit project_board_path(project, board)
......
...@@ -111,6 +111,28 @@ ...@@ -111,6 +111,28 @@
"active": false, "active": false,
"properties": {}, "properties": {},
"template": true, "template": true,
"instance": false,
"push_events": true,
"issues_events": true,
"merge_requests_events": true,
"tag_push_events": true,
"note_events": true,
"job_events": true,
"type": "TeamcityService",
"category": "ci",
"default": false,
"wiki_page_events": true
},
{
"id": 101,
"title": "JetBrains TeamCity CI",
"project_id": 5,
"created_at": "2016-06-14T15:01:51.315Z",
"updated_at": "2016-06-14T15:01:51.315Z",
"active": false,
"properties": {},
"template": false,
"instance": true,
"push_events": true, "push_events": true,
"issues_events": true, "issues_events": true,
"merge_requests_events": true, "merge_requests_events": true,
......
...@@ -69,7 +69,7 @@ describe('Time series component', () => { ...@@ -69,7 +69,7 @@ describe('Time series component', () => {
mockedQueryResultFixture, mockedQueryResultFixture,
); );
// dashboard is a dynamically generated fixture and stored at environment_metrics_dashboard.json // dashboard is a dynamically generated fixture and stored at environment_metrics_dashboard.json
[mockGraphData] = store.state.monitoringDashboard.dashboard.panelGroups[0].panels; [mockGraphData] = store.state.monitoringDashboard.dashboard.panelGroups[1].panels;
}); });
describe('general functions', () => { describe('general functions', () => {
......
...@@ -89,8 +89,8 @@ describe('Monitoring store Getters', () => { ...@@ -89,8 +89,8 @@ describe('Monitoring store Getters', () => {
expect(getMetricStates()).toEqual([metricStates.OK]); expect(getMetricStates()).toEqual([metricStates.OK]);
// Filtered by groups // Filtered by groups
expect(getMetricStates(state.dashboard.panelGroups[0].key)).toEqual([metricStates.OK]); expect(getMetricStates(state.dashboard.panelGroups[1].key)).toEqual([metricStates.OK]);
expect(getMetricStates(state.dashboard.panelGroups[1].key)).toEqual([]); expect(getMetricStates(state.dashboard.panelGroups[2].key)).toEqual([]);
}); });
it('on multiple metrics errors', () => { it('on multiple metrics errors', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload); mutations[types.RECEIVE_METRICS_DATA_SUCCESS](state, metricsDashboardPayload);
...@@ -118,18 +118,18 @@ describe('Monitoring store Getters', () => { ...@@ -118,18 +118,18 @@ describe('Monitoring store Getters', () => {
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixture);
// An error in 2 groups // An error in 2 groups
mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, { mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, {
metricId: groups[0].panels[1].metrics[0].metricId, metricId: groups[1].panels[1].metrics[0].metricId,
}); });
mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, { mutations[types.RECEIVE_METRIC_RESULT_FAILURE](state, {
metricId: groups[1].panels[0].metrics[0].metricId, metricId: groups[2].panels[0].metrics[0].metricId,
}); });
expect(getMetricStates()).toEqual([metricStates.OK, metricStates.UNKNOWN_ERROR]); expect(getMetricStates()).toEqual([metricStates.OK, metricStates.UNKNOWN_ERROR]);
expect(getMetricStates(groups[0].key)).toEqual([ expect(getMetricStates(groups[1].key)).toEqual([
metricStates.OK, metricStates.OK,
metricStates.UNKNOWN_ERROR, metricStates.UNKNOWN_ERROR,
]); ]);
expect(getMetricStates(groups[1].key)).toEqual([metricStates.UNKNOWN_ERROR]); expect(getMetricStates(groups[2].key)).toEqual([metricStates.UNKNOWN_ERROR]);
}); });
}); });
}); });
...@@ -210,13 +210,13 @@ describe('Monitoring store Getters', () => { ...@@ -210,13 +210,13 @@ describe('Monitoring store Getters', () => {
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode); mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](state, mockedQueryResultFixtureStatusCode);
// First group has metrics // First group has metrics
expect(metricsWithData(state.dashboard.panelGroups[0].key)).toEqual([ expect(metricsWithData(state.dashboard.panelGroups[1].key)).toEqual([
mockedQueryResultFixture.metricId, mockedQueryResultFixture.metricId,
mockedQueryResultFixtureStatusCode.metricId, mockedQueryResultFixtureStatusCode.metricId,
]); ]);
// Second group has no metrics // Second group has no metrics
expect(metricsWithData(state.dashboard.panelGroups[1].key)).toEqual([]); expect(metricsWithData(state.dashboard.panelGroups[2].key)).toEqual([]);
}); });
}); });
}); });
......
...@@ -32,12 +32,13 @@ describe('Monitoring mutations', () => { ...@@ -32,12 +32,13 @@ describe('Monitoring mutations', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload); mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload);
const groups = getGroups(); const groups = getGroups();
expect(groups[0].key).toBe('response-metrics-nginx-ingress-vts-0'); expect(groups[0].key).toBe('system-metrics-kubernetes-0');
expect(groups[1].key).toBe('response-metrics-nginx-ingress-1'); expect(groups[1].key).toBe('response-metrics-nginx-ingress-vts-1');
expect(groups[2].key).toBe('response-metrics-nginx-ingress-2');
}); });
it('normalizes values', () => { it('normalizes values', () => {
mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload); mutations[types.RECEIVE_METRICS_DATA_SUCCESS](stateCopy, payload);
const expectedLabel = '5xx Errors (%)'; const expectedLabel = 'Pod average (MB)';
const { label, queryRange } = getGroups()[0].panels[2].metrics[0]; const { label, queryRange } = getGroups()[0].panels[2].metrics[0];
expect(label).toEqual(expectedLabel); expect(label).toEqual(expectedLabel);
...@@ -51,7 +52,7 @@ describe('Monitoring mutations', () => { ...@@ -51,7 +52,7 @@ describe('Monitoring mutations', () => {
expect(groups).toBeDefined(); expect(groups).toBeDefined();
expect(groups).toHaveLength(6); expect(groups).toHaveLength(6);
expect(groups[0].panels).toHaveLength(3); expect(groups[0].panels).toHaveLength(7);
expect(groups[0].panels[0].metrics).toHaveLength(1); expect(groups[0].panels[0].metrics).toHaveLength(1);
expect(groups[0].panels[1].metrics).toHaveLength(1); expect(groups[0].panels[1].metrics).toHaveLength(1);
expect(groups[0].panels[2].metrics).toHaveLength(1); expect(groups[0].panels[2].metrics).toHaveLength(1);
...@@ -65,9 +66,12 @@ describe('Monitoring mutations', () => { ...@@ -65,9 +66,12 @@ describe('Monitoring mutations', () => {
const groups = getGroups(); const groups = getGroups();
expect(groups[0].panels[0].metrics[0].metricId).toEqual( expect(groups[0].panels[0].metrics[0].metricId).toEqual(
'undefined_response_metrics_nginx_ingress_throughput_status_code', 'undefined_system_metrics_kubernetes_container_memory_total',
); );
expect(groups[1].panels[0].metrics[0].metricId).toEqual( expect(groups[1].panels[0].metrics[0].metricId).toEqual(
'undefined_response_metrics_nginx_ingress_throughput_status_code',
);
expect(groups[2].panels[0].metrics[0].metricId).toEqual(
'undefined_response_metrics_nginx_ingress_16_throughput_status_code', 'undefined_response_metrics_nginx_ingress_16_throughput_status_code',
); );
}); });
...@@ -135,7 +139,7 @@ describe('Monitoring mutations', () => { ...@@ -135,7 +139,7 @@ describe('Monitoring mutations', () => {
}, },
]; ];
const dashboard = metricsDashboardPayload; const dashboard = metricsDashboardPayload;
const getMetric = () => stateCopy.dashboard.panelGroups[0].panels[0].metrics[0]; const getMetric = () => stateCopy.dashboard.panelGroups[1].panels[0].metrics[0];
describe('REQUEST_METRIC_RESULT', () => { describe('REQUEST_METRIC_RESULT', () => {
beforeEach(() => { beforeEach(() => {
......
...@@ -46,11 +46,9 @@ describe Banzai::Filter::InlineGrafanaMetricsFilter do ...@@ -46,11 +46,9 @@ describe Banzai::Filter::InlineGrafanaMetricsFilter do
end end
context 'when "panelId" parameter is missing' do context 'when "panelId" parameter is missing' do
let(:dashboard_path) { '/d/XDaNK6amz/gitlab-omnibus-redis' } let(:dashboard_path) { '/d/XDaNK6amz/gitlab-omnibus-redis?from=1570397739557&to=1570484139557' }
it 'leaves the markdown unchanged' do it_behaves_like 'a metrics embed filter'
expect(unescape(doc.to_s)).to eq(input)
end
end end
context 'when time window parameters are missing' do context 'when time window parameters are missing' do
...@@ -86,6 +84,14 @@ describe Banzai::Filter::InlineGrafanaMetricsFilter do ...@@ -86,6 +84,14 @@ describe Banzai::Filter::InlineGrafanaMetricsFilter do
end end
end end
context 'when no parameters are provided' do
let(:dashboard_path) { '/d/XDaNK6amz/gitlab-omnibus-redis' }
it 'inserts a placeholder' do
expect(embed_url).to be_present
end
end
private private
# Nokogiri escapes the URLs, but we don't care about that # Nokogiri escapes the URLs, but we don't care about that
......
...@@ -703,6 +703,12 @@ describe Gitlab::ImportExport::Project::TreeRestorer do ...@@ -703,6 +703,12 @@ describe Gitlab::ImportExport::Project::TreeRestorer do
expect(project.services.where(template: true).count).to eq(0) expect(project.services.where(template: true).count).to eq(0)
end end
it 'does not import any instance services' do
expect(restored_project_json).to eq(true)
expect(project.services.where(instance: true).count).to eq(0)
end
it 'imports labels' do it 'imports labels' do
create(:group_label, name: 'Another label', group: project.group) create(:group_label, name: 'Another label', group: project.group)
......
...@@ -459,6 +459,7 @@ Service: ...@@ -459,6 +459,7 @@ Service:
- active - active
- properties - properties
- template - template
- instance
- push_events - push_events
- issues_events - issues_events
- commit_events - commit_events
......
...@@ -11,8 +11,9 @@ describe Gitlab::Metrics::Dashboard::Stages::GrafanaFormatter do ...@@ -11,8 +11,9 @@ describe Gitlab::Metrics::Dashboard::Stages::GrafanaFormatter do
describe '#transform!' do describe '#transform!' do
let(:grafana_dashboard) { JSON.parse(fixture_file('grafana/simplified_dashboard_response.json'), symbolize_names: true) } let(:grafana_dashboard) { JSON.parse(fixture_file('grafana/simplified_dashboard_response.json'), symbolize_names: true) }
let(:datasource) { JSON.parse(fixture_file('grafana/datasource_response.json'), symbolize_names: true) } let(:datasource) { JSON.parse(fixture_file('grafana/datasource_response.json'), symbolize_names: true) }
let(:expected_dashboard) { JSON.parse(fixture_file('grafana/expected_grafana_embed.json'), symbolize_names: true) }
let(:dashboard) { described_class.new(project, {}, params).transform! } subject(:dashboard) { described_class.new(project, {}, params).transform! }
let(:params) do let(:params) do
{ {
...@@ -23,83 +24,34 @@ describe Gitlab::Metrics::Dashboard::Stages::GrafanaFormatter do ...@@ -23,83 +24,34 @@ describe Gitlab::Metrics::Dashboard::Stages::GrafanaFormatter do
end end
context 'when the query and resources are configured correctly' do context 'when the query and resources are configured correctly' do
let(:expected_dashboard) { JSON.parse(fixture_file('grafana/expected_grafana_embed.json'), symbolize_names: true) } it { is_expected.to eq expected_dashboard }
it 'generates a gitlab-yml formatted dashboard' do
expect(dashboard).to eq(expected_dashboard)
end
end end
context 'when the inputs are invalid' do context 'when a panelId is not included in the grafana_url' do
shared_examples_for 'processing error' do before do
it 'raises a processing error' do params[:grafana_url].gsub('&panelId=8', '')
expect { dashboard }
.to raise_error(Gitlab::Metrics::Dashboard::Stages::InputFormatValidator::DashboardProcessingError)
end
end
context 'when the datasource is not proxyable' do
before do
params[:datasource][:access] = 'not-proxy'
end
it_behaves_like 'processing error'
end end
context 'when query param "panelId" is not specified' do it { is_expected.to eq expected_dashboard }
before do
params[:grafana_url].gsub!('panelId=8', '')
end
it_behaves_like 'processing error'
end
context 'when query param "from" is not specified' do
before do
params[:grafana_url].gsub!('from=1570397739557', '')
end
it_behaves_like 'processing error'
end
context 'when query param "to" is not specified' do context 'when there is also no valid panel in the dashboard' do
before do before do
params[:grafana_url].gsub!('to=1570484139557', '') params[:grafana_dashboard][:dashboard][:panels] = []
end end
it_behaves_like 'processing error' it 'raises a processing error' do
end expect { dashboard }.to raise_error(::Gitlab::Metrics::Dashboard::Errors::DashboardProcessingError)
context 'when the panel is not a graph' do
before do
params[:grafana_dashboard][:dashboard][:panels][0][:type] = 'singlestat'
end end
it_behaves_like 'processing error'
end end
end
context 'when the panel is not a line graph' do context 'when an input is invalid' do
before do before do
params[:grafana_dashboard][:dashboard][:panels][0][:lines] = false params[:datasource][:access] = 'not-proxy'
end
it_behaves_like 'processing error'
end
context 'when the query dashboard includes undefined variables' do
before do
params[:grafana_url].gsub!('&var-instance=localhost:9121', '')
end
it_behaves_like 'processing error'
end end
context 'when the expression contains unsupported global variables' do it 'raises a processing error' do
before do expect { dashboard }.to raise_error(::Gitlab::Metrics::Dashboard::Errors::DashboardProcessingError)
params[:grafana_dashboard][:dashboard][:panels][0][:targets][0][:expr] = 'sum(important_metric[$__interval_ms])'
end
it_behaves_like 'processing error'
end end
end end
end end
......
# frozen_string_literal: true
require 'spec_helper'
# Specs for Grafana::Validator. Fixtures provide a simplified Grafana
# dashboard/datasource response; each context mutates one input and
# asserts the corresponding validation error.
describe Grafana::Validator do
let(:grafana_dashboard) { JSON.parse(fixture_file('grafana/simplified_dashboard_response.json'), symbolize_names: true) }
let(:datasource) { JSON.parse(fixture_file('grafana/datasource_response.json'), symbolize_names: true) }
let(:panel) { grafana_dashboard[:dashboard][:panels].first }
let(:query_params) do
{
from: '1570397739557',
to: '1570484139557',
panelId: '8',
'var-instance': 'localhost:9121'
}
end
describe 'validate!' do
# Asserts that validate! raises Validator::Error with the given message.
shared_examples_for 'processing error' do |message|
it 'raises a processing error' do
expect { subject }
.to raise_error(::Grafana::Validator::Error, message)
end
end
subject { described_class.new(grafana_dashboard, datasource, panel, query_params).validate! }
it 'does not raise an error' do
expect { subject }.not_to raise_error
end
context 'when query param "from" is not specified' do
before do
query_params.delete(:from)
end
it_behaves_like 'processing error', 'Grafana query parameters must include from and to.'
end
context 'when query param "to" is not specified' do
before do
query_params.delete(:to)
end
it_behaves_like 'processing error', 'Grafana query parameters must include from and to.'
end
context 'when the panel is not provided' do
let(:panel) { nil }
it_behaves_like 'processing error', 'Panel type must be a line graph.'
end
context 'when the panel is not a graph' do
before do
panel[:type] = 'singlestat'
end
it_behaves_like 'processing error', 'Panel type must be a line graph.'
end
context 'when the panel is not a line graph' do
before do
panel[:lines] = false
end
it_behaves_like 'processing error', 'Panel type must be a line graph.'
end
context 'when the query dashboard includes undefined variables' do
before do
query_params.delete(:'var-instance')
end
it_behaves_like 'processing error', 'All Grafana variables must be defined in the query parameters.'
end
context 'when the expression contains unsupported global variables' do
before do
grafana_dashboard[:dashboard][:panels][0][:targets][0][:expr] = 'sum(important_metric[$__interval_ms])'
end
it_behaves_like 'processing error', "Prometheus must not include #{described_class::UNSUPPORTED_GRAFANA_GLOBAL_VARS}"
end
context 'when the datasource is not proxyable' do
before do
datasource[:access] = 'not-proxy'
end
it_behaves_like 'processing error', 'Only Prometheus datasources with proxy access in Grafana are supported.'
end
# Skipping datasource validation allows for checks to be
# run without a secondary call to Grafana API
context 'when the datasource is not provided' do
let(:datasource) { nil }
it 'does not raise an error' do
expect { subject }.not_to raise_error
end
end
end
# valid? is the non-raising boolean form of validate!.
describe 'valid?' do
subject { described_class.new(grafana_dashboard, datasource, panel, query_params).valid? }
context 'with valid arguments' do
it { is_expected.to be true }
end
context 'with invalid arguments' do
let(:query_params) { {} }
it { is_expected.to be false }
end
end
end
...@@ -18,6 +18,20 @@ describe Service do ...@@ -18,6 +18,20 @@ describe Service do
expect(build(:service, project_id: nil, template: false)).to be_invalid expect(build(:service, project_id: nil, template: false)).to be_invalid
end end
it 'validates presence of project_id if not instance', :aggregate_failures do
expect(build(:service, project_id: nil, instance: true)).to be_valid
expect(build(:service, project_id: nil, instance: false)).to be_invalid
end
it 'validates absence of project_id if instance', :aggregate_failures do
expect(build(:service, project_id: nil, instance: true)).to be_valid
expect(build(:service, instance: true)).to be_invalid
end
it 'validates service is template or instance' do
expect(build(:service, project_id: nil, template: true, instance: true)).to be_invalid
end
context 'with an existing service template' do context 'with an existing service template' do
before do before do
create(:service, type: 'Service', template: true) create(:service, type: 'Service', template: true)
...@@ -27,6 +41,16 @@ describe Service do ...@@ -27,6 +41,16 @@ describe Service do
expect(build(:service, type: 'Service', template: true)).to be_invalid expect(build(:service, type: 'Service', template: true)).to be_invalid
end end
end end
context 'with an existing instance service' do
before do
create(:service, :instance)
end
it 'validates only one service instance per type' do
expect(build(:service, :instance)).to be_invalid
end
end
end end
describe 'Scopes' do describe 'Scopes' do
......
...@@ -33,11 +33,11 @@ describe Boards::Issues::ListService do ...@@ -33,11 +33,11 @@ describe Boards::Issues::ListService do
let!(:list1_issue3) { create(:labeled_issue, project: project, milestone: m1, labels: [development, p1]) } let!(:list1_issue3) { create(:labeled_issue, project: project, milestone: m1, labels: [development, p1]) }
let!(:list2_issue1) { create(:labeled_issue, project: project, milestone: m1, labels: [testing]) } let!(:list2_issue1) { create(:labeled_issue, project: project, milestone: m1, labels: [testing]) }
let!(:closed_issue1) { create(:labeled_issue, :closed, project: project, labels: [bug]) } let!(:closed_issue1) { create(:labeled_issue, :closed, project: project, labels: [bug], closed_at: 1.day.ago) }
let!(:closed_issue2) { create(:labeled_issue, :closed, project: project, labels: [p3]) } let!(:closed_issue2) { create(:labeled_issue, :closed, project: project, labels: [p3], closed_at: 2.days.ago) }
let!(:closed_issue3) { create(:issue, :closed, project: project) } let!(:closed_issue3) { create(:issue, :closed, project: project, closed_at: 1.week.ago) }
let!(:closed_issue4) { create(:labeled_issue, :closed, project: project, labels: [p1]) } let!(:closed_issue4) { create(:labeled_issue, :closed, project: project, labels: [p1], closed_at: 1.year.ago) }
let!(:closed_issue5) { create(:labeled_issue, :closed, project: project, labels: [development]) } let!(:closed_issue5) { create(:labeled_issue, :closed, project: project, labels: [development], closed_at: 2.years.ago) }
let(:parent) { project } let(:parent) { project }
...@@ -94,11 +94,11 @@ describe Boards::Issues::ListService do ...@@ -94,11 +94,11 @@ describe Boards::Issues::ListService do
let!(:list1_issue3) { create(:labeled_issue, project: project1, milestone: m1, labels: [development, p1, p1_project1]) } let!(:list1_issue3) { create(:labeled_issue, project: project1, milestone: m1, labels: [development, p1, p1_project1]) }
let!(:list2_issue1) { create(:labeled_issue, project: project1, milestone: m1, labels: [testing]) } let!(:list2_issue1) { create(:labeled_issue, project: project1, milestone: m1, labels: [testing]) }
let!(:closed_issue1) { create(:labeled_issue, :closed, project: project, labels: [bug]) } let!(:closed_issue1) { create(:labeled_issue, :closed, project: project, labels: [bug], closed_at: 1.day.ago) }
let!(:closed_issue2) { create(:labeled_issue, :closed, project: project, labels: [p3, p3_project]) } let!(:closed_issue2) { create(:labeled_issue, :closed, project: project, labels: [p3, p3_project], closed_at: 2.days.ago) }
let!(:closed_issue3) { create(:issue, :closed, project: project1) } let!(:closed_issue3) { create(:issue, :closed, project: project1, closed_at: 1.week.ago) }
let!(:closed_issue4) { create(:labeled_issue, :closed, project: project1, labels: [p1, p1_project1]) } let!(:closed_issue4) { create(:labeled_issue, :closed, project: project1, labels: [p1, p1_project1], closed_at: 1.year.ago) }
let!(:closed_issue5) { create(:labeled_issue, :closed, project: project1, labels: [development]) } let!(:closed_issue5) { create(:labeled_issue, :closed, project: project1, labels: [development], closed_at: 2.years.ago) }
before do before do
group.add_developer(user) group.add_developer(user)
......
...@@ -36,20 +36,22 @@ RSpec.shared_examples 'issues list service' do ...@@ -36,20 +36,22 @@ RSpec.shared_examples 'issues list service' do
expect(issues).to eq [opened_issue2, reopened_issue1, opened_issue1] expect(issues).to eq [opened_issue2, reopened_issue1, opened_issue1]
end end
it 'returns closed issues when listing issues from Closed' do it 'returns opened issues that have label list applied when listing issues from a label list' do
params = { board_id: board.id, id: closed.id } params = { board_id: board.id, id: list1.id }
issues = described_class.new(parent, user, params).execute issues = described_class.new(parent, user, params).execute
expect(issues).to eq [closed_issue4, closed_issue2, closed_issue5, closed_issue3, closed_issue1] expect(issues).to eq [list1_issue3, list1_issue1, list1_issue2]
end end
end
it 'returns opened issues that have label list applied when listing issues from a label list' do context 'issues are ordered by date of closing' do
params = { board_id: board.id, id: list1.id } it 'returns closed issues when listing issues from Closed' do
params = { board_id: board.id, id: closed.id }
issues = described_class.new(parent, user, params).execute issues = described_class.new(parent, user, params).execute
expect(issues).to eq [list1_issue3, list1_issue1, list1_issue2] expect(issues).to eq [closed_issue1, closed_issue2, closed_issue3, closed_issue4, closed_issue5]
end end
end end
......
...@@ -67,6 +67,7 @@ RSpec.shared_examples 'valid dashboard cloning process' do |dashboard_template, ...@@ -67,6 +67,7 @@ RSpec.shared_examples 'valid dashboard cloning process' do |dashboard_template,
it 'delegates commit creation to Files::CreateService', :aggregate_failures do it 'delegates commit creation to Files::CreateService', :aggregate_failures do
service_instance = instance_double(::Files::CreateService) service_instance = instance_double(::Files::CreateService)
allow(::Gitlab::Metrics::Dashboard::Processor).to receive(:new).and_return(double(process: file_content_hash))
expect(::Files::CreateService).to receive(:new).with(project, user, dashboard_attrs).and_return(service_instance) expect(::Files::CreateService).to receive(:new).with(project, user, dashboard_attrs).and_return(service_instance)
expect(service_instance).to receive(:execute).and_return(status: :success) expect(service_instance).to receive(:execute).and_return(status: :success)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment