Commit 488f9105 authored by Grant Young, committed by Toon Claes

Rename Browser Performance Widget in code

The feature was named only 'performance' in code, so we want to rename it
now since multiple performance report types are incoming.
parent cd2aa237
...@@ -34,6 +34,7 @@ module Ci ...@@ -34,6 +34,7 @@ module Ci
license_management: 'gl-license-management-report.json', license_management: 'gl-license-management-report.json',
license_scanning: 'gl-license-scanning-report.json', license_scanning: 'gl-license-scanning-report.json',
performance: 'performance.json', performance: 'performance.json',
browser_performance: 'browser-performance.json',
metrics: 'metrics.txt', metrics: 'metrics.txt',
lsif: 'lsif.json', lsif: 'lsif.json',
dotenv: '.env', dotenv: '.env',
...@@ -73,6 +74,7 @@ module Ci ...@@ -73,6 +74,7 @@ module Ci
license_management: :raw, license_management: :raw,
license_scanning: :raw, license_scanning: :raw,
performance: :raw, performance: :raw,
browser_performance: :raw,
terraform: :raw, terraform: :raw,
requirements: :raw, requirements: :raw,
coverage_fuzzing: :raw coverage_fuzzing: :raw
...@@ -93,6 +95,7 @@ module Ci ...@@ -93,6 +95,7 @@ module Ci
lsif lsif
metrics metrics
performance performance
browser_performance
sast sast
secret_detection secret_detection
requirements requirements
...@@ -180,7 +183,7 @@ module Ci ...@@ -180,7 +183,7 @@ module Ci
codequality: 9, ## EE-specific codequality: 9, ## EE-specific
license_management: 10, ## EE-specific license_management: 10, ## EE-specific
license_scanning: 101, ## EE-specific till 13.0 license_scanning: 101, ## EE-specific till 13.0
performance: 11, ## EE-specific performance: 11, ## EE-specific till 13.2
metrics: 12, ## EE-specific metrics: 12, ## EE-specific
metrics_referee: 13, ## runner referees metrics_referee: 13, ## runner referees
network_referee: 14, ## runner referees network_referee: 14, ## runner referees
...@@ -192,7 +195,8 @@ module Ci ...@@ -192,7 +195,8 @@ module Ci
cluster_applications: 20, cluster_applications: 20,
secret_detection: 21, ## EE-specific secret_detection: 21, ## EE-specific
requirements: 22, ## EE-specific requirements: 22, ## EE-specific
coverage_fuzzing: 23 ## EE-specific coverage_fuzzing: 23, ## EE-specific
browser_performance: 24 ## EE-specific
} }
enum file_format: { enum file_format: {
......
# frozen_string_literal: true
class AddBrowserPerformanceToPlanLimits < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
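    # A default of 0 means the limit is not enforced, consistent with the
    # other ci_max_artifact_size_* columns on plan_limits.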
add_column :plan_limits, "ci_max_artifact_size_browser_performance", :integer, default: 0, null: false
end
end
...@@ -13814,7 +13814,8 @@ CREATE TABLE public.plan_limits ( ...@@ -13814,7 +13814,8 @@ CREATE TABLE public.plan_limits (
ci_max_artifact_size_cluster_applications integer DEFAULT 0 NOT NULL, ci_max_artifact_size_cluster_applications integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_secret_detection integer DEFAULT 0 NOT NULL, ci_max_artifact_size_secret_detection integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_requirements integer DEFAULT 0 NOT NULL, ci_max_artifact_size_requirements integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_coverage_fuzzing integer DEFAULT 0 NOT NULL ci_max_artifact_size_coverage_fuzzing integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_browser_performance integer DEFAULT 0 NOT NULL
); );
CREATE SEQUENCE public.plan_limits_id_seq CREATE SEQUENCE public.plan_limits_id_seq
...@@ -23643,5 +23644,6 @@ COPY "schema_migrations" (version) FROM STDIN; ...@@ -23643,5 +23644,6 @@ COPY "schema_migrations" (version) FROM STDIN;
20200706005325 20200706005325
20200706170536 20200706170536
20200707071941 20200707071941
20200707094341
\. \.
...@@ -251,10 +251,10 @@ dashboards. ...@@ -251,10 +251,10 @@ dashboards.
> - Introduced in GitLab 11.5. > - Introduced in GitLab 11.5.
> - Requires GitLab Runner 11.5 and above. > - Requires GitLab Runner 11.5 and above.
The `performance` report collects [Performance metrics](../../user/project/merge_requests/browser_performance_testing.md) The `performance` report collects [Browser Performance Testing metrics](../../user/project/merge_requests/browser_performance_testing.md)
as artifacts. as artifacts.
The collected Performance report will be uploaded to GitLab as an artifact and will The collected Browser Performance report will be uploaded to GitLab as an artifact and will
be automatically shown in merge requests. be automatically shown in merge requests.
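For illustration, a minimal job that declares this report might look like the
following sketch (the job name, script, and output file are placeholders; in
practice the Browser Performance Testing template generates this for you):

```yaml
performance:
  stage: test
  script:
    - ./run-sitespeed.sh # placeholder: produces performance.json
  artifacts:
    reports:
      performance: performance.json
```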
#### `artifacts:reports:metrics` **(PREMIUM)** #### `artifacts:reports:metrics` **(PREMIUM)**
......
...@@ -117,7 +117,7 @@ The following table lists available parameters for jobs: ...@@ -117,7 +117,7 @@ The following table lists available parameters for jobs:
| [`when`](#when) | When to run job. Also available: `when:manual` and `when:delayed`. | | [`when`](#when) | When to run job. Also available: `when:manual` and `when:delayed`. |
| [`environment`](#environment) | Name of an environment to which the job deploys. Also available: `environment:name`, `environment:url`, `environment:on_stop`, `environment:auto_stop_in` and `environment:action`. | | [`environment`](#environment) | Name of an environment to which the job deploys. Also available: `environment:name`, `environment:url`, `environment:on_stop`, `environment:auto_stop_in` and `environment:action`. |
| [`cache`](#cache) | List of files that should be cached between subsequent runs. Also available: `cache:paths`, `cache:key`, `cache:untracked`, and `cache:policy`. | | [`cache`](#cache) | List of files that should be cached between subsequent runs. Also available: `cache:paths`, `cache:key`, `cache:untracked`, and `cache:policy`. |
| [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, `artifacts:reports`, `artifacts:reports:junit`, `artifacts:reports:cobertura`, and `artifacts:reports:terraform`.<br><br>In GitLab [Enterprise Edition](https://about.gitlab.com/pricing/), these are available: `artifacts:reports:codequality`, `artifacts:reports:sast`, `artifacts:reports:dependency_scanning`, `artifacts:reports:container_scanning`, `artifacts:reports:dast`, `artifacts:reports:license_scanning`, `artifacts:reports:license_management` (removed in GitLab 13.0),`artifacts:reports:performance` and `artifacts:reports:metrics`. | | [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, `artifacts:reports`, `artifacts:reports:junit`, `artifacts:reports:cobertura`, and `artifacts:reports:terraform`.<br><br>In GitLab [Enterprise Edition](https://about.gitlab.com/pricing/), these are available: `artifacts:reports:codequality`, `artifacts:reports:sast`, `artifacts:reports:dependency_scanning`, `artifacts:reports:container_scanning`, `artifacts:reports:dast`, `artifacts:reports:license_scanning`, `artifacts:reports:license_management` (removed in GitLab 13.0), `artifacts:reports:performance` and `artifacts:reports:metrics`. |
| [`dependencies`](#dependencies) | Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. | | [`dependencies`](#dependencies) | Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. |
| [`coverage`](#coverage) | Code coverage settings for a given job. | | [`coverage`](#coverage) | Code coverage settings for a given job. |
| [`retry`](#retry) | When and how many times a job can be auto-retried in case of a failure. | | [`retry`](#retry) | When and how many times a job can be auto-retried in case of a failure. |
...@@ -3148,7 +3148,7 @@ These are the available report types: ...@@ -3148,7 +3148,7 @@ These are the available report types:
| [`artifacts:reports:dast`](../pipelines/job_artifacts.md#artifactsreportsdast-ultimate) **(ULTIMATE)** | The `dast` report collects Dynamic Application Security Testing vulnerabilities. | | [`artifacts:reports:dast`](../pipelines/job_artifacts.md#artifactsreportsdast-ultimate) **(ULTIMATE)** | The `dast` report collects Dynamic Application Security Testing vulnerabilities. |
| [`artifacts:reports:license_management`](../pipelines/job_artifacts.md#artifactsreportslicense_management-ultimate) **(ULTIMATE)** | The `license_management` report collects Licenses (*removed from GitLab 13.0*). | | [`artifacts:reports:license_management`](../pipelines/job_artifacts.md#artifactsreportslicense_management-ultimate) **(ULTIMATE)** | The `license_management` report collects Licenses (*removed from GitLab 13.0*). |
| [`artifacts:reports:license_scanning`](../pipelines/job_artifacts.md#artifactsreportslicense_scanning-ultimate) **(ULTIMATE)** | The `license_scanning` report collects Licenses. | | [`artifacts:reports:license_scanning`](../pipelines/job_artifacts.md#artifactsreportslicense_scanning-ultimate) **(ULTIMATE)** | The `license_scanning` report collects Licenses. |
| [`artifacts:reports:performance`](../pipelines/job_artifacts.md#artifactsreportsperformance-premium) **(PREMIUM)** | The `performance` report collects Performance metrics. | | [`artifacts:reports:performance`](../pipelines/job_artifacts.md#artifactsreportsperformance-premium) **(PREMIUM)** | The `performance` report collects Browser Performance metrics. |
| [`artifacts:reports:metrics`](../pipelines/job_artifacts.md#artifactsreportsmetrics-premium) **(PREMIUM)** | The `metrics` report collects Metrics. | | [`artifacts:reports:metrics`](../pipelines/job_artifacts.md#artifactsreportsmetrics-premium) **(PREMIUM)** | The `metrics` report collects Metrics. |
#### `dependencies` #### `dependencies`
......
...@@ -10,20 +10,16 @@ type: reference, howto ...@@ -10,20 +10,16 @@ type: reference, howto
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/3507) in [GitLab Premium](https://about.gitlab.com/pricing/) 10.3. > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/3507) in [GitLab Premium](https://about.gitlab.com/pricing/) 10.3.
If your application offers a web interface and you're using If your application offers a web interface and you're using
[GitLab CI/CD](../../../ci/README.md), you can quickly determine the performance [GitLab CI/CD](../../../ci/README.md), you can quickly determine the rendering performance
impact of pending code changes. impact of pending code changes in the browser.
## Overview ## Overview
GitLab uses [Sitespeed.io](https://www.sitespeed.io), a free and open source GitLab uses [Sitespeed.io](https://www.sitespeed.io), a free and open source
tool, for measuring the performance of web sites. GitLab has built a simple tool, for measuring the rendering performance of web sites. The
[Sitespeed plugin](https://gitlab.com/gitlab-org/gl-performance) which outputs [Sitespeed plugin](https://gitlab.com/gitlab-org/gl-performance) that GitLab built outputs
the performance score for each page analyzed in a file called `performance.json`. the performance score for each page analyzed in a file called `browser-performance.json`.
The [Sitespeed.io performance score](https://examples.sitespeed.io/6.0/2017-11-23-23-43-35/help.html) This data can be shown in merge requests.
is a composite value based on best practices.
GitLab can [show the Performance report](#how-browser-performance-testing-works)
in the merge request widget area.
## Use cases ## Use cases
...@@ -41,7 +37,7 @@ Consider the following workflow: ...@@ -41,7 +37,7 @@ Consider the following workflow:
## How browser performance testing works ## How browser performance testing works
First, define a job in your `.gitlab-ci.yml` file that generates the First, define a job in your `.gitlab-ci.yml` file that generates the
[Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium). [Browser Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium).
GitLab then checks this report, compares key performance metrics for each page GitLab then checks this report, compares key performance metrics for each page
between the source and target branches, and shows the information in the merge request. between the source and target branches, and shows the information in the merge request.
...@@ -49,12 +45,13 @@ For an example Performance job, see ...@@ -49,12 +45,13 @@ For an example Performance job, see
[Configuring Browser Performance Testing](#configuring-browser-performance-testing). [Configuring Browser Performance Testing](#configuring-browser-performance-testing).
NOTE: **Note:** NOTE: **Note:**
If the Performance report has no data to compare, such as when you add the If the Browser Performance report has no data to compare, such as when you add the
Performance job in your `.gitlab-ci.yml` for the very first time, no information Browser Performance job in your `.gitlab-ci.yml` for the very first time,
displays in the merge request widget area. Consecutive merge requests will have data for the Browser Performance report widget won't show. It must have run at least
comparison, and the Performance report will be shown properly. once on the target branch (`master`, for example) before it will display in a
merge request targeting that branch.
![Performance Widget](img/browser_performance_testing.png) ![Browser Performance Widget](img/browser_performance_testing.png)
## Configuring Browser Performance Testing ## Configuring Browser Performance Testing
...@@ -64,21 +61,7 @@ using Docker-in-Docker. ...@@ -64,21 +61,7 @@ using Docker-in-Docker.
1. First, set up GitLab Runner with a 1. First, set up GitLab Runner with a
[Docker-in-Docker build](../../../ci/docker/using_docker_build.md#use-docker-in-docker-workflow-with-docker-executor). [Docker-in-Docker build](../../../ci/docker/using_docker_build.md#use-docker-in-docker-workflow-with-docker-executor).
1. After configuring the Runner, add a new job to `.gitlab-ci.yml` that generates 1. Configure the default Browser Performance Testing CI job as follows in your `.gitlab-ci.yml` file:
the expected report.
1. Define the `performance` job according to your version of GitLab:
- For GitLab 12.4 and later - [include](../../../ci/yaml/README.md#includetemplate) the
[`Browser-Performance.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Browser-Performance.gitlab-ci.yml) provided as a part of your GitLab installation.
- For GitLab versions earlier than 12.4 - Copy and use the job as defined in the
[`Browser-Performance.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Browser-Performance.gitlab-ci.yml).
CAUTION: **Caution:**
The job definition provided by the template does not support Kubernetes yet.
For a complete example of a more complex setup that works in Kubernetes, see
[`Browser-Performance-Testing.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Browser-Performance-Testing.gitlab-ci.yml).
1. Add the following to your `.gitlab-ci.yml` file:
```yaml ```yaml
include: include:
...@@ -89,24 +72,32 @@ using Docker-in-Docker. ...@@ -89,24 +72,32 @@ using Docker-in-Docker.
URL: https://example.com URL: https://example.com
``` ```
CAUTION: **Caution:** NOTE: **Note:**
The job definition provided by the template is supported in GitLab 11.5 and later versions. For versions before 12.4, see the information for [older GitLab versions](#gitlab-versions-123-and-older).
It also requires GitLab Runner 11.5 or later. For earlier versions, use the If you are using a Kubernetes cluster, use [`template: Jobs/Browser-Performance-Testing.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Browser-Performance-Testing.gitlab-ci.yml)
[previous job definitions](#previous-job-definitions). instead.
The above example creates a `performance` job in your CI/CD pipeline and runs The above example creates a `performance` job in your CI/CD pipeline and runs
sitespeed.io against the webpage you defined in `URL` to gather key metrics. sitespeed.io against the webpage you defined in `URL` to gather key metrics.
The [GitLab plugin for sitespeed.io](https://gitlab.com/gitlab-org/gl-performance)
is downloaded to save the report as a [Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium)
that you can later download and analyze. Due to implementation limitations, we always
take the latest Performance artifact available.
The full HTML sitespeed.io report is saved as an artifact, and if The example uses a CI/CD template that is included in all GitLab installations since
[GitLab Pages](../pages/index.md) is enabled, it can be viewed directly in your browser. 12.4, but it will not work with Kubernetes clusters. If you are using GitLab 12.3
or older, you must [add the configuration manually](#gitlab-versions-123-and-older).
The template uses the [GitLab plugin for sitespeed.io](https://gitlab.com/gitlab-org/gl-performance),
and it saves the full HTML sitespeed.io report as a [Browser Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium)
that you can later download and analyze. This implementation always takes the latest
Browser Performance artifact available. If [GitLab Pages](../pages/index.md) is enabled,
you can view the report directly in your browser.
You can also customize the jobs with environment variables:
- `SITESPEED_IMAGE`: Configure the Docker image to use for the job (default `sitespeedio/sitespeed.io`), but not the image version.
- `SITESPEED_VERSION`: Configure the version of the Docker image to use for the job (default `13.3.0`).
- `SITESPEED_OPTIONS`: Configure any additional sitespeed.io options as required (default `nil`). Refer to the [sitespeed.io documentation](https://www.sitespeed.io/documentation/sitespeed.io/configuration/) for more details.
You can also customize options by setting the `SITESPEED_OPTIONS` variable.
For example, you can override the number of runs sitespeed.io For example, you can override the number of runs sitespeed.io
makes on the given URL: makes on the given URL, and change the version:
```yaml ```yaml
include: include:
...@@ -114,18 +105,11 @@ include: ...@@ -114,18 +105,11 @@ include:
performance: performance:
variables: variables:
URL: https://example.com URL: https://www.sitespeed.io/
SITESPEED_VERSION: 13.2.0
SITESPEED_OPTIONS: -n 5 SITESPEED_OPTIONS: -n 5
``` ```
For further customization options for sitespeed.io, including the ability to provide a
list of URLs to test, please see the
[Sitespeed.io Configuration](https://www.sitespeed.io/documentation/sitespeed.io/configuration/)
documentation.
TIP: **Tip:**
Key metrics are automatically extracted and shown in the merge request widget.
### Configuring degradation threshold ### Configuring degradation threshold
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/27599) in GitLab 13.0. > [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/27599) in GitLab 13.0.
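As a hedged sketch, the threshold is set through the template job's variables
(assuming the `DEGRADATION_THRESHOLD` variable from the Verify template; the
merge request widget only appears if the Total Score degrades by at least this
many points):

```yaml
include:
  template: Verify/Browser-Performance.gitlab-ci.yml

performance:
  variables:
    URL: https://example.com
    DEGRADATION_THRESHOLD: 5
```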
...@@ -152,15 +136,12 @@ The above CI YAML configuration is great for testing against static environments ...@@ -152,15 +136,12 @@ The above CI YAML configuration is great for testing against static environments
be extended for dynamic environments, but a few extra steps are required: be extended for dynamic environments, but a few extra steps are required:
1. The `performance` job should run after the dynamic environment has started. 1. The `performance` job should run after the dynamic environment has started.
1. In the `review` job, persist the hostname and upload it as an artifact so 1. In the `review` job:
it's available to the `performance` job. The same can be done for static 1. Generate a URL list file with the dynamic URL.
environments like staging and production to unify the code path. You can save it 1. Save the file as an artifact, for example with `echo $CI_ENVIRONMENT_URL > environment_url.txt`
as an artifact with `echo $CI_ENVIRONMENT_URL > environment_url.txt` in your job's `script`.
in your job's `script`. 1. Pass the list as the `URL` environment variable (which can be a URL or a file containing URLs)
1. In the `performance` job, read the previous artifact into an environment to the `performance` job.
variable. In this case, use `$URL` because the sitespeed.io command
uses it for the URL parameter. Because Review App URLs are dynamic, define
the `URL` variable through `before_script` instead of `variables`.
1. You can now run the sitespeed.io container against the desired hostname and 1. You can now run the sitespeed.io container against the desired hostname and
paths. paths.
...@@ -193,20 +174,21 @@ review: ...@@ -193,20 +174,21 @@ review:
performance: performance:
dependencies: dependencies:
- review - review
before_script: variables:
- export URL=$(cat environment_url.txt) URL: environment_url.txt
``` ```
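For completeness, a sketch of the `review` half of this flow (the deployment
command is a placeholder):

```yaml
review:
  stage: deploy
  script:
    - ./deploy-review-app.sh # placeholder deployment step
    - echo $CI_ENVIRONMENT_URL > environment_url.txt
  artifacts:
    paths:
      - environment_url.txt
```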
### Previous job definitions ### GitLab versions 12.3 and older
CAUTION: **Caution:** Browser Performance Testing has gone through several changes since its introduction.
Before GitLab 11.5, the Performance job and artifact had to be named specifically In this section we'll detail these changes and how you can run the test based on your
to automatically extract report data and show it in the merge request widget. GitLab version:
While these old job definitions are still maintained, they have been deprecated
and may be removed in next major release, GitLab 12.0.
GitLab recommends you update your current `.gitlab-ci.yml` configuration to reflect that change.
For GitLab 11.4 and earlier, the job should look like: - In GitLab 12.4 [a job template was made available](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Browser-Performance.gitlab-ci.yml).
- In 13.2 the feature was renamed from `Performance` to `Browser Performance` with
additional template variables. The job name in the template is still `performance`
for compatibility reasons, but may be renamed to match in a future iteration.
- For 11.5 to 12.3 no template is available and the job has to be defined manually as follows:
```yaml ```yaml
performance: performance:
...@@ -214,28 +196,45 @@ performance: ...@@ -214,28 +196,45 @@ performance:
image: docker:git image: docker:git
variables: variables:
URL: https://example.com URL: https://example.com
SITESPEED_VERSION: 13.3.0
SITESPEED_OPTIONS: ''
services: services:
- docker:stable-dind - docker:stable-dind
script: script:
- mkdir gitlab-exporter - mkdir gitlab-exporter
- wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js - wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
- mkdir sitespeed-results - mkdir sitespeed-results
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL - docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL $SITESPEED_OPTIONS
- mv sitespeed-results/data/performance.json performance.json - mv sitespeed-results/data/performance.json performance.json
artifacts: artifacts:
paths: paths:
- performance.json - performance.json
- sitespeed-results/ - sitespeed-results/
reports:
performance: performance.json
``` ```
<!-- ## Troubleshooting - For 11.4 and earlier, the job should be defined as follows:
Include any troubleshooting steps that you can foresee. If you know beforehand what issues ```yaml
one might have when setting this up, or when something is changed, or on upgrading, it's performance:
important to describe those, too. Think of things that may go wrong and include them here. stage: performance
This is important to minimize requests for support, and to avoid doc comments with image: docker:git
questions that you know someone might ask. variables:
URL: https://example.com
services:
- docker:stable-dind
script:
- mkdir gitlab-exporter
- wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
- mkdir sitespeed-results
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL
- mv sitespeed-results/data/performance.json performance.json
artifacts:
paths:
- performance.json
- sitespeed-results/
```
Each scenario can be a third-level heading, e.g. `### Getting error message X`. Upgrading to the latest version and using the templates is recommended to ensure
If you have none to add when creating a doc, leave this section in place you receive the latest updates, including updates to the sitespeed.io version.
but commented out to help encourage others to add to it in the future. -->
...@@ -14,7 +14,6 @@ import MrWidgetApprovals from './components/approvals/approvals.vue'; ...@@ -14,7 +14,6 @@ import MrWidgetApprovals from './components/approvals/approvals.vue';
import MrWidgetGeoSecondaryNode from './components/states/mr_widget_secondary_geo_node.vue'; import MrWidgetGeoSecondaryNode from './components/states/mr_widget_secondary_geo_node.vue';
import MergeTrainHelperText from './components/merge_train_helper_text.vue'; import MergeTrainHelperText from './components/merge_train_helper_text.vue';
import { MTWPS_MERGE_STRATEGY } from '~/vue_merge_request_widget/constants'; import { MTWPS_MERGE_STRATEGY } from '~/vue_merge_request_widget/constants';
import { TOTAL_SCORE_METRIC_NAME } from 'ee/vue_merge_request_widget/stores/constants';
export default { export default {
components: { components: {
...@@ -33,9 +32,9 @@ export default { ...@@ -33,9 +32,9 @@ export default {
data() { data() {
return { return {
isLoadingCodequality: false, isLoadingCodequality: false,
isLoadingPerformance: false, isLoadingBrowserPerformance: false,
loadingCodequalityFailed: false, loadingCodequalityFailed: false,
loadingPerformanceFailed: false, loadingBrowserPerformanceFailed: false,
loadingLicenseReportFailed: false, loadingLicenseReportFailed: false,
}; };
}, },
...@@ -59,36 +58,36 @@ export default { ...@@ -59,36 +58,36 @@ export default {
this.mr.codeclimateMetrics.resolvedIssues.length > 0)) this.mr.codeclimateMetrics.resolvedIssues.length > 0))
); );
}, },
hasPerformanceMetrics() { hasBrowserPerformanceMetrics() {
return ( return (
this.mr.performanceMetrics && this.mr.browserPerformanceMetrics?.degraded?.length > 0 ||
((this.mr.performanceMetrics.degraded && this.mr.performanceMetrics.degraded.length > 0) || this.mr.browserPerformanceMetrics?.improved?.length > 0 ||
(this.mr.performanceMetrics.improved && this.mr.performanceMetrics.improved.length > 0)) this.mr.browserPerformanceMetrics?.same?.length > 0
); );
}, },
hasPerformancePaths() { hasBrowserPerformancePaths() {
const { performance } = this.mr || {}; const browserPerformance = this.mr?.browserPerformance || {};
return Boolean(performance?.head_path && performance?.base_path); return Boolean(browserPerformance?.head_path && browserPerformance?.base_path);
}, },
degradedTotalScore() { degradedBrowserPerformanceTotalScore() {
return this.mr?.performanceMetrics?.degraded.find( return this.mr?.browserPerformanceMetrics?.degraded.find(
metric => metric.name === TOTAL_SCORE_METRIC_NAME, metric => metric.name === __('Total Score'),
); );
}, },
hasPerformanceDegradation() { hasBrowserPerformanceDegradation() {
const threshold = this.mr?.performance?.degradation_threshold || 0; const threshold = this.mr?.browserPerformance?.degradation_threshold || 0;
if (!threshold) { if (!threshold) {
return true; return true;
} }
const totalScoreDelta = this.degradedTotalScore?.delta || 0; const totalScoreDelta = this.degradedBrowserPerformanceTotalScore?.delta || 0;
return threshold + totalScoreDelta <= 0; return threshold + totalScoreDelta <= 0;
}, },
shouldRenderPerformance() { shouldRenderBrowserPerformance() {
return this.hasPerformancePaths && this.hasPerformanceDegradation; return this.hasBrowserPerformancePaths && this.hasBrowserPerformanceDegradation;
}, },
shouldRenderSecurityReport() { shouldRenderSecurityReport() {
const { enabledReports } = this.mr; const { enabledReports } = this.mr;
...@@ -139,37 +138,40 @@ export default { ...@@ -139,37 +138,40 @@ export default {
return {}; return {};
}, },
performanceText() { browserPerformanceText() {
const { improved, degraded } = this.mr.performanceMetrics; const { improved, degraded, same } = this.mr.browserPerformanceMetrics;
const text = []; const text = [];
const reportNumbers = [];
if (!improved.length && !degraded.length) {
text.push(s__('ciReport|No changes to performance metrics')); if (improved.length || degraded.length || same.length) {
} else if (improved.length || degraded.length) { text.push(s__('ciReport|Browser performance test metrics: '));
text.push(s__('ciReport|Performance metrics'));
if (degraded.length > 0)
if (improved.length) { reportNumbers.push(
text.push(n__(' improved on %d point', ' improved on %d points', improved.length)); sprintf(s__('ciReport|%{degradedNum} degraded'), { degradedNum: degraded.length }),
} );
if (same.length > 0)
if (improved.length > 0 && degraded.length > 0) { reportNumbers.push(sprintf(s__('ciReport|%{sameNum} same'), { sameNum: same.length }));
text.push(__(' and')); if (improved.length > 0)
} reportNumbers.push(
sprintf(s__('ciReport|%{improvedNum} improved'), { improvedNum: improved.length }),
if (degraded.length) { );
text.push(n__(' degraded on %d point', ' degraded on %d points', degraded.length)); } else {
} text.push(s__('ciReport|Browser performance test metrics: No changes'));
} }
return text.join(''); return [...text, reportNumbers.join(', ')].join('');
}, },
codequalityStatus() { codequalityStatus() {
return this.checkReportStatus(this.isLoadingCodequality, this.loadingCodequalityFailed); return this.checkReportStatus(this.isLoadingCodequality, this.loadingCodequalityFailed);
}, },
performanceStatus() { browserPerformanceStatus() {
return this.checkReportStatus(this.isLoadingPerformance, this.loadingPerformanceFailed); return this.checkReportStatus(
this.isLoadingBrowserPerformance,
this.loadingBrowserPerformanceFailed,
);
}, },
shouldRenderMergeTrainHelperText() { shouldRenderMergeTrainHelperText() {
...@@ -191,9 +193,9 @@ export default { ...@@ -191,9 +193,9 @@ export default {
this.fetchCodeQuality(); this.fetchCodeQuality();
} }
}, },
hasPerformancePaths(newVal) { hasBrowserPerformancePaths(newVal) {
if (newVal) { if (newVal) {
this.fetchPerformance(); this.fetchBrowserPerformance();
} }
}, },
}, },
...@@ -241,19 +243,20 @@ export default { ...@@ -241,19 +243,20 @@ export default {
}); });
}, },
fetchPerformance() { fetchBrowserPerformance() {
const { head_path, base_path } = this.mr.performance; const { head_path, base_path } = this.mr.browserPerformance;
this.isLoadingPerformance = true; this.isLoadingBrowserPerformance = true;
Promise.all([this.service.fetchReport(head_path), this.service.fetchReport(base_path)]) Promise.all([this.service.fetchReport(head_path), this.service.fetchReport(base_path)])
.then(values => { .then(values => {
this.mr.comparePerformanceMetrics(values[0], values[1]); this.mr.compareBrowserPerformanceMetrics(values[0], values[1]);
this.isLoadingPerformance = false;
}) })
.catch(() => { .catch(() => {
this.isLoadingPerformance = false; this.loadingBrowserPerformanceFailed = true;
this.loadingPerformanceFailed = true; })
.finally(() => {
this.isLoadingBrowserPerformance = false;
}); });
}, },
...@@ -308,16 +311,17 @@ export default { ...@@ -308,16 +311,17 @@ export default {
class="js-codequality-widget mr-widget-border-top mr-report" class="js-codequality-widget mr-widget-border-top mr-report"
/> />
<report-section <report-section
v-if="shouldRenderPerformance" v-if="shouldRenderBrowserPerformance"
:status="performanceStatus" :status="browserPerformanceStatus"
:loading-text="translateText('performance').loading" :loading-text="translateText('browser-performance').loading"
:error-text="translateText('performance').error" :error-text="translateText('browser-performance').error"
:success-text="performanceText" :success-text="browserPerformanceText"
:unresolved-issues="mr.performanceMetrics.degraded" :unresolved-issues="mr.browserPerformanceMetrics.degraded"
:resolved-issues="mr.performanceMetrics.improved" :resolved-issues="mr.browserPerformanceMetrics.improved"
:has-issues="hasPerformanceMetrics" :neutral-issues="mr.browserPerformanceMetrics.same"
:has-issues="hasBrowserPerformanceMetrics"
:component="$options.componentNames.PerformanceIssueBody" :component="$options.componentNames.PerformanceIssueBody"
class="js-performance-widget mr-widget-border-top mr-report" class="js-browser-performance-widget mr-widget-border-top mr-report"
/> />
<grouped-metrics-reports-app <grouped-metrics-reports-app
v-if="mr.metricsReportsPath" v-if="mr.metricsReportsPath"
......
/* eslint-disable import/prefer-default-export */
// This is the name of Sitespeed's Overall Score metric in the performance report
export const TOTAL_SCORE_METRIC_NAME = 'Total Score';
...@@ -29,7 +29,7 @@ export default class MergeRequestStore extends CEMergeRequestStore { ...@@ -29,7 +29,7 @@ export default class MergeRequestStore extends CEMergeRequestStore {
this.appUrl = gon && gon.gitlab_url; this.appUrl = gon && gon.gitlab_url;
this.initCodeclimate(data); this.initCodeclimate(data);
this.initPerformanceReport(data); this.initBrowserPerformanceReport(data);
this.licenseScanning = data.license_scanning; this.licenseScanning = data.license_scanning;
this.metricsReportsPath = data.metrics_reports_path; this.metricsReportsPath = data.metrics_reports_path;
...@@ -85,11 +85,12 @@ export default class MergeRequestStore extends CEMergeRequestStore { ...@@ -85,11 +85,12 @@ export default class MergeRequestStore extends CEMergeRequestStore {
}; };
} }
initPerformanceReport(data) { initBrowserPerformanceReport(data) {
this.performance = data.performance; this.browserPerformance = data.browser_performance;
this.performanceMetrics = { this.browserPerformanceMetrics = {
improved: [], improved: [],
degraded: [], degraded: [],
same: [],
}; };
} }
...@@ -119,11 +120,12 @@ export default class MergeRequestStore extends CEMergeRequestStore { ...@@ -119,11 +120,12 @@ export default class MergeRequestStore extends CEMergeRequestStore {
); );
} }
comparePerformanceMetrics(headMetrics, baseMetrics) { compareBrowserPerformanceMetrics(headMetrics, baseMetrics) {
const headMetricsIndexed = MergeRequestStore.normalizePerformanceMetrics(headMetrics); const headMetricsIndexed = MergeRequestStore.normalizeBrowserPerformanceMetrics(headMetrics);
const baseMetricsIndexed = MergeRequestStore.normalizePerformanceMetrics(baseMetrics); const baseMetricsIndexed = MergeRequestStore.normalizeBrowserPerformanceMetrics(baseMetrics);
const improved = []; const improved = [];
const degraded = []; const degraded = [];
const same = [];
Object.keys(headMetricsIndexed).forEach(subject => { Object.keys(headMetricsIndexed).forEach(subject => {
const subjectMetrics = headMetricsIndexed[subject]; const subjectMetrics = headMetricsIndexed[subject];
...@@ -150,18 +152,20 @@ export default class MergeRequestStore extends CEMergeRequestStore { ...@@ -150,18 +152,20 @@ export default class MergeRequestStore extends CEMergeRequestStore {
} else { } else {
degraded.push(metricData); degraded.push(metricData);
} }
} else {
same.push(metricData);
} }
} }
}); });
}); });
this.performanceMetrics = { improved, degraded }; this.browserPerformanceMetrics = { improved, degraded, same };
} }
// normalize performance metrics by indexing on performance subject and metric name // normalize browser performance metrics by indexing on performance subject and metric name
static normalizePerformanceMetrics(performanceData) { static normalizeBrowserPerformanceMetrics(browserPerformanceData) {
const indexedSubjects = {}; const indexedSubjects = {};
performanceData.forEach(({ subject, metrics }) => { browserPerformanceData.forEach(({ subject, metrics }) => {
const indexedMetrics = {}; const indexedMetrics = {};
metrics.forEach(({ name, ...data }) => { metrics.forEach(({ name, ...data }) => {
indexedMetrics[name] = data; indexedMetrics[name] = data;
......
...@@ -21,6 +21,7 @@ module EE ...@@ -21,6 +21,7 @@ module EE
DAST_REPORT_TYPES = %w[dast].freeze DAST_REPORT_TYPES = %w[dast].freeze
REQUIREMENTS_REPORT_FILE_TYPES = %w[requirements].freeze REQUIREMENTS_REPORT_FILE_TYPES = %w[requirements].freeze
COVERAGE_FUZZING_REPORT_TYPES = %w[coverage_fuzzing].freeze COVERAGE_FUZZING_REPORT_TYPES = %w[coverage_fuzzing].freeze
BROWSER_PERFORMANCE_REPORT_FILE_TYPES = %w[browser_performance performance].freeze
scope :project_id_in, ->(ids) { where(project_id: ids) } scope :project_id_in, ->(ids) { where(project_id: ids) }
scope :with_files_stored_remotely, -> { where(file_store: ::JobArtifactUploader::Store::REMOTE) } scope :with_files_stored_remotely, -> { where(file_store: ::JobArtifactUploader::Store::REMOTE) }
...@@ -64,6 +65,7 @@ module EE ...@@ -64,6 +65,7 @@ module EE
def self.associated_file_types_for(file_type) def self.associated_file_types_for(file_type)
return unless file_types.include?(file_type) return unless file_types.include?(file_type)
return LICENSE_SCANNING_REPORT_FILE_TYPES if LICENSE_SCANNING_REPORT_FILE_TYPES.include?(file_type) return LICENSE_SCANNING_REPORT_FILE_TYPES if LICENSE_SCANNING_REPORT_FILE_TYPES.include?(file_type)
return BROWSER_PERFORMANCE_REPORT_FILE_TYPES if BROWSER_PERFORMANCE_REPORT_FILE_TYPES.include?(file_type)
[file_type] [file_type]
end end
......
...@@ -45,6 +45,7 @@ module EE ...@@ -45,6 +45,7 @@ module EE
container_scanning: %i[container_scanning], container_scanning: %i[container_scanning],
dast: %i[dast], dast: %i[dast],
performance: %i[merge_request_performance_metrics], performance: %i[merge_request_performance_metrics],
browser_performance: %i[merge_request_performance_metrics],
license_management: %i[license_scanning], license_management: %i[license_scanning],
license_scanning: %i[license_scanning], license_scanning: %i[license_scanning],
metrics: %i[metrics_reports], metrics: %i[metrics_reports],
......
...@@ -34,13 +34,13 @@ module EE ...@@ -34,13 +34,13 @@ module EE
download_project_job_artifacts_path( download_project_job_artifacts_path(
job_artifact.project, job_artifact.project,
job_artifact.job, job_artifact.job,
file_type: file_type, file_type: job_artifact.file_type,
proxy: true) proxy: true)
end end
end end
def degradation_threshold def degradation_threshold(file_type)
if (job_artifact = batch_lookup_report_artifact_for_file_type(:performance)) && if (job_artifact = batch_lookup_report_artifact_for_file_type(file_type)) &&
can?(current_user, :read_build, job_artifact.job) can?(current_user, :read_build, job_artifact.job)
job_artifact.job.degradation_threshold job_artifact.job.degradation_threshold
end end
......
...@@ -26,18 +26,18 @@ module EE ...@@ -26,18 +26,18 @@ module EE
end end
end end
expose :performance, if: -> (mr, _) { head_pipeline_downloadable_path_for_report_type(:performance) } do expose :browser_performance, if: -> (mr, _) { head_pipeline_downloadable_path_for_report_type(:browser_performance) } do
expose :degradation_threshold do |merge_request| expose :degradation_threshold do |merge_request|
merge_request.head_pipeline&.present(current_user: current_user) merge_request.head_pipeline&.present(current_user: current_user)
&.degradation_threshold &.degradation_threshold(:browser_performance)
end end
expose :head_path do |merge_request| expose :head_path do |merge_request|
head_pipeline_downloadable_path_for_report_type(:performance) head_pipeline_downloadable_path_for_report_type(:browser_performance)
end end
expose :base_path do |merge_request| expose :base_path do |merge_request|
base_pipeline_downloadable_path_for_report_type(:performance) base_pipeline_downloadable_path_for_report_type(:browser_performance)
end end
end end
......
---
title: Rename the Browser Performance Testing feature for clarity; the CI report
  now also shows unchanged values
merge_request: 34634
author:
type: changed
...@@ -6,7 +6,7 @@ FactoryBot.define do ...@@ -6,7 +6,7 @@ FactoryBot.define do
failure_reason { Ci::Build.failure_reasons[:protected_environment_failure] } failure_reason { Ci::Build.failure_reasons[:protected_environment_failure] }
end end
%i[codequality container_scanning dast dependency_scanning license_management license_scanning performance sast secret_detection].each do |report_type| %i[codequality container_scanning dast dependency_scanning license_management license_scanning performance browser_performance sast secret_detection].each do |report_type|
trait "legacy_#{report_type}".to_sym do trait "legacy_#{report_type}".to_sym do
success success
artifacts artifacts
......
...@@ -221,6 +221,16 @@ FactoryBot.define do ...@@ -221,6 +221,16 @@ FactoryBot.define do
end end
end end
trait :browser_performance do
file_format { :raw }
file_type { :browser_performance }
after(:build) do |artifact, _|
artifact.file = fixture_file_upload(
Rails.root.join('spec/fixtures/trace/sample_trace'), 'text/plain')
end
end
trait :dependency_scanning do trait :dependency_scanning do
file_format { :raw } file_format { :raw }
file_type { :dependency_scanning } file_type { :dependency_scanning }
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
FactoryBot.define do FactoryBot.define do
factory :ee_ci_pipeline, class: 'Ci::Pipeline', parent: :ci_pipeline do factory :ee_ci_pipeline, class: 'Ci::Pipeline', parent: :ci_pipeline do
%i[container_scanning dast dependency_list dependency_scanning license_management license_scanning sast secret_detection coverage_fuzzing].each do |report_type| %i[browser_performance codequality container_scanning coverage_fuzzing dast dependency_list dependency_scanning license_management license_scanning sast secret_detection].each do |report_type|
trait "with_#{report_type}_report".to_sym do trait "with_#{report_type}_report".to_sym do
status { :success } status { :success }
......
...@@ -11,8 +11,8 @@ import { trimText } from 'helpers/text_helper'; ...@@ -11,8 +11,8 @@ import { trimText } from 'helpers/text_helper';
import mockData, { import mockData, {
baseIssues, baseIssues,
headIssues, headIssues,
basePerformance, baseBrowserPerformance,
headPerformance, headBrowserPerformance,
parsedBaseIssues, parsedBaseIssues,
parsedHeadIssues, parsedHeadIssues,
} from './mock_data'; } from './mock_data';
...@@ -40,7 +40,7 @@ describe('ee merge request widget options', () => { ...@@ -40,7 +40,7 @@ describe('ee merge request widget options', () => {
let mock; let mock;
let Component; let Component;
const DEFAULT_PERFORMANCE = { const DEFAULT_BROWSER_PERFORMANCE = {
head_path: 'head.json', head_path: 'head.json',
base_path: 'base.json', base_path: 'base.json',
}; };
...@@ -70,13 +70,13 @@ describe('ee merge request widget options', () => { ...@@ -70,13 +70,13 @@ describe('ee merge request widget options', () => {
}); });
}); });
const findPerformanceWidget = () => vm.$el.querySelector('.js-performance-widget'); const findBrowserPerformanceWidget = () => vm.$el.querySelector('.js-browser-performance-widget');
const findSecurityWidget = () => vm.$el.querySelector('.js-security-widget'); const findSecurityWidget = () => vm.$el.querySelector('.js-security-widget');
const setPerformance = (data = {}) => { const setBrowserPerformance = (data = {}) => {
const performance = { ...DEFAULT_PERFORMANCE, ...data }; const browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE, ...data };
gl.mrWidgetData.performance = performance; gl.mrWidgetData.browserPerformance = browserPerformance;
vm.mr.performance = performance; vm.mr.browserPerformance = browserPerformance;
}; };
const VULNERABILITY_FEEDBACK_ENDPOINT = 'vulnerability_feedback_path'; const VULNERABILITY_FEEDBACK_ENDPOINT = 'vulnerability_feedback_path';
...@@ -485,25 +485,25 @@ describe('ee merge request widget options', () => { ...@@ -485,25 +485,25 @@ describe('ee merge request widget options', () => {
}); });
}); });
describe('performance', () => { describe('browser_performance', () => {
beforeEach(() => { beforeEach(() => {
gl.mrWidgetData = { gl.mrWidgetData = {
...mockData, ...mockData,
performance: {}, browserPerformance: {},
}; };
}); });
describe('when it is loading', () => { describe('when it is loading', () => {
it('should render loading indicator', done => { it('should render loading indicator', done => {
mock.onGet('head.json').reply(200, headPerformance); mock.onGet('head.json').reply(200, headBrowserPerformance);
mock.onGet('base.json').reply(200, basePerformance); mock.onGet('base.json').reply(200, baseBrowserPerformance);
vm = mountComponent(Component, { mrData: gl.mrWidgetData }); vm = mountComponent(Component, { mrData: gl.mrWidgetData });
vm.mr.performance = { ...DEFAULT_PERFORMANCE }; vm.mr.browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE };
vm.$nextTick(() => { vm.$nextTick(() => {
expect(trimText(findPerformanceWidget().textContent)).toContain( expect(trimText(findBrowserPerformanceWidget().textContent)).toContain(
'Loading performance report', 'Loading browser-performance report',
); );
done(); done();
...@@ -513,21 +513,23 @@ describe('ee merge request widget options', () => { ...@@ -513,21 +513,23 @@ describe('ee merge request widget options', () => {
describe('with successful request', () => { describe('with successful request', () => {
beforeEach(() => { beforeEach(() => {
mock.onGet(DEFAULT_PERFORMANCE.head_path).reply(200, headPerformance); mock.onGet(DEFAULT_BROWSER_PERFORMANCE.head_path).reply(200, headBrowserPerformance);
mock.onGet(DEFAULT_PERFORMANCE.base_path).reply(200, basePerformance); mock.onGet(DEFAULT_BROWSER_PERFORMANCE.base_path).reply(200, baseBrowserPerformance);
vm = mountComponent(Component, { mrData: gl.mrWidgetData }); vm = mountComponent(Component, { mrData: gl.mrWidgetData });
}); });
describe('default', () => { describe('default', () => {
beforeEach(() => { beforeEach(() => {
setPerformance(); setBrowserPerformance();
}); });
it('should render provided data', done => { it('should render provided data', done => {
setImmediate(() => { setImmediate(() => {
expect( expect(
trimText(vm.$el.querySelector('.js-performance-widget .js-code-text').textContent), trimText(
).toEqual('Performance metrics improved on 2 points and degraded on 1 point'); vm.$el.querySelector('.js-browser-performance-widget .js-code-text').textContent,
),
).toEqual('Browser performance test metrics: 2 degraded, 1 same, 1 improved');
done(); done();
}); });
}); });
...@@ -535,14 +537,16 @@ describe('ee merge request widget options', () => { ...@@ -535,14 +537,16 @@ describe('ee merge request widget options', () => {
describe('text connector', () => { describe('text connector', () => {
it('should only render information about fixed issues', done => { it('should only render information about fixed issues', done => {
setImmediate(() => { setImmediate(() => {
vm.mr.performanceMetrics.degraded = []; vm.mr.browserPerformanceMetrics.degraded = [];
vm.mr.browserPerformanceMetrics.same = [];
Vue.nextTick(() => { Vue.nextTick(() => {
expect( expect(
trimText( trimText(
vm.$el.querySelector('.js-performance-widget .js-code-text').textContent, vm.$el.querySelector('.js-browser-performance-widget .js-code-text')
.textContent,
), ),
).toEqual('Performance metrics improved on 2 points'); ).toEqual('Browser performance test metrics: 1 improved');
done(); done();
}); });
}); });
...@@ -550,14 +554,16 @@ describe('ee merge request widget options', () => { ...@@ -550,14 +554,16 @@ describe('ee merge request widget options', () => {
it('should only render information about added issues', done => { it('should only render information about added issues', done => {
setImmediate(() => { setImmediate(() => {
vm.mr.performanceMetrics.improved = []; vm.mr.browserPerformanceMetrics.improved = [];
vm.mr.browserPerformanceMetrics.same = [];
Vue.nextTick(() => { Vue.nextTick(() => {
expect( expect(
trimText( trimText(
vm.$el.querySelector('.js-performance-widget .js-code-text').textContent, vm.$el.querySelector('.js-browser-performance-widget .js-code-text')
.textContent,
), ),
).toEqual('Performance metrics degraded on 1 point'); ).toEqual('Browser performance test metrics: 2 degraded');
done(); done();
}); });
}); });
...@@ -573,18 +579,18 @@ describe('ee merge request widget options', () => { ...@@ -573,18 +579,18 @@ describe('ee merge request widget options', () => {
'with degradation_threshold = $degradation_threshold', 'with degradation_threshold = $degradation_threshold',
({ degradation_threshold, shouldExist }) => { ({ degradation_threshold, shouldExist }) => {
beforeEach(() => { beforeEach(() => {
setPerformance({ degradation_threshold }); setBrowserPerformance({ degradation_threshold });
return waitForPromises(); return waitForPromises();
}); });
if (shouldExist) { if (shouldExist) {
it('should render widget when total score degradation is above threshold', () => { it('should render widget when total score degradation is above threshold', () => {
expect(findPerformanceWidget()).toExist(); expect(findBrowserPerformanceWidget()).toExist();
}); });
} else { } else {
it('should not render widget when total score degradation is below threshold', () => { it('should not render widget when total score degradation is below threshold', () => {
expect(findPerformanceWidget()).not.toExist(); expect(findBrowserPerformanceWidget()).not.toExist();
}); });
} }
}, },
...@@ -593,12 +599,12 @@ describe('ee merge request widget options', () => { ...@@ -593,12 +599,12 @@ describe('ee merge request widget options', () => {
describe('with empty successful request', () => { describe('with empty successful request', () => {
beforeEach(done => { beforeEach(done => {
mock.onGet(DEFAULT_PERFORMANCE.head_path).reply(200, []); mock.onGet(DEFAULT_BROWSER_PERFORMANCE.head_path).reply(200, []);
mock.onGet(DEFAULT_PERFORMANCE.base_path).reply(200, []); mock.onGet(DEFAULT_BROWSER_PERFORMANCE.base_path).reply(200, []);
vm = mountComponent(Component, { mrData: gl.mrWidgetData }); vm = mountComponent(Component, { mrData: gl.mrWidgetData });
gl.mrWidgetData.performance = { ...DEFAULT_PERFORMANCE }; gl.mrWidgetData.browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE };
vm.mr.performance = gl.mrWidgetData.performance; vm.mr.browserPerformance = gl.mrWidgetData.browserPerformance;
// wait for network request from component watch update method // wait for network request from component watch update method
setImmediate(done); setImmediate(done);
...@@ -606,38 +612,44 @@ describe('ee merge request widget options', () => { ...@@ -606,38 +612,44 @@ describe('ee merge request widget options', () => {
it('should render provided data', () => { it('should render provided data', () => {
expect( expect(
trimText(vm.$el.querySelector('.js-performance-widget .js-code-text').textContent), trimText(
).toEqual('No changes to performance metrics'); vm.$el.querySelector('.js-browser-performance-widget .js-code-text').textContent,
),
).toEqual('Browser performance test metrics: No changes');
}); });
it('does not show Expand button', () => { it('does not show Expand button', () => {
const expandButton = vm.$el.querySelector('.js-performance-widget .js-collapse-btn'); const expandButton = vm.$el.querySelector(
'.js-browser-performance-widget .js-collapse-btn',
);
expect(expandButton).toBeNull(); expect(expandButton).toBeNull();
}); });
it('shows success icon', () => { it('shows success icon', () => {
expect( expect(
vm.$el.querySelector('.js-performance-widget .js-ci-status-icon-success'), vm.$el.querySelector('.js-browser-performance-widget .js-ci-status-icon-success'),
).not.toBeNull(); ).not.toBeNull();
}); });
}); });
describe('with failed request', () => { describe('with failed request', () => {
beforeEach(() => { beforeEach(() => {
mock.onGet(DEFAULT_PERFORMANCE.head_path).reply(500, []); mock.onGet(DEFAULT_BROWSER_PERFORMANCE.head_path).reply(500, []);
mock.onGet(DEFAULT_PERFORMANCE.base_path).reply(500, []); mock.onGet(DEFAULT_BROWSER_PERFORMANCE.base_path).reply(500, []);
vm = mountComponent(Component, { mrData: gl.mrWidgetData }); vm = mountComponent(Component, { mrData: gl.mrWidgetData });
gl.mrWidgetData.performance = { ...DEFAULT_PERFORMANCE }; gl.mrWidgetData.browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE };
vm.mr.performance = gl.mrWidgetData.performance; vm.mr.browserPerformance = gl.mrWidgetData.browserPerformance;
}); });
it('should render error indicator', done => { it('should render error indicator', done => {
setImmediate(() => { setImmediate(() => {
expect( expect(
trimText(vm.$el.querySelector('.js-performance-widget .js-code-text').textContent), trimText(
).toContain('Failed to load performance report'); vm.$el.querySelector('.js-browser-performance-widget .js-code-text').textContent,
),
).toContain('Failed to load browser-performance report');
done(); done();
}); });
}); });
......
...@@ -99,63 +99,56 @@ export const parsedBaseIssues = [ ...@@ -99,63 +99,56 @@ export const parsedBaseIssues = [
}, },
]; ];
export const headPerformance = [ export const headBrowserPerformance = [
{ {
subject: '/some/path', subject: '/some/path',
metrics: [
{
name: 'Sitespeed Score',
value: 85,
},
],
},
{
subject: '/some/other/path',
metrics: [ metrics: [
{ {
name: 'Total Score', name: 'Total Score',
value: 79, value: 80,
desiredSize: 'larger', desiredSize: 'larger',
}, },
{ {
name: 'Requests', name: 'Requests',
value: 3, value: 30,
desiredSize: 'smaller', desiredSize: 'smaller',
}, },
],
},
{
subject: '/yet/another/path',
metrics: [
{ {
name: 'Sitespeed Score', name: 'Speed Index',
value: 80, value: 1155,
desiredSize: 'smaller',
},
{
name: 'Transfer Size (KB)',
value: '1070.1',
desiredSize: 'smaller',
}, },
], ],
}, },
]; ];
export const basePerformance = [ export const baseBrowserPerformance = [
{ {
subject: '/some/path', subject: '/some/path',
metrics: [
{
name: 'Sitespeed Score',
value: 84,
},
],
},
{
subject: '/some/other/path',
metrics: [ metrics: [
{ {
name: 'Total Score', name: 'Total Score',
value: 80, value: 82,
desiredSize: 'larger', desiredSize: 'larger',
}, },
{ {
name: 'Requests', name: 'Requests',
value: 4, value: 30,
desiredSize: 'smaller',
},
{
name: 'Speed Index',
value: 1165,
desiredSize: 'smaller',
},
{
name: 'Transfer Size (KB)',
value: '1065.1',
desiredSize: 'smaller', desiredSize: 'smaller',
}, },
], ],
......
...@@ -43,7 +43,7 @@ RSpec.describe 'Jobs/Browser-Performance-Testing.gitlab-ci.yml' do ...@@ -43,7 +43,7 @@ RSpec.describe 'Jobs/Browser-Performance-Testing.gitlab-ci.yml' do
expect(pipeline.errors).to be_empty expect(pipeline.errors).to be_empty
end end
shared_examples_for 'performance job on tag or branch' do shared_examples_for 'browser_performance job on tag or branch' do
it 'by default' do it 'by default' do
expect(build_names).to include('performance') expect(build_names).to include('performance')
end end
...@@ -56,19 +56,19 @@ RSpec.describe 'Jobs/Browser-Performance-Testing.gitlab-ci.yml' do ...@@ -56,19 +56,19 @@ RSpec.describe 'Jobs/Browser-Performance-Testing.gitlab-ci.yml' do
end end
context 'on master' do context 'on master' do
it_behaves_like 'performance job on tag or branch' it_behaves_like 'browser_performance job on tag or branch'
end end
context 'on another branch' do context 'on another branch' do
let(:pipeline_ref) { 'feature' } let(:pipeline_ref) { 'feature' }
it_behaves_like 'performance job on tag or branch' it_behaves_like 'browser_performance job on tag or branch'
end end
context 'on tag' do context 'on tag' do
let(:pipeline_ref) { 'v1.0.0' } let(:pipeline_ref) { 'v1.0.0' }
it_behaves_like 'performance job on tag or branch' it_behaves_like 'browser_performance job on tag or branch'
end end
context 'on merge request' do context 'on merge request' do
......
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Verify/Browser-Performance.gitlab-ci.yml' do
subject(:template) do
<<~YAML
stages:
- test
- performance
include:
- template: 'Verify/Browser-Performance.gitlab-ci.yml'
placeholder:
script:
- keep pipeline validator happy by having a job when stages are intentionally empty
YAML
end
describe 'the created pipeline' do
let(:user) { create(:admin) }
let(:project) { create(:project, :repository) }
let(:default_branch) { 'master' }
let(:pipeline_ref) { default_branch }
let(:service) { Ci::CreatePipelineService.new(project, user, ref: pipeline_ref) }
let(:pipeline) { service.execute!(:push) }
let(:build_names) { pipeline.builds.pluck(:name) }
before do
stub_ci_pipeline_yaml_file(template)
allow_any_instance_of(Ci::BuildScheduleWorker).to receive(:perform).and_return(true)
allow(project).to receive(:default_branch).and_return(default_branch)
end
it 'has no errors' do
expect(pipeline.errors).to be_empty
end
shared_examples_for 'browser_performance job on tag or branch' do
it 'by default' do
expect(build_names).to include('performance')
end
end
context 'on master' do
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on another branch' do
let(:pipeline_ref) { 'feature' }
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on tag' do
let(:pipeline_ref) { 'v1.0.0' }
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on merge request' do
let(:service) { MergeRequests::CreatePipelineService.new(project, user) }
let(:merge_request) { create(:merge_request, :simple, source_project: project) }
let(:pipeline) { service.execute(merge_request) }
it 'has no jobs' do
expect(pipeline).to be_merge_request_event
expect(build_names).to be_empty
end
end
end
end
@@ -63,55 +63,44 @@ RSpec.describe Ci::Pipeline do
   end

   describe '#batch_lookup_report_artifact_for_file_type' do
-    subject(:artifact) { pipeline.batch_lookup_report_artifact_for_file_type(file_type) }
-
-    context 'when feature is available' do
-      let(:build_artifact) { build.job_artifacts.sample }
-
-      context 'with security report artifact' do
-        let!(:build) { create(:ee_ci_build, :dependency_scanning, :success, pipeline: pipeline) }
-        let(:file_type) { :dependency_scanning }
-
-        before do
-          stub_licensed_features(dependency_scanning: true)
-        end
-
-        it 'returns right kind of artifacts' do
-          is_expected.to eq(build_artifact)
-        end
-      end
-
-      context 'when looking for other type of artifact' do
-        let(:file_type) { :codequality }
-
-        it 'returns nothing' do
-          is_expected.to be_nil
-        end
-      end
-    end
-
-    context 'with license compliance artifact' do
-      before do
-        stub_licensed_features(license_scanning: true)
-      end
-
-      [:license_management, :license_scanning].each do |artifact_type|
-        let!(:build) { create(:ee_ci_build, artifact_type, :success, pipeline: pipeline) }
-
-        context 'when looking for license_scanning' do
-          let(:file_type) { :license_scanning }
-
-          it 'returns artifact' do
-            is_expected.to eq(build_artifact)
-          end
-        end
-
-        context 'when looking for license_management' do
-          let(:file_type) { :license_management }
-
-          it 'returns artifact' do
-            is_expected.to eq(build_artifact)
-          end
-        end
-      end
-    end
+    shared_examples '#batch_lookup_report_artifact_for_file_type' do |file_type, license|
+      before do
+        stub_licensed_features("#{license}": true)
+      end
+
+      it "returns the #{file_type} artifact" do
+        expect(pipeline.batch_lookup_report_artifact_for_file_type(file_type)).to eq(pipeline.job_artifacts.sample)
+      end
+
+      context 'when feature is not available' do
+        before do
+          stub_licensed_features("#{license}": false)
+        end
+
+        it "doesn't return the #{file_type} artifact" do
+          expect(pipeline.batch_lookup_report_artifact_for_file_type(file_type)).to be_nil
+        end
+      end
+    end
+
+    context 'with security report artifact' do
+      let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_dependency_scanning_report, project: project) }
+
+      include_examples '#batch_lookup_report_artifact_for_file_type', :dependency_scanning, :dependency_scanning
+    end
+
+    context 'with license scanning artifact' do
+      let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_license_scanning_report, project: project) }
+
+      include_examples '#batch_lookup_report_artifact_for_file_type', :license_scanning, :license_scanning
+    end
+
+    context 'with browser performance artifact' do
+      let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_browser_performance_report, project: project) }
+
+      include_examples '#batch_lookup_report_artifact_for_file_type', :browser_performance, :merge_request_performance_metrics
+    end
   end
...
@@ -91,9 +91,10 @@ RSpec.describe EE::Ci::JobArtifact do
   subject { Ci::JobArtifact.associated_file_types_for(file_type) }

   where(:file_type, :result) do
     'license_scanning' | %w(license_management license_scanning)
     'codequality' | %w(codequality)
+    'browser_performance' | %w(browser_performance performance)
     'quality' | nil
   end

   with_them do
...
@@ -83,4 +83,103 @@ RSpec.describe Ci::PipelinePresenter do
    it { is_expected.to be_falsey }
  end
end
describe '#downloadable_path_for_report_type' do
let(:current_user) { create(:user) }
before do
allow(presenter).to receive(:current_user) { current_user }
end
shared_examples '#downloadable_path_for_report_type' do |file_type, license|
context 'when feature is available' do
before do
stub_licensed_features("#{license}": true)
project.add_reporter(current_user)
end
it 'returns the downloadable path' do
expect(presenter.downloadable_path_for_report_type(file_type)).to include(
"#{project.full_path}/-/jobs/#{pipeline.builds.last.id}/artifacts/download?file_type=#{pipeline.builds.last.job_artifacts.last.file_type}")
end
end
context 'when feature is not available' do
before do
stub_licensed_features("#{license}": false)
project.add_reporter(current_user)
end
it 'doesn\'t return the downloadable path' do
expect(presenter.downloadable_path_for_report_type(file_type)).to eq(nil)
end
end
context 'when user is not authorized' do
before do
stub_licensed_features("#{license}": true)
project.add_guest(current_user)
end
it 'doesn\'t return the downloadable path' do
expect(presenter.downloadable_path_for_report_type(file_type)).to eq(nil)
end
end
end
context 'with browser_performance artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_browser_performance_report, project: project) }
include_examples '#downloadable_path_for_report_type', :browser_performance, :merge_request_performance_metrics
end
context 'with license_scanning artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_license_scanning_report, project: project) }
include_examples '#downloadable_path_for_report_type', :license_scanning, :license_scanning
end
end
describe '#degradation_threshold' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_browser_performance_report, project: project) }
let(:current_user) { create(:user) }
before do
allow(presenter).to receive(:current_user) { current_user }
allow_any_instance_of(Ci::Build).to receive(:degradation_threshold).and_return(1)
end
context 'when feature is available' do
before do
project.add_reporter(current_user)
stub_licensed_features(merge_request_performance_metrics: true)
end
it 'returns the degradation threshold' do
expect(presenter.degradation_threshold(:browser_performance)).to eq(1)
end
end
context 'when feature is not available' do
before do
project.add_reporter(current_user)
stub_licensed_features(merge_request_performance_metrics: false)
end
it 'doesn\'t return the degradation threshold' do
expect(presenter.degradation_threshold(:browser_performance)).to eq(nil)
end
end
context 'when user is not authorized' do
before do
project.add_guest(current_user)
stub_licensed_features(merge_request_performance_metrics: true)
end
it 'doesn\'t return the degradation threshold' do
expect(presenter.degradation_threshold(:browser_performance)).to eq(nil)
end
end
end
end
@@ -32,7 +32,7 @@ RSpec.describe MergeRequestWidgetEntity do
   end

   def create_all_artifacts
-    artifacts = %i(codequality performance)
+    artifacts = %i(codequality performance browser_performance)

     artifacts.each do |artifact_type|
       create(:ee_ci_build, artifact_type, :success, pipeline: pipeline, project: pipeline.project)
@@ -63,8 +63,9 @@ RSpec.describe MergeRequestWidgetEntity do
   using RSpec::Parameterized::TableSyntax

   where(:json_entry, :artifact_type) do
     :codeclimate | :codequality
-    :performance | :performance
+    :browser_performance | :browser_performance
+    :browser_performance | :performance
   end

   with_them do
@@ -109,38 +110,59 @@ RSpec.describe MergeRequestWidgetEntity do
       )

       allow(head_pipeline).to receive(:available_licensed_report_type?).and_return(true)
-
-      create(
-        :ee_ci_build,
-        :performance,
-        pipeline: head_pipeline,
-        yaml_variables: yaml_variables
-      )
     end

-    context "when head pipeline's performance build has the threshold variable defined" do
-      let(:yaml_variables) do
-        [
-          { key: 'FOO', value: 'BAR' },
-          { key: 'DEGRADATION_THRESHOLD', value: '5' }
-        ]
-      end
-
-      it "returns the value of the variable" do
-        expect(subject.as_json[:performance][:degradation_threshold]).to eq(5)
-      end
-    end
-
-    context "when head pipeline's performance build has no threshold variable defined" do
-      let(:yaml_variables) do
-        [
-          { key: 'FOO', value: 'BAR' }
-        ]
-      end
-
-      it "returns nil" do
-        expect(subject.as_json[:performance][:degradation_threshold]).to be_nil
-      end
-    end
+    shared_examples 'degradation_threshold' do
+      context "when head pipeline's browser performance build has the threshold variable defined" do
+        let(:yaml_variables) do
+          [
+            { key: 'FOO', value: 'BAR' },
+            { key: 'DEGRADATION_THRESHOLD', value: '5' }
+          ]
+        end
+
+        it "returns the value of the variable" do
+          expect(subject.as_json[:browser_performance][:degradation_threshold]).to eq(5)
+        end
+      end
+
+      context "when head pipeline's browser performance build has no threshold variable defined" do
+        let(:yaml_variables) do
+          [
+            { key: 'FOO', value: 'BAR' }
+          ]
+        end
+
+        it "returns nil" do
+          expect(subject.as_json[:browser_performance][:degradation_threshold]).to be_nil
+        end
+      end
+    end
+
+    context 'with browser_performance artifact' do
+      before do
+        create(
+          :ee_ci_build,
+          :browser_performance,
+          pipeline: head_pipeline,
+          yaml_variables: yaml_variables
+        )
+      end
+
+      include_examples 'degradation_threshold'
+    end
+
+    context 'with performance artifact' do
+      before do
+        create(
+          :ee_ci_build,
+          :performance,
+          pipeline: head_pipeline,
+          yaml_variables: yaml_variables
+        )
+      end
+
+      include_examples 'degradation_threshold'
+    end
   end
...
@@ -13,7 +13,7 @@ module Gitlab
         ALLOWED_KEYS =
           %i[junit codequality sast secret_detection dependency_scanning container_scanning
-             dast performance license_management license_scanning metrics lsif
+             dast performance browser_performance license_management license_scanning metrics lsif
             dotenv cobertura terraform accessibility cluster_applications
             requirements coverage_fuzzing].freeze
@@ -33,6 +33,7 @@ module Gitlab
         validates :container_scanning, array_of_strings_or_string: true
         validates :dast, array_of_strings_or_string: true
         validates :performance, array_of_strings_or_string: true
+        validates :browser_performance, array_of_strings_or_string: true
         validates :license_management, array_of_strings_or_string: true
         validates :license_scanning, array_of_strings_or_string: true
         validates :metrics, array_of_strings_or_string: true
...
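For context, a job opts into the renamed report type through `artifacts:reports:browser_performance`. A minimal sketch, assuming a script that produces the report file; the job name and script line are illustrative and not part of this commit, while the `browser_performance` keyword and the `browser-performance.json` filename come from the change above:

browser_performance_check:
  stage: test
  script:
    - ./generate-report.sh  # hypothetical step that writes browser-performance.json
  artifacts:
    reports:
      browser_performance: browser-performance.json

Because both `performance` and `browser_performance` remain in ALLOWED_KEYS, existing jobs that declare `performance: performance.json` keep validating during the transition.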
+# Read more about the feature here: https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html
+
 performance:
   stage: performance
   image: docker:19.03.11
   allow_failure: true
   variables:
     DOCKER_TLS_CERTDIR: ""
-    SITESPEED_IMAGE: "sitespeedio/sitespeed.io:11.2.0"
+    SITESPEED_IMAGE: sitespeedio/sitespeed.io
+    SITESPEED_VERSION: 13.3.0
+    SITESPEED_OPTIONS: ''
   services:
     - docker:19.03.11-dind
   script:
@@ -16,22 +20,22 @@ performance:
       fi
     - export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
     - mkdir gitlab-exporter
-    - wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/1.0.0/index.js
+    - wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/1.0.1/index.js
     - mkdir sitespeed-results
+    - docker pull --quiet ${SITESPEED_IMAGE}
     - |
       if [ -f .gitlab-urls.txt ]
       then
         sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
-        docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io ${SITESPEED_IMAGE} --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
+        docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io $SITESPEED_IMAGE:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt $SITESPEED_OPTIONS
       else
-        docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io ${SITESPEED_IMAGE} --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
+        docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io $SITESPEED_IMAGE:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL" $SITESPEED_OPTIONS
       fi
-    - mv sitespeed-results/data/performance.json performance.json
+    - mv sitespeed-results/data/performance.json browser-performance.json
   artifacts:
     paths:
-      - performance.json
       - sitespeed-results/
+    reports:
+      browser_performance: browser-performance.json
   rules:
     - if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""'
       when: never
...
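Downstream projects would typically pull this job in via a template include and tune the new variables. A sketch, assuming the standard `include:template:` mechanism; the override values are examples only, not defaults set by this commit:

include:
  - template: Jobs/Browser-Performance-Testing.gitlab-ci.yml

performance:
  variables:
    SITESPEED_VERSION: 13.3.0               # pin the sitespeed.io release explicitly
    SITESPEED_OPTIONS: '--browser firefox'  # hypothetical extra sitespeed.io flags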
@@ -10,8 +10,9 @@ performance:
   stage: performance
   image: docker:git
   variables:
-    URL: https://example.com
-    SITESPEED_VERSION: 11.2.0
+    URL: ''
+    SITESPEED_IMAGE: sitespeedio/sitespeed.io
+    SITESPEED_VERSION: 13.3.0
     SITESPEED_OPTIONS: ''
   services:
     - docker:stable-dind
@@ -19,11 +20,10 @@ performance:
     - mkdir gitlab-exporter
     - wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
     - mkdir sitespeed-results
-    - docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL $SITESPEED_OPTIONS
+    - docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io $SITESPEED_IMAGE:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL $SITESPEED_OPTIONS
-    - mv sitespeed-results/data/performance.json performance.json
+    - mv sitespeed-results/data/performance.json browser-performance.json
   artifacts:
     paths:
-      - performance.json
       - sitespeed-results/
     reports:
-      performance: performance.json
+      browser_performance: browser-performance.json
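A minimal pipeline built on this template might look as follows, mirroring the include stanza exercised in the new spec above. The URL value is an example (it was the template's old default) and now has to be supplied explicitly, since the default changed to '':

stages:
  - performance

include:
  - template: Verify/Browser-Performance.gitlab-ci.yml

performance:
  variables:
    URL: https://example.com  # page to test; no longer defaulted by the template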
@@ -24446,6 +24446,9 @@ msgstr ""
 msgid "Total Contributions"
 msgstr ""

+msgid "Total Score"
+msgstr ""
+
 msgid "Total artifacts size: %{total_size}"
 msgstr ""
@@ -27076,6 +27079,12 @@ msgstr ""
 msgid "cannot merge"
 msgstr ""

+msgid "ciReport|%{degradedNum} degraded"
+msgstr ""
+
+msgid "ciReport|%{improvedNum} improved"
+msgstr ""
+
 msgid "ciReport|%{linkStartTag}Learn more about Container Scanning %{linkEndTag}"
 msgstr ""
@@ -27103,6 +27112,9 @@ msgstr ""
 msgid "ciReport|%{reportType}: Loading resulted in an error"
 msgstr ""

+msgid "ciReport|%{sameNum} same"
+msgstr ""
+
 msgid "ciReport|(errors when loading results)"
 msgstr ""
@@ -27127,6 +27139,12 @@ msgstr ""
 msgid "ciReport|Base pipeline codequality artifact not found"
 msgstr ""

+msgid "ciReport|Browser performance test metrics: "
+msgstr ""
+
+msgid "ciReport|Browser performance test metrics: No changes"
+msgstr ""
+
 msgid "ciReport|Code quality"
 msgstr ""
@@ -27199,15 +27217,9 @@ msgstr ""
 msgid "ciReport|No changes to code quality"
 msgstr ""

-msgid "ciReport|No changes to performance metrics"
-msgstr ""
-
 msgid "ciReport|No code quality issues found"
 msgstr ""

-msgid "ciReport|Performance metrics"
-msgstr ""
-
 msgid "ciReport|Resolve with merge request"
 msgstr ""
...
@@ -44,6 +44,8 @@ RSpec.describe Gitlab::Ci::Config::Entry::Reports do
     :license_management | 'gl-license-management-report.json'
     :license_scanning | 'gl-license-scanning-report.json'
     :performance | 'performance.json'
+    :browser_performance | 'browser-performance.json'
+    :browser_performance | 'performance.json'
     :lsif | 'lsif.json'
     :dotenv | 'build.dotenv'
     :cobertura | 'cobertura-coverage.xml'
...
@@ -190,6 +190,7 @@ RSpec.describe PlanLimits do
     ci_max_artifact_size_license_management
     ci_max_artifact_size_license_scanning
     ci_max_artifact_size_performance
+    ci_max_artifact_size_browser_performance
     ci_max_artifact_size_metrics
     ci_max_artifact_size_metrics_referee
     ci_max_artifact_size_network_referee
...
@@ -33,8 +33,8 @@ RSpec.describe Ci::RetryBuildService do
       job_artifacts_sast job_artifacts_secret_detection job_artifacts_dependency_scanning
       job_artifacts_container_scanning job_artifacts_dast
       job_artifacts_license_management job_artifacts_license_scanning
-      job_artifacts_performance job_artifacts_lsif
-      job_artifacts_terraform job_artifacts_cluster_applications
+      job_artifacts_performance job_artifacts_browser_performance
+      job_artifacts_lsif job_artifacts_terraform job_artifacts_cluster_applications
       job_artifacts_codequality job_artifacts_metrics scheduled_at
       job_variables waiting_for_resource_at job_artifacts_metrics_referee
       job_artifacts_network_referee job_artifacts_dotenv
...