Commit 136accf6 authored by Grant Young, committed by Sean McGivern

Switch Browser Perf jobs back to performance

Maintains compatibility for now; will be addressed later.
parent 7d7596f4
@@ -35,6 +35,7 @@ module Ci
license_scanning: 'gl-license-scanning-report.json',
performance: 'performance.json',
browser_performance: 'browser-performance.json',
load_performance: 'load-performance.json',
metrics: 'metrics.txt',
lsif: 'lsif.json',
dotenv: '.env',
@@ -75,6 +76,7 @@ module Ci
license_scanning: :raw,
performance: :raw,
browser_performance: :raw,
load_performance: :raw,
terraform: :raw,
requirements: :raw,
coverage_fuzzing: :raw
@@ -96,6 +98,7 @@ module Ci
metrics
performance
browser_performance
load_performance
sast
secret_detection
requirements
@@ -196,7 +199,8 @@ module Ci
secret_detection: 21, ## EE-specific
requirements: 22, ## EE-specific
coverage_fuzzing: 23, ## EE-specific
- browser_performance: 24 ## EE-specific
+ browser_performance: 24, ## EE-specific
+ load_performance: 25 ## EE-specific
}
enum file_format: {
...
# frozen_string_literal: true
class AddLoadPerformanceToPlanLimits < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
add_column :plan_limits, "ci_max_artifact_size_load_performance", :integer, default: 0, null: false
end
end
@@ -13815,7 +13815,8 @@ CREATE TABLE public.plan_limits (
ci_max_artifact_size_secret_detection integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_requirements integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_coverage_fuzzing integer DEFAULT 0 NOT NULL,
- ci_max_artifact_size_browser_performance integer DEFAULT 0 NOT NULL
+ ci_max_artifact_size_browser_performance integer DEFAULT 0 NOT NULL,
+ ci_max_artifact_size_load_performance integer DEFAULT 0 NOT NULL
);
CREATE SEQUENCE public.plan_limits_id_seq
@@ -23645,5 +23646,6 @@ COPY "schema_migrations" (version) FROM STDIN;
20200706170536
20200707071941
20200707094341
20200707095849
\.
@@ -257,6 +257,17 @@ as artifacts.
The collected Browser Performance report will be uploaded to GitLab as an artifact and will
be automatically shown in merge requests.
#### `artifacts:reports:load_performance` **(PREMIUM)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/35260) in [GitLab Premium](https://about.gitlab.com/pricing/) 13.2.
> - Requires GitLab Runner 11.5 and above.
The `load_performance` report collects [Load Performance Testing metrics](../../user/project/merge_requests/load_performance_testing.md)
as artifacts.
The report is uploaded to GitLab as an artifact and is
shown in merge requests automatically.
#### `artifacts:reports:metrics` **(PREMIUM)**
> Introduced in GitLab 11.10.
...
@@ -117,7 +117,7 @@ The following table lists available parameters for jobs:
| [`when`](#when) | When to run job. Also available: `when:manual` and `when:delayed`. |
| [`environment`](#environment) | Name of an environment to which the job deploys. Also available: `environment:name`, `environment:url`, `environment:on_stop`, `environment:auto_stop_in` and `environment:action`. |
| [`cache`](#cache) | List of files that should be cached between subsequent runs. Also available: `cache:paths`, `cache:key`, `cache:untracked`, and `cache:policy`. |
- | [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, `artifacts:reports`, `artifacts:reports:junit`, `artifacts:reports:cobertura`, and `artifacts:reports:terraform`.<br><br>In GitLab [Enterprise Edition](https://about.gitlab.com/pricing/), these are available: `artifacts:reports:codequality`, `artifacts:reports:sast`, `artifacts:reports:dependency_scanning`, `artifacts:reports:container_scanning`, `artifacts:reports:dast`, `artifacts:reports:license_scanning`, `artifacts:reports:license_management` (removed in GitLab 13.0), `artifacts:reports:performance` and `artifacts:reports:metrics`. |
+ | [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, `artifacts:reports`, `artifacts:reports:junit`, `artifacts:reports:cobertura`, and `artifacts:reports:terraform`.<br><br>In GitLab [Enterprise Edition](https://about.gitlab.com/pricing/), these are available: `artifacts:reports:codequality`, `artifacts:reports:sast`, `artifacts:reports:dependency_scanning`, `artifacts:reports:container_scanning`, `artifacts:reports:dast`, `artifacts:reports:license_scanning`, `artifacts:reports:license_management` (removed in GitLab 13.0), `artifacts:reports:performance`, `artifacts:reports:load_performance`, and `artifacts:reports:metrics`. |
| [`dependencies`](#dependencies) | Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. |
| [`coverage`](#coverage) | Code coverage settings for a given job. |
| [`retry`](#retry) | When and how many times a job can be auto-retried in case of a failure. |
@@ -3149,6 +3149,7 @@ These are the available report types:
| [`artifacts:reports:license_management`](../pipelines/job_artifacts.md#artifactsreportslicense_management-ultimate) **(ULTIMATE)** | The `license_management` report collects Licenses (*removed from GitLab 13.0*). |
| [`artifacts:reports:license_scanning`](../pipelines/job_artifacts.md#artifactsreportslicense_scanning-ultimate) **(ULTIMATE)** | The `license_scanning` report collects Licenses. |
| [`artifacts:reports:performance`](../pipelines/job_artifacts.md#artifactsreportsperformance-premium) **(PREMIUM)** | The `performance` report collects Browser Performance metrics. |
| [`artifacts:reports:load_performance`](../pipelines/job_artifacts.md#artifactsreportsload_performance-premium) **(PREMIUM)** | The `load_performance` report collects load performance metrics. |
| [`artifacts:reports:metrics`](../pipelines/job_artifacts.md#artifactsreportsmetrics-premium) **(PREMIUM)** | The `metrics` report collects Metrics. |
#### `dependencies`
...
@@ -364,7 +364,8 @@ The following table lists variables used to disable jobs.
| `DAST_DISABLED` | From GitLab 11.0, used to disable the `dast` job. If the variable is present, the job won't be created. |
| `DEPENDENCY_SCANNING_DISABLED` | From GitLab 11.0, used to disable the `dependency_scanning` job. If the variable is present, the job won't be created. |
| `LICENSE_MANAGEMENT_DISABLED` | From GitLab 11.0, used to disable the `license_management` job. If the variable is present, the job won't be created. |
- | `PERFORMANCE_DISABLED` | From GitLab 11.0, used to disable the `performance` job. If the variable is present, the job won't be created. |
+ | `PERFORMANCE_DISABLED` | From GitLab 11.0, used to disable the browser `performance` job. If the variable is present, the job won't be created. |
+ | `LOAD_PERFORMANCE_DISABLED` | From GitLab 13.2, used to disable the `load_performance` job. If the variable is present, the job won't be created. |
| `REVIEW_DISABLED` | From GitLab 11.0, used to disable the `review` and the manual `review:stop` job. If the variable is present, these jobs won't be created. |
| `SAST_DISABLED` | From GitLab 11.0, used to disable the `sast` job. If the variable is present, the job won't be created. |
| `TEST_DISABLED` | From GitLab 11.0, used to disable the `test` job. If the variable is present, the job won't be created. |
...
---
stage: Verify
group: Testing
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
type: reference, howto
---
# Load Performance Testing **(PREMIUM)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10683) in [GitLab Premium](https://about.gitlab.com/pricing/) 13.2.
With Load Performance Testing, you can test the impact of any pending code changes
to your application's backend in [GitLab CI/CD](../../../ci/README.md).
GitLab uses [k6](https://k6.io/), a free and open source
tool, for measuring the system performance of applications under
load.
Unlike [Browser Performance Testing](browser_performance_testing.md), which is
used to measure how web sites perform in client browsers, Load Performance Testing
can be used to perform various types of [load tests](https://k6.io/docs/#use-cases)
against application endpoints such as APIs, Web Controllers, and so on.
This can be used to test how the backend or the server performs at scale.
For example, you can use Load Performance Testing to perform many concurrent
GET calls to a popular API endpoint in your application to see how it performs.
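A minimal k6 sketch of that example follows; the endpoint URL and load figures are illustrative assumptions, not part of this change:

```javascript
import http from 'k6/http';

// Illustrative load only: 50 concurrent virtual users issuing GET calls
// to a hypothetical API endpoint for one minute.
export let options = { vus: 50, duration: '1m' };

export default function() {
  http.get('https://example.com/api/popular'); // hypothetical endpoint
}
```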
## How Load Performance Testing works
First, define a job in your `.gitlab-ci.yml` file that generates the
[Load Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsload_performance-premium).
GitLab checks this report, compares key load performance metrics
between the source and target branches, and then shows the information in a merge request widget:
![Load Performance Widget](img/load_performance_testing.png)
Next, you need to configure the test environment and write the k6 test.
The key performance metrics that the merge request widget shows after the test completes are:
- Checks: The percentage pass rate of the [checks](https://k6.io/docs/using-k6/checks) configured in the k6 test.
- TTFB P90: The 90th percentile of how long it took to start receiving responses, also known as the [Time to First Byte](https://en.wikipedia.org/wiki/Time_to_first_byte) (TTFB).
- TTFB P95: The 95th percentile for TTFB.
- RPS: The average requests per second (RPS) rate the test was able to achieve.
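For reference, a sketch of where these values come from in k6's summary export; the field names match the mapping applied by this commit's `normalizeLoadPerformanceMetrics` (RPS from `http_reqs.rate`, TTFB from the `http_req_waiting` percentiles, Checks from the pass/fail counts), while the sample numbers are illustrative:

```javascript
// Fragment of a k6 summary export and the widget metrics derived from it.
const summary = {
  metrics: {
    checks: { passes: 45, fails: 0 },                      // -> Checks
    http_req_waiting: { 'p(90)': 100.6, 'p(95)': 125.4 },  // -> TTFB P90 / P95
    http_reqs: { rate: 8.99 },                             // -> RPS
  },
};

const { checks, http_req_waiting, http_reqs } = summary.metrics;
const passRate = ((checks.passes / (checks.passes + checks.fails)) * 100).toFixed(2);
console.log(`Checks: ${passRate}%, TTFB P90: ${http_req_waiting['p(90)']}, RPS: ${http_reqs.rate}`);
```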
NOTE: **Note:**
If the Load Performance report has no data to compare, such as when you add the
Load Performance job in your `.gitlab-ci.yml` for the very first time,
the Load Performance report widget won't show. The job must have run at least
once on the target branch (`master`, for example) before results display in a
merge request targeting that branch.
## Configure the Load Performance Testing job
Configuring your Load Performance Testing job can be broken down into several distinct parts:
- Determine the test parameters, such as throughput.
- Set up the target test environment for load performance testing.
- Design and write the k6 test.
### Determine the test parameters
The first thing you need to do is determine the [type of load test](https://k6.io/docs/test-types/introduction)
you want to run, and how it will run (for example, the number of users, throughput, and so on).
Refer to the [k6 docs](https://k6.io/docs/), especially the [k6 testing guides](https://k6.io/docs/testing-guides),
for guidance on the above and more.
### Test Environment setup
A large part of the effort around load performance testing is to prepare the target test environment
for high loads. You should ensure it's able to handle the
[throughput](https://k6.io/blog/monthly-visits-concurrent-users) it will be tested with.
It's also typically required to have representative test data in the target environment
for the load performance test to use.
We strongly recommend [not running these tests against a production environment](https://k6.io/our-beliefs#load-test-in-a-pre-production-environment).
### Write the load performance test
After the environment is prepared, you can write the k6 test itself. k6 is a flexible
tool and can be used to run [many kinds of performance tests](https://k6.io/docs/test-types/introduction).
Refer to the [k6 documentation](https://k6.io/docs/) for detailed information on how to write tests.
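For example, a minimal test sketch, assuming a hypothetical staging endpoint; the check result feeds the Checks pass rate shown in the widget:

```javascript
import http from 'k6/http';
import { check, sleep } from 'k6';

// 10 virtual users for 30 seconds; adjust to the parameters determined above.
export let options = { vus: 10, duration: '30s' };

export default function() {
  const res = http.get('https://staging.example.com/api/items'); // hypothetical endpoint
  check(res, { 'status is 200': (r) => r.status === 200 });
  sleep(1);
}
```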
### Configure the test in GitLab CI/CD
When your k6 test is ready, the next step is to configure the load performance
testing job in GitLab CI/CD. The easiest way to do this is to use the
[`Verify/Load-Performance-Testing.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Load-Performance-Testing.gitlab-ci.yml)
template that is included with GitLab.
NOTE: **Note:**
For large scale k6 tests you need to ensure the GitLab Runner instance performing the actual
test is able to handle running the test. Refer to [k6's guidance](https://k6.io/docs/testing-guides/running-large-tests#hardware-considerations)
for spec details. The [default shared GitLab.com runners](../../gitlab_com/#linux-shared-runners)
likely have insufficient specs to handle most large k6 tests.
This template runs the
[k6 Docker container](https://hub.docker.com/r/loadimpact/k6/) in the job and provides several ways to customize the
job.
An example configuration workflow:
1. Set up a GitLab Runner that can run Docker containers, such as a Runner using the
[Docker-in-Docker workflow](../../../ci/docker/using_docker_build.md#use-docker-in-docker-workflow-with-docker-executor).
1. Configure the default Load Performance Testing CI job in your `.gitlab-ci.yml` file.
You need to include the template and configure it with variables:
```yaml
include:
template: Verify/Load-Performance-Testing.gitlab-ci.yml
load_performance:
variables:
K6_TEST_FILE: <PATH TO K6 TEST FILE IN PROJECT>
```
The above example creates a `load_performance` job in your CI/CD pipeline that runs
the k6 test.
NOTE: **Note:**
For Kubernetes setups a different template should be used: [`Jobs/Load-Performance-Testing.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Load-Performance-Testing.gitlab-ci.yml).
k6 has [various options](https://k6.io/docs/using-k6/options) to configure how it runs tests, such as what throughput (RPS) to run with,
how long the test should run, and so on. Almost all options can be configured in the test itself, but
you can also pass command line options via the `K6_OPTIONS` variable.
For example, you can override the duration of the test with a CLI option:
```yaml
include:
template: Verify/Load-Performance-Testing.gitlab-ci.yml
load_performance:
variables:
K6_TEST_FILE: <PATH TO K6 TEST FILE IN PROJECT>
K6_OPTIONS: '--duration 30s'
```
GitLab only displays the key performance metrics in the MR widget if k6's results are saved
via [summary export](https://k6.io/docs/results-visualization/json#summary-export)
as a [Load Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsload_performance-premium).
The latest Load Performance artifact available is always used.
If [GitLab Pages](../pages/index.md) is enabled, you can view the report directly in your browser.
### Load Performance testing in Review Apps
The CI/CD YAML configuration example above works for testing against static environments,
but it can be extended to work with [review apps](../../../ci/review_apps) or
[dynamic environments](../../../ci/environments) with a few extra steps.
The best approach is to capture the dynamic URL into a custom environment variable that
is then [inherited](../../../ci/variables/README.md#inherit-environment-variables)
by the `load_performance` job. The k6 test script to be run should then be configured to
use that environment URL, such as: ``http.get(`${__ENV.ENVIRONMENT_URL}`)``.
For example:
1. In the `review` job:
1. Capture the dynamic URL and save it into a `.env` file, e.g. `echo "ENVIRONMENT_URL=$CI_ENVIRONMENT_URL" >> review.env`.
1. Set the `.env` file to be an [`artifacts:reports:dotenv` report](../../../ci/variables/README.md#inherit-environment-variables).
1. Set the `load_performance` job to depend on the review job, so it inherits the environment variable.
1. Configure the k6 test script to use the environment variable in its steps.
Your `.gitlab-ci.yml` file might be similar to:
```yaml
stages:
- deploy
- performance
include:
template: Verify/Load-Performance-Testing.gitlab-ci.yml
review:
stage: deploy
environment:
name: review/$CI_COMMIT_REF_NAME
url: http://$CI_ENVIRONMENT_SLUG.example.com
script:
- run_deploy_script
- echo "ENVIRONMENT_URL=$CI_ENVIRONMENT_URL" >> review.env
artifacts:
reports:
dotenv:
review.env
rules:
- if: '$CI_COMMIT_BRANCH' # Modify to match your pipeline rules, or use `only/except` if needed.
load_performance:
dependencies:
- review
rules:
- if: '$CI_COMMIT_BRANCH' # Modify to match your pipeline rules, or use `only/except` if needed.
```
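A matching k6 script would then read the inherited URL from the environment, along the lines of this sketch (the check is illustrative):

```javascript
import http from 'k6/http';
import { check } from 'k6';

// ENVIRONMENT_URL is captured by the review job above and inherited
// through the dotenv report.
export default function() {
  const res = http.get(`${__ENV.ENVIRONMENT_URL}`);
  check(res, { 'status is 200': (r) => r.status === 200 });
}
```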
@@ -6,8 +6,8 @@
import ReportLink from '~/reports/components/report_link.vue';
function formatScore(value) {
- if (Math.floor(value) !== value) {
- return parseFloat(value).toFixed(2);
+ if (Number(value) && !Number.isInteger(value)) {
+ return (Math.floor(parseFloat(value) * 100) / 100).toFixed(2);
}
return value;
}
...
@@ -33,8 +33,10 @@ export default {
return {
isLoadingCodequality: false,
isLoadingBrowserPerformance: false,
isLoadingLoadPerformance: false,
loadingCodequalityFailed: false,
loadingBrowserPerformanceFailed: false,
loadingLoadPerformanceFailed: false,
loadingLicenseReportFailed: false,
};
},
@@ -89,6 +91,18 @@ export default {
shouldRenderBrowserPerformance() {
return this.hasBrowserPerformancePaths && this.hasBrowserPerformanceDegradation;
},
hasLoadPerformanceMetrics() {
return (
this.mr.loadPerformanceMetrics?.degraded?.length > 0 ||
this.mr.loadPerformanceMetrics?.improved?.length > 0 ||
this.mr.loadPerformanceMetrics?.same?.length > 0
);
},
hasLoadPerformancePaths() {
const loadPerformance = this.mr?.loadPerformance || {};
return Boolean(loadPerformance.head_path && loadPerformance.base_path);
},
shouldRenderSecurityReport() {
const { enabledReports } = this.mr;
return (
@@ -163,6 +177,31 @@ export default {
return [...text, ...reportNumbers.join(', ')].join('');
},
loadPerformanceText() {
const { improved, degraded, same } = this.mr.loadPerformanceMetrics;
const text = [];
const reportNumbers = [];
if (improved.length || degraded.length || same.length) {
text.push(s__('ciReport|Load performance test metrics: '));
if (degraded.length > 0)
reportNumbers.push(
sprintf(s__('ciReport|%{degradedNum} degraded'), { degradedNum: degraded.length }),
);
if (same.length > 0)
reportNumbers.push(sprintf(s__('ciReport|%{sameNum} same'), { sameNum: same.length }));
if (improved.length > 0)
reportNumbers.push(
sprintf(s__('ciReport|%{improvedNum} improved'), { improvedNum: improved.length }),
);
} else {
text.push(s__('ciReport|Load performance test metrics: No changes'));
}
return [...text, ...reportNumbers.join(', ')].join('');
},
codequalityStatus() {
return this.checkReportStatus(this.isLoadingCodequality, this.loadingCodequalityFailed);
},
@@ -174,6 +213,13 @@
);
},
loadPerformanceStatus() {
return this.checkReportStatus(
this.isLoadingLoadPerformance,
this.loadingLoadPerformanceFailed,
);
},
shouldRenderMergeTrainHelperText() {
return (
this.mr.pipeline &&
@@ -198,6 +244,11 @@
this.fetchBrowserPerformance();
}
},
hasLoadPerformancePaths(newVal) {
if (newVal) {
this.fetchLoadPerformance();
}
},
},
methods: {
getServiceEndpoints(store) {
@@ -260,6 +311,23 @@
});
},
fetchLoadPerformance() {
const { head_path, base_path } = this.mr.loadPerformance;
this.isLoadingLoadPerformance = true;
Promise.all([this.service.fetchReport(head_path), this.service.fetchReport(base_path)])
.then(values => {
this.mr.compareLoadPerformanceMetrics(values[0], values[1]);
})
.catch(() => {
this.loadingLoadPerformanceFailed = true;
})
.finally(() => {
this.isLoadingLoadPerformance = false;
});
},
translateText(type) {
return {
error: sprintf(s__('ciReport|Failed to load %{reportName} report'), {
@@ -323,6 +391,19 @@
:component="$options.componentNames.PerformanceIssueBody"
class="js-browser-performance-widget mr-widget-border-top mr-report"
/>
<report-section
v-if="hasLoadPerformancePaths"
:status="loadPerformanceStatus"
:loading-text="translateText('load-performance').loading"
:error-text="translateText('load-performance').error"
:success-text="loadPerformanceText"
:unresolved-issues="mr.loadPerformanceMetrics.degraded"
:resolved-issues="mr.loadPerformanceMetrics.improved"
:neutral-issues="mr.loadPerformanceMetrics.same"
:has-issues="hasLoadPerformanceMetrics"
:component="$options.componentNames.PerformanceIssueBody"
class="js-load-performance-widget mr-widget-border-top mr-report"
/>
<grouped-metrics-reports-app
v-if="mr.metricsReportsPath"
:endpoint="mr.metricsReportsPath"
...
@@ -2,6 +2,7 @@ import CEMergeRequestStore from '~/vue_merge_request_widget/stores/mr_widget_sto
import { convertObjectPropsToCamelCase } from '~/lib/utils/common_utils';
import { mapApprovalsResponse, mapApprovalRulesResponse } from '../mappers';
import CodeQualityComparisonWorker from '../workers/code_quality_comparison_worker';
import { s__ } from '~/locale';
export default class MergeRequestStore extends CEMergeRequestStore {
constructor(data) {
@@ -30,6 +31,7 @@ export default class MergeRequestStore extends CEMergeRequestStore {
this.initCodeclimate(data);
this.initBrowserPerformanceReport(data);
this.initLoadPerformanceReport(data);
this.licenseScanning = data.license_scanning;
this.metricsReportsPath = data.metrics_reports_path;
@@ -94,6 +96,15 @@ export default class MergeRequestStore extends CEMergeRequestStore {
};
}
initLoadPerformanceReport(data) {
this.loadPerformance = data.load_performance;
this.loadPerformanceMetrics = {
improved: [],
degraded: [],
same: [],
};
}
static doCodeClimateComparison(headIssues, baseIssues) {
// Do these comparisons in worker threads to avoid blocking the main thread
return new Promise((resolve, reject) => {
@@ -176,6 +187,72 @@ export default class MergeRequestStore extends CEMergeRequestStore {
return indexedSubjects;
}
compareLoadPerformanceMetrics(headMetrics, baseMetrics) {
const headMetricsIndexed = MergeRequestStore.normalizeLoadPerformanceMetrics(headMetrics);
const baseMetricsIndexed = MergeRequestStore.normalizeLoadPerformanceMetrics(baseMetrics);
const improved = [];
const degraded = [];
const same = [];
Object.keys(headMetricsIndexed).forEach(metric => {
const headMetricData = headMetricsIndexed[metric];
if (metric in baseMetricsIndexed) {
const baseMetricData = baseMetricsIndexed[metric];
const metricData = {
name: metric,
score: headMetricData,
delta: parseFloat((parseFloat(headMetricData) - parseFloat(baseMetricData)).toFixed(2)),
};
if (metricData.delta !== 0.0) {
const isImproved = [s__('ciReport|RPS'), s__('ciReport|Checks')].includes(metric)
? metricData.delta > 0
: metricData.delta < 0;
if (isImproved) {
improved.push(metricData);
} else {
degraded.push(metricData);
}
} else {
same.push(metricData);
}
}
});
this.loadPerformanceMetrics = { improved, degraded, same };
}
// normalize load performance metrics for consumption
static normalizeLoadPerformanceMetrics(loadPerformanceData) {
if (!('metrics' in loadPerformanceData)) return {};
const { metrics } = loadPerformanceData;
const indexedMetrics = {};
Object.keys(loadPerformanceData.metrics).forEach(metric => {
switch (metric) {
case 'http_reqs':
indexedMetrics[s__('ciReport|RPS')] = metrics.http_reqs.rate;
break;
case 'http_req_waiting':
indexedMetrics[s__('ciReport|TTFB P90')] = metrics.http_req_waiting['p(90)'];
indexedMetrics[s__('ciReport|TTFB P95')] = metrics.http_req_waiting['p(95)'];
break;
case 'checks':
indexedMetrics[s__('ciReport|Checks')] = `${(
(metrics.checks.passes / (metrics.checks.passes + metrics.checks.fails)) *
100.0
).toFixed(2)}%`;
break;
default:
break;
}
});
return indexedMetrics;
}
static parseCodeclimateMetrics(issues = [], path = '') {
return issues.map(issue => {
const parsedIssue = {
...
@@ -46,6 +46,7 @@ module EE
dast: %i[dast],
performance: %i[merge_request_performance_metrics],
browser_performance: %i[merge_request_performance_metrics],
load_performance: %i[merge_request_performance_metrics],
license_management: %i[license_scanning],
license_scanning: %i[license_scanning],
metrics: %i[metrics_reports],
...
@@ -41,6 +41,16 @@ module EE
end
end
expose :load_performance, if: -> (mr, _) { head_pipeline_downloadable_path_for_report_type(:load_performance) } do
expose :head_path do |merge_request|
head_pipeline_downloadable_path_for_report_type(:load_performance)
end
expose :base_path do |merge_request|
base_pipeline_downloadable_path_for_report_type(:load_performance)
end
end
expose :enabled_reports do |merge_request|
merge_request.enabled_reports
end
...
---
title: Added MR Load Performance Testing feature
merge_request: 35260
author:
type: added
@@ -6,7 +6,7 @@ FactoryBot.define do
failure_reason { Ci::Build.failure_reasons[:protected_environment_failure] }
end
- %i[codequality container_scanning dast dependency_scanning license_management license_scanning performance browser_performance sast secret_detection].each do |report_type|
+ %i[codequality container_scanning dast dependency_scanning license_management license_scanning performance browser_performance load_performance sast secret_detection].each do |report_type|
trait "legacy_#{report_type}".to_sym do
success
artifacts
...
@@ -231,6 +231,16 @@ FactoryBot.define do
end
end
trait :load_performance do
file_format { :raw }
file_type { :load_performance }
after(:build) do |artifact, _|
artifact.file = fixture_file_upload(
Rails.root.join('spec/fixtures/trace/sample_trace'), 'text/plain')
end
end
trait :dependency_scanning do
file_format { :raw }
file_type { :dependency_scanning }
...
@@ -2,7 +2,7 @@
FactoryBot.define do
factory :ee_ci_pipeline, class: 'Ci::Pipeline', parent: :ci_pipeline do
- %i[browser_performance codequality container_scanning coverage_fuzzing dast dependency_list dependency_scanning license_management license_scanning sast secret_detection].each do |report_type|
+ %i[browser_performance codequality container_scanning coverage_fuzzing dast dependency_list dependency_scanning license_management license_scanning load_performance sast secret_detection].each do |report_type|
trait "with_#{report_type}_report".to_sym do
status { :success }
...
@@ -33,6 +33,6 @@ describe('performance issue body', () => {
});
it('renders issue delta formatted', () => {
- expect(wrapper.text()).toContain('(+0.20)');
+ expect(wrapper.text()).toContain('(+0.19)');
});
});
...
@@ -13,6 +13,8 @@ import mockData, {
headIssues,
baseBrowserPerformance,
headBrowserPerformance,
baseLoadPerformance,
headLoadPerformance,
parsedBaseIssues,
parsedHeadIssues,
} from './mock_data';
@@ -45,6 +47,11 @@ describe('ee merge request widget options', () => {
base_path: 'base.json',
};
const DEFAULT_LOAD_PERFORMANCE = {
head_path: 'head.json',
base_path: 'base.json',
};
beforeEach(() => {
delete mrWidgetOptions.extends.el; // Prevent component mounting
@@ -71,6 +78,7 @@ describe('ee merge request widget options', () => {
});
const findBrowserPerformanceWidget = () => vm.$el.querySelector('.js-browser-performance-widget');
const findLoadPerformanceWidget = () => vm.$el.querySelector('.js-load-performance-widget');
const findSecurityWidget = () => vm.$el.querySelector('.js-security-widget');
const setBrowserPerformance = (data = {}) => {
@@ -79,6 +87,12 @@
vm.mr.browserPerformance = browserPerformance;
};
const setLoadPerformance = (data = {}) => {
const loadPerformance = { ...DEFAULT_LOAD_PERFORMANCE, ...data };
gl.mrWidgetData.loadPerformance = loadPerformance;
vm.mr.loadPerformance = loadPerformance;
};
const VULNERABILITY_FEEDBACK_ENDPOINT = 'vulnerability_feedback_path';
describe('SAST', () => {
@@ -656,6 +670,138 @@
});
});
describe('load_performance', () => {
beforeEach(() => {
gl.mrWidgetData = {
...mockData,
loadPerformance: {},
};
});
describe('when it is loading', () => {
it('should render loading indicator', done => {
mock.onGet(DEFAULT_LOAD_PERFORMANCE.head_path).reply(200, headLoadPerformance);
mock.onGet(DEFAULT_LOAD_PERFORMANCE.base_path).reply(200, baseLoadPerformance);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
vm.mr.loadPerformance = { ...DEFAULT_LOAD_PERFORMANCE };
vm.$nextTick(() => {
expect(trimText(findLoadPerformanceWidget().textContent)).toContain(
'Loading load-performance report',
);
done();
});
});
});
describe('with successful request', () => {
beforeEach(() => {
mock.onGet(DEFAULT_LOAD_PERFORMANCE.head_path).reply(200, headLoadPerformance);
mock.onGet(DEFAULT_LOAD_PERFORMANCE.base_path).reply(200, baseLoadPerformance);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
});
describe('default', () => {
beforeEach(done => {
setLoadPerformance();
// wait for network request from component watch update method
setImmediate(done);
});
it('should render provided data', () => {
expect(
trimText(vm.$el.querySelector('.js-load-performance-widget .js-code-text').textContent),
).toBe('Load performance test metrics: 1 degraded, 1 same, 2 improved');
});
describe('text connector', () => {
it('should only render information about fixed issues', done => {
vm.mr.loadPerformanceMetrics.degraded = [];
vm.mr.loadPerformanceMetrics.same = [];
Vue.nextTick(() => {
expect(
trimText(
vm.$el.querySelector('.js-load-performance-widget .js-code-text').textContent,
),
).toBe('Load performance test metrics: 2 improved');
done();
});
});
it('should only render information about added issues', done => {
vm.mr.loadPerformanceMetrics.improved = [];
vm.mr.loadPerformanceMetrics.same = [];
Vue.nextTick(() => {
expect(
trimText(
vm.$el.querySelector('.js-load-performance-widget .js-code-text').textContent,
),
).toBe('Load performance test metrics: 1 degraded');
done();
});
});
});
});
});
describe('with empty successful request', () => {
beforeEach(done => {
mock.onGet(DEFAULT_LOAD_PERFORMANCE.head_path).reply(200, {});
mock.onGet(DEFAULT_LOAD_PERFORMANCE.base_path).reply(200, {});
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
gl.mrWidgetData.loadPerformance = { ...DEFAULT_LOAD_PERFORMANCE };
vm.mr.loadPerformance = gl.mrWidgetData.loadPerformance;
// wait for network request from component watch update method
setImmediate(done);
});
it('should render provided data', () => {
expect(
trimText(vm.$el.querySelector('.js-load-performance-widget .js-code-text').textContent),
).toBe('Load performance test metrics: No changes');
});
it('does not show Expand button', () => {
const expandButton = vm.$el.querySelector('.js-load-performance-widget .js-collapse-btn');
expect(expandButton).toBeNull();
});
it('shows success icon', () => {
expect(
vm.$el.querySelector('.js-load-performance-widget .js-ci-status-icon-success'),
).not.toBeNull();
});
});
describe('with failed request', () => {
beforeEach(() => {
mock.onGet(DEFAULT_LOAD_PERFORMANCE.head_path).reply(500, []);
mock.onGet(DEFAULT_LOAD_PERFORMANCE.base_path).reply(500, []);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
gl.mrWidgetData.loadPerformance = { ...DEFAULT_LOAD_PERFORMANCE };
vm.mr.loadPerformance = gl.mrWidgetData.loadPerformance;
});
it('should render error indicator', done => {
setImmediate(() => {
expect(
trimText(vm.$el.querySelector('.js-load-performance-widget .js-code-text').textContent),
).toContain('Failed to load load-performance report');
done();
});
});
});
});
describe('Container Scanning', () => {
const CONTAINER_SCANNING_ENDPOINT = 'container_scanning';
...
@@ -99,6 +99,7 @@ export const parsedBaseIssues = [
},
];
// Browser Performance Testing
export const headBrowserPerformance = [
{
subject: '/some/path',
@@ -155,6 +156,51 @@ export const baseBrowserPerformance = [
},
];
// Load Performance Testing
export const headLoadPerformance = {
metrics: {
checks: {
fails: 0,
passes: 45,
value: 0,
},
http_req_waiting: {
avg: 104.3543911111111,
max: 247.8693,
med: 99.1985,
min: 98.1397,
'p(90)': 100.60016,
'p(95)': 125.45588000000023,
},
http_reqs: {
count: 45,
rate: 8.999484329547917,
},
},
};
export const baseLoadPerformance = {
metrics: {
checks: {
fails: 0,
passes: 39,
value: 0,
},
http_req_waiting: {
avg: 118.28965641025643,
max: 674.4383,
med: 98.2503,
min: 97.1357,
'p(90)': 104.09862000000001,
'p(95)': 101.22848,
},
http_reqs: {
count: 39,
rate: 7.799590989448514,
},
},
};
export const codequalityParsedIssues = [
{
name: 'Insecure Dependency',
...
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Jobs/Load-Performance-Testing.gitlab-ci.yml' do
subject(:template) do
<<~YAML
stages:
- test
- performance
include:
- template: 'Jobs/Load-Performance-Testing.gitlab-ci.yml'
placeholder:
script:
- keep pipeline validator happy by having a job when stages are intentionally empty
YAML
end
describe 'the created pipeline' do
let(:user) { create(:admin) }
let(:project) do
create(:project, :repository, variables: [
build(:ci_variable, key: 'CI_KUBERNETES_ACTIVE', value: 'true')
])
end
let(:default_branch) { 'master' }
let(:pipeline_ref) { default_branch }
let(:service) { Ci::CreatePipelineService.new(project, user, ref: pipeline_ref) }
let(:pipeline) { service.execute!(:push) }
let(:build_names) { pipeline.builds.pluck(:name) }
before do
stub_ci_pipeline_yaml_file(template)
allow_any_instance_of(Ci::BuildScheduleWorker).to receive(:perform).and_return(true)
allow(project).to receive(:default_branch).and_return(default_branch)
end
it 'has no errors' do
expect(pipeline.errors).to be_empty
end
shared_examples_for 'load_performance job on tag or branch' do
it 'by default' do
expect(build_names).to include('load_performance')
end
it 'when LOAD_PERFORMANCE_DISABLED' do
create(:ci_variable, project: project, key: 'LOAD_PERFORMANCE_DISABLED', value: '1')
expect(build_names).not_to include('load_performance')
end
end
context 'on master' do
it_behaves_like 'load_performance job on tag or branch'
end
context 'on another branch' do
let(:pipeline_ref) { 'feature' }
it_behaves_like 'load_performance job on tag or branch'
end
context 'on tag' do
let(:pipeline_ref) { 'v1.0.0' }
it_behaves_like 'load_performance job on tag or branch'
end
context 'on merge request' do
let(:service) { MergeRequests::CreatePipelineService.new(project, user) }
let(:merge_request) { create(:merge_request, :simple, source_project: project) }
let(:pipeline) { service.execute(merge_request) }
it 'has no jobs' do
expect(pipeline).to be_merge_request_event
expect(build_names).to be_empty
end
end
end
end
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Verify/Load-Performance-Testing.gitlab-ci.yml' do
subject(:template) do
<<~YAML
stages:
- test
- performance
include:
- template: 'Verify/Load-Performance-Testing.gitlab-ci.yml'
placeholder:
script:
- keep pipeline validator happy by having a job when stages are intentionally empty
YAML
end
describe 'the created pipeline' do
let(:user) { create(:admin) }
let(:project) { create(:project, :repository) }
let(:default_branch) { 'master' }
let(:pipeline_ref) { default_branch }
let(:service) { Ci::CreatePipelineService.new(project, user, ref: pipeline_ref) }
let(:pipeline) { service.execute!(:push) }
let(:build_names) { pipeline.builds.pluck(:name) }
before do
stub_ci_pipeline_yaml_file(template)
allow_any_instance_of(Ci::BuildScheduleWorker).to receive(:perform).and_return(true)
allow(project).to receive(:default_branch).and_return(default_branch)
end
it 'has no errors' do
expect(pipeline.errors).to be_empty
end
shared_examples_for 'load_performance job on tag or branch' do
it 'by default' do
expect(build_names).to include('load_performance')
end
end
context 'on master' do
it_behaves_like 'load_performance job on tag or branch'
end
context 'on another branch' do
let(:pipeline_ref) { 'feature' }
it_behaves_like 'load_performance job on tag or branch'
end
context 'on tag' do
let(:pipeline_ref) { 'v1.0.0' }
it_behaves_like 'load_performance job on tag or branch'
end
context 'on merge request' do
let(:service) { MergeRequests::CreatePipelineService.new(project, user) }
let(:merge_request) { create(:merge_request, :simple, source_project: project) }
let(:pipeline) { service.execute(merge_request) }
it 'has no jobs' do
expect(pipeline).to be_merge_request_event
expect(build_names).to be_empty
end
end
end
end
@@ -102,6 +102,12 @@ RSpec.describe Ci::Pipeline do
include_examples '#batch_lookup_report_artifact_for_file_type', :browser_performance, :merge_request_performance_metrics
end
context 'with load performance artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_load_performance_report, project: project) }
include_examples '#batch_lookup_report_artifact_for_file_type', :load_performance, :merge_request_performance_metrics
end
end
describe '#expose_license_scanning_data?' do
...
@@ -94,6 +94,7 @@ RSpec.describe EE::Ci::JobArtifact do
'license_scanning' | %w(license_management license_scanning)
'codequality' | %w(codequality)
'browser_performance' | %w(browser_performance performance)
'load_performance' | %w(load_performance)
'quality' | nil
end
...
@@ -133,6 +133,12 @@ RSpec.describe Ci::PipelinePresenter do
include_examples '#downloadable_path_for_report_type', :browser_performance, :merge_request_performance_metrics
end
context 'with load_performance artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_load_performance_report, project: project) }
include_examples '#downloadable_path_for_report_type', :load_performance, :merge_request_performance_metrics
end
context 'with license_scanning artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_license_scanning_report, project: project) }
...
@@ -32,7 +32,7 @@ RSpec.describe MergeRequestWidgetEntity do
end
def create_all_artifacts
- artifacts = %i(codequality performance browser_performance)
+ artifacts = %i(codequality performance browser_performance load_performance)
artifacts.each do |artifact_type|
create(:ee_ci_build, artifact_type, :success, pipeline: pipeline, project: pipeline.project)
@@ -62,10 +62,11 @@ RSpec.describe MergeRequestWidgetEntity do
describe 'test report artifacts', :request_store do
using RSpec::Parameterized::TableSyntax
- where(:json_entry, :artifact_type) do
- :codeclimate | :codequality
- :browser_performance | :browser_performance
- :browser_performance | :performance
+ where(:json_entry, :artifact_type, :exposures) do
+ :codeclimate | :codequality | []
+ :browser_performance | :browser_performance | [:degradation_threshold, :head_path, :base_path]
+ :browser_performance | :performance | [:degradation_threshold, :head_path, :base_path]
+ :load_performance | :load_performance | [:head_path, :base_path]
end
with_them do
@@ -88,6 +89,9 @@ RSpec.describe MergeRequestWidgetEntity do
it "has data entry" do
expect(subject.as_json).to include(json_entry)
exposures.each do |exposure|
expect(subject.as_json[json_entry]).to include(exposure)
end
end
end
...
@@ -13,7 +13,7 @@ module Gitlab
ALLOWED_KEYS =
%i[junit codequality sast secret_detection dependency_scanning container_scanning
- dast performance browser_performance license_management license_scanning metrics lsif
+ dast performance browser_performance load_performance license_management license_scanning metrics lsif
dotenv cobertura terraform accessibility cluster_applications
requirements coverage_fuzzing].freeze
@@ -34,6 +34,7 @@ module Gitlab
validates :dast, array_of_strings_or_string: true
validates :performance, array_of_strings_or_string: true
validates :browser_performance, array_of_strings_or_string: true
validates :load_performance, array_of_strings_or_string: true
validates :license_management, array_of_strings_or_string: true
validates :license_scanning, array_of_strings_or_string: true
validates :metrics, array_of_strings_or_string: true
...
@@ -12,6 +12,7 @@
# * code_quality: CODE_QUALITY_DISABLED
# * license_management: LICENSE_MANAGEMENT_DISABLED
# * performance: PERFORMANCE_DISABLED
# * load_performance: LOAD_PERFORMANCE_DISABLED
# * sast: SAST_DISABLED
# * secret_detection: SECRET_DETECTION_DISABLED
# * dependency_scanning: DEPENDENCY_SCANNING_DISABLED
...
load_performance:
stage: performance
image: docker:19.03.11
allow_failure: true
variables:
DOCKER_TLS_CERTDIR: ""
K6_IMAGE: loadimpact/k6
K6_VERSION: 0.26.2
K6_TEST_FILE: github.com/loadimpact/k6/samples/http_get.js
K6_OPTIONS: ''
services:
- docker:19.03.11-dind
script:
- |
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
- docker run --rm -v "$(pwd)":/k6 -w /k6 $K6_IMAGE:$K6_VERSION run $K6_TEST_FILE --summary-export=load-performance.json $K6_OPTIONS
artifacts:
reports:
load_performance: load-performance.json
rules:
- if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""'
when: never
- if: '$LOAD_PERFORMANCE_DISABLED'
when: never
- if: '$CI_COMMIT_TAG || $CI_COMMIT_BRANCH'
# Read more about the feature here: https://docs.gitlab.com/ee/user/project/merge_requests/load_performance_testing.html
stages:
- build
- test
- deploy
- performance
load_performance:
stage: performance
image: docker:git
variables:
K6_IMAGE: loadimpact/k6
K6_VERSION: 0.26.2
K6_TEST_FILE: github.com/loadimpact/k6/samples/http_get.js
K6_OPTIONS: ''
services:
- docker:stable-dind
script:
- docker run --rm -v "$(pwd)":/k6 -w /k6 $K6_IMAGE:$K6_VERSION run $K6_TEST_FILE --summary-export=load-performance.json $K6_OPTIONS
artifacts:
reports:
load_performance: load-performance.json
@@ -27169,6 +27169,9 @@ msgstr ""
msgid "ciReport|Browser performance test metrics: No changes"
msgstr ""
msgid "ciReport|Checks"
msgstr ""
msgid "ciReport|Code quality"
msgstr ""
@@ -27229,6 +27232,12 @@ msgstr ""
msgid "ciReport|Learn more about interacting with security reports"
msgstr ""
msgid "ciReport|Load performance test metrics: "
msgstr ""
msgid "ciReport|Load performance test metrics: No changes"
msgstr ""
msgid "ciReport|Loading %{reportName} report"
msgstr ""
@@ -27244,6 +27253,9 @@ msgstr ""
msgid "ciReport|No code quality issues found"
msgstr ""
msgid "ciReport|RPS"
msgstr ""
msgid "ciReport|Resolve with merge request"
msgstr ""
@@ -27271,6 +27283,12 @@ msgstr ""
msgid "ciReport|Static Application Security Testing (SAST) detects known vulnerabilities in your source code."
msgstr ""
msgid "ciReport|TTFB P90"
msgstr ""
msgid "ciReport|TTFB P95"
msgstr ""
msgid "ciReport|There was an error creating the issue. Please try again."
msgstr ""
...
@@ -46,6 +46,7 @@ RSpec.describe Gitlab::Ci::Config::Entry::Reports do
:performance | 'performance.json'
:browser_performance | 'browser-performance.json'
:browser_performance | 'performance.json'
:load_performance | 'load-performance.json'
:lsif | 'lsif.json'
:dotenv | 'build.dotenv'
:cobertura | 'cobertura-coverage.xml'
...
@@ -191,6 +191,7 @@ RSpec.describe PlanLimits do
ci_max_artifact_size_license_scanning
ci_max_artifact_size_performance
ci_max_artifact_size_browser_performance
ci_max_artifact_size_load_performance
ci_max_artifact_size_metrics
ci_max_artifact_size_metrics_referee
ci_max_artifact_size_network_referee
...
@@ -33,7 +33,7 @@ RSpec.describe Ci::RetryBuildService do
job_artifacts_sast job_artifacts_secret_detection job_artifacts_dependency_scanning
job_artifacts_container_scanning job_artifacts_dast
job_artifacts_license_management job_artifacts_license_scanning
- job_artifacts_performance job_artifacts_browser_performance
+ job_artifacts_performance job_artifacts_browser_performance job_artifacts_load_performance
job_artifacts_lsif job_artifacts_terraform job_artifacts_cluster_applications
job_artifacts_codequality job_artifacts_metrics scheduled_at
job_variables waiting_for_resource_at job_artifacts_metrics_referee
...