Commit d27b03c7 authored by Emily Ring, committed by Thong Kuah

Added cluster size to cluster list

Updated javascripts/clusters_list to include node information
Display node size on cluster index
Updated associated tests
parent bedde99a
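
In short: each cluster row now shows the number of nodes reported for that cluster, and the list keeps polling until node data is present for every cluster or a retry cap is reached. A condensed sketch of that behaviour (simplified helper names, outside the Vue/Vuex wiring used in the diff below):

    // Sketch only: mirrors the node-count display and the poll cutoff introduced in this commit.
    const MAX_REQUESTS = 3;

    // A cluster with no node data yet renders "Unknown"; otherwise the node count is shown.
    const nodeSizeLabel = cluster => (cluster.nodes ? String(cluster.nodes.length) : 'Unknown');

    // Polling stops once every cluster has node data, or after MAX_REQUESTS attempts.
    const shouldStopPolling = (clusters, retryCount) =>
      retryCount > MAX_REQUESTS || clusters.every(cluster => cluster.nodes != null);

    console.log(nodeSizeLabel({ nodes: null })); // "Unknown"
    console.log(nodeSizeLabel({ nodes: [{}, {}] })); // "2"
    console.log(shouldStopPolling([{ nodes: null }, { nodes: [{}] }], 1)); // false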
@@ -43,17 +43,17 @@ export default {
       key: 'environment_scope',
       label: __('Environment scope'),
     },
-    // Wait for backend to send these fields
-    // {
-    //   key: 'size',
-    //   label: __('Size'),
-    // },
+    {
+      key: 'node_size',
+      label: __('Nodes'),
+    },
+    // Fields are missing calculation methods and not ready to display
     // {
-    //   key: 'cpu',
+    //   key: 'node_cpu',
     //   label: __('Total cores (vCPUs)'),
     // },
     // {
-    //   key: 'memory',
+    //   key: 'node_memory',
     //   label: __('Total memory (GB)'),
     // },
     {
@@ -111,6 +111,14 @@ export default {
         ></div>
       </div>
     </template>
+    <template #cell(node_size)="{ item }">
+      <span v-if="item.nodes">{{ item.nodes.length }}</span>
+      <small v-else class="gl-font-sm gl-font-style-italic gl-text-gray-400">{{
+        __('Unknown')
+      }}</small>
+    </template>
     <template #cell(cluster_type)="{value}">
       <gl-badge variant="light">
         {{ value }}
...
@@ -6,6 +6,8 @@ export const CLUSTER_TYPES = {
   instance_type: __('Instance'),
 };

+export const MAX_REQUESTS = 3;
+
 export const STATUSES = {
   default: { className: 'bg-white', title: __('Unknown') },
   disabled: { className: 'disabled', title: __('Disabled') },
...
@@ -2,10 +2,23 @@ import Poll from '~/lib/utils/poll';
 import axios from '~/lib/utils/axios_utils';
 import flash from '~/flash';
 import { __ } from '~/locale';
+import { MAX_REQUESTS } from '../constants';
 import { parseIntPagination, normalizeHeaders } from '~/lib/utils/common_utils';
+import * as Sentry from '@sentry/browser';
 import * as types from './mutation_types';

+const allNodesPresent = (clusters, retryCount) => {
+  /*
+    Nodes are coming from external Kubernetes clusters.
+    They may fail for reasons GitLab cannot control.
+    MAX_REQUESTS will ensure this poll stops at some point.
+  */
+  return retryCount > MAX_REQUESTS || clusters.every(cluster => cluster.nodes != null);
+};
+
 export const fetchClusters = ({ state, commit }) => {
+  let retryCount = 0;
+
   const poll = new Poll({
     resource: {
       fetchClusters: paginatedEndPoint => axios.get(paginatedEndPoint),
@@ -13,16 +26,40 @@ export const fetchClusters = ({ state, commit }) => {
     data: `${state.endpoint}?page=${state.page}`,
     method: 'fetchClusters',
     successCallback: ({ data, headers }) => {
-      if (data.clusters) {
-        const normalizedHeaders = normalizeHeaders(headers);
-        const paginationInformation = parseIntPagination(normalizedHeaders);
+      retryCount += 1;

-        commit(types.SET_CLUSTERS_DATA, { data, paginationInformation });
-        commit(types.SET_LOADING_STATE, false);
-        poll.stop();
+      try {
+        if (data.clusters) {
+          const normalizedHeaders = normalizeHeaders(headers);
+          const paginationInformation = parseIntPagination(normalizedHeaders);
+
+          commit(types.SET_CLUSTERS_DATA, { data, paginationInformation });
+          commit(types.SET_LOADING_STATE, false);
+
+          if (allNodesPresent(data.clusters, retryCount)) {
+            poll.stop();
+          }
+        }
+      } catch (error) {
+        poll.stop();
+
+        Sentry.withScope(scope => {
+          scope.setTag('javascript_clusters_list', 'fetchClustersSuccessCallback');
+          Sentry.captureException(error);
+        });
       }
     },
-    errorCallback: () => flash(__('An error occurred while loading clusters')),
+    errorCallback: response => {
+      poll.stop();
+
+      commit(types.SET_LOADING_STATE, false);
+      flash(__('Clusters|An error occurred while loading clusters'));
+
+      Sentry.withScope(scope => {
+        scope.setTag('javascript_clusters_list', 'fetchClustersErrorCallback');
+        Sentry.captureException(response);
+      });
+    },
   });

   poll.makeRequest();
...
@@ -23,6 +23,7 @@ class Clusters::ClustersController < Clusters::BaseController
     respond_to do |format|
       format.html
       format.json do
+        Gitlab::PollingInterval.set_header(response, interval: STATUS_POLLING_INTERVAL)
         serializer = ClusterSerializer.new(current_user: current_user)

         render json: {
...
---
title: Added node size to cluster index
merge_request: 32435
author:
type: changed
@@ -2219,9 +2219,6 @@ msgstr ""
 msgid "An error occurred while loading chart data"
 msgstr ""

-msgid "An error occurred while loading clusters"
-msgstr ""
-
 msgid "An error occurred while loading commit signatures"
 msgstr ""

@@ -5417,6 +5414,9 @@ msgstr ""
 msgid "ClusterIntergation|Select service role"
 msgstr ""

+msgid "Clusters|An error occurred while loading clusters"
+msgstr ""
+
 msgid "Code"
 msgstr ""
...
@@ -42,6 +42,13 @@ describe Admin::ClustersController do
        expect(response).to match_response_schema('cluster_list')
      end

+      it 'sets the polling interval header for json requests' do
+        get_index(format: :json)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(response.headers['Poll-Interval']).to eq("10000")
+      end
+
      context 'when page is specified' do
        let(:last_page) { Clusters::Cluster.instance_type.page.total_pages }
        let(:total_count) { Clusters::Cluster.instance_type.page.total_count }
...
@@ -47,6 +47,13 @@ describe Groups::ClustersController do
        expect(response).to match_response_schema('cluster_list')
      end

+      it 'sets the polling interval header for json requests' do
+        go(format: :json)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(response.headers['Poll-Interval']).to eq("10000")
+      end
+
      context 'when page is specified' do
        let(:last_page) { group.clusters.page.total_pages }
        let(:total_count) { group.clusters.page.total_count }
...
@@ -41,6 +41,13 @@ describe Projects::ClustersController do
        expect(response).to match_response_schema('cluster_list')
      end

+      it 'sets the polling interval header for json requests' do
+        go(format: :json)
+
+        expect(response).to have_gitlab_http_status(:ok)
+        expect(response.headers['Poll-Interval']).to eq("10000")
+      end
+
      context 'when page is specified' do
        let(:last_page) { project.clusters.page.total_pages }
        let(:total_count) { project.clusters.page.total_count }
...
@@ -28,13 +28,17 @@ describe('Clusters', () => {
     return axios.waitForAll();
   };

+  const paginationHeader = (total = apiData.clusters.length, perPage = 20, currentPage = 1) => {
+    return {
+      'x-total': total,
+      'x-per-page': perPage,
+      'x-page': currentPage,
+    };
+  };
+
   beforeEach(() => {
     mock = new MockAdapter(axios);
-    mockPollingApi(200, apiData, {
-      'x-total': apiData.clusters.length,
-      'x-per-page': 20,
-      'x-page': 1,
-    });
+    mockPollingApi(200, apiData, paginationHeader());
     return mountWrapper();
   });

@@ -99,17 +103,30 @@ describe('Clusters', () => {
     });
   });

+  describe('nodes present', () => {
+    it.each`
+      nodeSize     | lineNumber
+      ${'Unknown'} | ${0}
+      ${'1'}       | ${1}
+      ${'2'}       | ${2}
+      ${'Unknown'} | ${3}
+      ${'Unknown'} | ${4}
+      ${'Unknown'} | ${5}
+    `('renders node size for each cluster', ({ nodeSize, lineNumber }) => {
+      const sizes = findTable().findAll('td:nth-child(3)');
+      const size = sizes.at(lineNumber);
+
+      expect(size.text()).toBe(nodeSize);
+    });
+  });
+
   describe('pagination', () => {
     const perPage = apiData.clusters.length;
     const totalFirstPage = 100;
     const totalSecondPage = 500;

     beforeEach(() => {
-      mockPollingApi(200, apiData, {
-        'x-total': totalFirstPage,
-        'x-per-page': perPage,
-        'x-page': 1,
-      });
+      mockPollingApi(200, apiData, paginationHeader(totalFirstPage, perPage, 1));
       return mountWrapper();
     });

@@ -123,11 +140,7 @@ describe('Clusters', () => {
     describe('when updating currentPage', () => {
       beforeEach(() => {
-        mockPollingApi(200, apiData, {
-          'x-total': totalSecondPage,
-          'x-per-page': perPage,
-          'x-page': 2,
-        });
+        mockPollingApi(200, apiData, paginationHeader(totalSecondPage, perPage, 2));
         wrapper.setData({ currentPage: 2 });
         return axios.waitForAll();
       });
...
 export const clusterList = [
   {
     name: 'My Cluster 1',
-    environmentScope: '*',
-    size: '3',
-    clusterType: 'group_type',
+    environment_scope: '*',
+    cluster_type: 'group_type',
     status: 'disabled',
-    cpu: '6 (100% free)',
-    memory: '22.50 (30% free)',
+    nodes: null,
   },
   {
     name: 'My Cluster 2',
-    environmentScope: 'development',
-    size: '12',
-    clusterType: 'project_type',
+    environment_scope: 'development',
+    cluster_type: 'project_type',
     status: 'unreachable',
-    cpu: '3 (50% free)',
-    memory: '11 (60% free)',
+    nodes: [{ usage: { cpu: '246155922n', memory: '1255212Ki' } }],
   },
   {
     name: 'My Cluster 3',
-    environmentScope: 'development',
-    size: '12',
-    clusterType: 'project_type',
+    environment_scope: 'development',
+    cluster_type: 'project_type',
     status: 'authentication_failure',
-    cpu: '1 (0% free)',
-    memory: '22 (33% free)',
+    nodes: [
+      { usage: { cpu: '246155922n', memory: '1255212Ki' } },
+      { usage: { cpu: '307051934n', memory: '1379136Ki' } },
+    ],
   },
   {
     name: 'My Cluster 4',
-    environmentScope: 'production',
-    size: '12',
-    clusterType: 'project_type',
+    environment_scope: 'production',
+    cluster_type: 'project_type',
     status: 'deleting',
-    cpu: '6 (100% free)',
-    memory: '45 (15% free)',
   },
   {
     name: 'My Cluster 5',
-    environmentScope: 'development',
-    size: '12',
-    clusterType: 'project_type',
+    environment_scope: 'development',
+    cluster_type: 'project_type',
     status: 'created',
-    cpu: '6 (100% free)',
-    memory: '20.12 (35% free)',
   },
   {
     name: 'My Cluster 6',
-    environmentScope: '*',
-    size: '1',
-    clusterType: 'project_type',
+    environment_scope: '*',
+    cluster_type: 'project_type',
     status: 'cleanup_ongoing',
-    cpu: '6 (100% free)',
-    memory: '20.12 (35% free)',
   },
 ];
...
 import MockAdapter from 'axios-mock-adapter';
+import Poll from '~/lib/utils/poll';
 import flashError from '~/flash';
 import testAction from 'helpers/vuex_action_helper';
 import axios from '~/lib/utils/axios_utils';
+import waitForPromises from 'helpers/wait_for_promises';
 import { apiData } from '../mock_data';
+import { MAX_REQUESTS } from '~/clusters_list/constants';
 import * as types from '~/clusters_list/store/mutation_types';
 import * as actions from '~/clusters_list/store/actions';
+import * as Sentry from '@sentry/browser';

 jest.mock('~/flash.js');
@@ -12,6 +16,24 @@ describe('Clusters store actions', () => {
   describe('fetchClusters', () => {
     let mock;

+    const headers = {
+      'x-next-page': 1,
+      'x-total': apiData.clusters.length,
+      'x-total-pages': 1,
+      'x-per-page': 20,
+      'x-page': 1,
+      'x-prev-page': 1,
+    };
+
+    const paginationInformation = {
+      nextPage: 1,
+      page: 1,
+      perPage: 20,
+      previousPage: 1,
+      total: apiData.clusters.length,
+      totalPages: 1,
+    };
+
     beforeEach(() => {
       mock = new MockAdapter(axios);
     });
@@ -19,21 +41,6 @@ describe('Clusters store actions', () => {
     afterEach(() => mock.restore());

     it('should commit SET_CLUSTERS_DATA with received response', done => {
-      const headers = {
-        'x-total': apiData.clusters.length,
-        'x-per-page': 20,
-        'x-page': 1,
-      };
-
-      const paginationInformation = {
-        nextPage: NaN,
-        page: 1,
-        perPage: 20,
-        previousPage: NaN,
-        total: apiData.clusters.length,
-        totalPages: NaN,
-      };
-
       mock.onGet().reply(200, apiData, headers);

       testAction(
@@ -52,9 +59,110 @@ describe('Clusters store actions', () => {
     it('should show flash on API error', done => {
       mock.onGet().reply(400, 'Not Found');

-      testAction(actions.fetchClusters, { endpoint: apiData.endpoint }, {}, [], [], () => {
-        expect(flashError).toHaveBeenCalledWith(expect.stringMatching('error'));
-        done();
-      });
+      testAction(
+        actions.fetchClusters,
+        { endpoint: apiData.endpoint },
+        {},
+        [{ type: types.SET_LOADING_STATE, payload: false }],
+        [],
+        () => {
+          expect(flashError).toHaveBeenCalledWith(expect.stringMatching('error'));
+          done();
+        },
+      );
     });
+
+    describe('multiple api requests', () => {
+      let captureException;
+      let pollRequest;
+      let pollStop;
+
+      const pollInterval = 10;
+      const pollHeaders = { 'poll-interval': pollInterval, ...headers };
+
+      beforeEach(() => {
+        captureException = jest.spyOn(Sentry, 'captureException');
+        pollRequest = jest.spyOn(Poll.prototype, 'makeRequest');
+        pollStop = jest.spyOn(Poll.prototype, 'stop');
+
+        mock.onGet().reply(200, apiData, pollHeaders);
+      });
+
+      afterEach(() => {
+        captureException.mockRestore();
+        pollRequest.mockRestore();
+        pollStop.mockRestore();
+      });
+
+      it('should stop polling after MAX Requests', done => {
+        testAction(
+          actions.fetchClusters,
+          { endpoint: apiData.endpoint },
+          {},
+          [
+            { type: types.SET_CLUSTERS_DATA, payload: { data: apiData, paginationInformation } },
+            { type: types.SET_LOADING_STATE, payload: false },
+          ],
+          [],
+          () => {
+            expect(pollRequest).toHaveBeenCalledTimes(1);
+            expect(pollStop).toHaveBeenCalledTimes(0);
+            jest.advanceTimersByTime(pollInterval);
+
+            waitForPromises()
+              .then(() => {
+                expect(pollRequest).toHaveBeenCalledTimes(2);
+                expect(pollStop).toHaveBeenCalledTimes(0);
+                jest.advanceTimersByTime(pollInterval);
+              })
+              .then(() => waitForPromises())
+              .then(() => {
+                expect(pollRequest).toHaveBeenCalledTimes(MAX_REQUESTS);
+                expect(pollStop).toHaveBeenCalledTimes(0);
+                jest.advanceTimersByTime(pollInterval);
+              })
+              .then(() => waitForPromises())
+              .then(() => {
+                expect(pollRequest).toHaveBeenCalledTimes(MAX_REQUESTS + 1);
+                // Stops poll once it exceeds the MAX_REQUESTS limit
+                expect(pollStop).toHaveBeenCalledTimes(1);
+                jest.advanceTimersByTime(pollInterval);
+              })
+              .then(() => waitForPromises())
+              .then(() => {
+                // Additional poll requests are not made once pollStop is called
+                expect(pollRequest).toHaveBeenCalledTimes(MAX_REQUESTS + 1);
+                expect(pollStop).toHaveBeenCalledTimes(1);
+              })
+              .then(done)
+              .catch(done.fail);
+          },
+        );
+      });
+
+      it('should stop polling and report to Sentry when data is invalid', done => {
+        const badApiResponse = { clusters: {} };
+
+        mock.onGet().reply(200, badApiResponse, pollHeaders);
+
+        testAction(
+          actions.fetchClusters,
+          { endpoint: apiData.endpoint },
+          {},
+          [
+            {
+              type: types.SET_CLUSTERS_DATA,
+              payload: { data: badApiResponse, paginationInformation },
+            },
+            { type: types.SET_LOADING_STATE, payload: false },
+          ],
+          [],
+          () => {
+            expect(pollRequest).toHaveBeenCalledTimes(1);
+            expect(pollStop).toHaveBeenCalledTimes(1);
+            expect(captureException).toHaveBeenCalledTimes(1);
+            done();
+          },
+        );
+      });
+    });
   });
 });
...