Add specs for brokers reducer
parent 5930023365
commit 91e93a727e
4 changed files with 152 additions and 1 deletion
@@ -8,7 +8,7 @@ jest.mock('lodash', () => ({
   now: () => 1234567890,
 }));
 
-describe('Clusters reducer', () => {
+describe('Alerts reducer', () => {
   it('does not create error alert', () => {
     expect(reducer(undefined, createTopicAction.failure({}))).toEqual({});
   });
@@ -0,0 +1,37 @@
+export const brokersPayload = [
+  { id: 1, host: 'b-1.test.kafka.amazonaws.com' },
+  { id: 2, host: 'b-2.test.kafka.amazonaws.com' },
+];
+
+export const brokerStatsPayload = {
+  brokerCount: 2,
+  zooKeeperStatus: 1,
+  activeControllers: 1,
+  onlinePartitionCount: 138,
+  offlinePartitionCount: 0,
+  inSyncReplicasCount: 239,
+  outOfSyncReplicasCount: 0,
+  underReplicatedPartitionCount: 0,
+  diskUsage: [
+    { brokerId: 1, segmentSize: 16848434, segmentCount: 118 },
+    { brokerId: 2, segmentSize: 12345678, segmentCount: 121 },
+  ],
+  version: '2.2.1',
+};
+
+export const brokersReducerState = {
+  items: [],
+  brokerCount: 2,
+  zooKeeperStatus: 1,
+  activeControllers: 1,
+  onlinePartitionCount: 138,
+  offlinePartitionCount: 0,
+  inSyncReplicasCount: 239,
+  outOfSyncReplicasCount: 0,
+  underReplicatedPartitionCount: 0,
+  diskUsage: [
+    { brokerId: 1, segmentSize: 16848434, segmentCount: 118 },
+    { brokerId: 2, segmentSize: 12345678, segmentCount: 121 },
+  ],
+  version: '2.2.1',
+};
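Note: the reducer and selector specs below drive the store through fetchBrokersAction and fetchClusterStatsAction via their .request() / .success() creators. For orientation, a rough sketch of what such an action creator might look like, assuming the project builds them with typesafe-actions' createAsyncAction (the .request/.success/.failure shape in these specs points that way); the action-type strings and the Broker interface are inferred from the spec names and fixtures, not confirmed by this diff:

// Illustrative sketch only, not the actual redux/actions module.
// Assumes typesafe-actions; the type strings are inferred from the spec
// description 'reacts on GET_BROKERS__SUCCESS' and may differ in the repo.
import { createAsyncAction } from 'typesafe-actions';

interface Broker {
  id: number;
  host: string;
}

export const fetchBrokersAction = createAsyncAction(
  'GET_BROKERS__REQUEST',
  'GET_BROKERS__SUCCESS',
  'GET_BROKERS__FAILURE'
)<undefined, Broker[], Error>();

Under that assumption, fetchBrokersAction.request() is the plain "no state change" case exercised in the reducer spec, and fetchBrokersAction.success(brokersPayload) carries the fixture above as its payload.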
@@ -0,0 +1,31 @@
+import { fetchBrokersAction, fetchClusterStatsAction } from 'redux/actions';
+import reducer, { initialState } from 'redux/reducers/brokers/reducer';
+
+import {
+  brokersPayload,
+  brokerStatsPayload,
+  brokersReducerState,
+} from './fixtures';
+
+describe('Clusters reducer', () => {
+  it('returns the initial state', () => {
+    expect(reducer(undefined, fetchBrokersAction.request())).toEqual(
+      initialState
+    );
+  });
+
+  it('reacts on GET_BROKERS__SUCCESS and returns payload', () => {
+    expect(
+      reducer(initialState, fetchBrokersAction.success(brokersPayload))
+    ).toEqual({
+      ...initialState,
+      items: brokersPayload,
+    });
+  });
+
+  it('reacts on GET_BROKER_METRICS__SUCCESS and returns payload', () => {
+    expect(
+      reducer(initialState, fetchClusterStatsAction.success(brokerStatsPayload))
+    ).toEqual(brokersReducerState);
+  });
+});
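Read together, these expectations pin down the reducer's behavior: on a brokers fetch success only items is replaced, and on a cluster-stats success the payload is spread over the rest of the state. A hypothetical reconstruction of the reducer under those assumptions follows; the BrokersState interface and the action-type constants are illustrative and not taken from the diff (the spec name mentions GET_BROKER_METRICS__SUCCESS while the dispatched creator is fetchClusterStatsAction, so the real constant may differ):

// Hypothetical reconstruction, inferred from the spec expectations above.
interface BrokersState {
  items: { id: number; host: string }[];
  brokerCount: number;
  zooKeeperStatus: number;
  activeControllers: number;
  onlinePartitionCount: number;
  offlinePartitionCount: number;
  inSyncReplicasCount: number;
  outOfSyncReplicasCount: number;
  underReplicatedPartitionCount: number;
  diskUsage: { brokerId: number; segmentSize: number; segmentCount: number }[];
  version?: string;
}

export const initialState: BrokersState = {
  items: [],
  brokerCount: 0,
  zooKeeperStatus: 0,
  activeControllers: 0,
  onlinePartitionCount: 0,
  offlinePartitionCount: 0,
  inSyncReplicasCount: 0,
  outOfSyncReplicasCount: 0,
  underReplicatedPartitionCount: 0,
  diskUsage: [],
};

const reducer = (
  state: BrokersState = initialState,
  action: { type: string; payload?: unknown }
): BrokersState => {
  switch (action.type) {
    case 'GET_BROKERS__SUCCESS':
      // Only the broker list changes; stats fields keep their previous values.
      return { ...state, items: action.payload as BrokersState['items'] };
    case 'GET_CLUSTER_STATS__SUCCESS': // hypothetical constant for fetchClusterStatsAction.success
      // The stats payload is merged over the existing state, leaving items intact.
      return { ...state, ...(action.payload as Partial<BrokersState>) };
    default:
      return state;
  }
};

export default reducer;

The zeroed initialState mirrors the 'Initial State' expectations in the selectors spec below (counts of 0, empty diskUsage, undefined version).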
@@ -0,0 +1,83 @@
+import configureStore from 'redux/store/configureStore';
+import * as selectors from 'redux/reducers/brokers/selectors';
+import { fetchBrokersAction, fetchClusterStatsAction } from 'redux/actions';
+
+import { brokersPayload, brokerStatsPayload } from './fixtures';
+
+const { dispatch, getState } = configureStore();
+
+describe('Brokers selectors', () => {
+  describe('Initial State', () => {
+    it('returns broker count', () => {
+      expect(selectors.getBrokerCount(getState())).toEqual(0);
+    });
+    it('returns zooKeeper status', () => {
+      expect(selectors.getZooKeeperStatus(getState())).toEqual(0);
+    });
+    it('returns active controllers', () => {
+      expect(selectors.getActiveControllers(getState())).toEqual(0);
+    });
+    it('returns online partition count', () => {
+      expect(selectors.getOnlinePartitionCount(getState())).toEqual(0);
+    });
+    it('returns offline partition count', () => {
+      expect(selectors.getOfflinePartitionCount(getState())).toEqual(0);
+    });
+    it('returns in sync replicas count', () => {
+      expect(selectors.getInSyncReplicasCount(getState())).toEqual(0);
+    });
+    it('returns out of sync replicas count', () => {
+      expect(selectors.getOutOfSyncReplicasCount(getState())).toEqual(0);
+    });
+    it('returns under replicated partition count', () => {
+      expect(selectors.getUnderReplicatedPartitionCount(getState())).toEqual(0);
+    });
+    it('returns disk usage', () => {
+      expect(selectors.getDiskUsage(getState())).toEqual([]);
+    });
+    it('returns version', () => {
+      expect(selectors.getVersion(getState())).toBeUndefined();
+    });
+  });
+
+  describe('state', () => {
+    beforeAll(() => {
+      dispatch(fetchBrokersAction.success(brokersPayload));
+      dispatch(fetchClusterStatsAction.success(brokerStatsPayload));
+    });
+
+    it('returns broker count', () => {
+      expect(selectors.getBrokerCount(getState())).toEqual(2);
+    });
+    it('returns zooKeeper status', () => {
+      expect(selectors.getZooKeeperStatus(getState())).toEqual(1);
+    });
+    it('returns active controllers', () => {
+      expect(selectors.getActiveControllers(getState())).toEqual(1);
+    });
+    it('returns online partition count', () => {
+      expect(selectors.getOnlinePartitionCount(getState())).toEqual(138);
+    });
+    it('returns offline partition count', () => {
+      expect(selectors.getOfflinePartitionCount(getState())).toEqual(0);
+    });
+    it('returns in sync replicas count', () => {
+      expect(selectors.getInSyncReplicasCount(getState())).toEqual(239);
+    });
+    it('returns out of sync replicas count', () => {
+      expect(selectors.getOutOfSyncReplicasCount(getState())).toEqual(0);
+    });
+    it('returns under replicated partition count', () => {
+      expect(selectors.getUnderReplicatedPartitionCount(getState())).toEqual(0);
+    });
+    it('returns disk usage', () => {
+      expect(selectors.getDiskUsage(getState())).toEqual([
+        { brokerId: 1, segmentCount: 118, segmentSize: 16848434 },
+        { brokerId: 2, segmentCount: 121, segmentSize: 12345678 },
+      ]);
+    });
+    it('returns version', () => {
+      expect(selectors.getVersion(getState())).toEqual('2.2.1');
+    });
+  });
+});
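The selector spec suggests plain accessor selectors over a brokers slice of the root state. A hypothetical sketch of a few of them, assuming that shape; the real redux/reducers/brokers/selectors.ts may use reselect or a different state layout:

// Hypothetical sketch: field names mirror the fixtures, but the RootState
// shape and the absence of memoization are assumptions, not from the diff.
interface RootState {
  brokers: {
    items: { id: number; host: string }[];
    brokerCount: number;
    zooKeeperStatus: number;
    activeControllers: number;
    diskUsage: { brokerId: number; segmentSize: number; segmentCount: number }[];
    version?: string;
  };
}

export const getBrokerCount = (state: RootState) => state.brokers.brokerCount;
export const getZooKeeperStatus = (state: RootState) => state.brokers.zooKeeperStatus;
export const getActiveControllers = (state: RootState) => state.brokers.activeControllers;
export const getDiskUsage = (state: RootState) => state.brokers.diskUsage;
export const getVersion = (state: RootState) => state.brokers.version;
// The remaining partition/replica-count selectors would follow the same pattern.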