add isr count and out of sync replicas count
parent ef39834651
commit 01c2c8b20b
4 changed files with 13 additions and 24 deletions
@@ -38,7 +38,7 @@ public class KafkaCluster {
     Exception lastKafkaException;
     Exception lastZookeeperException;
 
-    public TopicDetails getTopicDetails(String key) {
+    public TopicDetails getOrCreateTopicDetails(String key) {
         var topicDetails = topicDetailsMap.get(key);
         if(topicDetails == null) {
             topicDetailsMap.putIfAbsent(key, new TopicDetails());
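For reference, a minimal sketch of the get-or-create pattern this rename describes. It assumes topicDetailsMap is a ConcurrentHashMap<String, TopicDetails>, which the putIfAbsent call above suggests; Map.computeIfAbsent is shown only as an equivalent atomic one-liner, not as the project's actual implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class TopicDetailsCache {
    // Assumed shape of the map; the diff only shows that putIfAbsent is used on it.
    private final Map<String, TopicDetails> topicDetailsMap = new ConcurrentHashMap<>();

    // Equivalent get-or-create in a single atomic call on a ConcurrentHashMap.
    public TopicDetails getOrCreateTopicDetails(String key) {
        return topicDetailsMap.computeIfAbsent(key, k -> new TopicDetails());
    }
}

// Placeholder for the model class referenced in the diff.
class TopicDetails {}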
@@ -44,7 +44,7 @@ public class ClusterService {
     public Mono<ResponseEntity<TopicDetails>> getTopicDetails(String clusterId, String topicName) {
         KafkaCluster cluster = clustersStorage.getClusterById(clusterId);
         if (cluster == null) return null;
-        return Mono.just(ResponseEntity.ok(cluster.getTopicDetails(topicName)));
+        return Mono.just(ResponseEntity.ok(cluster.getOrCreateTopicDetails(topicName)));
     }
 
     public Mono<ResponseEntity<Flux<TopicConfig>>> getTopicConfigs(String clusterId, String topicName) {
@@ -134,6 +134,8 @@ public class KafkaService {
         kafkaCluster.getBrokersMetrics().setOnlinePartitionCount(0);
         kafkaCluster.getBrokersMetrics().setOfflinePartitionCount(0);
         kafkaCluster.getBrokersMetrics().setUnderReplicatedPartitionCount(0);
+        kafkaCluster.getBrokersMetrics().setInSyncReplicasCount(0);
+        kafkaCluster.getBrokersMetrics().setOutOfSyncReplicasCount(0);
     }
 
     private Topic collectTopicData(KafkaCluster kafkaCluster, TopicDescription topicDescription) {
@@ -177,12 +179,17 @@ public class KafkaService {
                 kafkaCluster.getBrokersMetrics().setOfflinePartitionCount(kafkaCluster.getBrokersMetrics().getOfflinePartitionCount() + 1);
             }
         }
 
         kafkaCluster.getCluster().setOnlinePartitionCount(kafkaCluster.getBrokersMetrics().getOnlinePartitionCount());
         kafkaCluster.getBrokersMetrics().setUnderReplicatedPartitionCount(
                 kafkaCluster.getBrokersMetrics().getUnderReplicatedPartitionCount() + urpCount);
-        topic.setPartitions(partitions);
-        TopicDetails topicDetails = kafkaCluster.getTopicDetails(topicDescription.name());
+        kafkaCluster.getBrokersMetrics().setInSyncReplicasCount(
+                kafkaCluster.getBrokersMetrics().getInSyncReplicasCount() + inSyncReplicasCount);
+        kafkaCluster.getBrokersMetrics().setOutOfSyncReplicasCount(
+                kafkaCluster.getBrokersMetrics().getOutOfSyncReplicasCount() + (replicasCount - inSyncReplicasCount));
+
+        topic.setPartitions(partitions);
+        TopicDetails topicDetails = kafkaCluster.getOrCreateTopicDetails(topicDescription.name());
         topicDetails.setReplicas(replicasCount);
         topicDetails.setPartitionCount(topicDescription.partitions().size());
         topicDetails.setInSyncReplicas(inSyncReplicasCount);
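As a rough illustration of where the per-topic counters used in this hunk (replicasCount, inSyncReplicasCount) could come from, here is a sketch that derives them from the AdminClient's TopicDescription. This is not the project's actual code; the helper ReplicaCounter and the ReplicaCounts holder are hypothetical, while TopicDescription.partitions(), TopicPartitionInfo.replicas() and TopicPartitionInfo.isr() are standard Kafka AdminClient accessors for the partition list, the full replica set and the in-sync subset.

import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

class ReplicaCounts {
    int replicasCount;        // total replicas across all partitions of the topic
    int inSyncReplicasCount;  // replicas currently in the ISR
}

class ReplicaCounter {
    // Hypothetical helper: sums replica and ISR sizes over every partition of a topic.
    static ReplicaCounts count(TopicDescription topicDescription) {
        ReplicaCounts counts = new ReplicaCounts();
        for (TopicPartitionInfo partition : topicDescription.partitions()) {
            counts.replicasCount += partition.replicas().size();
            counts.inSyncReplicasCount += partition.isr().size();
        }
        // out-of-sync replicas = replicasCount - inSyncReplicasCount, as in the hunk above
        return counts;
    }
}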
@@ -206,10 +206,6 @@ components:
     BrokersMetrics:
       type: object
       properties:
-        clusterId:
-          type: string
-        bytesInPerSec:
-          type: integer
         brokerCount:
           type: integer
         zooKeeperStatus:
@@ -218,29 +214,15 @@ components:
           type: integer
         uncleanLeaderElectionCount:
           type: integer
-        networkPoolUsage:
-          type: number
-        requestPoolUsage:
-          type: number
         onlinePartitionCount:
           type: integer
         underReplicatedPartitionCount:
           type: integer
         offlinePartitionCount:
           type: integer
-        diskUsage:
-          $ref: '#/components/schemas/DiskUsage'
-        diskUsageDistribution:
-          type: string
-      required:
-        - id
-
-    DiskUsage:
-      type: object
-      properties:
-        brokerId:
+        inSyncReplicasCount:
           type: integer
-        segmentSize:
+        outOfSyncReplicasCount:
           type: integer
 
     Topic:
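The schema change above implies the generated BrokersMetrics model gains two integer fields, matching the setInSyncReplicasCount / setOutOfSyncReplicasCount calls in KafkaService. A minimal sketch of that model follows, assuming a plain code-generated POJO; the field names come from the spec, everything else is an assumption.

// Sketch only: the real class is generated from the OpenAPI spec above.
public class BrokersMetrics {
    private Integer inSyncReplicasCount;
    private Integer outOfSyncReplicasCount;

    public Integer getInSyncReplicasCount() { return inSyncReplicasCount; }
    public void setInSyncReplicasCount(Integer inSyncReplicasCount) {
        this.inSyncReplicasCount = inSyncReplicasCount;
    }

    public Integer getOutOfSyncReplicasCount() { return outOfSyncReplicasCount; }
    public void setOutOfSyncReplicasCount(Integer outOfSyncReplicasCount) {
        this.outOfSyncReplicasCount = outOfSyncReplicasCount;
    }
}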