parent bc2efc9bee
commit 8057dc123f
19 changed files with 204 additions and 132 deletions
@@ -10,6 +10,10 @@ public class SimpleRecordDeserializer implements RecordDeserializer {
 
     @Override
     public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
-        return stringDeserializer.deserialize(record.topic(), record.value().get());
+        if (record.value()!=null) {
+            return stringDeserializer.deserialize(record.topic(), record.value().get());
+        } else {
+            return "empty";
+        }
     }
 }
@@ -1,10 +1,7 @@
 package com.provectus.kafka.ui.cluster.mapper;
 
 import com.provectus.kafka.ui.cluster.config.ClustersProperties;
-import com.provectus.kafka.ui.cluster.model.InternalClusterMetrics;
-import com.provectus.kafka.ui.cluster.model.InternalTopic;
-import com.provectus.kafka.ui.cluster.model.InternalTopicConfig;
-import com.provectus.kafka.ui.cluster.model.KafkaCluster;
+import com.provectus.kafka.ui.cluster.model.*;
 import com.provectus.kafka.ui.model.*;
 import org.mapstruct.Mapper;
 import org.mapstruct.Mapping;
@@ -19,8 +16,10 @@ public interface ClusterMapper {
     Cluster toCluster(KafkaCluster cluster);
 
     KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);
-    BrokersMetrics toBrokerMetrics(InternalClusterMetrics metrics);
+    ClusterMetrics toClusterMetrics(InternalClusterMetrics metrics);
+    BrokerMetrics toBrokerMetrics(InternalBrokerMetrics metrics);
     Topic toTopic(InternalTopic topic);
     TopicDetails toTopicDetails(InternalTopic topic);
     TopicConfig toTopicConfig(InternalTopicConfig topic);
+    Replica toReplica(InternalReplica replica);
 }
@@ -10,5 +10,5 @@ import java.util.List;
 @Builder(toBuilder = true)
 public class InternalBrokerMetrics {
     private final Long segmentSize;
-    private final List<Metric> jmxMetrics;
+    private final List<Metric> metrics;
 }
@@ -6,11 +6,13 @@ import lombok.Data;
 import java.util.List;
 
 @Data
-@Builder
+@Builder(toBuilder = true)
 public class InternalPartition {
     private final int partition;
     private final Integer leader;
     private final List<InternalReplica> replicas;
     private final int inSyncReplicasCount;
     private final int replicasCount;
+    private final long offsetMin;
+    private final long offsetMax;
 }
@@ -1,6 +1,5 @@
 package com.provectus.kafka.ui.cluster.model;
 
-import com.provectus.kafka.ui.model.TopicPartitionDto;
 import lombok.Builder;
 import lombok.Data;
 import org.apache.kafka.common.TopicPartition;
@@ -11,7 +11,7 @@ import java.util.Map;
 public class KafkaCluster {
 
     private final String name;
-    private final int jmxPort;
+    private final Integer jmxPort;
     private final String bootstrapServers;
     private final String zookeeper;
     private final String schemaRegistry;
@@ -38,17 +38,22 @@ public class ClusterService {
                 .collect(Collectors.toList());
     }
 
-    public Mono<BrokersMetrics> getBrokersMetrics(String name, Integer id) {
+    public Mono<BrokerMetrics> getBrokerMetrics(String name, Integer id) {
         return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
-                .map(KafkaCluster::getMetrics)
-                .map(s -> {
-                    var brokerMetrics = clusterMapper.toBrokerMetrics(s);
-                    brokerMetrics.setMetrics(s.getInternalBrokerMetrics().get(id).getJmxMetrics());
-                    brokerMetrics.setSegmentZise(Long.valueOf(s.getSegmentSize()).intValue());
-                    return brokerMetrics;
-                }));
+                .map( c -> c.getMetrics().getInternalBrokerMetrics())
+                .map( m -> m.get(id))
+                .map(clusterMapper::toBrokerMetrics));
     }
 
+    public Mono<ClusterMetrics> getClusterMetrics(String name) {
+        return Mono.justOrEmpty(
+                clustersStorage.getClusterByName(name)
+                        .map(KafkaCluster::getMetrics)
+                        .map(clusterMapper::toClusterMetrics)
+        );
+    }
+
+
     public List<Topic> getTopics(String name) {
         return clustersStorage.getClusterByName(name)
                 .map(c ->
@@ -60,12 +65,15 @@ public class ClusterService {
 
     public Optional<TopicDetails> getTopicDetails(String name, String topicName) {
         return clustersStorage.getClusterByName(name)
-                .map(c -> {
-                    var topic = c.getTopics().get(topicName);
-                    return clusterMapper
-                            .toTopicDetails(topic)
-                            .partitions(kafkaService.partitionDtoList(topic, c));
-                });
+                .flatMap( c ->
+                        Optional.ofNullable(
+                                c.getTopics().get(topicName)
+                        ).map(
+                                t -> t.toBuilder().partitions(
+                                        kafkaService.getTopicPartitions(c, t)
+                                ).build()
+                        ).map(clusterMapper::toTopicDetails)
+                );
     }
 
     public Optional<List<TopicConfig>> getTopicConfigs(String name, String topicName) {
@@ -143,6 +151,7 @@ public class ClusterService {
         return clustersStorage.getClusterByName(clusterName).map(cl ->
                 topicFormData
                         .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
+                        .map(clusterMapper::toTopic)
                         .flatMap(t -> updateCluster(t, clusterName, cl))
                 )
                 .orElse(Mono.empty());
@@ -161,4 +170,5 @@ public class ClusterService {
                 .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, query, limit))
                 .orElse(Flux.empty());
     }
+
 }
@@ -68,10 +68,12 @@ public class ClusterUtil {
     ) {
         return consumer.assignment().topicPartitions().stream()
                 .map(tp -> {
-                    Long currentOffset = groupOffsets.get(tp).offset();
-                    Long endOffset = endOffsets.get(tp);
+                    Long currentOffset = Optional.ofNullable(
+                            groupOffsets.get(tp)).map(o -> o.offset()).orElse(0L);
+                    Long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
                     ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
                     cd.setConsumerId(consumer.consumerId());
+                    cd.setHost(consumer.host());
                     cd.setTopic(tp.topic());
                     cd.setPartition(tp.partition());
                     cd.setCurrentOffset(currentOffset);
@@ -116,7 +118,7 @@ public class ClusterUtil {
 
         int urpCount = partitions.stream()
                 .flatMap(partition -> partition.getReplicas().stream())
-                .filter(InternalReplica::isInSync).mapToInt(e -> 1)
+                .filter(p -> !p.isInSync()).mapToInt(e -> 1)
                 .sum();
 
         int inSyncReplicasCount = partitions.stream()
@@ -199,6 +201,10 @@ public class ClusterUtil {
                 .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
                 .findFirst().orElseThrow().value();
         try {
+            final String[] parts = version.split("\\.");
+            if (parts.length>2) {
+                version = parts[0] + "." + parts[1];
+            }
             return Float.parseFloat(version.split("-")[0]) <= 2.3f
                     ? ExtendedAdminClient.SupportedFeature.ALTER_CONFIGS : ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
         } catch (Exception e) {
@@ -207,24 +213,6 @@ public class ClusterUtil {
         }
     }
 
-    public static Topic convertToTopic(InternalTopic internalTopic) {
-        Topic topic = new Topic();
-        topic.setName(internalTopic.getName());
-        List<Partition> partitions = internalTopic.getPartitions().stream().flatMap(s -> {
-            Partition partition = new Partition();
-            partition.setPartition(s.getPartition());
-            partition.setLeader(s.getLeader());
-            partition.setReplicas(s.getReplicas().stream().flatMap(r -> {
-                Replica replica = new Replica();
-                replica.setBroker(r.getBroker());
-                return Stream.of(replica);
-            }).collect(Collectors.toList()));
-            return Stream.of(partition);
-        }).collect(Collectors.toList());
-        topic.setPartitions(partitions);
-        return topic;
-    }
-
     public static <T, R> Map<T, R> toSingleMap (Stream<Map<T, R>> streamOfMaps) {
         return streamOfMaps.reduce((map1, map2) -> Stream.concat(map1.entrySet().stream(), map2.entrySet().stream())
                 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
@@ -100,7 +100,7 @@ public class JmxClusterUtil {
     public List<MetricDto> convertToMetricDto(InternalClusterMetrics internalClusterMetrics) {
         return internalClusterMetrics.getInternalBrokerMetrics().values().stream()
                 .map(c ->
-                        c.getJmxMetrics().stream()
+                        c.getMetrics().stream()
                                 .filter(j -> isSameMetric(j.getCanonicalName()))
                                 .map(j -> j.getValue().entrySet().stream()
                                         .map(e -> new MetricDto(j.getCanonicalName(), e.getKey(), e.getValue()))))
@@ -265,7 +265,7 @@ public class KafkaService {
     }
 
     @SneakyThrows
-    public Mono<Topic> updateTopic(KafkaCluster cluster, String topicName, TopicFormData topicFormData) {
+    public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName, TopicFormData topicFormData) {
         ConfigResource topicCR = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
         return getOrCreateAdminClient(cluster)
                 .flatMap(ac -> {
|
@ -281,11 +281,10 @@ public class KafkaService {
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
private Mono<Topic> getUpdatedTopic (ExtendedAdminClient ac, String topicName) {
|
private Mono<InternalTopic> getUpdatedTopic (ExtendedAdminClient ac, String topicName) {
|
||||||
return getTopicsData(ac.getAdminClient())
|
return getTopicsData(ac.getAdminClient())
|
||||||
.map(s -> s.stream()
|
.map(s -> s.stream()
|
||||||
.filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow())
|
.filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
|
||||||
.map(ClusterUtil::convertToTopic);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
|
private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
|
||||||
|
@@ -346,6 +345,8 @@ public class KafkaService {
 
     public List<Metric> getJmxMetric(String clusterName, Node node) {
         return clustersStorage.getClusterByName(clusterName)
+                .filter( c -> c.getJmxPort() != null)
+                .filter( c -> c.getJmxPort() > 0)
                 .map(c -> jmxClusterUtil.getJmxMetrics(c.getJmxPort(), node.host())).orElse(Collections.emptyList());
     }
 
@@ -357,7 +358,7 @@ public class KafkaService {
         return ClusterUtil.toMono(ac.describeCluster().nodes())
                 .flatMapIterable(nodes -> nodes)
                 .map(broker -> Map.of(broker.id(), InternalBrokerMetrics.builder().
-                        jmxMetrics(getJmxMetric(clusterName, broker)).build()))
+                        metrics(getJmxMetric(clusterName, broker)).build()))
                 .collectList()
                 .map(s -> internalClusterMetrics.toBuilder().internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build());
     }
@@ -377,22 +378,25 @@ public class KafkaService {
                 .collect(Collectors.toList())).build();
     }
 
-    public List<TopicPartitionDto> partitionDtoList (InternalTopic topic, KafkaCluster cluster) {
-        var topicPartitions = topic.getPartitions().stream().map(t -> new TopicPartition(topic.getName(), t.getPartition())).collect(Collectors.toList());
-        return getTopicPartitionOffset(cluster, topicPartitions);
-    }
+    public List<InternalPartition> getTopicPartitions(KafkaCluster c, InternalTopic topic ) {
+        var tps = topic.getPartitions().stream()
+                .map(t -> new TopicPartition(topic.getName(), t.getPartition()))
+                .collect(Collectors.toList());
+        Map<Integer, InternalPartition> partitions =
+                topic.getPartitions().stream().collect(Collectors.toMap(
+                        InternalPartition::getPartition,
+                        tp -> tp
+                ));
 
-    private List<TopicPartitionDto> getTopicPartitionOffset(KafkaCluster c, List<TopicPartition> topicPartitions ) {
         try (var consumer = createConsumer(c)) {
-            final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(topicPartitions);
-            final Map<TopicPartition, Long> latest = consumer.endOffsets(topicPartitions);
+            final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);
+            final Map<TopicPartition, Long> latest = consumer.endOffsets(tps);
 
-            return topicPartitions.stream()
-                    .map( tp -> new TopicPartitionDto()
-                            .topic(tp.topic())
-                            .partition(tp.partition())
+            return tps.stream()
+                    .map( tp -> partitions.get(tp.partition()).toBuilder()
                             .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L))
                             .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L))
+                            .build()
                     ).collect(Collectors.toList());
         } catch (Exception e) {
             return Collections.emptyList();
@@ -30,12 +30,19 @@ public class MetricsRestController implements ApiClustersApi {
     }
 
     @Override
-    public Mono<ResponseEntity<BrokersMetrics>> getBrokersMetrics(String clusterName, Integer id, ServerWebExchange exchange) {
-        return clusterService.getBrokersMetrics(clusterName, id)
+    public Mono<ResponseEntity<BrokerMetrics>> getBrokersMetrics(String clusterName, Integer id, ServerWebExchange exchange) {
+        return clusterService.getBrokerMetrics(clusterName, id)
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build());
     }
 
+    @Override
+    public Mono<ResponseEntity<ClusterMetrics>> getClusterMetrics(String clusterName, ServerWebExchange exchange) {
+        return clusterService.getClusterMetrics(clusterName)
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build());
+    }
+
     @Override
     public Mono<ResponseEntity<Flux<Topic>>> getTopics(String clusterName, ServerWebExchange exchange) {
         return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getTopics(clusterName))));
@@ -52,6 +52,26 @@ paths:
                 items:
                   $ref: '#/components/schemas/Broker'
 
+  /api/clusters/{clusterName}/metrics:
+    get:
+      tags:
+        - /api/clusters
+      summary: getClusterMetrics
+      operationId: getClusterMetrics
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ClusterMetrics'
+
   /api/clusters/{clusterName}/metrics/broker/{id}:
     get:
       tags:
@@ -75,7 +95,7 @@ paths:
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/BrokersMetrics'
+                $ref: '#/components/schemas/BrokerMetrics'
 
   /api/clusters/{clusterName}/topics:
     get:
@@ -327,16 +347,16 @@ components:
         - online
         - offline
 
-    BrokersMetrics:
+    ClusterMetrics:
       type: object
      properties:
+        brokerCount:
+          type: integer
        zooKeeperStatus:
          type: integer
        activeControllers:
          type: integer
-        uncleanLeaderElectionCount:
-          type: integer
-        underReplicatedPartitionCount:
+        onlinePartitionCount:
          type: integer
        offlinePartitionCount:
          type: integer
@@ -344,6 +364,25 @@ components:
           type: integer
         outOfSyncReplicasCount:
           type: integer
+        underReplicatedPartitionCount:
+          type: integer
+        diskUsage:
+          type: array
+          items:
+            $ref: '#/components/schemas/BrokerDiskUsage'
+
+    BrokerDiskUsage:
+      type: object
+      properties:
+        brokerId:
+          type: integer
+        segmentSize:
+          type: integer
+          format: int64
+
+    BrokerMetrics:
+      type: object
+      properties:
         segmentZise:
           type: integer
         metrics:
@@ -358,22 +397,24 @@ components:
           type: string
         internal:
           type: boolean
+        partitionCount:
+          type: integer
+        replicationFactor:
+          type: integer
+        replicas:
+          type: integer
+        inSyncReplicas:
+          type: integer
+        segmentSize:
+          type: integer
+        segmentCount:
+          type: integer
+        underReplicatedPartitions:
+          type: integer
         partitions:
           type: array
           items:
-            $ref: '#/components/schemas/Partition'
+            $ref: "#/components/schemas/Partition"
 
-    Partition:
-      type: object
-      properties:
-        partition:
-          type: integer
-        leader:
-          type: integer
-        replicas:
-          type: array
-          items:
-            $ref: '#/components/schemas/Replica'
-
     Replica:
       type: object
@@ -388,10 +429,14 @@ components:
     TopicDetails:
       type: object
       properties:
+        name:
+          type: string
+        internal:
+          type: boolean
         partitions:
           type: array
           items:
-            $ref: "#/components/schemas/TopicPartitionDto"
+            $ref: "#/components/schemas/Partition"
         partitionCount:
           type: integer
         replicationFactor:
@@ -490,13 +535,17 @@ components:
         - OFFSET
         - TIMESTAMP
 
-    TopicPartitionDto:
+    Partition:
      type: object
      properties:
-        topic:
-          type: string
        partition:
          type: integer
+        leader:
+          type: integer
+        replicas:
+          type: array
+          items:
+            $ref: '#/components/schemas/Replica'
        offsetMax:
          type: integer
          format: int64
@@ -516,6 +565,8 @@ components:
           type: string
         topic:
           type: string
+        host:
+          type: string
         partition:
           type: integer
         currentOffset:
@@ -58,6 +58,7 @@ const Details: React.FC<Props> = ({
         <thead>
           <tr>
             <th>Consumer ID</th>
+            <th>Host</th>
             <th>Topic</th>
             <th>Partition</th>
             <th>Messages behind</th>
@@ -12,6 +12,7 @@ const ListItem: React.FC<Props> = ({ clusterName, consumer }) => {
   return (
     <tr>
       <td>{consumer.consumerId}</td>
+      <td>{consumer.host}</td>
       <td>
         <NavLink
           exact
@@ -172,8 +172,8 @@ const Messages: React.FC<Props> = ({
     });
   };
 
-  const getTimestampDate = (timestamp: number) => {
-    return format(new Date(timestamp * 1000), 'MM.dd.yyyy HH:mm:ss');
+  const getTimestampDate = (timestamp: string) => {
+    return format(Date.parse(timestamp), 'yyyy-MM-dd HH:mm:ss');
   };
 
   const getMessageContentHeaders = React.useMemo(() => {
@@ -23,10 +23,9 @@ const Overview: React.FC<Props> = ({
   replicationFactor,
   fetchTopicDetails,
 }) => {
-  React.useEffect(
-    () => { fetchTopicDetails(clusterName, topicName); },
-    [fetchTopicDetails, clusterName, topicName],
-  );
+  React.useEffect(() => {
+    fetchTopicDetails(clusterName, topicName);
+  }, [fetchTopicDetails, clusterName, topicName]);
 
   if (!isFetched) {
     return null;
@@ -35,18 +34,18 @@ const Overview: React.FC<Props> = ({
   return (
     <>
       <MetricsWrapper>
-        <Indicator label="Partitions">
-          {partitionCount}
-        </Indicator>
-        <Indicator label="Replication Factor">
-          {replicationFactor}
-        </Indicator>
+        <Indicator label="Partitions">{partitionCount}</Indicator>
+        <Indicator label="Replication Factor">{replicationFactor}</Indicator>
         <Indicator label="URP" title="Under replicated partitions">
           {underReplicatedPartitions}
         </Indicator>
         <Indicator label="In sync replicas">
           {inSyncReplicas}
-          <span className="subtitle has-text-weight-light"> of {replicas}</span>
+          <span className="subtitle has-text-weight-light">
+            {' '}
+            of
+            {replicas}
+          </span>
         </Indicator>
         <Indicator label="Type">
           <span className="tag is-primary">
@@ -60,15 +59,20 @@ const Overview: React.FC<Props> = ({
             <tr>
               <th>Partition ID</th>
               <th>Broker leader</th>
+              <th>Min offset</th>
+              <th>Max offset</th>
             </tr>
           </thead>
           <tbody>
-            {partitions && partitions.map(({ partition, leader }) => (
-              <tr key={`partition-list-item-key-${partition}`}>
-                <td>{partition}</td>
-                <td>{leader}</td>
-              </tr>
-            ))}
+            {partitions &&
+              partitions.map(({ partition, leader, offsetMin, offsetMax }) => (
+                <tr key={`partition-list-item-key-${partition}`}>
+                  <td>{partition}</td>
+                  <td>{leader}</td>
+                  <td>{offsetMin}</td>
+                  <td>{offsetMax}</td>
+                </tr>
+              ))}
           </tbody>
         </table>
       </div>
@@ -1,17 +1,14 @@
-import {
-  Broker,
-  ClusterName,
-  BrokerMetrics,
-} from 'redux/interfaces';
-import {
-  BASE_URL,
-  BASE_PARAMS,
-} from 'lib/constants';
+import { Broker, ClusterName, BrokerMetrics } from 'redux/interfaces';
+import { BASE_URL, BASE_PARAMS } from 'lib/constants';
 
 export const getBrokers = (clusterName: ClusterName): Promise<Broker[]> =>
-  fetch(`${BASE_URL}/clusters/${clusterName}/brokers`, { ...BASE_PARAMS })
-    .then(res => res.json());
+  fetch(`${BASE_URL}/clusters/${clusterName}/brokers`, {
+    ...BASE_PARAMS,
+  }).then((res) => res.json());
 
-export const getBrokerMetrics = (clusterName: ClusterName): Promise<BrokerMetrics> =>
-  fetch(`${BASE_URL}/clusters/${clusterName}/metrics/broker`, { ...BASE_PARAMS })
-    .then(res => res.json());
+export const getBrokerMetrics = (
+  clusterName: ClusterName
+): Promise<BrokerMetrics> =>
+  fetch(`${BASE_URL}/clusters/${clusterName}/metrics`, {
+    ...BASE_PARAMS,
+  }).then((res) => res.json());
@@ -16,16 +16,18 @@ export interface ConsumerGroupDetails {
 export interface Consumer {
   consumerId: string;
   topic: string;
+  host: string;
   partition: number;
   messagesBehind: number;
   currentOffset: number;
   endOffset: number;
 }
 
-export interface ConsumerGroupDetailedInfo extends ConsumerGroup, ConsumerGroupDetails {
-}
+export interface ConsumerGroupDetailedInfo
+  extends ConsumerGroup,
+    ConsumerGroupDetails {}
 
 export interface ConsumerGroupsState {
-  byID: { [consumerGroupID: string]: ConsumerGroupDetailedInfo },
-  allIDs: string[]
+  byID: { [consumerGroupID: string]: ConsumerGroupDetailedInfo };
+  allIDs: string[];
 }
@@ -26,6 +26,8 @@ export interface TopicReplica {
 export interface TopicPartition {
   partition: number;
   leader: number;
+  offsetMin: number;
+  offsetMax: number;
   replicas: TopicReplica[];
 }
@@ -35,25 +37,26 @@ export interface TopicCustomParamOption {
 }
 
 export interface TopicDetails {
-  partitionCount?: number;
-  replicationFactor?: number;
-  replicas?: number;
-  segmentSize?: number;
-  inSyncReplicas?: number;
-  segmentCount?: number;
-  underReplicatedPartitions?: number;
+  partitions: TopicPartition[];
 }
 
 export interface Topic {
   name: TopicName;
   internal: boolean;
+  partitionCount?: number;
+  replicationFactor?: number;
+  replicas?: number;
+  inSyncReplicas?: number;
+  segmentSize?: number;
+  segmentCount?: number;
+  underReplicatedPartitions?: number;
   partitions: TopicPartition[];
 }
 
 export interface TopicMessage {
   partition: number;
   offset: number;
-  timestamp: number;
+  timestamp: string;
   timestampType: string;
   key: string;
   headers: Record<string, string>;