commit 465a3726e8 (parent ec7c8bbf5e)
5 changed files with 121 additions and 34 deletions
ConsumerPosition.java (new file)
@@ -0,0 +1,15 @@
+package com.provectus.kafka.ui.cluster.model;
+
+import lombok.Value;
+
+import java.util.Map;
+
+import com.provectus.kafka.ui.model.SeekType;
+
+@Value
+public class ConsumerPosition {
+
+    private SeekType seekType;
+    private Map<Integer, Long> seekTo;
+
+}
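ConsumerPosition is the new value object that carries the seek request from the REST layer down to the consumer. Lombok's @Value makes both fields private final and generates the all-args constructor, getters, equals/hashCode and toString, so the explicit private modifiers above are redundant but harmless. A minimal usage sketch (Map.of assumes Java 9+):

    // seek partition 0 to offset 100 and partition 1 to offset 200
    ConsumerPosition position = new ConsumerPosition(SeekType.OFFSET, Map.of(0, 100L, 1, 200L));
    position.getSeekType(); // SeekType.OFFSET
    position.getSeekTo();   // {0=100, 1=200}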
ClusterService.java
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.cluster.service;

 import com.provectus.kafka.ui.cluster.mapper.ClusterMapper;
 import com.provectus.kafka.ui.cluster.model.ClustersStorage;
+import com.provectus.kafka.ui.cluster.model.ConsumerPosition;
 import com.provectus.kafka.ui.cluster.model.KafkaCluster;
 import com.provectus.kafka.ui.cluster.util.ClusterUtil;
 import com.provectus.kafka.ui.kafka.KafkaService;
@@ -149,9 +150,9 @@ public class ClusterService {
         });
     }

-    public Flux<TopicMessage> getMessages(String clusterName, String topicName, Integer partition, Long offset, OffsetDateTime timestamp) {
+    public Flux<TopicMessage> getMessages(String clusterName, String topicName, ConsumerPosition consumerPosition, Integer limit) {
         return clustersStorage.getClusterByName(clusterName)
-                .map(c -> consumingService.loadMessages(c, topicName))
+                .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, limit))
                 .orElse(Flux.empty());

    }
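The service keeps its lookup-then-delegate shape: an unknown cluster name simply misses in ClustersStorage and the Optional falls through to Flux.empty(), so callers get a completed, element-free stream rather than an error. A test-style sketch (assumes reactor-test's StepVerifier; position is any ConsumerPosition, and the cluster and topic names here are hypothetical):

    StepVerifier.create(clusterService.getMessages("no-such-cluster", "users", position, 10))
            .verifyComplete(); // completes without emitting any TopicMessage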
ConsumingService.java
@@ -5,6 +5,7 @@ import lombok.extern.log4j.Log4j2;

 import java.time.Duration;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.stream.Collectors;

@@ -15,10 +16,11 @@ import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.utils.Bytes;
 import org.springframework.stereotype.Service;

-import com.provectus.kafka.ui.cluster.model.InternalTopic;
+import com.provectus.kafka.ui.cluster.model.ConsumerPosition;
 import com.provectus.kafka.ui.cluster.model.KafkaCluster;
 import com.provectus.kafka.ui.cluster.util.ClusterUtil;
 import com.provectus.kafka.ui.kafka.KafkaService;
+import com.provectus.kafka.ui.model.SeekType;
 import com.provectus.kafka.ui.model.TopicMessage;

 import reactor.core.publisher.Flux;
@@ -30,18 +32,21 @@ import reactor.core.scheduler.Schedulers;
 @RequiredArgsConstructor
 public class ConsumingService {

-    // TODO: make this configurable
-    private static final int BATCH_SIZE = 20;
+    private static final int MAX_RECORD_LIMIT = 100;
+    private static final int DEFAULT_RECORD_LIMIT = 20;
+    private static final int MAX_POLLS_COUNT = 30;

     private final KafkaService kafkaService;

-    public Flux<TopicMessage> loadMessages(KafkaCluster cluster, String topic) {
-        RecordEmitter emitter = new RecordEmitter(kafkaService, cluster, topic);
+    public Flux<TopicMessage> loadMessages(KafkaCluster cluster, String topic, ConsumerPosition consumerPosition, Integer limit) {
+        int recordsLimit = Optional.ofNullable(limit)
+                .map(s -> Math.min(s, MAX_RECORD_LIMIT))
+                .orElse(DEFAULT_RECORD_LIMIT);
+        RecordEmitter emitter = new RecordEmitter(kafkaService, cluster, topic, consumerPosition);
         return Flux.create(emitter::emit)
                 .subscribeOn(Schedulers.boundedElastic())
                 .map(ClusterUtil::mapToTopicMessage)
-                .limitRequest(BATCH_SIZE);
+                .limitRequest(recordsLimit);
     }

     @RequiredArgsConstructor
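loadMessages now clamps the caller-supplied limit instead of always emitting a fixed BATCH_SIZE: a null limit falls back to DEFAULT_RECORD_LIMIT (20) and anything above MAX_RECORD_LIMIT is capped at 100. The logic reduces to this pure function (a sketch; clampLimit itself is not in the patch):

    static int clampLimit(Integer limit) {
        return Optional.ofNullable(limit)
                .map(l -> Math.min(l, MAX_RECORD_LIMIT)) // cap at 100
                .orElse(DEFAULT_RECORD_LIMIT);           // null -> 20
    }
    // clampLimit(null) == 20; clampLimit(50) == 50; clampLimit(500) == 100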
@@ -52,11 +57,14 @@ public class ConsumingService {
         private final KafkaService kafkaService;
         private final KafkaCluster cluster;
         private final String topic;
+        private final ConsumerPosition consumerPosition;

         public void emit(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
             try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-                assignPartitions(consumer, topic);
-                while (!sink.isCancelled()) {
+                assignPartitions(consumer);
+                seekOffsets(consumer);
+                int pollsCount = 0;
+                while (!sink.isCancelled() && ++pollsCount <= MAX_POLLS_COUNT) {
                     ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
                     log.info("{} records polled", records.count());
                     records.iterator()
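The poll loop now has two exits: downstream cancellation and the new MAX_POLLS_COUNT cap. Because loadMessages applies limitRequest(recordsLimit), the subscriber cancels once enough records have arrived, which flips sink.isCancelled() inside the loop; MAX_POLLS_COUNT bounds the case where the topic holds fewer records than requested, so the emitter does not poll forever. A minimal standalone sketch of that cancellation handshake (assumes Reactor on the classpath):

    // limitRequest(2) cancels the FluxSink after two elements,
    // which ends the producer loop via sink.isCancelled()
    Flux<Integer> numbers = Flux.create(sink -> {
        int i = 0;
        while (!sink.isCancelled()) {
            sink.next(i++);
        }
        sink.complete(); // no-op once the sink is cancelled
    });
    numbers.limitRequest(2).subscribe(System.out::println); // prints 0, 1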
@@ -68,16 +76,50 @@ public class ConsumingService {
             }
         }

-        private void assignPartitions(KafkaConsumer<Bytes, Bytes> consumer, String topicName) {
-            List<TopicPartition> partitions = Optional.ofNullable(cluster.getTopics().get(topicName))
-                    .orElseThrow(() -> new IllegalArgumentException("Unknown topic: " + topicName))
+        private List<TopicPartition> getRequestedPartitions() {
+            Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
+
+            return Optional.ofNullable(cluster.getTopics().get(topic))
+                    .orElseThrow(() -> new IllegalArgumentException("Unknown topic: " + topic))
                     .getPartitions().stream()
-                    .map(partitionInfo -> new TopicPartition(topicName, partitionInfo.getPartition()))
+                    .filter(internalPartition -> partitionPositions.isEmpty() || partitionPositions.containsKey(internalPartition.getPartition()))
+                    .map(partitionInfo -> new TopicPartition(topic, partitionInfo.getPartition()))
                     .collect(Collectors.toList());
+        }
+
+        private void assignPartitions(KafkaConsumer<Bytes, Bytes> consumer) {
+            List<TopicPartition> partitions = getRequestedPartitions();
+
             consumer.assign(partitions);
-            // TODO: seek to requested offsets
+        }
+
+        private void seekOffsets(KafkaConsumer<Bytes, Bytes> consumer) {
+            SeekType seekType = consumerPosition.getSeekType();
+            switch (seekType) {
+                case OFFSET:
+                    consumerPosition.getSeekTo().forEach((partition, offset) -> {
+                        TopicPartition topicPartition = new TopicPartition(topic, partition);
+                        consumer.seek(topicPartition, offset);
+                    });
+                    break;
+                case TIMESTAMP:
+                    Map<TopicPartition, Long> timestampsToSearch = consumerPosition.getSeekTo().entrySet().stream()
+                            .collect(Collectors.toMap(
+                                    partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
+                                    Map.Entry::getValue
+                            ));
+                    consumer.offsetsForTimes(timestampsToSearch)
+                            .forEach((topicPartition, offsetAndTimestamp) ->
+                                    consumer.seek(topicPartition, offsetAndTimestamp.offset())
+                            );
+                    break;
+                case BEGINNING:
+                    List<TopicPartition> partitions = getRequestedPartitions();
                     consumer.seekToBeginning(partitions);
+                    break;
+                default:
+                    throw new IllegalArgumentException("Unknown seekType: " + seekType);
+            }
         }
     }
 }
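A caveat on the TIMESTAMP branch: KafkaConsumer.offsetsForTimes returns a null OffsetAndTimestamp for any partition that has no record at or after the requested timestamp, so the unconditional offsetAndTimestamp.offset() call above can throw a NullPointerException when the timestamp lies beyond the end of a partition. A defensive variant (a sketch, not part of this patch) skips those partitions:

    consumer.offsetsForTimes(timestampsToSearch)
            .forEach((topicPartition, offsetAndTimestamp) -> {
                if (offsetAndTimestamp != null) { // null: no record at/after the timestamp
                    consumer.seek(topicPartition, offsetAndTimestamp.offset());
                }
            });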
MetricsRestController.java
@@ -1,9 +1,12 @@
 package com.provectus.kafka.ui.rest;

 import com.provectus.kafka.ui.api.ApiClustersApi;
+import com.provectus.kafka.ui.cluster.model.ConsumerPosition;
 import com.provectus.kafka.ui.cluster.service.ClusterService;
 import com.provectus.kafka.ui.model.*;
 import lombok.RequiredArgsConstructor;
+
+import org.apache.commons.lang3.tuple.Pair;
 import org.springframework.http.HttpStatus;
 import org.springframework.http.ResponseEntity;
 import org.springframework.web.bind.annotation.RestController;
@@ -11,8 +14,11 @@ import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;

+import java.util.Collections;
+import java.util.List;
+import java.util.function.Function;
+
 import javax.validation.Valid;
-import java.time.OffsetDateTime;

 @RestController
 @RequiredArgsConstructor
@@ -59,10 +65,9 @@ public class MetricsRestController implements ApiClustersApi {
     }

     @Override
-    public Mono<ResponseEntity<Flux<TopicMessage>>> getTopicMessages(String clusterName, String topicName, @Valid Integer partition, @Valid Long offset, @Valid OffsetDateTime timestamp, ServerWebExchange exchange) {
-        return Mono.just(
-                ResponseEntity.ok(clusterService.getMessages(clusterName, topicName, partition, offset, timestamp))
-        );
+    public Mono<ResponseEntity<Flux<TopicMessage>>> getTopicMessages(String clusterName, String topicName, @Valid SeekType seekType, @Valid List<String> seekTo, @Valid Integer limit, ServerWebExchange exchange) {
+        return parseConsumerPosition(seekType, seekTo)
+                .map(consumerPosition -> ResponseEntity.ok(clusterService.getMessages(clusterName, topicName, consumerPosition, limit)));
     }

     @Override
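Clients now steer the read position entirely through query parameters, with seekTo repeatable once per partition. Assuming the endpoint keeps the /api/clusters/{clusterName}/topics/{topicName}/messages path (the paths section itself is not shown in this diff, and the cluster and topic names here are hypothetical), a request for up to 50 messages starting at offset 100 on partition 0 and offset 200 on partition 1 would look like:

    GET /api/clusters/local/topics/users/messages?seekType=OFFSET&seekTo=0::100&seekTo=1::200&limit=50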
@@ -94,4 +99,20 @@ public class MetricsRestController implements ApiClustersApi {
     public Mono<ResponseEntity<Topic>> updateTopic(String clusterId, String topicName, @Valid Mono<TopicFormData> topicFormData, ServerWebExchange exchange) {
         return clusterService.updateTopic(clusterId, topicName, topicFormData).map(ResponseEntity::ok);
     }
+
+    private Mono<ConsumerPosition> parseConsumerPosition(SeekType seekType, List<String> seekTo) {
+        return Mono.justOrEmpty(seekTo)
+                .defaultIfEmpty(Collections.emptyList())
+                .flatMapIterable(Function.identity())
+                .map(p -> {
+                    String[] splited = p.split("::");
+                    if (splited.length != 2) {
+                        throw new IllegalArgumentException("Wrong seekTo argument format. See API docs for details");
+                    }
+
+                    return Pair.of(Integer.parseInt(splited[0]), Long.parseLong(splited[1]));
+                })
+                .collectMap(Pair::getKey, Pair::getValue)
+                .map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING, positions));
+    }
 }
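Each seekTo entry is split on "::" into a partition number and a position, collected into the Map<Integer, Long> that ConsumerPosition carries; when seekType is omitted it defaults to BEGINNING. Malformed entries fail the whole Mono: a wrong shape raises the IllegalArgumentException above, and non-numeric parts surface as NumberFormatException from the parse calls. For example:

    "0::100"            -> Pair.of(0, 100L)
    "1::1588888888000"  -> Pair.of(1, 1588888888000L)  // epoch millis when seekType=TIMESTAMP
    "0:100"             -> IllegalArgumentException (wrong format)
    "0::ten"            -> NumberFormatException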
API spec (swagger YAML)
@@ -214,20 +214,21 @@ paths:
           required: true
           schema:
             type: string
-        - name: partition
+        - name: seekType
           in: query
           schema:
-            type: integer
-        - name: offset
-          in: query
-          schema:
-            type: integer
-            format: int64
-        - name: timestamp
+            $ref: "#/components/schemas/SeekType"
+        - name: seekTo
           in: query
           schema:
-            type: string
-            format: date-time
+            type: array
+            items:
+              type: string
+          description: The format is [partition]::[offset] for specifying offsets or [partition]::[timestamp in millis] for specifying timestamps
+        - name: limit
+          in: query
+          schema:
+            type: integer
       responses:
         200:
           description: OK
@@ -463,6 +464,13 @@ components:
       - offset
       - timestamp

+    SeekType:
+      type: string
+      enum:
+        - BEGINNING
+        - OFFSET
+        - TIMESTAMP
+
     TopicPartitionDto:
       type: object
       properties:
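Putting the schema and the consumer code together, the three SeekType values interpret seekTo differently: BEGINNING uses the partition keys only as a filter (values are ignored, and seekTo may be omitted entirely to read all partitions from the start), OFFSET treats each value as an absolute offset, and TIMESTAMP treats it as epoch milliseconds resolved via offsetsForTimes. For example:

    seekType=BEGINNING                          -> start of every partition
    seekType=BEGINNING&seekTo=0::0              -> start of partition 0 (the value is ignored)
    seekType=OFFSET&seekTo=0::42                -> partition 0, offset 42
    seekType=TIMESTAMP&seekTo=0::1588550400000  -> partition 0, first record at or after that timestamp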