Code optimisations (#1648)

* Code optimisations

Co-authored-by: German Osin <germanosin@Germans-MacBook-Pro.local>
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
Co-authored-by: German Osin <germanosin@Germans-MBP.lan>
German Osin 2022-02-22 14:49:33 +03:00 committed by GitHub
parent 9cfbf20b70
commit cd24ff631e
22 changed files with 230 additions and 273 deletions


@@ -32,10 +32,10 @@ public abstract class AbstractEmitter {
     return records;
   }
 
-  protected FluxSink<TopicMessageEventDTO> sendMessage(FluxSink<TopicMessageEventDTO> sink,
-                                                       ConsumerRecord<Bytes, Bytes> msg) {
+  protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
+                             ConsumerRecord<Bytes, Bytes> msg) {
     final TopicMessageDTO topicMessage = ClusterUtil.mapToTopicMessage(msg, recordDeserializer);
-    return sink.next(
+    sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
             .message(topicMessage)
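Note: FluxSink.next() returns the sink itself to support fluent chaining, so the old signature leaked an implementation detail to callers. When a helper exists purely for its side effect, a void return states that intent. A minimal standalone sketch of the same idea (names are illustrative, not from this diff):

import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;

class SinkExample {
  // Helper used only for its side effect: emit one value into the sink.
  static void emit(FluxSink<String> sink, String value) {
    sink.next(value); // the sink returned by next() is deliberately ignored
  }

  public static void main(String[] args) {
    Flux.<String>create(sink -> {
      emit(sink, "hello");
      sink.complete();
    }).subscribe(System.out::println); // prints "hello"
  }
}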


@@ -4,5 +4,5 @@ public enum Feature {
   KAFKA_CONNECT,
   KSQL_DB,
   SCHEMA_REGISTRY,
-  TOPIC_DELETION;
+  TOPIC_DELETION
 }


@@ -12,6 +12,7 @@ import org.apache.kafka.clients.admin.AdminClient;
 import org.apache.kafka.clients.admin.AdminClientConfig;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Service;
+import org.springframework.util.StringUtils;
 import reactor.core.publisher.Mono;
 
 @Service


@@ -48,8 +48,6 @@ import reactor.core.scheduler.Schedulers;
 @RequiredArgsConstructor
 @Slf4j
 public class MessagesService {
 
   private final AdminClientService adminClientService;
   private final DeserializationService deserializationService;
   private final ConsumerGroupService consumerGroupService;
@@ -85,7 +83,7 @@ public class MessagesService {
     if (msg.getPartition() != null
         && msg.getPartition() > metricsCache.get(cluster).getTopicDescriptions()
             .get(topic).partitions().size() - 1) {
-      throw new ValidationException("Invalid partition");
+      return Mono.error(new ValidationException("Invalid partition"));
     }
     RecordSerDe serde =
         deserializationService.getRecordDeserializerForCluster(cluster);
@@ -118,6 +116,8 @@ public class MessagesService {
         }
       });
       return Mono.fromFuture(cf);
+    } catch (Throwable e) {
+      return Mono.error(e);
     }
   }
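Note: both changes in this file follow the same reactive convention: a method that returns a Mono should report failures through the pipeline as an onError signal rather than throwing at assembly time, so every subscriber sees the error. A minimal sketch of the pattern, with hypothetical names (validateAndSend and doSend are illustrations, not code from this commit):

import reactor.core.publisher.Mono;

class ReactiveErrorExample {
  Mono<String> validateAndSend(String payload) {
    if (payload == null || payload.isEmpty()) {
      // Signal the validation failure to subscribers instead of throwing.
      return Mono.error(new IllegalArgumentException("Empty payload"));
    }
    try {
      return Mono.just(doSend(payload)); // may throw synchronously
    } catch (Throwable e) {
      // Mirror of the catch added above: convert sync failures to onError.
      return Mono.error(e);
    }
  }

  private String doSend(String payload) {
    return "sent:" + payload; // stand-in for the real producer call
  }
}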


@@ -296,7 +296,7 @@ public class TopicsService {
         .collect(toList());
     // Iterate brokers and try to add them in assignment
-    // while (partition replicas count != requested replication factor)
+    // while partition replicas count != requested replication factor
     for (Integer broker : brokers) {
       if (!assignmentList.contains(broker)) {
         assignmentList.add(broker);
@@ -324,7 +324,7 @@ public class TopicsService {
         .collect(toList());
     // Iterate brokers and try to remove them from assignment
-    // while (partition replicas count != requested replication factor)
+    // while partition replicas count != requested replication factor
     for (Integer broker : brokersUsageList) {
       // Check is the broker the leader of partition
       if (!topic.getPartitions().get(partition).getLeader()


@@ -18,7 +18,7 @@ public class ResponseParser {
   }
 
   public static Optional<KsqlApiClient.KsqlResponseTable> parseSelectResponse(JsonNode jsonNode) {
-    // in response we getting either header record or row data
+    // in response, we're getting either header record or row data
     if (arrayFieldNonEmpty(jsonNode, "header")) {
       return Optional.of(
           KsqlApiClient.KsqlResponseTable.builder()


@@ -156,21 +156,21 @@ public class JmxClusterUtil {
         .metrics(metrics)
         .internalBrokerMetrics(perBrokerJmxMetrics)
         .bytesInPerSec(findTopicMetrics(
-            metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate))
+            metrics, JmxMetricsName.BYTES_IN_PER_SEC, JmxMetricsValueName.FIFTEEN_MINUTE_RATE))
         .bytesOutPerSec(findTopicMetrics(
-            metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate))
+            metrics, JmxMetricsName.BYTES_OUT_PER_SEC, JmxMetricsValueName.FIFTEEN_MINUTE_RATE))
         .build();
   }
 
   private Map<String, BigDecimal> findTopicMetrics(List<MetricDTO> metrics,
                                                    JmxMetricsName metricsName,
                                                    JmxMetricsValueName valueName) {
-    return metrics.stream().filter(m -> metricsName.name().equals(m.getName()))
+    return metrics.stream().filter(m -> metricsName.getValue().equals(m.getName()))
         .filter(m -> m.getParams().containsKey("topic"))
-        .filter(m -> m.getValue().containsKey(valueName.name()))
+        .filter(m -> m.getValue().containsKey(valueName.getValue()))
         .map(m -> Tuples.of(
             m.getParams().get("topic"),
-            m.getValue().get(valueName.name())
+            m.getValue().get(valueName.getValue())
         )).collect(groupingBy(
             Tuple2::getT1,
             reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add)
@@ -204,7 +204,7 @@ public class JmxClusterUtil {
   private boolean isWellKnownMetric(MetricDTO metric) {
     final Optional<String> param =
         Optional.ofNullable(metric.getParams().get(NAME_METRIC_FIELD)).filter(p ->
-            Arrays.stream(JmxMetricsName.values()).map(Enum::name)
+            Arrays.stream(JmxMetricsName.values()).map(JmxMetricsName::getValue)
                 .anyMatch(n -> n.equals(p))
         );
     return metric.getCanonicalName().contains(KAFKA_SERVER_PARAM) && param.isPresent();


@ -1,31 +1,41 @@
package com.provectus.kafka.ui.util; package com.provectus.kafka.ui.util;
public enum JmxMetricsName { public enum JmxMetricsName {
MessagesInPerSec, MESSAGES_IN_PER_SEC("MessagesInPerSec"),
BytesInPerSec, BYTES_IN_PER_SEC("BytesInPerSec"),
ReplicationBytesInPerSec, REPLICATION_BYTES_IN_PER_SEC("ReplicationBytesInPerSec"),
RequestsPerSec, REQUESTS_PER_SEC("RequestsPerSec"),
ErrorsPerSec, ERRORS_PER_SEC("ErrorsPerSec"),
MessageConversionsPerSec, MESSAGE_CONVERSIONS_PER_SEC("MessageConversionsPerSec"),
BytesOutPerSec, BYTES_OUT_PER_SEC("BytesOutPerSec"),
ReplicationBytesOutPerSec, REPLICATION_BYTES_OUT_PER_SEC("ReplicationBytesOutPerSec"),
NoKeyCompactedTopicRecordsPerSec, NO_KEY_COMPACTED_TOPIC_RECORDS_PER_SEC("NoKeyCompactedTopicRecordsPerSec"),
InvalidMagicNumberRecordsPerSec, INVALID_MAGIC_NUMBER_RECORDS_PER_SEC("InvalidMagicNumberRecordsPerSec"),
InvalidMessageCrcRecordsPerSec, INVALID_MESSAGE_CRC_RECORDS_PER_SEC("InvalidMessageCrcRecordsPerSec"),
InvalidOffsetOrSequenceRecordsPerSec, INVALID_OFFSET_OR_SEQUENCE_RECORDS_PER_SEC("InvalidOffsetOrSequenceRecordsPerSec"),
UncleanLeaderElectionsPerSec, UNCLEAN_LEADER_ELECTIONS_PER_SEC("UncleanLeaderElectionsPerSec"),
IsrShrinksPerSec, ISR_SHRINKS_PER_SEC("IsrShrinksPerSec"),
IsrExpandsPerSec, ISR_EXPANDS_PER_SEC("IsrExpandsPerSec"),
ReassignmentBytesOutPerSec, REASSIGNMENT_BYTES_OUT_PER_SEC("ReassignmentBytesOutPerSec"),
ReassignmentBytesInPerSec, REASSIGNMENT_BYTES_IN_PER_SEC("ReassignmentBytesInPerSec"),
ProduceMessageConversionsPerSec, PRODUCE_MESSAGE_CONVERSIONS_PER_SEC("ProduceMessageConversionsPerSec"),
FailedFetchRequestsPerSec, FAILED_FETCH_REQUESTS_PER_SEC("FailedFetchRequestsPerSec"),
ZooKeeperSyncConnectsPerSec, ZOOKEEPER_SYNC_CONNECTS_PER_SEC("ZooKeeperSyncConnectsPerSec"),
BytesRejectedPerSec, BYTES_REJECTED_PER_SEC("BytesRejectedPerSec"),
ZooKeeperAuthFailuresPerSec, ZOO_KEEPER_AUTH_FAILURES_PER_SEC("ZooKeeperAuthFailuresPerSec"),
TotalFetchRequestsPerSec, TOTAL_FETCH_REQUESTS_PER_SEC("TotalFetchRequestsPerSec"),
FailedIsrUpdatesPerSec, FAILED_ISR_UPDATES_PER_SEC("FailedIsrUpdatesPerSec"),
IncrementalFetchSessionEvictionsPerSec, INCREMENTAL_FETCH_SESSION_EVICTIONS_PER_SEC("IncrementalFetchSessionEvictionsPerSec"),
FetchMessageConversionsPerSec, FETCH_MESSAGE_CONVERSIONS_PER_SEC("FetchMessageConversionsPerSec"),
FailedProduceRequestsPerSec FAILED_PRODUCE_REQUESTS_PER_SEC("FailedProduceRequestsPerSe");
private final String value;
JmxMetricsName(String value) {
this.value = value;
}
public String getValue() {
return value;
}
} }
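Note: once the constants are renamed to UPPER_SNAKE_CASE, Enum::name no longer matches the CamelCase strings that JMX reports, which is why the lookups in JmxClusterUtil switch from name() to getValue(). A small sketch of the value-carrying enum pattern; the fromValue reverse lookup is an illustration and not part of this commit:

import java.util.Arrays;
import java.util.Optional;

enum MetricName {
  BYTES_IN_PER_SEC("BytesInPerSec"),
  BYTES_OUT_PER_SEC("BytesOutPerSec");

  private final String value;

  MetricName(String value) {
    this.value = value;
  }

  public String getValue() {
    return value;
  }

  // Hypothetical helper: map a raw JMX metric name back to a constant.
  static Optional<MetricName> fromValue(String raw) {
    return Arrays.stream(values())
        .filter(m -> m.getValue().equals(raw))
        .findFirst();
  }
}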


@ -1,9 +1,19 @@
package com.provectus.kafka.ui.util; package com.provectus.kafka.ui.util;
public enum JmxMetricsValueName { public enum JmxMetricsValueName {
Count, COUNT("Count"),
OneMinuteRate, ONE_MINUTE_RATE("OneMinuteRate"),
FifteenMinuteRate, FIFTEEN_MINUTE_RATE("FifteenMinuteRate"),
FiveMinuteRate, FIVE_MINUTE_RATE("FiveMinuteRate"),
MeanRate MEAN_RATE("MeanRate");
private final String value;
JmxMetricsValueName(String value) {
this.value = value;
}
public String getValue() {
return value;
}
} }


@@ -30,7 +30,7 @@ import java.util.Map;
 public final class KafkaConstants {
 
-  private static final String LONG_MAX_STRING = Long.valueOf(Long.MAX_VALUE).toString();
+  private static final String LONG_MAX_STRING = Long.toString(Long.MAX_VALUE);
 
   public static final Map<String, String> TOPIC_DEFAULT_CONFIGS = Map.ofEntries(
       new AbstractMap.SimpleEntry<>(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_DELETE),


@@ -16,9 +16,7 @@ public class ResultSizeLimiter implements Predicate<TopicMessageEventDTO> {
   public boolean test(TopicMessageEventDTO event) {
     if (event.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) {
       final int i = processed.incrementAndGet();
-      if (i > limit) {
-        return false;
-      }
+      return i <= limit;
     }
     return true;
   }
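Note: the simplification is behavior-preserving: non-MESSAGE events always pass, and MESSAGE events pass only while the running count stays within the limit. A hedged sketch of how such a counting predicate can cap a reactive stream (the takeWhile usage is illustrative, an assumption rather than code from this repository):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import reactor.core.publisher.Flux;

class LimiterExample {
  // Returns a stateful predicate that passes elements until the count exceeds the limit.
  static <T> Predicate<T> sizeLimiter(int limit) {
    AtomicInteger processed = new AtomicInteger();
    return t -> processed.incrementAndGet() <= limit;
  }

  public static void main(String[] args) {
    Flux.range(1, 10)
        .takeWhile(sizeLimiter(3)) // completes the stream after three elements
        .subscribe(System.out::println); // prints 1 2 3
  }
}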


@@ -22,6 +22,8 @@ import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient;
 import org.springframework.test.context.ContextConfiguration;
 import org.springframework.test.web.reactive.server.WebTestClient;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
 
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 @Slf4j
@@ -48,8 +50,13 @@ public class KafkaConsumerTests extends AbstractBaseTest {
         .isOk();
 
     try (KafkaTestProducer<String, String> producer = KafkaTestProducer.forKafka(kafka)) {
+      Flux.fromStream(
           Stream.of("one", "two", "three", "four")
-          .forEach(value -> producer.send(topicName, value));
+              .map(value -> Mono.fromFuture(producer.send(topicName, value)))
+      ).blockLast();
+    } catch (Throwable e) {
+      log.error("Error on sending", e);
+      throw new RuntimeException(e);
     }
 
     long count = webTestClient.get()


@@ -76,12 +76,12 @@ class TailingEmitterTest extends AbstractBaseTest {
     Awaitility.await()
         .atMost(Duration.ofSeconds(60))
         .pollInSameThread()
-        .untilAsserted(() -> {
+        .untilAsserted(() ->
             assertThat(fluxOutput)
                 .filteredOn(msg -> msg.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
                 .extracting(msg -> msg.getMessage().getContent())
-                .hasSameElementsAs(expectedValues);
-        });
+                .hasSameElementsAs(expectedValues)
+        );
   }
 
   @Test
@@ -101,12 +101,12 @@ class TailingEmitterTest extends AbstractBaseTest {
     Awaitility.await()
         .atMost(Duration.ofSeconds(60))
         .pollInSameThread()
-        .untilAsserted(() -> {
+        .untilAsserted(() ->
            assertThat(fluxOutput)
                .filteredOn(msg -> msg.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
                .extracting(msg -> msg.getMessage().getContent())
-                .hasSameElementsAs(expectedValues);
-        });
+                .hasSameElementsAs(expectedValues)
+        );
   }
 
   private Flux<TopicMessageEventDTO> createTailingFlux(


@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.producer;
 
 import java.util.Map;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Future;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
@@ -25,12 +26,20 @@ public class KafkaTestProducer<KeyT, ValueT> implements AutoCloseable {
     )));
   }
 
-  public Future<RecordMetadata> send(String topic, ValueT value) {
-    return producer.send(new ProducerRecord<>(topic, value));
+  public CompletableFuture<RecordMetadata> send(String topic, ValueT value) {
+    return send(new ProducerRecord<>(topic, value));
   }
 
-  public Future<RecordMetadata> send(ProducerRecord<KeyT, ValueT> record) {
-    return producer.send(record);
+  public CompletableFuture<RecordMetadata> send(ProducerRecord<KeyT, ValueT> record) {
+    CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
+    producer.send(record, (m, e) -> {
+      if (e != null) {
+        cf.completeExceptionally(e);
+      } else {
+        cf.complete(m);
+      }
+    });
+    return cf;
   }
 
   @Override
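Note: KafkaProducer.send returns a bare Future<RecordMetadata>, which cannot be composed or converted to a Mono without blocking; the callback overload used above bridges it to a CompletableFuture. The same bridge in isolation, as a self-contained sketch (ProducerFutures is a hypothetical helper name):

import java.util.concurrent.CompletableFuture;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

final class ProducerFutures {
  // Adapt Kafka's callback-style send to a composable CompletableFuture.
  static <K, V> CompletableFuture<RecordMetadata> sendAsync(
      Producer<K, V> producer, ProducerRecord<K, V> record) {
    CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
    producer.send(record, (meta, err) -> {
      if (err != null) {
        cf.completeExceptionally(err); // delivery failed
      } else {
        cf.complete(meta); // delivery acknowledged
      }
    });
    return cf;
  }
}

This is what allows KafkaConsumerTests above to wrap each send in Mono.fromFuture and block on the whole batch.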


@@ -1,7 +1,5 @@
 package com.provectus.kafka.ui.service;
 
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-
 import com.provectus.kafka.ui.AbstractBaseTest;
 import com.provectus.kafka.ui.mapper.ClusterMapperImpl;
 import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
@@ -45,15 +43,4 @@ class BrokerServiceTest extends AbstractBaseTest {
         .verifyComplete();
   }
-
-  @Test
-  void getBrokersNull() {
-    assertThatThrownBy(() -> brokerService.getBrokers(null)).isInstanceOf(NullPointerException.class);
-  }
-
-  @Test
-  void getBrokersEmpty() {
-    assertThatThrownBy(() -> brokerService.getBrokers(KafkaCluster.builder().build())).isInstanceOf(
-        NullPointerException.class);
-  }
 }


@@ -4,6 +4,7 @@ import static org.assertj.core.api.Assertions.assertThat;
 import com.provectus.kafka.ui.AbstractBaseTest;
 import com.provectus.kafka.ui.model.BrokerConfigDTO;
+import java.time.Duration;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -13,6 +14,7 @@ import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient;
 import org.springframework.core.ParameterizedTypeReference;
 import org.springframework.test.context.ContextConfiguration;
 import org.springframework.test.web.reactive.server.WebTestClient;
+import org.testcontainers.shaded.org.awaitility.Awaitility;
 
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 @AutoConfigureWebTestClient(timeout = "60000")
@@ -22,29 +24,33 @@ public class ConfigTest extends AbstractBaseTest {
   private WebTestClient webTestClient;
 
   @Test
-  public void testAlterConfig() throws Exception {
+  public void testAlterConfig() {
     String name = "background.threads";
 
     Optional<BrokerConfigDTO> bc = getConfig(name);
     assertThat(bc.isPresent()).isTrue();
     assertThat(bc.get().getValue()).isEqualTo("10");
 
+    final String newValue = "5";
+
     webTestClient.put()
         .uri("/api/clusters/{clusterName}/brokers/{id}/configs/{name}", LOCAL, 1, name)
         .bodyValue(Map.of(
             "name", name,
-            "value", "5"
+            "value", newValue
             )
         )
         .exchange()
         .expectStatus().isOk();
 
-    // Without sleep it returns old config so we need to wait a little bit
-    Thread.sleep(1000);
-
-    Optional<BrokerConfigDTO> bcc = getConfig(name);
-    assertThat(bcc.isPresent()).isTrue();
-    assertThat(bcc.get().getValue()).isEqualTo("5");
+    Awaitility.await()
+        .atMost(Duration.ofSeconds(10))
+        .pollInSameThread()
+        .untilAsserted(() -> {
+          Optional<BrokerConfigDTO> bcc = getConfig(name);
+          assertThat(bcc.isPresent()).isTrue();
+          assertThat(bcc.get().getValue()).isEqualTo(newValue);
+        });
   }
 
   @Test
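Note: a fixed Thread.sleep(1000) is both flaky (the broker may need longer) and always slow (it waits the full second even when the config propagates instantly); Awaitility polls the assertion and returns as soon as it passes. A minimal sketch of the idiom, using the plain org.awaitility artifact rather than the testcontainers-shaded copy imported above (fetchValue is a stand-in for getConfig):

import static org.assertj.core.api.Assertions.assertThat;

import java.time.Duration;
import org.awaitility.Awaitility;

class AwaitExample {
  // Polls until the assertion passes or 10 seconds elapse, then rethrows the last failure.
  void awaitNewValue(String expected) {
    Awaitility.await()
        .atMost(Duration.ofSeconds(10))
        .pollInSameThread()
        .untilAsserted(() -> assertThat(fetchValue()).isEqualTo(expected));
  }

  private String fetchValue() {
    return "5"; // stand-in for re-reading the broker config
  }
}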


@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.service;
 
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import com.provectus.kafka.ui.AbstractBaseTest;
 import com.provectus.kafka.ui.exception.NotFoundException;
@@ -32,6 +31,8 @@ import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.springframework.test.context.ContextConfiguration;
+import reactor.core.publisher.Mono;
+import reactor.test.StepVerifier;
 
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 public class OffsetsResetServiceTest extends AbstractBaseTest {
@@ -75,45 +76,45 @@ public class OffsetsResetServiceTest extends AbstractBaseTest {
   @Test
   void failsIfGroupDoesNotExists() {
-    assertThatThrownBy(
-        () -> offsetsResetService
-            .resetToEarliest(CLUSTER, "non-existing-group", topic, null).block()
-    ).isInstanceOf(NotFoundException.class);
-    assertThatThrownBy(
-        () -> offsetsResetService
-            .resetToLatest(CLUSTER, "non-existing-group", topic, null).block()
-    ).isInstanceOf(NotFoundException.class);
-    assertThatThrownBy(() -> offsetsResetService
-        .resetToTimestamp(CLUSTER, "non-existing-group", topic, null, System.currentTimeMillis())
-        .block()
-    ).isInstanceOf(NotFoundException.class);
-    assertThatThrownBy(
-        () -> offsetsResetService
-            .resetToOffsets(CLUSTER, "non-existing-group", topic, Map.of())
-            .block()
-    ).isInstanceOf(NotFoundException.class);
+    List<Mono<?>> expectedNotFound = List.of(
+        offsetsResetService
+            .resetToEarliest(CLUSTER, "non-existing-group", topic, null),
+        offsetsResetService
+            .resetToLatest(CLUSTER, "non-existing-group", topic, null),
+        offsetsResetService
+            .resetToTimestamp(CLUSTER, "non-existing-group", topic, null, System.currentTimeMillis()),
+        offsetsResetService
+            .resetToOffsets(CLUSTER, "non-existing-group", topic, Map.of())
+    );
+
+    for (Mono<?> mono : expectedNotFound) {
+      StepVerifier.create(mono)
+          .expectErrorMatches(t -> t instanceof NotFoundException)
+          .verify();
+    }
   }
 
   @Test
   void failsIfGroupIsActive() {
     // starting consumer to activate group
     try (var consumer = groupConsumer()) {
       consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
       consumer.poll(Duration.ofMillis(100));
 
-      assertThatThrownBy(() ->
-          offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null).block()
-      ).isInstanceOf(ValidationException.class);
-      assertThatThrownBy(
-          () -> offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null).block()
-      ).isInstanceOf(ValidationException.class);
-      assertThatThrownBy(() -> offsetsResetService
-          .resetToTimestamp(CLUSTER, groupId, topic, null, System.currentTimeMillis())
-          .block()
-      ).isInstanceOf(ValidationException.class);
-      assertThatThrownBy(
-          () -> offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, Map.of()).block()
-      ).isInstanceOf(ValidationException.class);
+      List<Mono<?>> expectedValidationError = List.of(
+          offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null),
+          offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null),
+          offsetsResetService
+              .resetToTimestamp(CLUSTER, groupId, topic, null, System.currentTimeMillis()),
+          offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, Map.of())
+      );
+
+      for (Mono<?> mono : expectedValidationError) {
+        StepVerifier.create(mono)
+            .expectErrorMatches(t -> t instanceof ValidationException)
+            .verify();
+      }
     }
   }
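Note: assertThatThrownBy(... .block()) only worked because block() rethrows the onError signal; StepVerifier subscribes for the test and asserts the terminal signal directly, which also works for pipelines that must not be blocked. The error-expectation idiom in isolation:

import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;

class StepVerifierErrorExample {
  void verifiesErrorSignal() {
    Mono<Object> failing = Mono.error(new IllegalStateException("group is active"));

    // Subscribe, expect exactly one onError of the given type, then verify.
    StepVerifier.create(failing)
        .expectErrorMatches(t -> t instanceof IllegalStateException)
        .verify();
  }
}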


@@ -24,6 +24,8 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 import lombok.Value;
 import lombok.extern.slf4j.Slf4j;
@@ -40,6 +42,8 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.springframework.test.context.ContextConfiguration;
 import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
+import reactor.test.StepVerifier;
 
 @Slf4j
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
@@ -106,22 +110,17 @@ class RecordEmitterTest extends AbstractBaseTest {
         ), new SimpleRecordSerDe()
     );
 
-    Long polledValues = Flux.create(forwardEmitter)
-        .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .limitRequest(100)
-        .count()
-        .block();
-
-    assertThat(polledValues).isZero();
-
-    polledValues = Flux.create(backwardEmitter)
-        .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .limitRequest(100)
-        .count()
-        .block();
-
-    assertThat(polledValues).isZero();
+    StepVerifier.create(
+        Flux.create(forwardEmitter)
+            .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
+            .take(100)
+    ).expectNextCount(0).expectComplete().verify();
+
+    StepVerifier.create(
+        Flux.create(backwardEmitter)
+            .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
+            .take(100)
+    ).expectNextCount(0).expectComplete().verify();
   }
 
   @Test
@@ -136,35 +135,15 @@ class RecordEmitterTest extends AbstractBaseTest {
     var backwardEmitter = new BackwardRecordEmitter(
         this::createConsumer,
         new OffsetsSeekBackward(TOPIC,
-            new ConsumerPosition(BEGINNING, Map.of(), FORWARD),
+            new ConsumerPosition(BEGINNING, Map.of(), BACKWARD),
             PARTITIONS * MSGS_PER_PARTITION
         ), new SimpleRecordSerDe()
     );
 
-    var polledValues = Flux.create(forwardEmitter)
-        .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .limitRequest(Long.MAX_VALUE)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .collect(Collectors.toList())
-        .block();
-
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(
-        SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()));
-
-    polledValues = Flux.create(backwardEmitter)
-        .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .limitRequest(Long.MAX_VALUE)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .collect(Collectors.toList())
-        .block();
-
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(
-        SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()));
+    List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());
+
+    expectEmitter(forwardEmitter, expectedValues);
+    expectEmitter(backwardEmitter, expectedValues);
   }
 
   @Test
@@ -190,37 +169,19 @@ class RecordEmitterTest extends AbstractBaseTest {
         ), new SimpleRecordSerDe()
     );
 
-    var polledValues = Flux.create(forwardEmitter)
-        .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .limitRequest(Long.MAX_VALUE)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .collect(Collectors.toList())
-        .block();
-
     var expectedValues = SENT_RECORDS.stream()
         .filter(r -> r.getOffset() >= targetOffsets.get(r.getTp()))
         .map(Record::getValue)
         .collect(Collectors.toList());
 
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+    expectEmitter(forwardEmitter, expectedValues);
 
     expectedValues = SENT_RECORDS.stream()
        .filter(r -> r.getOffset() < targetOffsets.get(r.getTp()))
        .map(Record::getValue)
        .collect(Collectors.toList());
 
-    polledValues = Flux.create(backwardEmitter)
-        .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .limitRequest(Long.MAX_VALUE)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .collect(Collectors.toList())
-        .block();
-
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+    expectEmitter(backwardEmitter, expectedValues);
   }
 
   @Test
@@ -253,36 +214,19 @@ class RecordEmitterTest extends AbstractBaseTest {
         ), new SimpleRecordSerDe()
     );
 
-    var polledValues = Flux.create(forwardEmitter)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .limitRequest(Long.MAX_VALUE)
-        .collect(Collectors.toList())
-        .block();
-
     var expectedValues = SENT_RECORDS.stream()
         .filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp()))
         .map(Record::getValue)
         .collect(Collectors.toList());
 
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+    expectEmitter(forwardEmitter, expectedValues);
 
-    polledValues = Flux.create(backwardEmitter)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .limitRequest(Long.MAX_VALUE)
-        .collect(Collectors.toList())
-        .block();
-
     expectedValues = SENT_RECORDS.stream()
         .filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp()))
         .map(Record::getValue)
         .collect(Collectors.toList());
 
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+    expectEmitter(backwardEmitter, expectedValues);
   }
 
   @Test
@@ -301,22 +245,15 @@ class RecordEmitterTest extends AbstractBaseTest {
         ), new SimpleRecordSerDe()
     );
 
-    var polledValues = Flux.create(backwardEmitter)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .limitRequest(numMessages)
-        .collect(Collectors.toList())
-        .block();
-
     var expectedValues = SENT_RECORDS.stream()
         .filter(r -> r.getOffset() < targetOffsets.get(r.getTp()))
-        .filter(r -> r.getOffset() >= (targetOffsets.get(r.getTp()) - (100 / PARTITIONS)))
+        .filter(r -> r.getOffset() >= (targetOffsets.get(r.getTp()) - (numMessages / PARTITIONS)))
         .map(Record::getValue)
         .collect(Collectors.toList());
 
+    assertThat(expectedValues).size().isEqualTo(numMessages);
 
-    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+    expectEmitter(backwardEmitter, expectedValues);
   }
 
   @Test
@@ -334,15 +271,39 @@ class RecordEmitterTest extends AbstractBaseTest {
         ), new SimpleRecordSerDe()
     );
 
-    var polledValues = Flux.create(backwardEmitter)
-        .filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
-        .map(TopicMessageEventDTO::getMessage)
-        .map(m -> m.getContent().toString())
-        .limitRequest(Long.MAX_VALUE)
-        .collect(Collectors.toList())
-        .block();
-
-    assertThat(polledValues).isEmpty();
+    expectEmitter(backwardEmitter,
+        100,
+        e -> e.expectNextCount(0),
+        StepVerifier.Assertions::hasNotDroppedElements
+    );
+  }
+
+  private void expectEmitter(Consumer<FluxSink<TopicMessageEventDTO>> emitter, List<String> expectedValues) {
+    expectEmitter(emitter,
+        expectedValues.size(),
+        e -> e.recordWith(ArrayList::new)
+            .expectNextCount(expectedValues.size())
+            .expectRecordedMatches(r -> r.containsAll(expectedValues))
+            .consumeRecordedWith(r -> log.info("Collected collection: {}", r)),
+        v -> {}
+    );
+  }
+
+  private void expectEmitter(
+      Consumer<FluxSink<TopicMessageEventDTO>> emitter,
+      int take,
+      Function<StepVerifier.Step<String>, StepVerifier.Step<String>> stepConsumer,
+      Consumer<StepVerifier.Assertions> assertionsConsumer) {
+    StepVerifier.FirstStep<String> firstStep = StepVerifier.create(
+        Flux.create(emitter)
+            .filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
+            .take(take)
+            .map(m -> m.getMessage().getContent())
+    );
+
+    StepVerifier.Step<String> step = stepConsumer.apply(firstStep);
+    assertionsConsumer.accept(step.expectComplete().verifyThenAssertThat());
   }
 
   private KafkaConsumer<Bytes, Bytes> createConsumer() {
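Note: besides deduplicating the assertions into the expectEmitter helpers, the tests above move from limitRequest(n) to take(n): take cancels the upstream after n elements and completes the downstream, which is what lets expectComplete() fire even against an emitter that would otherwise keep polling. A minimal sketch of that completion behaviour:

import java.util.List;
import reactor.core.publisher.Flux;
import reactor.test.StepVerifier;

class TakeExample {
  void takeCompletesAfterN() {
    Flux<Integer> source = Flux.fromIterable(List.of(1, 2, 3, 4, 5));

    // take(3) cancels upstream and completes downstream after three elements.
    StepVerifier.create(source.take(3))
        .expectNext(1, 2, 3)
        .expectComplete()
        .verify();
  }
}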


@@ -85,7 +85,7 @@ public class SchemaRegistryPaginationTest {
             .toArray(String[]::new)
     );
     var schemas = controller.getSchemas(LOCAL_KAFKA_CLUSTER_NAME,
-        0, -1, null, null).block();;
+        0, -1, null, null).block();
 
     assertThat(schemas.getBody().getPageCount()).isEqualTo(4);
     assertThat(schemas.getBody().getSchemas()).hasSize(25);


@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.service;
 
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.provectus.kafka.ui.AbstractBaseTest;
@@ -29,6 +28,7 @@ import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.test.context.ContextConfiguration;
+import reactor.test.StepVerifier;
 
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 public class SendAndReadTests extends AbstractBaseTest {
@@ -526,8 +526,9 @@ public class SendAndReadTests extends AbstractBaseTest {
   public void assertSendThrowsException() {
     String topic = createTopicAndCreateSchemas();
     try {
-      assertThatThrownBy(() ->
-          messagesService.sendMessage(targetCluster, topic, msgToSend).block());
+      StepVerifier.create(
+          messagesService.sendMessage(targetCluster, topic, msgToSend)
+      ).expectError().verify();
     } finally {
       deleteTopic(topic);
     }


@@ -13,7 +13,9 @@ import com.provectus.kafka.ui.model.TableDTO;
 import java.util.List;
 import lombok.SneakyThrows;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DynamicTest;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestFactory;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.junit.jupiter.MockitoExtension;
@@ -53,64 +55,29 @@ class ShowStrategyTest {
     assertFalse(strategy.test("LIST PROPERTIES;"));
   }
 
-  @Test
-  void shouldSerializeStreamsResponse() {
-    JsonNode node = getResponseWithData("streams");
-    strategy.test("show streams;");
-    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
-    TableDTO table = serializedResponse.getData();
-    assertThat(table.getHeaders()).isEqualTo(List.of("header"));
-    assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
-  }
-
-  @Test
-  void shouldSerializeTablesResponse() {
-    JsonNode node = getResponseWithData("tables");
-    strategy.test("show tables;");
-    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
-    TableDTO table = serializedResponse.getData();
-    assertThat(table.getHeaders()).isEqualTo(List.of("header"));
-    assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
-  }
-
-  @Test
-  void shouldSerializeTopicsResponse() {
-    JsonNode node = getResponseWithData("topics");
-    strategy.test("show topics;");
-    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
-    TableDTO table = serializedResponse.getData();
-    assertThat(table.getHeaders()).isEqualTo(List.of("header"));
-    assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
-  }
-
-  @Test
-  void shouldSerializePropertiesResponse() {
-    JsonNode node = getResponseWithData("properties");
-    strategy.test("show properties;");
-    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
-    TableDTO table = serializedResponse.getData();
-    assertThat(table.getHeaders()).isEqualTo(List.of("header"));
-    assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
-  }
-
-  @Test
-  void shouldSerializeFunctionsResponse() {
-    JsonNode node = getResponseWithData("functions");
-    strategy.test("show functions;");
-    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
-    TableDTO table = serializedResponse.getData();
-    assertThat(table.getHeaders()).isEqualTo(List.of("header"));
-    assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
-  }
-
-  @Test
-  void shouldSerializeQueriesResponse() {
-    JsonNode node = getResponseWithData("queries");
-    strategy.test("show queries;");
-    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
-    TableDTO table = serializedResponse.getData();
-    assertThat(table.getHeaders()).isEqualTo(List.of("header"));
-    assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
-  }
+  @TestFactory
+  public Iterable<DynamicTest> shouldSerialize() {
+    return List.of(
+        shouldSerializeGenerate("streams", "show streams;"),
+        shouldSerializeGenerate("tables", "show tables;"),
+        shouldSerializeGenerate("topics", "show topics;"),
+        shouldSerializeGenerate("properties", "show properties;"),
+        shouldSerializeGenerate("functions", "show functions;"),
+        shouldSerializeGenerate("queries", "show queries;")
+    );
+  }
+
+  public DynamicTest shouldSerializeGenerate(final String key, final String sql) {
+    return DynamicTest.dynamicTest("Should serialize " + key,
+        () -> {
+          JsonNode node = getResponseWithData(key);
+          strategy.test(sql);
+          KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
+          TableDTO table = serializedResponse.getData();
+          assertThat(table.getHeaders()).isEqualTo(List.of("header"));
+          assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
        }
+    );
+  }
 
   @Test
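Note: the six copy-pasted shouldSerialize*Response tests collapse into one @TestFactory that still reports a separately named test per SHOW statement. The JUnit 5 dynamic-test idiom in a standalone sketch:

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.List;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.TestFactory;

class DynamicTestExample {
  @TestFactory
  Iterable<DynamicTest> showStatements() {
    // One DynamicTest per input; the runner reports each case individually.
    return List.of(
        caseFor("streams"),
        caseFor("tables"),
        caseFor("topics")
    );
  }

  private DynamicTest caseFor(String key) {
    String sql = "show " + key + ";";
    return DynamicTest.dynamicTest("Should serialize " + key,
        () -> assertTrue(sql.endsWith(";"))); // stand-in for the real serialization assertions
  }
}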


@@ -5,7 +5,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.github.fge.jsonschema.core.exceptions.ProcessingException;
 import com.github.fge.jsonschema.core.report.ProcessingReport;
 import com.github.fge.jsonschema.main.JsonSchemaFactory;
-import com.provectus.kafka.ui.serde.schemaregistry.AvroMessageFormatter;
 import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
 import java.io.IOException;
 import java.net.URI;