package com.provectus.kafka.ui.service;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.serialization.BytesSerializer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
  33. public class OffsetsResetServiceTest extends AbstractIntegrationTest {
  34. private static final int PARTITIONS = 5;
  35. private final String groupId = "OffsetsResetServiceTestGroup-" + UUID.randomUUID();
  36. private final String topic = "OffsetsResetServiceTestTopic-" + UUID.randomUUID();
  37. private KafkaCluster cluster;
  38. private OffsetsResetService offsetsResetService;
  39. @BeforeEach
  40. void init() {
  41. cluster = applicationContext.getBean(ClustersStorage.class).getClusterByName(LOCAL).get();
  42. offsetsResetService = new OffsetsResetService(applicationContext.getBean(AdminClientService.class));
  43. createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
  44. createConsumerGroup();
  45. }
  46. @AfterEach
  47. void cleanUp() {
  48. deleteTopic(topic);
  49. }
  50. private void createConsumerGroup() {
  51. try (var consumer = groupConsumer()) {
  52. consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
  53. consumer.poll(Duration.ofMillis(200));
  54. consumer.commitSync();
  55. }
  56. }
  57. @Test
  58. void failsIfGroupDoesNotExists() {
  59. List<Mono<?>> expectedNotFound = List.of(
  60. offsetsResetService
  61. .resetToEarliest(cluster, "non-existing-group", topic, null),
  62. offsetsResetService
  63. .resetToLatest(cluster, "non-existing-group", topic, null),
  64. offsetsResetService
  65. .resetToTimestamp(cluster, "non-existing-group", topic, null, System.currentTimeMillis()),
  66. offsetsResetService
  67. .resetToOffsets(cluster, "non-existing-group", topic, Map.of())
  68. );
  69. for (Mono<?> mono : expectedNotFound) {
  70. StepVerifier.create(mono)
  71. .expectErrorMatches(t -> t instanceof NotFoundException)
  72. .verify();
  73. }
  74. }
  75. @Test
  76. void failsIfGroupIsActive() {
  77. // starting consumer to activate group
  78. try (var consumer = groupConsumer()) {
  79. consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
  80. consumer.poll(Duration.ofMillis(100));
  81. List<Mono<?>> expectedValidationError = List.of(
  82. offsetsResetService.resetToEarliest(cluster, groupId, topic, null),
  83. offsetsResetService.resetToLatest(cluster, groupId, topic, null),
  84. offsetsResetService
  85. .resetToTimestamp(cluster, groupId, topic, null, System.currentTimeMillis()),
  86. offsetsResetService.resetToOffsets(cluster, groupId, topic, Map.of())
  87. );
  88. for (Mono<?> mono : expectedValidationError) {
  89. StepVerifier.create(mono)
  90. .expectErrorMatches(t -> t instanceof ValidationException)
  91. .verify();
  92. }
  93. }
  94. }
  95. @Test
  96. void resetToOffsets() {
  97. sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
  98. var expectedOffsets = Map.of(0, 5L, 1, 5L, 2, 5L);
  99. offsetsResetService.resetToOffsets(cluster, groupId, topic, expectedOffsets).block();
  100. assertOffsets(expectedOffsets);
  101. }
  102. @Test
  103. void resetToOffsetsCommitsEarliestOrLatestOffsetsIfOffsetsBoundsNotValid() {
  104. sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
  105. var offsetsWithInValidBounds = Map.of(0, -2L, 1, 5L, 2, 500L);
  106. var expectedOffsets = Map.of(0, 0L, 1, 5L, 2, 10L);
  107. offsetsResetService.resetToOffsets(cluster, groupId, topic, offsetsWithInValidBounds).block();
  108. assertOffsets(expectedOffsets);
  109. }
  110. @Test
  111. void resetToEarliest() {
  112. sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
  113. commit(Map.of(0, 5L, 1, 5L, 2, 5L));
  114. offsetsResetService.resetToEarliest(cluster, groupId, topic, List.of(0, 1)).block();
  115. assertOffsets(Map.of(0, 0L, 1, 0L, 2, 5L));
  116. commit(Map.of(0, 5L, 1, 5L, 2, 5L));
  117. offsetsResetService.resetToEarliest(cluster, groupId, topic, null).block();
  118. assertOffsets(Map.of(0, 0L, 1, 0L, 2, 0L, 3, 0L, 4, 0L));
  119. }
  120. @Test
  121. void resetToLatest() {
  122. sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10, 3, 10, 4, 10));
  123. commit(Map.of(0, 5L, 1, 5L, 2, 5L));
  124. offsetsResetService.resetToLatest(cluster, groupId, topic, List.of(0, 1)).block();
  125. assertOffsets(Map.of(0, 10L, 1, 10L, 2, 5L));
  126. commit(Map.of(0, 5L, 1, 5L, 2, 5L));
  127. offsetsResetService.resetToLatest(cluster, groupId, topic, null).block();
  128. assertOffsets(Map.of(0, 10L, 1, 10L, 2, 10L, 3, 10L, 4, 10L));
  129. }
  130. @Test
  131. void resetToTimestamp() {
  132. send(
  133. Stream.of(
  134. new ProducerRecord<Bytes, Bytes>(topic, 0, 1000L, null, null),
  135. new ProducerRecord<Bytes, Bytes>(topic, 0, 1500L, null, null),
  136. new ProducerRecord<Bytes, Bytes>(topic, 0, 2000L, null, null),
  137. new ProducerRecord<Bytes, Bytes>(topic, 1, 1000L, null, null),
  138. new ProducerRecord<Bytes, Bytes>(topic, 1, 2000L, null, null),
  139. new ProducerRecord<Bytes, Bytes>(topic, 2, 1000L, null, null),
  140. new ProducerRecord<Bytes, Bytes>(topic, 2, 1100L, null, null),
  141. new ProducerRecord<Bytes, Bytes>(topic, 2, 1200L, null, null)));
  142. offsetsResetService.resetToTimestamp(
  143. cluster, groupId, topic, List.of(0, 1, 2, 3), 1600L
  144. ).block();
  145. assertOffsets(Map.of(0, 2L, 1, 1L, 2, 3L, 3, 0L));
  146. }
  147. private void commit(Map<Integer, Long> offsetsToCommit) {
  148. try (var consumer = groupConsumer()) {
  149. consumer.commitSync(
  150. offsetsToCommit.entrySet().stream()
  151. .collect(Collectors.toMap(
  152. e -> new TopicPartition(topic, e.getKey()),
  153. e -> new OffsetAndMetadata(e.getValue())))
  154. );
  155. }
  156. }
  157. private void sendMsgsToPartition(Map<Integer, Integer> msgsCountForPartitions) {
  158. Bytes bytes = new Bytes("noMatter".getBytes());
  159. send(
  160. msgsCountForPartitions.entrySet().stream()
  161. .flatMap(e ->
  162. IntStream.range(0, e.getValue())
  163. .mapToObj(i -> new ProducerRecord<>(topic, e.getKey(), bytes, bytes))));
  164. }
  165. private void send(Stream<ProducerRecord<Bytes, Bytes>> toSend) {
  166. var properties = new Properties();
  167. properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
  168. var serializer = new BytesSerializer();
  169. try (var producer = new KafkaProducer<>(properties, serializer, serializer)) {
  170. toSend.forEach(producer::send);
  171. producer.flush();
  172. }
  173. }
  174. private void assertOffsets(Map<Integer, Long> expectedOffsets) {
  175. try (var consumer = groupConsumer()) {
  176. var tps = expectedOffsets.keySet().stream()
  177. .map(idx -> new TopicPartition(topic, idx))
  178. .collect(Collectors.toSet());
  179. var actualOffsets = consumer.committed(tps).entrySet().stream()
  180. .collect(Collectors.toMap(e -> e.getKey().partition(), e -> e.getValue().offset()));
  181. assertThat(actualOffsets).isEqualTo(expectedOffsets);
  182. }
  183. }
  184. private Consumer<?, ?> groupConsumer() {
  185. Properties props = new Properties();
  186. props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
  187. props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
  188. props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
  189. props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
  190. props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  191. props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  192. return new KafkaConsumer<>(props);
  193. }
  194. }