OffsetsResetServiceTest.java

package com.provectus.kafka.ui.service;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.serialization.BytesSerializer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
public class OffsetsResetServiceTest extends AbstractIntegrationTest {

  private static final int PARTITIONS = 5;

  private static final KafkaCluster CLUSTER =
      KafkaCluster.builder()
          .name(LOCAL)
          .bootstrapServers(kafka.getBootstrapServers())
          .properties(new Properties())
          .build();

  private final String groupId = "OffsetsResetServiceTestGroup-" + UUID.randomUUID();
  private final String topic = "OffsetsResetServiceTestTopic-" + UUID.randomUUID();

  private OffsetsResetService offsetsResetService;

  @BeforeEach
  void init() {
    AdminClientServiceImpl adminClientService = new AdminClientServiceImpl();
    adminClientService.setClientTimeout(5_000);
    offsetsResetService = new OffsetsResetService(adminClientService);
    createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
    createConsumerGroup();
  }

  @AfterEach
  void cleanUp() {
    deleteTopic(topic);
  }
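  // Registers the consumer group with the broker without consuming anything: the
  // subscription pattern matches no topic, so the poll/commit cycle only creates the
  // group metadata that the reset operations act on later.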
  private void createConsumerGroup() {
    try (var consumer = groupConsumer()) {
      consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
      consumer.poll(Duration.ofMillis(200));
      consumer.commitSync();
    }
  }
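  // Every reset variant is expected to signal NotFoundException for an unknown group id.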
  @Test
  void failsIfGroupDoesNotExists() {
    List<Mono<?>> expectedNotFound = List.of(
        offsetsResetService
            .resetToEarliest(CLUSTER, "non-existing-group", topic, null),
        offsetsResetService
            .resetToLatest(CLUSTER, "non-existing-group", topic, null),
        offsetsResetService
            .resetToTimestamp(CLUSTER, "non-existing-group", topic, null, System.currentTimeMillis()),
        offsetsResetService
            .resetToOffsets(CLUSTER, "non-existing-group", topic, Map.of())
    );

    for (Mono<?> mono : expectedNotFound) {
      StepVerifier.create(mono)
          .expectErrorMatches(t -> t instanceof NotFoundException)
          .verify();
    }
  }
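  // While a consumer is connected the group has an active member, so every reset
  // variant is expected to be rejected with a ValidationException.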
  @Test
  void failsIfGroupIsActive() {
    // starting consumer to activate group
    try (var consumer = groupConsumer()) {
      consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
      consumer.poll(Duration.ofMillis(100));

      List<Mono<?>> expectedValidationError = List.of(
          offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null),
          offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null),
          offsetsResetService
              .resetToTimestamp(CLUSTER, groupId, topic, null, System.currentTimeMillis()),
          offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, Map.of())
      );

      for (Mono<?> mono : expectedValidationError) {
        StepVerifier.create(mono)
            .expectErrorMatches(t -> t instanceof ValidationException)
            .verify();
      }
    }
  }
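  // Resets the group to explicit per-partition offsets and verifies that exactly
  // those offsets end up committed.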
  @Test
  void resetToOffsets() {
    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));

    var expectedOffsets = Map.of(0, 5L, 1, 5L, 2, 5L);
    offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, expectedOffsets).block();
    assertOffsets(expectedOffsets);
  }
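  // The requested offsets here fall outside the valid range (negative, and past the
  // log end of 10). Judging by the expected result, the service apparently clamps such
  // values: -2 is committed as the earliest offset (0) and 500 as the latest (10).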
  @Test
  void resetToOffsetsCommitsEarliestOrLatestOffsetsIfOffsetsBoundsNotValid() {
    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));

    var offsetsWithInvalidBounds = Map.of(0, -2L, 1, 5L, 2, 500L);
    var expectedOffsets = Map.of(0, 0L, 1, 5L, 2, 10L);
    offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, offsetsWithInvalidBounds).block();
    assertOffsets(expectedOffsets);
  }
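  // When a partition list is passed, only those partitions are reset; passing null
  // resets all partitions of the topic, including ones that never had a commit.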
  @Test
  void resetToEarliest() {
    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));

    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
    offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, List.of(0, 1)).block();
    assertOffsets(Map.of(0, 0L, 1, 0L, 2, 5L));

    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
    offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null).block();
    assertOffsets(Map.of(0, 0L, 1, 0L, 2, 0L, 3, 0L, 4, 0L));
  }

  @Test
  void resetToLatest() {
    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10, 3, 10, 4, 10));

    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
    offsetsResetService.resetToLatest(CLUSTER, groupId, topic, List.of(0, 1)).block();
    assertOffsets(Map.of(0, 10L, 1, 10L, 2, 5L));

    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
    offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null).block();
    assertOffsets(Map.of(0, 10L, 1, 10L, 2, 10L, 3, 10L, 4, 10L));
  }
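  // Records are produced with explicit timestamps, then the group is reset to 1600L.
  // The expected offsets suggest that each partition is moved to the first record whose
  // timestamp is >= 1600 (partition 0 -> offset 2, partition 1 -> offset 1), while a
  // partition with no such record (partition 2) and an empty partition (partition 3)
  // appear to fall back to their end offsets (3 and 0 respectively).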
  @Test
  void resetToTimestamp() {
    send(
        Stream.of(
            new ProducerRecord<Bytes, Bytes>(topic, 0, 1000L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 0, 1500L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 0, 2000L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 1, 1000L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 1, 2000L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 2, 1000L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 2, 1100L, null, null),
            new ProducerRecord<Bytes, Bytes>(topic, 2, 1200L, null, null)));

    offsetsResetService.resetToTimestamp(
        CLUSTER, groupId, topic, List.of(0, 1, 2, 3), 1600L
    ).block();
    assertOffsets(Map.of(0, 2L, 1, 1L, 2, 3L, 3, 0L));
  }
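  // Seeds the consumer group with specific committed offsets so the reset operations
  // have a known starting point to move away from.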
  private void commit(Map<Integer, Long> offsetsToCommit) {
    try (var consumer = groupConsumer()) {
      consumer.commitSync(
          offsetsToCommit.entrySet().stream()
              .collect(Collectors.toMap(
                  e -> new TopicPartition(topic, e.getKey()),
                  e -> new OffsetAndMetadata(e.getValue())))
      );
    }
  }

  private void sendMsgsToPartition(Map<Integer, Integer> msgsCountForPartitions) {
    Bytes bytes = new Bytes("noMatter".getBytes());
    send(
        msgsCountForPartitions.entrySet().stream()
            .flatMap(e ->
                IntStream.range(0, e.getValue())
                    .mapToObj(i -> new ProducerRecord<>(topic, e.getKey(), bytes, bytes))));
  }
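  // Produces the given records with a short-lived Bytes-serializing producer pointed
  // at the embedded broker, flushing before close so every record reaches the log.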
  private void send(Stream<ProducerRecord<Bytes, Bytes>> toSend) {
    var properties = new Properties();
    properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
    var serializer = new BytesSerializer();
    try (var producer = new KafkaProducer<>(properties, serializer, serializer)) {
      toSend.forEach(producer::send);
      producer.flush();
    }
  }
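  // Reads the offsets currently committed for the group via Consumer#committed and
  // compares them, per partition, with the expected values.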
  private void assertOffsets(Map<Integer, Long> expectedOffsets) {
    try (var consumer = groupConsumer()) {
      var tps = expectedOffsets.keySet().stream()
          .map(idx -> new TopicPartition(topic, idx))
          .collect(Collectors.toSet());

      var actualOffsets = consumer.committed(tps).entrySet().stream()
          .collect(Collectors.toMap(e -> e.getKey().partition(), e -> e.getValue().offset()));

      assertThat(actualOffsets).isEqualTo(expectedOffsets);
    }
  }

  private Consumer<?, ?> groupConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.getBootstrapServers());
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    return new KafkaConsumer<>(props);
  }
}