|
@@ -1,68 +1,63 @@
|
|
package com.provectus.kafka.ui.emitter;

import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import java.time.Duration;
import java.util.Properties;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Delegate;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
|
|
|
|
|
|
|
|
|
@RequiredArgsConstructor
|
|
@RequiredArgsConstructor
|
|
public class EnhancedConsumer implements Consumer<Bytes, Bytes> {
|
|
public class EnhancedConsumer implements Consumer<Bytes, Bytes> {
|
|
|
|
|
|
- public record PolledRecords(int count, int bytes, Duration elapsed, ConsumerRecords<Bytes, Bytes> records)
|
|
|
|
- implements Iterable<ConsumerRecord<Bytes, Bytes>> {
|
|
|
|
-
|
|
|
|
- static PolledRecords create(ConsumerRecords<Bytes, Bytes> polled, Duration pollDuration) {
|
|
|
|
- return new PolledRecords(
|
|
|
|
- polled.count(),
|
|
|
|
- calculatePolledRecSize(polled),
|
|
|
|
- pollDuration,
|
|
|
|
- polled
|
|
|
|
- );
|
|
|
|
- }
|
|
|
|
|
|
+ @Delegate
|
|
|
|
+ private final Consumer<Bytes, Bytes> consumer;
|
|
|
|
+ private final PollingThrottler throttler;
|
|
|
|
+ private final ApplicationMetrics metrics;
|
|
|
|
|
|
- public List<ConsumerRecord<Bytes, Bytes>> records(TopicPartition tp) {
|
|
|
|
- return records.records(tp);
|
|
|
|
- }
|
|
|
|
|
|
+ public static EnhancedConsumer create(Properties properties,
|
|
|
|
+ PollingThrottler throttler,
|
|
|
|
+ ApplicationMetrics metrics) {
|
|
|
|
+ return new EnhancedConsumer(createInternalConsumer(properties, metrics), throttler, metrics);
|
|
|
|
+ }
|
|
|
|
|
|
- @Override
|
|
|
|
- public Iterator<ConsumerRecord<Bytes, Bytes>> iterator() {
|
|
|
|
- return records.iterator();
|
|
|
|
|
|
+ private static KafkaConsumer<Bytes, Bytes> createInternalConsumer(Properties properties, ApplicationMetrics metrics) {
|
|
|
|
+ metrics.activeConsumers().incrementAndGet();
|
|
|
|
+ try {
|
|
|
|
+ return new KafkaConsumer<>(properties) {
|
|
|
|
+ @Override
|
|
|
|
+ public void close(Duration timeout) {
|
|
|
|
+ metrics.activeConsumers().decrementAndGet();
|
|
|
|
+ super.close(timeout);
|
|
|
|
+ }
|
|
|
|
+ };
|
|
|
|
+ } catch (Exception e) {
|
|
|
|
+ metrics.activeConsumers().decrementAndGet();
|
|
|
|
+ throw e;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
- @Delegate
|
|
|
|
- private final Consumer<Bytes, Bytes> consumer;
|
|
|
|
- private final PollingThrottler throttler;
|
|
|
|
-
|
|
|
|
public PolledRecords pollEnhanced(Duration dur) {
|
|
public PolledRecords pollEnhanced(Duration dur) {
|
|
var stopwatch = Stopwatch.createStarted();
|
|
var stopwatch = Stopwatch.createStarted();
|
|
ConsumerRecords<Bytes, Bytes> polled = consumer.poll(dur);
|
|
ConsumerRecords<Bytes, Bytes> polled = consumer.poll(dur);
|
|
PolledRecords polledEnhanced = PolledRecords.create(polled, stopwatch.elapsed());
|
|
PolledRecords polledEnhanced = PolledRecords.create(polled, stopwatch.elapsed());
|
|
- throttler.throttleAfterPoll(polledEnhanced.bytes);
|
|
|
|
|
|
+ var throttled = throttler.throttleAfterPoll(polledEnhanced.bytes());
|
|
|
|
+ metrics.meterPolledRecords(topic(), polledEnhanced, throttled);
|
|
return polledEnhanced;
|
|
return polledEnhanced;
|
|
}
|
|
}
|
|
|
|
|
|
- private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
|
|
|
|
- int polledBytes = 0;
|
|
|
|
- for (ConsumerRecord<Bytes, Bytes> rec : recs) {
|
|
|
|
- for (Header header : rec.headers()) {
|
|
|
|
- polledBytes +=
|
|
|
|
- (header.key() != null ? header.key().getBytes().length : 0)
|
|
|
|
- + (header.value() != null ? header.value().length : 0);
|
|
|
|
- }
|
|
|
|
- polledBytes += rec.key() == null ? 0 : rec.serializedKeySize();
|
|
|
|
- polledBytes += rec.value() == null ? 0 : rec.serializedValueSize();
|
|
|
|
- }
|
|
|
|
- return polledBytes;
|
|
|
|
|
|
+ private String topic() {
|
|
|
|
+ var topics = consumer.assignment().stream().map(TopicPartition::topic).toList();
|
|
|
|
+ // we assume that consumer will always read single topic
|
|
|
|
+ Preconditions.checkArgument(topics.size() == 1);
|
|
|
|
+ return topics.get(0);
|
|
}
|
|
}
|
|
|
|
|
|
}
|
|
}
|