- fixed merge conflicts
- renamed pageSize to perPage
Parent: a8e992ff89
Commit: 774c0a75b2
4 changed files with 16 additions and 23 deletions
TopicsController.java

@@ -1,12 +1,8 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.TopicsApi;
-import com.provectus.kafka.ui.model.Topic;
-import com.provectus.kafka.ui.model.TopicConfig;
-import com.provectus.kafka.ui.model.TopicDetails;
-import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.*;
 import com.provectus.kafka.ui.service.ClusterService;
-import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.HttpStatus;

@@ -16,6 +12,9 @@ import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
+import javax.validation.Valid;
+import java.util.Optional;
+
 @RestController
 @RequiredArgsConstructor
 @Log4j2

@@ -59,8 +58,8 @@ public class TopicsController implements TopicsApi {
   }
 
   @Override
-  public Mono<ResponseEntity<Flux<Topic>>> getTopics(String clusterName, ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getTopics(clusterName))));
+  public Mono<ResponseEntity<TopicsResponse>> getTopics(String clusterName, @Valid Integer page, @Valid Integer perPage, ServerWebExchange exchange) {
+    return Mono.just(ResponseEntity.ok(clusterService.getTopics(clusterName, Optional.ofNullable(page), Optional.ofNullable(perPage))));
   }
 
   @Override
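Note: read together, the two replaced lines give the endpoint a paginated contract: instead of streaming a bare Flux<Topic>, it now returns a single TopicsResponse per request. A sketch of the resulting method, assembled from the hunk above; only the comments are new:

@Override
public Mono<ResponseEntity<TopicsResponse>> getTopics(String clusterName, @Valid Integer page,
                                                      @Valid Integer perPage, ServerWebExchange exchange) {
  // page and perPage arrive as nullable Integers from the query string;
  // Optional.ofNullable lets ClusterService fall back to its defaults when either is omitted.
  return Mono.just(ResponseEntity.ok(
      clusterService.getTopics(clusterName, Optional.ofNullable(page), Optional.ofNullable(perPage))));
}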
ClusterService.java

@@ -64,19 +64,19 @@ public class ClusterService {
   }
 
 
-  public TopicsResponse getTopics(String name, Optional<Integer> page, Optional<Integer> nullablePageSize) {
+  public TopicsResponse getTopics(String name, Optional<Integer> page, Optional<Integer> nullablePerPage) {
     Predicate<Integer> positiveInt = i -> i > 0;
-    int pageSize = nullablePageSize.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
-    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * pageSize;
+    int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
+    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
     var cluster = clustersStorage.getClusterByName(name).orElseThrow(() -> new NotFoundException("No such cluster"));
-    var totalPages = (cluster.getTopics().size() / pageSize) + (cluster.getTopics().size() % pageSize == 0 ? 0 : 1);
+    var totalPages = (cluster.getTopics().size() / perPage) + (cluster.getTopics().size() % perPage == 0 ? 0 : 1);
     return new TopicsResponse()
         .pageCount(totalPages)
         .topics(
             cluster.getTopics().values().stream()
                 .sorted(Comparator.comparing(InternalTopic::getName))
                 .skip(topicsToSkip)
-                .limit(pageSize)
+                .limit(perPage)
                 .map(clusterMapper::toTopic)
                 .collect(Collectors.toList())
         );
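Note: the paging arithmetic is unchanged apart from the rename. A standalone sketch of how the numbers fall out; the concrete values, and DEFAULT_PAGE_SIZE being 20, are illustrative assumptions rather than part of the diff:

int totalTopics = 45;   // e.g. cluster.getTopics().size()
int perPage = 20;       // what nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE) resolves to
int page = 2;           // 1-based page index requested by the client

int topicsToSkip = (page - 1) * perPage;                                          // 20
int totalPages = (totalTopics / perPage) + (totalTopics % perPage == 0 ? 0 : 1);  // 2 + 1 = 3
// The stream then applies .skip(20).limit(20), so the response holds topics 21..40
// of the name-sorted list, with pageCount set to 3.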
ClusterServiceTest.java

@@ -1,10 +1,8 @@
-package com.provectus.kafka.ui.cluster.service;
+package com.provectus.kafka.ui.service;
 
-import com.provectus.kafka.ui.cluster.mapper.ClusterMapper;
-import com.provectus.kafka.ui.cluster.model.ClustersStorage;
-import com.provectus.kafka.ui.cluster.model.InternalTopic;
-import com.provectus.kafka.ui.cluster.model.KafkaCluster;
-import com.provectus.kafka.ui.kafka.KafkaService;
+import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.InternalTopic;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.Topic;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;

@@ -34,10 +32,6 @@ class ClusterServiceTest {
   private ClustersStorage clustersStorage;
   @Spy
   private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
-  @Mock
-  private KafkaService kafkaService;
-  @Mock
-  private ConsumingService consumingService;
 
   @Test
   public void shouldListFirst20Topics() {
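Note: the two dropped mocks (kafkaService, consumingService) are not needed on the paging path, which only touches ClustersStorage and the spied mapper. A rough sketch of what a test like shouldListFirst20Topics could look like, with imports and test wiring elided; it assumes InternalTopic and KafkaCluster expose Lombok-style builders, TopicsResponse exposes getTopics()/getPageCount(), and the default page size is 20, none of which is shown in this diff:

@Test
public void shouldListFirst20Topics() {
  // 100 topics named "0".."99"; stub only the storage lookup.
  var topics = IntStream.range(0, 100).boxed()
      .map(Object::toString)
      .collect(Collectors.toMap(name -> name,
          name -> InternalTopic.builder().name(name).build()));
  when(clustersStorage.getClusterByName("test"))
      .thenReturn(Optional.of(KafkaCluster.builder().topics(topics).build()));

  var response = clusterService.getTopics("test", Optional.empty(), Optional.empty());
  assertThat(response.getTopics()).hasSize(20);      // first page, assumed default size
  assertThat(response.getPageCount()).isEqualTo(5);  // 100 topics / 20 per page
}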
OpenAPI contract (yaml)

@@ -135,7 +135,7 @@ paths:
          required: false
          schema:
            type: integer
        - name: pageSize
        - name: perPage
          in: query
          required: false
          schema: