Support clustered endpoint for Schema Registry (#1483)

* Add failover support for Schema Registry

* Base schema id on primary node

* Made code thread safe

* Remove unnecessary synchronize

* Remove duplicated url field with InternalSchemaRegistry

* Fix maven warnings about dynamic versioning (#1559)

* Bump @types/react-redux from 7.1.18 to 7.1.22 in /kafka-ui-react-app (#1462)

Bumps [@types/react-redux](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react-redux) from 7.1.18 to 7.1.22.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/react-redux)

---
updated-dependencies:
- dependency-name: "@types/react-redux"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Bump @types/jest from 27.0.3 to 27.4.0 in /kafka-ui-react-app (#1458)

Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 27.0.3 to 27.4.0.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest)

---
updated-dependencies:
- dependency-name: "@types/jest"
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Console banner updated (#1319)

* banner changed to fix new name

* width adjusted to 80 to fit all terminals

Co-authored-by: iliax <ikuramshin@provectus.com>
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>

* Add an example for SSL w/ kafka (#1568)

Signed-off-by: Roman Zabaluev <rzabaluev@provectus.com>

Co-authored-by: Ruslan Ibragimov <ruibragimov@provectus.com>

* Smart filters: Groovy script messages filter implementation (reopened) (#1547)

* groovy script messages filter added

* ISSUE-943: Topic messages tailing implementation (#1515)

* Topic messages tailing implementation

* Implemented topics sorting by size (#1539)

Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>

* [ISSUE-1512]Added sorting by topics size

* [ISSUE-1512]Added sorting by topics size

* Add sort by Size.Refactoring sort order

* correct a little mistake

* Improve test coverage

* got rid of code duplication

* refactoring

Co-authored-by: ValentinPrischepa <valentin.prischepa@gmail.com>
Co-authored-by: Anton Zorin <ant.zorin@gmail.com>
Co-authored-by: Oleg Shur <workshur@gmail.com>

* Implement recreating a topic

* [ISSUE-998][backend] Add functionality to re-create topic in one click

* [ISSUE-998][backend] Add functionality to re-create topic in one click

* [ISSUE-998][backend] Add functionality to re-create topic in one click

Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>

* Run the app in the container as a non-root user (#1575)

* Run as a non-root user. Fixes #1555

Signed-off-by: Roman Zabaluev <rzabaluev@provectus.com>

* Fix line break

Signed-off-by: Roman Zabaluev <rzabaluev@provectus.com>

Co-authored-by: Ruslan Ibragimov <94184844+5hin0bi@users.noreply.github.com>

* [FIXED issue/1545] added feedback to the user when a message content is copied to clipboard (#1570)

* added alert after "Copy to clipboard"

* moved main logic to useDataSaver

* fixed typographical mistake

* updated useDataSaver test

* made adaptive height in connectors config component (#1583)

Co-authored-by: Anton Zorin <zorii4@Antons-MacBook-Pro.local>

* Bump http-proxy-middleware from 2.0.1 to 2.0.3 in /kafka-ui-react-app (#1579)

Bumps [http-proxy-middleware](https://github.com/chimurai/http-proxy-middleware) from 2.0.1 to 2.0.3.
- [Release notes](https://github.com/chimurai/http-proxy-middleware/releases)
- [Changelog](https://github.com/chimurai/http-proxy-middleware/blob/master/CHANGELOG.md)
- [Commits](https://github.com/chimurai/http-proxy-middleware/compare/v2.0.1...v2.0.3)

---
updated-dependencies:
- dependency-name: http-proxy-middleware
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* Allow running sonar within PR of a fork (#1581)

* Allow running sonar within PR of a fork

* remove github token from envs on mvn verify

* Wrap failover in Mono.as

* Use failoverable uri instead of primary node one for accessing the schemaregistry

* Added urls to similarly named configureWebClient methods

Co-authored-by: Jonas Geiregat (31198) <jonas.geiregat@tvh.com>
This commit is contained in:
Jonas Geiregat 2022-03-14 10:27:49 +01:00 committed by GitHub
parent ea9a145583
commit 07a7836773
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 281 additions and 61 deletions

View file

@ -12,6 +12,7 @@ import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
import com.provectus.kafka.ui.model.ConfigSourceDTO;
import com.provectus.kafka.ui.model.ConfigSynonymDTO;
import com.provectus.kafka.ui.model.ConnectDTO;
import com.provectus.kafka.ui.model.FailoverUrlList;
import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
@ -97,8 +98,8 @@ public interface ClusterMapper {
internalSchemaRegistry.url(
clusterProperties.getSchemaRegistry() != null
? Arrays.asList(clusterProperties.getSchemaRegistry().split(","))
: Collections.emptyList()
? new FailoverUrlList(Arrays.asList(clusterProperties.getSchemaRegistry().split(",")))
: new FailoverUrlList(Collections.emptyList())
);
if (clusterProperties.getSchemaRegistryAuth() != null) {

View file

@ -0,0 +1,59 @@
package com.provectus.kafka.ui.model;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import lombok.experimental.Delegate;
/**
 * A list of schema-registry URLs with simple round-robin failover.
 * Tracks which URLs have failed and when, and rotates the "current" URL
 * forward whenever the active one is reported as failed.
 */
public class FailoverUrlList {

  /** Default time (ms) a recorded failure blocks a URL from being retried. */
  public static final int DEFAULT_RETRY_GRACE_PERIOD_IN_MS = 5000;

  /** Index of a failed URL -> instant at which the failure was recorded. */
  private final Map<Integer, Instant> failures = new ConcurrentHashMap<>();
  /** Position of the URL currently in rotation. */
  private final AtomicInteger index = new AtomicInteger(0);

  @Delegate
  private final List<String> urls;
  private final int retryGracePeriodInMs;

  public FailoverUrlList(List<String> urls) {
    this(urls, DEFAULT_RETRY_GRACE_PERIOD_IN_MS);
  }

  /**
   * @param urls non-empty list of registry URLs
   * @param retryGracePeriodInMs how long a failure keeps a URL out of consideration
   * @throws IllegalArgumentException when no URLs are supplied
   */
  public FailoverUrlList(List<String> urls, int retryGracePeriodInMs) {
    if (urls == null || urls.isEmpty()) {
      throw new IllegalArgumentException("Expected at least one URL to be passed in constructor");
    }
    this.urls = new ArrayList<>(urls);
    this.retryGracePeriodInMs = retryGracePeriodInMs;
  }

  /** Returns the URL that callers should currently use. */
  public String current() {
    return this.urls.get(this.index.get());
  }

  /**
   * Records a failure for {@code url} and advances to the next URL (wrapping),
   * but only if {@code url} is still the active one — so concurrent reports
   * of the same failure rotate the index at most once.
   */
  public void fail(String url) {
    var activeIndex = this.index.get();
    if (!this.urls.get(activeIndex).equals(url)) {
      return; // another thread already rotated away from this URL
    }
    this.failures.put(activeIndex, Instant.now());
    // CAS so only one of several concurrent failers advances the pointer
    this.index.compareAndSet(activeIndex, (activeIndex + 1) % this.urls.size());
  }

  /**
   * True when some URL is still worth trying: either a URL has never failed,
   * or at least one recorded failure is older than the retry grace period.
   */
  public boolean isFailoverAvailable() {
    if (this.urls.size() > this.failures.size()) {
      return true;
    }
    var now = Instant.now();
    return this.failures.values().stream()
        .anyMatch(failedAt -> now.isAfter(failedAt.plusMillis(retryGracePeriodInMs)));
  }

  @Override
  public String toString() {
    return this.urls.toString();
  }
}

View file

@ -1,6 +1,5 @@
package com.provectus.kafka.ui.model;
import java.util.List;
import lombok.Builder;
import lombok.Data;
@ -9,10 +8,21 @@ import lombok.Data;
public class InternalSchemaRegistry {
private final String username;
private final String password;
private final List<String> url;
private final FailoverUrlList url;
public String getFirstUrl() {
return url != null && !url.isEmpty() ? url.iterator().next() : null;
public String getPrimaryNodeUri() {
return url.get(0);
}
public String getUri() {
return url.current();
}
public void markAsUnavailable(String url) {
this.url.fail(url);
}
public boolean isFailoverAvailable() {
return this.url.isFailoverAvailable();
}
}

View file

@ -32,6 +32,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
@ -71,7 +72,10 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
"You specified password but do not specified username");
}
return new CachedSchemaRegistryClient(
cluster.getSchemaRegistry().getUrl(),
cluster.getSchemaRegistry()
.getUrl()
.stream()
.collect(Collectors.toUnmodifiableList()),
1_000,
schemaProviders,
configs
@ -224,7 +228,7 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
private String convertSchema(SchemaMetadata schema) {
String jsonSchema;
URI basePath = new URI(cluster.getSchemaRegistry().getFirstUrl())
URI basePath = new URI(cluster.getSchemaRegistry().getPrimaryNodeUri())
.resolve(Integer.toString(schema.getId()));
final ParsedSchema schemaById = schemaRegistryClient.getSchemaById(schema.getId());

View file

@ -21,6 +21,7 @@ import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
import com.provectus.kafka.ui.model.schemaregistry.InternalNewSchema;
import com.provectus.kafka.ui.model.schemaregistry.SubjectIdResponse;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.Formatter;
@ -28,6 +29,7 @@ import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -42,6 +44,7 @@ import org.springframework.util.MultiValueMap;
import org.springframework.web.reactive.function.BodyInserters;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.WebClient;
import org.springframework.web.reactive.function.client.WebClientRequestException;
import org.springframework.web.util.UriComponentsBuilder;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@ -79,7 +82,9 @@ public class SchemaRegistryService {
URL_SUBJECTS)
.retrieve()
.bodyToMono(String[].class)
.doOnError(e -> log.error("Unexpected error", e));
.doOnError(e -> log.error("Unexpected error", e))
.as(m -> failoverAble(m,
new FailoverMono<>(cluster.getSchemaRegistry(), () -> this.getAllSubjectNames(cluster))));
}
public Flux<SchemaSubjectDTO> getAllVersionsBySubject(KafkaCluster cluster, String subject) {
@ -96,7 +101,9 @@ public class SchemaRegistryService {
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
.bodyToFlux(Integer.class);
.bodyToFlux(Integer.class)
.as(f -> failoverAble(f, new FailoverFlux<>(cluster.getSchemaRegistry(),
() -> this.getSubjectVersions(cluster, schemaName))));
}
public Mono<SchemaSubjectDTO> getSchemaSubjectByVersion(KafkaCluster cluster, String schemaName,
@ -114,7 +121,7 @@ public class SchemaRegistryService {
return configuredWebClient(
cluster,
HttpMethod.GET,
URL_SUBJECT_BY_VERSION,
SchemaRegistryService.URL_SUBJECT_BY_VERSION,
List.of(schemaName, version))
.retrieve()
.onStatus(NOT_FOUND::equals,
@ -128,7 +135,9 @@ public class SchemaRegistryService {
String compatibilityLevel = tuple.getT2().getCompatibility().getValue();
schema.setCompatibilityLevel(compatibilityLevel);
return schema;
});
})
.as(m -> failoverAble(m, new FailoverMono<>(cluster.getSchemaRegistry(),
() -> this.getSchemaSubject(cluster, schemaName, version))));
}
/**
@ -154,16 +163,18 @@ public class SchemaRegistryService {
private Mono<Void> deleteSchemaSubject(KafkaCluster cluster, String schemaName,
String version) {
return configuredWebClient(
cluster,
HttpMethod.DELETE,
URL_SUBJECT_BY_VERSION,
List.of(schemaName, version))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
)
.toBodilessEntity()
.then();
cluster,
HttpMethod.DELETE,
SchemaRegistryService.URL_SUBJECT_BY_VERSION,
List.of(schemaName, version))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
)
.toBodilessEntity()
.then()
.as(m -> failoverAble(m, new FailoverMono<>(cluster.getSchemaRegistry(),
() -> this.deleteSchemaSubject(cluster, schemaName, version))));
}
public Mono<Void> deleteSchemaSubjectEntirely(KafkaCluster cluster,
@ -176,7 +187,9 @@ public class SchemaRegistryService {
.retrieve()
.onStatus(HttpStatus::isError, errorOnSchemaDeleteFailure(schemaName))
.toBodilessEntity()
.then();
.then()
.as(m -> failoverAble(m, new FailoverMono<>(cluster.getSchemaRegistry(),
() -> this.deleteSchemaSubjectEntirely(cluster, schemaName))));
}
/**
@ -202,19 +215,20 @@ public class SchemaRegistryService {
Mono<InternalNewSchema> newSchemaSubject,
KafkaCluster cluster) {
return configuredWebClient(
cluster,
HttpMethod.POST,
URL_SUBJECT_VERSIONS,
subject)
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
.retrieve()
.onStatus(UNPROCESSABLE_ENTITY::equals,
r -> r.bodyToMono(ErrorResponse.class)
.flatMap(x -> Mono.error(isUnrecognizedFieldSchemaTypeMessage(x.getMessage())
? new SchemaTypeNotSupportedException()
: new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SubjectIdResponse.class);
cluster,
HttpMethod.POST,
URL_SUBJECT_VERSIONS, subject)
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
.retrieve()
.onStatus(UNPROCESSABLE_ENTITY::equals,
r -> r.bodyToMono(ErrorResponse.class)
.flatMap(x -> Mono.error(isUnrecognizedFieldSchemaTypeMessage(x.getMessage())
? new SchemaTypeNotSupportedException()
: new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SubjectIdResponse.class)
.as(m -> failoverAble(m, new FailoverMono<>(cluster.getSchemaRegistry(),
() -> submitNewSchema(subject, newSchemaSubject, cluster))));
}
@NotNull
@ -233,16 +247,18 @@ public class SchemaRegistryService {
Mono<CompatibilityLevelDTO> compatibilityLevel) {
String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
return configuredWebClient(
cluster,
HttpMethod.PUT,
configEndpoint,
cluster,
HttpMethod.PUT,
configEndpoint,
schemaName)
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevelDTO.class))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
.bodyToMono(Void.class);
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevelDTO.class))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
.bodyToMono(Void.class)
.as(m -> failoverAble(m, new FailoverMono<>(cluster.getSchemaRegistry(),
() -> this.updateSchemaCompatibility(cluster, schemaName, compatibilityLevel))));
}
public Mono<Void> updateSchemaCompatibility(KafkaCluster cluster,
@ -280,17 +296,19 @@ public class SchemaRegistryService {
public Mono<CompatibilityCheckResponseDTO> checksSchemaCompatibility(
KafkaCluster cluster, String schemaName, Mono<NewSchemaSubjectDTO> newSchemaSubject) {
return configuredWebClient(
cluster,
HttpMethod.POST,
"/compatibility/subjects/{schemaName}/versions/latest",
cluster,
HttpMethod.POST,
"/compatibility/subjects/{schemaName}/versions/latest",
schemaName)
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubjectDTO.class))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
.bodyToMono(InternalCompatibilityCheck.class)
.map(mapper::toCompatibilityCheckResponse);
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubjectDTO.class))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
.bodyToMono(InternalCompatibilityCheck.class)
.map(mapper::toCompatibilityCheckResponse)
.as(m -> failoverAble(m, new FailoverMono<>(cluster.getSchemaRegistry(),
() -> this.checksSchemaCompatibility(cluster, schemaName, newSchemaSubject))));
}
public String formatted(String str, Object... args) {
@ -318,7 +336,8 @@ public class SchemaRegistryService {
return errorMessage.contains(UNRECOGNIZED_FIELD_SCHEMA_TYPE);
}
private WebClient.RequestBodySpec configuredWebClient(KafkaCluster cluster, HttpMethod method, String uri) {
private WebClient.RequestBodySpec configuredWebClient(KafkaCluster cluster, HttpMethod method,
String uri) {
return configuredWebClient(cluster, method, uri, Collections.emptyList(),
new LinkedMultiValueMap<>());
}
@ -335,20 +354,20 @@ public class SchemaRegistryService {
}
private WebClient.RequestBodySpec configuredWebClient(KafkaCluster cluster,
HttpMethod method, String uri,
HttpMethod method, String path,
List<String> uriVariables,
MultiValueMap<String, String> queryParams) {
final var schemaRegistry = cluster.getSchemaRegistry();
return webClient
.method(method)
.uri(buildUri(schemaRegistry, uri, uriVariables, queryParams))
.uri(buildUri(schemaRegistry, path, uriVariables, queryParams))
.headers(headers -> setBasicAuthIfEnabled(schemaRegistry, headers));
}
private URI buildUri(InternalSchemaRegistry schemaRegistry, String uri, List<String> uriVariables,
private URI buildUri(InternalSchemaRegistry schemaRegistry, String path, List<String> uriVariables,
MultiValueMap<String, String> queryParams) {
final var builder = UriComponentsBuilder
.fromHttpUrl(schemaRegistry.getFirstUrl() + uri);
.fromHttpUrl(schemaRegistry.getUri() + path);
builder.queryParams(queryParams);
return builder.buildAndExpand(uriVariables.toArray()).toUri();
}
@ -361,4 +380,59 @@ public class SchemaRegistryService {
return Mono.error(new SchemaFailedToDeleteException(schemaName));
};
}
// Wraps a Mono-returning registry call so that, on error, the failover policy
// may re-issue the request against the next schema-registry URL (see Failover#failover).
private <T> Mono<T> failoverAble(Mono<T> request, FailoverMono<T> failoverMethod) {
return request.onErrorResume(failoverMethod::failover);
}
// Flux counterpart of failoverAble(Mono, ...): on error, defers to the failover
// policy, which may retry the request against the next schema-registry URL.
private <T> Flux<T> failoverAble(Flux<T> request, FailoverFlux<T> failoverMethod) {
return request.onErrorResume(failoverMethod::failover);
}
// Encapsulates one failover attempt: given an error from a registry request,
// decide whether to mark the current URL as unavailable and retry via the
// supplied fallback, or to propagate the error as-is.
// E is the reactive wrapper type (Mono<T> or Flux<T>) — see the subclasses below.
private abstract static class Failover<E> {
private final InternalSchemaRegistry schemaRegistry;
private final Supplier<E> failover;
private Failover(InternalSchemaRegistry schemaRegistry, Supplier<E> failover) {
this.schemaRegistry = Objects.requireNonNull(schemaRegistry);
this.failover = Objects.requireNonNull(failover);
}
// How a terminal (non-retryable) error is represented in E (Mono.error / Flux.error).
abstract E error(Throwable error);
// Retry only on connectivity-level failures (request never reached the server:
// WebClientRequestException caused by an IOException) and only while another
// URL is available; otherwise surface the original error.
public E failover(Throwable error) {
if (error instanceof WebClientRequestException
&& error.getCause() instanceof IOException
&& schemaRegistry.isFailoverAvailable()) {
var uri = ((WebClientRequestException) error).getUri();
// Report only scheme://authority, matching how registry URLs are configured.
schemaRegistry.markAsUnavailable(String.format("%s://%s", uri.getScheme(), uri.getAuthority()));
return failover.get();
}
return error(error);
}
}
// Failover strategy for single-value (Mono) registry requests.
private static class FailoverMono<T> extends Failover<Mono<T>> {
private FailoverMono(InternalSchemaRegistry schemaRegistry, Supplier<Mono<T>> failover) {
super(schemaRegistry, failover);
}
@Override
Mono<T> error(Throwable error) {
return Mono.error(error);
}
}
// Failover strategy for multi-value (Flux) registry requests.
private static class FailoverFlux<T> extends Failover<Flux<T>> {
private FailoverFlux(InternalSchemaRegistry schemaRegistry, Supplier<Flux<T>> failover) {
super(schemaRegistry, failover);
}
@Override
Flux<T> error(Throwable error) {
return Flux.error(error);
}
}
}

View file

@ -17,6 +17,7 @@ import org.springframework.context.ApplicationContextInitializer;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.util.SocketUtils;
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.utility.DockerImageName;
@ -59,7 +60,9 @@ public abstract class AbstractIntegrationTest {
public void initialize(@NotNull ConfigurableApplicationContext context) {
System.setProperty("kafka.clusters.0.name", LOCAL);
System.setProperty("kafka.clusters.0.bootstrapServers", kafka.getBootstrapServers());
System.setProperty("kafka.clusters.0.schemaRegistry", schemaRegistry.getUrl());
// List unavailable hosts to verify failover
System.setProperty("kafka.clusters.0.schemaRegistry", String.format("http://localhost:%1$s,http://localhost:%1$s,%2$s",
SocketUtils.findAvailableTcpPort(), schemaRegistry.getUrl()));
System.setProperty("kafka.clusters.0.kafkaConnect.0.name", "kafka-connect");
System.setProperty("kafka.clusters.0.kafkaConnect.0.address", kafkaConnect.getTarget());

View file

@ -0,0 +1,69 @@
package com.provectus.kafka.ui.model;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
// Unit tests for FailoverUrlList's failover-availability rules.
class FailoverUrlListTest {
// Very short grace period so the "grace period elapsed" test can sleep past it quickly.
public static final int RETRY_GRACE_PERIOD_IN_MS = 10;
@Nested
@SuppressWarnings("all")
class ShouldHaveFailoverAvailableWhen {
private FailoverUrlList failoverUrlList;
@BeforeEach
void before() {
failoverUrlList = new FailoverUrlList(List.of("localhost:123", "farawayhost:5678"), RETRY_GRACE_PERIOD_IN_MS);
}
// No URL has ever failed -> failover trivially available.
@Test
void thereAreNoFailures() {
assertThat(failoverUrlList.isFailoverAvailable()).isTrue();
}
// One of two URLs failed -> the other is still untried.
@Test
void withLessFailuresThenAvailableUrls() {
failoverUrlList.fail(failoverUrlList.current());
assertThat(failoverUrlList.isFailoverAvailable()).isTrue();
}
// Both URLs failed, but sleeping past the grace period makes them retryable again.
@Test
void withAllFailuresAndAtLeastOneAfterTheGraceTimeoutPeriod() throws InterruptedException {
failoverUrlList.fail(failoverUrlList.current());
failoverUrlList.fail(failoverUrlList.current());
Thread.sleep(RETRY_GRACE_PERIOD_IN_MS + 1);
assertThat(failoverUrlList.isFailoverAvailable()).isTrue();
}
// NOTE(review): this class is nested inside ShouldHaveFailoverAvailableWhen;
// it looks like it was meant to be a sibling @Nested class of FailoverUrlListTest — confirm.
@Nested
@SuppressWarnings("all")
class ShouldNotHaveFailoverAvailableWhen {
private FailoverUrlList failoverUrlList;
@BeforeEach
void before() {
// Long (1s) grace period so failures cannot expire during the test.
failoverUrlList = new FailoverUrlList(List.of("localhost:123", "farawayhost:5678"), 1000);
}
// Every URL failed recently (within the grace period) -> nothing left to try.
@Test
void allFailuresWithinGracePeriod() {
failoverUrlList.fail(failoverUrlList.current());
failoverUrlList.fail(failoverUrlList.current());
assertThat(failoverUrlList.isFailoverAvailable()).isFalse();
}
}
}
}