Merge pull request #46559 from thaJeztah/24.0_backport_no_min_max
[24.0 backport] rename uses of "max", "min", which are now builtins in go1.21
Commit fa2f6f98be
9 changed files with 32 additions and 36 deletions
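Background: Go 1.21 promotes min and max to predeclared functions, so local or package-level identifiers with those names still compile but shadow the builtins and get flagged by common linters, which is what this backport cleans up. A minimal illustrative sketch (not part of the diff below):

// Illustrative only: shadowing the go1.21 builtins.
package main

import "fmt"

func main() {
	{
		// Inside this block, max is an int variable and the builtin is unreachable.
		max := 3
		fmt.Println(max) // 3
	}
	// Outside the block the go1.21 builtins work directly; on branches that must
	// still build with older toolchains, helpers keep distinct names instead.
	fmt.Println(min(2, 5), max(2, 5)) // 2 5
}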
@@ -16,11 +16,11 @@ func compare(v1, v2 string) int {
 		otherTab = strings.Split(v2, ".")
 	)
 
-	max := len(currTab)
-	if len(otherTab) > max {
-		max = len(otherTab)
+	maxVer := len(currTab)
+	if len(otherTab) > maxVer {
+		maxVer = len(otherTab)
 	}
-	for i := 0; i < max; i++ {
+	for i := 0; i < maxVer; i++ {
 		var currInt, otherInt int
 
 		if len(currTab) > i {
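The renamed maxVer is simply the length of the longer dotted-version slice, so missing components compare as zero. A self-contained sketch of the same comparison, reconstructed here for illustration (the diff only shows the hunk above):

// compareVersions is a hypothetical standalone rendering of the compare loop:
// it returns 1 if v1 > v2, -1 if v1 < v2, and 0 if they are equal.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func compareVersions(v1, v2 string) int {
	currTab := strings.Split(v1, ".")
	otherTab := strings.Split(v2, ".")

	maxVer := len(currTab)
	if len(otherTab) > maxVer {
		maxVer = len(otherTab)
	}
	for i := 0; i < maxVer; i++ {
		var currInt, otherInt int
		if len(currTab) > i {
			currInt, _ = strconv.Atoi(currTab[i]) // missing or non-numeric parts stay 0
		}
		if len(otherTab) > i {
			otherInt, _ = strconv.Atoi(otherTab[i])
		}
		if currInt > otherInt {
			return 1
		}
		if otherInt > currInt {
			return -1
		}
	}
	return 0
}

func main() {
	fmt.Println(compareVersions("1.10", "1.9"))  // 1: components compare numerically, not lexically
	fmt.Println(compareVersions("1.2", "1.2.0")) // 0: the missing component counts as zero
}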
@@ -387,7 +387,7 @@ func (b *limitedBuffer) Write(data []byte) (int, error) {
 
 	bufLen := b.buf.Len()
 	dataLen := len(data)
-	keep := min(maxOutputLen-bufLen, dataLen)
+	keep := minInt(maxOutputLen-bufLen, dataLen)
 	if keep > 0 {
 		b.buf.Write(data[:keep])
 	}
@@ -417,7 +417,7 @@ func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duratio
 	return configuredValue
 }
 
-func min(x, y int) int {
+func minInt(x, y int) int {
 	if x < y {
 		return x
 	}
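For completeness, the helper after the rename (the hunk cuts off before its final return); on a codebase that already requires go1.21 the builtin min(x, y) would presumably make it redundant, but the 24.0 branch keeps a named helper:

func minInt(x, y int) int {
	if x < y {
		return x
	}
	return y
}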
@@ -18,8 +18,8 @@ func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID st
 // GetLayerFolders returns the layer folders from an image RootFS
 func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) ([]string, error) {
 	folders := []string{}
-	max := len(img.RootFS.DiffIDs)
-	for index := 1; index <= max; index++ {
+	rd := len(img.RootFS.DiffIDs)
+	for index := 1; index <= rd; index++ {
 		// FIXME: why does this mutate the RootFS?
 		img.RootFS.DiffIDs = img.RootFS.DiffIDs[:index]
 		if !system.IsOSSupported(img.OperatingSystem()) {
@@ -95,12 +95,8 @@ func JobComplete(client client.CommonAPIClient, service swarmtypes.Service) func
 		jobIteration = service.JobStatus.JobIteration
 	}
 
-	maxRaw := service.Spec.Mode.ReplicatedJob.MaxConcurrent
-	totalRaw := service.Spec.Mode.ReplicatedJob.TotalCompletions
-
-	max := int(*maxRaw)
-	total := int(*totalRaw)
-
+	maxConcurrent := int(*service.Spec.Mode.ReplicatedJob.MaxConcurrent)
+	totalCompletions := int(*service.Spec.Mode.ReplicatedJob.TotalCompletions)
 	previousResult := ""
 
 	return func(log poll.LogT) poll.Result {
@@ -134,16 +130,16 @@ func JobComplete(client client.CommonAPIClient, service swarmtypes.Service) func
 		}
 
 		switch {
-		case running > max:
+		case running > maxConcurrent:
 			return poll.Error(fmt.Errorf(
-				"number of running tasks (%v) exceeds max (%v)", running, max,
+				"number of running tasks (%v) exceeds max (%v)", running, maxConcurrent,
 			))
-		case (completed + running) > total:
+		case (completed + running) > totalCompletions:
 			return poll.Error(fmt.Errorf(
 				"number of tasks exceeds total (%v), %v running and %v completed",
-				total, running, completed,
+				totalCompletions, running, completed,
 			))
-		case completed == total && running == 0:
+		case completed == totalCompletions && running == 0:
 			return poll.Success()
 		default:
 			newRes := fmt.Sprintf(
@@ -157,7 +153,7 @@ func JobComplete(client client.CommonAPIClient, service swarmtypes.Service) func
 
 		return poll.Continue(
 			"Job not yet finished, %v completed and %v running out of %v total",
-			completed, running, total,
+			completed, running, totalCompletions,
 		)
 	}
 }
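JobComplete returns a gotest.tools/v3 poll.Check, so callers drive it with poll.WaitOn. A hedged usage sketch (apiClient and service are placeholders for whatever the calling test already has in scope):

// Hypothetical test snippet, not taken from this diff.
poll.WaitOn(t, swarm.JobComplete(apiClient, service),
	poll.WithTimeout(2*time.Minute), poll.WithDelay(100*time.Millisecond))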
@@ -21,7 +21,7 @@ import (
 	"gotest.tools/v3/skip"
 )
 
-func max(x, y int) int {
+func maxInt(x, y int) int {
 	if x >= y {
 		return x
 	}
@@ -404,7 +404,7 @@ func TestChangesDirsMutated(t *testing.T) {
 		{filepath.FromSlash("/symlinknew"), ChangeAdd},
 	}...)
 
-	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+	for i := 0; i < maxInt(len(changes), len(expectedChanges)); i++ {
 		if i >= len(expectedChanges) {
 			t.Fatalf("unexpected change %s\n", changes[i].String())
 		}
@@ -530,7 +530,7 @@ func checkChanges(expectedChanges, changes []Change, t *testing.T) {
 	skip.If(t, runtime.GOOS != "windows" && os.Getuid() != 0, "skipping test that requires root")
 	sort.Sort(changesByPath(expectedChanges))
 	sort.Sort(changesByPath(changes))
-	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+	for i := 0; i < maxInt(len(changes), len(expectedChanges)); i++ {
 		if i >= len(expectedChanges) {
 			t.Fatalf("unexpected change %s\n", changes[i].String())
 		}
@@ -227,13 +227,13 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool,
 }
 
 func backoff(retries int) time.Duration {
-	b, max := 1, defaultTimeOut
-	for b < max && retries > 0 {
+	b, maxTimeout := 1, defaultTimeOut
+	for b < maxTimeout && retries > 0 {
 		b *= 2
 		retries--
 	}
-	if b > max {
-		b = max
+	if b > maxTimeout {
+		b = maxTimeout
 	}
 	return time.Duration(b) * time.Second
 }
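For reference, the clamped exponential growth above works out as follows, assuming defaultTimeOut in this package is 30 (seconds). A test-style sketch that would live next to backoff:

// Table-driven check of the clamping behaviour; values assume defaultTimeOut == 30.
for retries, want := range []time.Duration{1, 2, 4, 8, 16, 30, 30} {
	if got := backoff(retries); got != want*time.Second {
		t.Fatalf("backoff(%d) = %v, want %v", retries, got, want*time.Second)
	}
}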
@@ -151,13 +151,13 @@ func isCpusetListAvailable(provided, available string) (bool, error) {
 	}
 	// 8192 is the normal maximum number of CPUs in Linux, so accept numbers up to this
 	// or more if we actually have more CPUs.
-	max := 8192
+	maxCPUs := 8192
 	for m := range parsedAvailable {
-		if m > max {
-			max = m
+		if m > maxCPUs {
+			maxCPUs = m
 		}
 	}
-	parsedProvided, err := parsers.ParseUintListMaximum(provided, max)
+	parsedProvided, err := parsers.ParseUintListMaximum(provided, maxCPUs)
 	if err != nil {
 		return false, err
 	}
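parsers.ParseUintListMaximum expands a cpuset-style list into a set of indices and refuses anything above the given maximum; the signature assumed here is ParseUintListMaximum(val string, maximum int) (map[int]bool, error). A hedged sketch:

// Illustrative call, not from this diff; "0-3,8" is an arbitrary example cpuset string.
set, err := parsers.ParseUintListMaximum("0-3,8", maxCPUs)
// on success, set == map[int]bool{0: true, 1: true, 2: true, 3: true, 8: true}
if err != nil {
	// values above maxCPUs (or malformed input) are reported as an error
}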
@@ -209,9 +209,9 @@ func TestNewTailReader(t *testing.T) {
 			test := test
 			t.Parallel()
 
-			max := len(test.data)
-			if max > 10 {
-				max = 10
+			maxLen := len(test.data)
+			if maxLen > 10 {
+				maxLen = 10
 			}
 
 			s := strings.Join(test.data, string(delim))
@@ -219,7 +219,7 @@ func TestNewTailReader(t *testing.T) {
 				s += string(delim)
 			}
 
-			for i := 1; i <= max; i++ {
+			for i := 1; i <= maxLen; i++ {
 				t.Run(fmt.Sprintf("%d lines", i), func(t *testing.T) {
 					i := i
 					t.Parallel()
@@ -86,7 +86,7 @@ func (rm *RestartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped
 		restart = true
 	case rm.policy.IsOnFailure():
 		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
-		if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max {
+		if maxRetryCount := rm.policy.MaximumRetryCount; maxRetryCount == 0 || rm.restartCount < maxRetryCount {
			restart = exitCode != 0
 		}
 	}
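The renamed maxRetryCount preserves the on-failure semantics: a MaximumRetryCount of 0 means no upper bound, otherwise restarts stop once restartCount reaches the limit, and only non-zero exits are retried. A standalone sketch of just that decision, for illustration (not the real RestartManager API):

// shouldRestartOnFailure mirrors the condition above in isolation.
func shouldRestartOnFailure(exitCode uint32, restartCount, maxRetryCount int) bool {
	if maxRetryCount == 0 || restartCount < maxRetryCount {
		return exitCode != 0
	}
	return false
}

// shouldRestartOnFailure(1, 2, 3) == true  (under the limit, non-zero exit)
// shouldRestartOnFailure(0, 0, 5) == false (clean exit is never retried on-failure)
// shouldRestartOnFailure(1, 5, 5) == false (retry budget exhausted)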