Merge pull request #44475 from thaJeztah/22.06_backport_config_fix_panic
[22.06 backport] daemon/config: use strings.Cut(), fix panic in BuilderGCFilter
This commit is contained in:
commit
77a01aaec7
9 changed files with 52 additions and 38 deletions
|
@ -2,7 +2,6 @@ package config
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
|
@ -28,7 +27,7 @@ func (x *BuilderGCFilter) MarshalJSON() ([]byte, error) {
|
|||
for _, k := range keys {
|
||||
values := f.Get(k)
|
||||
for _, v := range values {
|
||||
arr = append(arr, fmt.Sprintf("%s=%s", k, v))
|
||||
arr = append(arr, k+"="+v)
|
||||
}
|
||||
}
|
||||
return json.Marshal(arr)
|
||||
|
@ -45,9 +44,9 @@ func (x *BuilderGCFilter) UnmarshalJSON(data []byte) error {
|
|||
return err
|
||||
}
|
||||
for _, s := range arr {
|
||||
fields := strings.SplitN(s, "=", 2)
|
||||
name := strings.ToLower(strings.TrimSpace(fields[0]))
|
||||
value := strings.TrimSpace(fields[1])
|
||||
name, value, _ := strings.Cut(s, "=")
|
||||
name = strings.ToLower(strings.TrimSpace(name))
|
||||
value = strings.TrimSpace(value)
|
||||
f.Add(name, value)
|
||||
}
|
||||
*x = BuilderGCFilter(f)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
|
@ -42,3 +43,16 @@ func TestBuilderGC(t *testing.T) {
|
|||
assert.Assert(t, filters.Args(cfg.Builder.GC.Policy[0].Filter).UniqueExactMatch("unused-for", "2200h"))
|
||||
assert.Assert(t, filters.Args(cfg.Builder.GC.Policy[1].Filter).UniqueExactMatch("unused-for", "3300h"))
|
||||
}
|
||||
|
||||
// TestBuilderGCFilterUnmarshal is a regression test for https://github.com/moby/moby/issues/44361,
|
||||
// where an incorrectly formatted gc filter option ("unused-for2200h",
|
||||
// missing a "=" separator) resulted in a panic during unmarshal.
|
||||
func TestBuilderGCFilterUnmarshal(t *testing.T) {
|
||||
var cfg BuilderGCConfig
|
||||
err := json.Unmarshal([]byte(`{"poliCy": [{"keepStorage": "10GB", "filter": ["unused-for2200h"]}]}`), &cfg)
|
||||
assert.Check(t, err)
|
||||
expectedPolicy := []BuilderGCRule{{
|
||||
KeepStorage: "10GB", Filter: BuilderGCFilter(filters.NewArgs(filters.Arg("unused-for2200h", ""))),
|
||||
}}
|
||||
assert.DeepEqual(t, cfg.Policy, expectedPolicy, cmp.AllowUnexported(BuilderGCFilter{}))
|
||||
}
|
||||
|
|
|
@ -241,7 +241,7 @@ type CommonConfig struct {
|
|||
|
||||
DNSConfig
|
||||
LogConfig
|
||||
BridgeConfig // bridgeConfig holds bridge network specific configuration.
|
||||
BridgeConfig // BridgeConfig holds bridge network specific configuration.
|
||||
NetworkConfig
|
||||
registry.ServiceOptions
|
||||
|
||||
|
@ -323,19 +323,19 @@ func New() *Config {
|
|||
func GetConflictFreeLabels(labels []string) ([]string, error) {
|
||||
labelMap := map[string]string{}
|
||||
for _, label := range labels {
|
||||
stringSlice := strings.SplitN(label, "=", 2)
|
||||
if len(stringSlice) > 1 {
|
||||
key, val, ok := strings.Cut(label, "=")
|
||||
if ok {
|
||||
// If there is a conflict we will return an error
|
||||
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
|
||||
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
|
||||
if v, ok := labelMap[key]; ok && v != val {
|
||||
return nil, errors.Errorf("conflict labels for %s=%s and %s=%s", key, val, key, v)
|
||||
}
|
||||
labelMap[stringSlice[0]] = stringSlice[1]
|
||||
labelMap[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
newLabels := []string{}
|
||||
for k, v := range labelMap {
|
||||
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
|
||||
newLabels = append(newLabels, k+"="+v)
|
||||
}
|
||||
return newLabels, nil
|
||||
}
|
||||
|
@ -528,7 +528,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
|
|||
for key := range unknownKeys {
|
||||
unknown = append(unknown, key)
|
||||
}
|
||||
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
|
||||
return errors.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
|
||||
}
|
||||
|
||||
var conflicts []string
|
||||
|
@ -562,7 +562,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
|
|||
flags.Visit(duplicatedConflicts)
|
||||
|
||||
if len(conflicts) > 0 {
|
||||
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
|
||||
return errors.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -579,7 +579,7 @@ func Validate(config *Config) error {
|
|||
// validate log-level
|
||||
if config.LogLevel != "" {
|
||||
if _, err := logrus.ParseLevel(config.LogLevel); err != nil {
|
||||
return fmt.Errorf("invalid logging level: %s", config.LogLevel)
|
||||
return errors.Errorf("invalid logging level: %s", config.LogLevel)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -606,22 +606,22 @@ func Validate(config *Config) error {
|
|||
|
||||
// TODO(thaJeztah) Validations below should not accept "0" to be valid; see Validate() for a more in-depth description of this problem
|
||||
if config.Mtu < 0 {
|
||||
return fmt.Errorf("invalid default MTU: %d", config.Mtu)
|
||||
return errors.Errorf("invalid default MTU: %d", config.Mtu)
|
||||
}
|
||||
if config.MaxConcurrentDownloads < 0 {
|
||||
return fmt.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
|
||||
return errors.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
|
||||
}
|
||||
if config.MaxConcurrentUploads < 0 {
|
||||
return fmt.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
|
||||
return errors.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
|
||||
}
|
||||
if config.MaxDownloadAttempts < 0 {
|
||||
return fmt.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
|
||||
return errors.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
|
||||
}
|
||||
|
||||
// validate that "default" runtime is not reset
|
||||
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
|
||||
if _, ok := runtimes[StockRuntimeName]; ok {
|
||||
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
|
||||
return errors.Errorf("runtime name '%s' is reserved", StockRuntimeName)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -633,7 +633,7 @@ func Validate(config *Config) error {
|
|||
if !builtinRuntimes[defaultRuntime] {
|
||||
runtimes := config.GetAllRuntimes()
|
||||
if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
|
||||
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
|
||||
return errors.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -38,7 +37,8 @@ func setRootKeyLimit(limit int) error {
|
|||
return err
|
||||
}
|
||||
defer keys.Close()
|
||||
if _, err := fmt.Fprintf(keys, "%d", limit); err != nil {
|
||||
_, err = keys.WriteString(strconv.Itoa(limit))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0)
|
||||
|
@ -46,7 +46,7 @@ func setRootKeyLimit(limit int) error {
|
|||
return err
|
||||
}
|
||||
defer bytes.Close()
|
||||
_, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier)
|
||||
_, err = bytes.WriteString(strconv.Itoa(limit * rootKeyByteMultiplier))
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
|
@ -126,10 +127,9 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, stopSign
|
|||
}
|
||||
}
|
||||
|
||||
attributes := map[string]string{
|
||||
"signal": fmt.Sprintf("%d", stopSignal),
|
||||
}
|
||||
daemon.LogContainerEventWithAttributes(container, "kill", attributes)
|
||||
daemon.LogContainerEventWithAttributes(container, "kill", map[string]string{
|
||||
"signal": strconv.Itoa(int(stopSignal)),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@ package links // import "github.com/docker/docker/daemon/links"
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -200,7 +201,7 @@ func TestLinkPortRangeEnv(t *testing.T) {
|
|||
if env[tcpaddr] != "172.0.17.2" {
|
||||
t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr])
|
||||
}
|
||||
if env[tcpport] != fmt.Sprintf("%d", i) {
|
||||
if env[tcpport] != strconv.Itoa(i) {
|
||||
t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport])
|
||||
}
|
||||
if env[tcpproto] != "tcp" {
|
||||
|
|
|
@ -5,7 +5,7 @@ package daemon // import "github.com/docker/docker/daemon"
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
|
@ -49,12 +49,12 @@ func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]
|
|||
if runtimeList.Len() > 0 {
|
||||
runtimeList.WriteRune(' ')
|
||||
}
|
||||
runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt.Path))
|
||||
runtimeList.WriteString(name + ":" + rt.Path)
|
||||
}
|
||||
|
||||
attributes["runtimes"] = runtimeList.String()
|
||||
attributes["default-runtime"] = daemon.configStore.DefaultRuntime
|
||||
attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize)
|
||||
attributes["default-shm-size"] = strconv.FormatInt(int64(daemon.configStore.ShmSize), 10)
|
||||
attributes["default-ipc-mode"] = daemon.configStore.IpcMode
|
||||
attributes["default-cgroupns-mode"] = daemon.configStore.CgroupNamespaceMode
|
||||
|
||||
|
|
|
@ -2,7 +2,8 @@ package daemon // import "github.com/docker/docker/daemon"
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
|
||||
|
@ -22,8 +23,8 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error {
|
|||
|
||||
if err = daemon.containerd.ResizeTerminal(context.Background(), container.ID, libcontainerdtypes.InitProcessName, width, height); err == nil {
|
||||
attributes := map[string]string{
|
||||
"height": fmt.Sprintf("%d", height),
|
||||
"width": fmt.Sprintf("%d", width),
|
||||
"height": strconv.Itoa(height),
|
||||
"width": strconv.Itoa(width),
|
||||
}
|
||||
daemon.LogContainerEventWithAttributes(container, "resize", attributes)
|
||||
}
|
||||
|
@ -48,6 +49,6 @@ func (daemon *Daemon) ContainerExecResize(name string, height, width int) error
|
|||
case <-ec.Started:
|
||||
return daemon.containerd.ResizeTerminal(context.Background(), ec.ContainerID, ec.ID, width, height)
|
||||
case <-timeout.C:
|
||||
return fmt.Errorf("timeout waiting for exec session ready")
|
||||
return errors.New("timeout waiting for exec session ready")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,6 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -56,7 +55,7 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
|
|||
runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
|
||||
// Remove old temp directory if any
|
||||
os.RemoveAll(runtimeDir + "-old")
|
||||
tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes")
|
||||
tmpDir, err := os.MkdirTemp(daemon.configStore.Root, "gen-runtimes")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue