
Merge pull request #43980 from corhere/rcu-daemon-config

daemon: read-copy-update the daemon config
Cory Snider, 2 years ago (commit cc51f0b3d3)
72 changed files with 2427 additions and 1034 deletions
  1. api/server/router/build/build.go (+6 -8)
  2. api/server/router/system/system.go (+2 -2)
  3. api/server/router/system/system_routes.go (+1 -1)
  4. api/types/types.go (+0 -11)
  5. cmd/dockerd/daemon.go (+11 -8)
  6. daemon/config/config.go (+0 -71)
  7. daemon/config/config_linux.go (+0 -21)
  8. daemon/config/config_linux_test.go (+0 -30)
  9. daemon/config/config_windows.go (+0 -13)
  10. daemon/container.go (+5 -4)
  11. daemon/container_operations.go (+29 -28)
  12. daemon/container_operations_unix.go (+3 -2)
  13. daemon/container_operations_windows.go (+2 -1)
  14. daemon/container_unix_test.go (+7 -4)
  15. daemon/containerd/resolver.go (+1 -2)
  16. daemon/containerd/service.go (+3 -7)
  17. daemon/create.go (+10 -9)
  18. daemon/daemon.go (+115 -84)
  19. daemon/daemon_linux.go (+7 -7)
  20. daemon/daemon_linux_test.go (+13 -13)
  21. daemon/daemon_test.go (+1 -1)
  22. daemon/daemon_unix.go (+33 -42)
  23. daemon/daemon_unix_test.go (+16 -14)
  24. daemon/daemon_unsupported.go (+0 -4)
  25. daemon/daemon_windows.go (+20 -28)
  26. daemon/delete.go (+10 -5)
  27. daemon/exec.go (+2 -1)
  28. daemon/exec_linux.go (+3 -2)
  29. daemon/exec_linux_test.go (+4 -3)
  30. daemon/exec_windows.go (+2 -1)
  31. daemon/info.go (+26 -25)
  32. daemon/info_unix.go (+64 -42)
  33. daemon/info_windows.go (+6 -5)
  34. daemon/inspect.go (+5 -4)
  35. daemon/inspect_linux.go (+1 -1)
  36. daemon/inspect_test.go (+6 -5)
  37. daemon/logs.go (+9 -12)
  38. daemon/metrics_unix.go (+3 -2)
  39. daemon/metrics_unsupported.go (+5 -2)
  40. daemon/monitor.go (+9 -5)
  41. daemon/network.go (+8 -7)
  42. daemon/oci_linux.go (+36 -36)
  43. daemon/oci_linux_test.go (+8 -10)
  44. daemon/oci_windows.go (+4 -3)
  45. daemon/prune.go (+2 -1)
  46. daemon/reload.go (+179 -160)
  47. daemon/reload_test.go (+24 -29)
  48. daemon/reload_unix.go (+16 -23)
  49. daemon/reload_windows.go (+1 -1)
  50. daemon/restart.go (+3 -3)
  51. daemon/runtime_unix.go (+188 -104)
  52. daemon/runtime_unix_test.go (+281 -85)
  53. daemon/runtime_windows.go (+13 -1)
  54. daemon/start.go (+11 -10)
  55. daemon/start_unix.go (+4 -4)
  56. daemon/start_windows.go (+1 -1)
  57. daemon/update.go (+2 -1)
  58. integration-cli/docker_cli_daemon_test.go (+1 -1)
  59. registry/service.go (+12 -22)
  60. vendor.mod (+4 -0)
  61. vendor.sum (+8 -2)
  62. vendor/github.com/mitchellh/copystructure/LICENSE (+21 -0)
  63. vendor/github.com/mitchellh/copystructure/README.md (+21 -0)
  64. vendor/github.com/mitchellh/copystructure/copier_time.go (+15 -0)
  65. vendor/github.com/mitchellh/copystructure/copystructure.go (+631 -0)
  66. vendor/github.com/mitchellh/reflectwalk/.travis.yml (+1 -0)
  67. vendor/github.com/mitchellh/reflectwalk/LICENSE (+21 -0)
  68. vendor/github.com/mitchellh/reflectwalk/README.md (+6 -0)
  69. vendor/github.com/mitchellh/reflectwalk/location.go (+19 -0)
  70. vendor/github.com/mitchellh/reflectwalk/location_string.go (+16 -0)
  71. vendor/github.com/mitchellh/reflectwalk/reflectwalk.go (+420 -0)
  72. vendor/modules.txt (+10 -0)
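
The bulk of the diff threads a configuration snapshot through call chains, but the core mechanism is small: the daemon's mutex-guarded `*config.Config` field becomes an `atomic.Pointer` to an immutable `configStore`, read via `daemon.config()` and swapped wholesale on reload (read-copy-update). Below is a minimal, self-contained sketch of that pattern; the `Config`, `load`, and `reload` names are illustrative, not the daemon's real API.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Config stands in for the daemon configuration; the fields are illustrative.
type Config struct {
	Debug    bool
	Features map[string]bool
}

// store holds the current immutable snapshot. Readers never observe a
// half-applied reload because the whole pointer is swapped atomically.
var store atomic.Pointer[Config]

// load returns the current snapshot. Callers must treat it as read-only and
// pass the same pointer down their call stack so one operation sees one config.
func load() *Config {
	if c := store.Load(); c != nil {
		return c
	}
	return &Config{}
}

// reload copies the old snapshot, mutates the copy, and publishes it.
// In-flight readers keep using the snapshot they already loaded.
func reload(mutate func(*Config)) {
	old := load()
	next := *old // shallow copy; real code must deep-copy nested maps and slices
	next.Features = make(map[string]bool, len(old.Features))
	for k, v := range old.Features {
		next.Features[k] = v
	}
	mutate(&next)
	store.Store(&next)
}

func main() {
	store.Store(&Config{Features: map[string]bool{"buildkit": true}})

	snapshot := load() // taken at the start of an "operation"
	reload(func(c *Config) { c.Debug = true })

	fmt.Println(snapshot.Debug, load().Debug) // false true
}
```

The vendored mitchellh/copystructure and reflectwalk packages in the file list support this scheme: a reload needs a deep copy of the previous configuration to mutate without touching the published snapshot.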

+ 6 - 8
api/server/router/build/build.go

@@ -9,18 +9,16 @@ import (
 
 // buildRouter is a router to talk with the build controller
 type buildRouter struct {
-	backend  Backend
-	daemon   experimentalProvider
-	routes   []router.Route
-	features *map[string]bool
+	backend Backend
+	daemon  experimentalProvider
+	routes  []router.Route
 }
 
 // NewRouter initializes a new build router
-func NewRouter(b Backend, d experimentalProvider, features *map[string]bool) router.Router {
+func NewRouter(b Backend, d experimentalProvider) router.Router {
 	r := &buildRouter{
-		backend:  b,
-		daemon:   d,
-		features: features,
+		backend: b,
+		daemon:  d,
 	}
 	r.initRoutes()
 	return r

+ 2 - 2
api/server/router/system/system.go

@@ -12,11 +12,11 @@ type systemRouter struct {
 	cluster  ClusterBackend
 	routes   []router.Route
 	builder  *buildkit.Builder
-	features *map[string]bool
+	features func() map[string]bool
 }
 
 // NewRouter initializes a new system router
-func NewRouter(b Backend, c ClusterBackend, builder *buildkit.Builder, features *map[string]bool) router.Router {
+func NewRouter(b Backend, c ClusterBackend, builder *buildkit.Builder, features func() map[string]bool) router.Router {
 	r := &systemRouter{
 		backend:  b,
 		cluster:  c,

+ 1 - 1
api/server/router/system/system_routes.go

@@ -31,7 +31,7 @@ func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r
 	w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
 	w.Header().Add("Pragma", "no-cache")
 
-	builderVersion := build.BuilderVersion(*s.features)
+	builderVersion := build.BuilderVersion(s.features())
 	if bv := builderVersion; bv != "" {
 		w.Header().Set("Builder-Version", string(bv))
 	}
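
In the two router changes above, the build router drops its features field entirely, and the system router swaps its `*map[string]bool` for a getter that re-reads the current snapshot on every request, instead of holding a pointer into a struct that a reload might replace. A hedged sketch of the difference, with made-up names:

```go
package main

import "fmt"

// systemRouter mirrors the shape of the change: it stores a function rather
// than a pointer to a shared map. Names are illustrative, not the real fields.
type systemRouter struct {
	features func() map[string]bool
}

func (s *systemRouter) builderVersion() string {
	if s.features()["buildkit"] { // fresh snapshot on every call
		return "2"
	}
	return "1"
}

func main() {
	current := map[string]bool{"buildkit": false}
	r := &systemRouter{features: func() map[string]bool { return current }}
	fmt.Println(r.builderVersion()) // 1

	// A "reload" publishes a new map; the router picks it up automatically.
	current = map[string]bool{"buildkit": true}
	fmt.Println(r.builderVersion()) // 2
}
```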

+ 0 - 11
api/types/types.go

@@ -16,7 +16,6 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/volume"
 	"github.com/docker/go-connections/nat"
-	"github.com/opencontainers/runtime-spec/specs-go/features"
 )
 
 const (
@@ -657,16 +656,6 @@ type Runtime struct {
 
 	Type    string                 `json:"runtimeType,omitempty"`
 	Options map[string]interface{} `json:"options,omitempty"`
-
-	// This is exposed here only for internal use
-	ShimConfig *ShimConfig        `json:"-"`
-	Features   *features.Features `json:"-"`
-}
-
-// ShimConfig is used by runtime to configure containerd shims
-type ShimConfig struct {
-	Binary string
-	Opts   interface{}
 }
 
 // DiskUsageObject represents an object type used for disk usage query filtering.

+ 11 - 8
cmd/dockerd/daemon.go

@@ -337,7 +337,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 type routerOptions struct {
 	sessionManager *session.Manager
 	buildBackend   *buildbackend.Backend
-	features       *map[string]bool
+	features       func() map[string]bool
 	buildkit       *buildkit.Builder
 	daemon         *daemon.Daemon
 	cluster        *cluster.Cluster
@@ -357,7 +357,7 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
 	cgroupParent := newCgroupParent(config)
 	ro := routerOptions{
 		sessionManager: sm,
-		features:       d.Features(),
+		features:       d.Features,
 		daemon:         d,
 	}
 
@@ -369,9 +369,9 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
 		ImageTagger:         d.ImageService(),
 		NetworkController:   d.NetworkController(),
 		DefaultCgroupParent: cgroupParent,
-		RegistryHosts:       d.RegistryHosts(),
+		RegistryHosts:       d.RegistryHosts,
 		BuilderConfig:       config.Builder,
-		Rootless:            d.Rootless(),
+		Rootless:            daemon.Rootless(config),
 		IdentityMapping:     d.IdentityMapping(),
 		DNSConfig:           config.DNSConfig,
 		ApparmorProfile:     daemon.DefaultApparmorProfile(),
@@ -397,18 +397,21 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
 
 func (cli *DaemonCli) reloadConfig() {
 	reload := func(c *config.Config) {
-		// Revalidate and reload the authorization plugins
 		if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
 			logrus.Fatalf("Error validating authorization plugin: %v", err)
 			return
 		}
-		cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
 
 		if err := cli.d.Reload(c); err != nil {
 			logrus.Errorf("Error reconfiguring the daemon: %v", err)
 			return
 		}
 
+		// Apply our own configuration only after the daemon reload has succeeded. We
+		// don't want to partially apply the config if the daemon is unhappy with it.
+
+		cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
+
 		if c.IsValueSet("debug") {
 			debugEnabled := debug.IsEnabled()
 			switch {
@@ -593,9 +596,9 @@ func (opts routerOptions) Build() []router.Router {
 			opts.daemon.ImageService().DistributionServices().ImageStore,
 			opts.daemon.ImageService().DistributionServices().LayerStore,
 		),
-		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
+		systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.daemon.Features),
 		volume.NewRouter(opts.daemon.VolumesService(), opts.cluster),
-		build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
+		build.NewRouter(opts.buildBackend, opts.daemon),
 		sessionrouter.NewRouter(opts.sessionManager),
 		swarmrouter.NewRouter(opts.cluster),
 		pluginrouter.NewRouter(opts.daemon.PluginManager()),
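
The `reloadConfig` hunk above moves the CLI's own side effects (authorization middleware, debug level) to after `daemon.Reload` succeeds, so a config the daemon rejects is not partially applied. A rough sketch of that ordering, using hypothetical names rather than the real CLI plumbing:

```go
package main

import (
	"errors"
	"fmt"
)

type conf struct {
	AuthorizationPlugins []string
}

// reload validates the new config, lets the daemon apply it, and only then
// applies CLI-side settings. On any error, nothing has been changed locally.
func reload(c *conf, daemonReload func(*conf) error, setAuthzPlugins func([]string)) error {
	if c == nil {
		return errors.New("nil config")
	}
	if err := daemonReload(c); err != nil {
		return fmt.Errorf("reconfiguring the daemon: %w", err)
	}
	// Safe to apply our own configuration: the daemon accepted the reload.
	setAuthzPlugins(c.AuthorizationPlugins)
	return nil
}

func main() {
	err := reload(
		&conf{AuthorizationPlugins: []string{"authz-broker"}},
		func(*conf) error { return nil },
		func(p []string) { fmt.Println("authz plugins:", p) },
	)
	fmt.Println("err:", err)
}
```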

+ 0 - 71
daemon/config/config.go

@@ -7,16 +7,13 @@ import (
 	"net"
 	"net/url"
 	"os"
-	"path/filepath"
 	"strings"
-	"sync"
 
 	"golang.org/x/text/encoding"
 	"golang.org/x/text/encoding/unicode"
 	"golang.org/x/text/transform"
 
 	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
-	"github.com/containerd/containerd/runtime/v2/shim"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/registry"
 	"github.com/imdario/mergo"
@@ -57,9 +54,6 @@ const (
 	// DefaultPluginNamespace is the name of the default containerd namespace used for plugins.
 	DefaultPluginNamespace = "plugins.moby"
 
-	// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
-	LinuxV2RuntimeName = "io.containerd.runc.v2"
-
 	// SeccompProfileDefault is the built-in default seccomp profile.
 	SeccompProfileDefault = "builtin"
 	// SeccompProfileUnconfined is a special profile name for seccomp to use an
@@ -67,11 +61,6 @@ const (
 	SeccompProfileUnconfined = "unconfined"
 )
 
-var builtinRuntimes = map[string]bool{
-	StockRuntimeName:   true,
-	LinuxV2RuntimeName: true,
-}
-
 // flatOptions contains configuration keys
 // that MUST NOT be parsed as deep structures.
 // Use this to differentiate these options
@@ -227,7 +216,6 @@ type CommonConfig struct {
 	NetworkConfig
 	registry.ServiceOptions
 
-	sync.Mutex
 	// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
 	// It should probably be handled outside this package.
 	ValuesSet map[string]interface{} `json:"-"`
@@ -639,26 +627,10 @@ func Validate(config *Config) error {
 		return errors.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
 	}
 
-	// validate that "default" runtime is not reset
-	if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
-		if _, ok := runtimes[StockRuntimeName]; ok {
-			return errors.Errorf("runtime name '%s' is reserved", StockRuntimeName)
-		}
-	}
-
 	if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
 		return err
 	}
 
-	if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
-		if !builtinRuntimes[defaultRuntime] {
-			runtimes := config.GetAllRuntimes()
-			if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
-				return errors.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
-			}
-		}
-	}
-
 	for _, h := range config.Hosts {
 		if _, err := opts.ValidateHost(h); err != nil {
 			return err
@@ -669,15 +641,6 @@ func Validate(config *Config) error {
 	return config.ValidatePlatformConfig()
 }
 
-// GetDefaultRuntimeName returns the current default runtime
-func (conf *Config) GetDefaultRuntimeName() string {
-	conf.Lock()
-	rt := conf.DefaultRuntime
-	conf.Unlock()
-
-	return rt
-}
-
 // MaskCredentials masks credentials that are in an URL.
 func MaskCredentials(rawURL string) string {
 	parsedURL, err := url.Parse(rawURL)
@@ -687,37 +650,3 @@ func MaskCredentials(rawURL string) string {
 	parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
 	return parsedURL.String()
 }
-
-// IsPermissibleC8dRuntimeName tests whether name is safe to pass into
-// containerd as a runtime name, and whether the name is well-formed.
-// It does not check if the runtime is installed.
-//
-// A runtime name containing slash characters is interpreted by containerd as
-// the path to a runtime binary. If we allowed this, anyone with Engine API
-// access could get containerd to execute an arbitrary binary as root. Although
-// Engine API access is already equivalent to root on the host, the runtime name
-// has not historically been a vector to run arbitrary code as root so users are
-// not expecting it to become one.
-//
-// This restriction is not configurable. There are viable workarounds for
-// legitimate use cases: administrators and runtime developers can make runtimes
-// available for use with Docker by installing them onto PATH following the
-// [binary naming convention] for containerd Runtime v2.
-//
-// [binary naming convention]: https://github.com/containerd/containerd/blob/main/runtime/v2/README.md#binary-naming
-func IsPermissibleC8dRuntimeName(name string) bool {
-	// containerd uses a rather permissive test to validate runtime names:
-	//
-	//   - Any name for which filepath.IsAbs(name) is interpreted as the absolute
-	//     path to a shim binary. We want to block this behaviour.
-	//   - Any name which contains at least one '.' character and no '/' characters
-	//     and does not begin with a '.' character is a valid runtime name. The shim
-	//     binary name is derived from the final two components of the name and
-	//     searched for on the PATH. The name "a.." is technically valid per
-	//     containerd's implementation: it would resolve to a binary named
-	//     "containerd-shim---".
-	//
-	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/manager.go#L297-L317
-	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/shim/util.go#L83-L93
-	return !filepath.IsAbs(name) && !strings.ContainsRune(name, '/') && shim.BinaryName(name) != ""
-}

+ 0 - 21
daemon/config/config_linux.go

@@ -81,25 +81,6 @@ type Config struct {
 	Rootless   bool   `json:"rootless,omitempty"`
 }
 
-// GetRuntime returns the runtime path and arguments for a given
-// runtime name
-func (conf *Config) GetRuntime(name string) *types.Runtime {
-	conf.Lock()
-	defer conf.Unlock()
-	if rt, ok := conf.Runtimes[name]; ok {
-		return &rt
-	}
-	return nil
-}
-
-// GetAllRuntimes returns a copy of the runtimes map
-func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
-	conf.Lock()
-	rts := conf.Runtimes
-	conf.Unlock()
-	return rts
-}
-
 // GetExecRoot returns the user configured Exec-root
 func (conf *Config) GetExecRoot() string {
 	return conf.ExecRoot
@@ -107,8 +88,6 @@ func (conf *Config) GetExecRoot() string {
 
 // GetInitPath returns the configured docker-init path
 func (conf *Config) GetInitPath() string {
-	conf.Lock()
-	defer conf.Unlock()
 	if conf.InitPath != "" {
 		return conf.InitPath
 	}

+ 0 - 30
daemon/config/config_linux_test.go

@@ -3,10 +3,8 @@ package config // import "github.com/docker/docker/daemon/config"
 import (
 	"testing"
 
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/opts"
 	units "github.com/docker/go-units"
-	"github.com/imdario/mergo"
 	"github.com/spf13/pflag"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -123,34 +121,6 @@ func TestDaemonConfigurationMergeShmSize(t *testing.T) {
 	assert.Check(t, is.Equal(int64(expectedValue), cc.ShmSize.Value()))
 }
 
-func TestUnixValidateConfigurationErrors(t *testing.T) {
-	testCases := []struct {
-		doc         string
-		config      *Config
-		expectedErr string
-	}{
-		{
-			doc: `cannot override the stock runtime`,
-			config: &Config{
-				Runtimes: map[string]types.Runtime{
-					StockRuntimeName: {},
-				},
-			},
-			expectedErr: `runtime name 'runc' is reserved`,
-		},
-	}
-	for _, tc := range testCases {
-		tc := tc
-		t.Run(tc.doc, func(t *testing.T) {
-			cfg, err := New()
-			assert.NilError(t, err)
-			assert.Check(t, mergo.Merge(cfg, tc.config, mergo.WithOverride))
-			err = Validate(cfg)
-			assert.ErrorContains(t, err, tc.expectedErr)
-		})
-	}
-}
-
 func TestUnixGetInitPath(t *testing.T) {
 	testCases := []struct {
 		config           *Config

+ 0 - 13
daemon/config/config_windows.go

@@ -3,8 +3,6 @@ package config // import "github.com/docker/docker/daemon/config"
 import (
 	"os"
 	"path/filepath"
-
-	"github.com/docker/docker/api/types"
 )
 
 const (
@@ -30,17 +28,6 @@ type Config struct {
 	// for the Windows daemon.)
 }
 
-// GetRuntime returns the runtime path and arguments for a given
-// runtime name
-func (conf *Config) GetRuntime(name string) *types.Runtime {
-	return nil
-}
-
-// GetAllRuntimes returns a copy of the runtimes map
-func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
-	return map[string]types.Runtime{}
-}
-
 // GetExecRoot returns the user configured Exec-root
 func (conf *Config) GetExecRoot() string {
 	return ""

+ 5 - 4
daemon/container.go

@@ -10,6 +10,7 @@ import (
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
@@ -206,10 +207,10 @@ func (daemon *Daemon) generateHostname(id string, config *containertypes.Config)
 	}
 }
 
-func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error {
+func (daemon *Daemon) setSecurityOptions(cfg *config.Config, container *container.Container, hostConfig *containertypes.HostConfig) error {
 	container.Lock()
 	defer container.Unlock()
-	return daemon.parseSecurityOpt(&container.SecurityOptions, hostConfig)
+	return daemon.parseSecurityOpt(cfg, &container.SecurityOptions, hostConfig)
 }
 
 func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
@@ -234,7 +235,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
 
 // verifyContainerSettings performs validation of the hostconfig and config
 // structures.
-func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) {
+func (daemon *Daemon) verifyContainerSettings(daemonCfg *configStore, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) {
 	// First perform verification of settings common across all platforms.
 	if err = validateContainerConfig(config); err != nil {
 		return warnings, err
@@ -244,7 +245,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
 	}
 
 	// Now do platform-specific verification
-	warnings, err = verifyPlatformContainerSettings(daemon, hostConfig, update)
+	warnings, err = verifyPlatformContainerSettings(daemon, daemonCfg, hostConfig, update)
 	for _, w := range warnings {
 		logrus.Warn(w)
 	}

+ 29 - 28
daemon/container_operations.go

@@ -12,6 +12,7 @@ import (
 	containertypes "github.com/docker/docker/api/types/container"
 	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/libnetwork"
@@ -26,19 +27,19 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string {
+func (daemon *Daemon) getDNSSearchSettings(cfg *config.Config, container *container.Container) []string {
 	if len(container.HostConfig.DNSSearch) > 0 {
 		return container.HostConfig.DNSSearch
 	}
 
-	if len(daemon.configStore.DNSSearch) > 0 {
-		return daemon.configStore.DNSSearch
+	if len(cfg.DNSSearch) > 0 {
+		return cfg.DNSSearch
 	}
 
 	return nil
 }
 
-func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) {
+func (daemon *Daemon) buildSandboxOptions(cfg *config.Config, container *container.Container) ([]libnetwork.SandboxOption, error) {
 	var (
 		sboxOptions []libnetwork.SandboxOption
 		err         error
@@ -61,21 +62,21 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
 		sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
 	}
 
-	if err = daemon.setupPathsAndSandboxOptions(container, &sboxOptions); err != nil {
+	if err = daemon.setupPathsAndSandboxOptions(container, cfg, &sboxOptions); err != nil {
 		return nil, err
 	}
 
 	if len(container.HostConfig.DNS) > 0 {
 		dns = container.HostConfig.DNS
-	} else if len(daemon.configStore.DNS) > 0 {
-		dns = daemon.configStore.DNS
+	} else if len(cfg.DNS) > 0 {
+		dns = cfg.DNS
 	}
 
 	for _, d := range dns {
 		sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d))
 	}
 
-	dnsSearch := daemon.getDNSSearchSettings(container)
+	dnsSearch := daemon.getDNSSearchSettings(cfg, container)
 
 	for _, ds := range dnsSearch {
 		sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds))
@@ -83,8 +84,8 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
 
 	if len(container.HostConfig.DNSOptions) > 0 {
 		dnsOptions = container.HostConfig.DNSOptions
-	} else if len(daemon.configStore.DNSOptions) > 0 {
-		dnsOptions = daemon.configStore.DNSOptions
+	} else if len(cfg.DNSOptions) > 0 {
+		dnsOptions = cfg.DNSOptions
 	}
 
 	for _, ds := range dnsOptions {
@@ -112,7 +113,7 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
 		// value with the IP address stored in the daemon level HostGatewayIP
 		// config variable
 		if ip == opts.HostGatewayName {
-			gateway := daemon.configStore.HostGatewayIP.String()
+			gateway := cfg.HostGatewayIP.String()
 			if gateway == "" {
 				return nil, fmt.Errorf("unable to derive the IP value for host-gateway")
 			}
@@ -218,7 +219,7 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
 	}
 
 	for alias, parent := range daemon.parents(container) {
-		if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() {
+		if cfg.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() {
 			continue
 		}
 
@@ -291,13 +292,13 @@ func (daemon *Daemon) updateNetworkSettings(container *container.Container, n li
 	return nil
 }
 
-func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep *libnetwork.Endpoint) error {
+func (daemon *Daemon) updateEndpointNetworkSettings(cfg *config.Config, container *container.Container, n libnetwork.Network, ep *libnetwork.Endpoint) error {
 	if err := buildEndpointInfo(container.NetworkSettings, n, ep); err != nil {
 		return err
 	}
 
 	if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() {
-		container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface
+		container.NetworkSettings.Bridge = cfg.BridgeConfig.Iface
 	}
 
 	return nil
@@ -305,7 +306,7 @@ func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Contain
 
 // UpdateNetwork is used to update the container's network (e.g. when linked containers
 // get removed/unlinked).
-func (daemon *Daemon) updateNetwork(container *container.Container) error {
+func (daemon *Daemon) updateNetwork(cfg *config.Config, container *container.Container) error {
 	var (
 		start = time.Now()
 		ctrl  = daemon.netController
@@ -335,7 +336,7 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
 		return nil
 	}
 
-	sbOptions, err := daemon.buildSandboxOptions(container)
+	sbOptions, err := daemon.buildSandboxOptions(cfg, container)
 	if err != nil {
 		return fmt.Errorf("Update network failed: %v", err)
 	}
@@ -519,7 +520,7 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
 	}
 }
 
-func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr error) {
+func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.Container) (retErr error) {
 	if daemon.netController == nil {
 		return nil
 	}
@@ -552,7 +553,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
 	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
 	if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok {
 		cleanOperationalData(nConf)
-		if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
+		if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
 			return err
 		}
 	}
@@ -569,7 +570,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
 
 	for netName, epConf := range networks {
 		cleanOperationalData(epConf)
-		if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil {
+		if err := daemon.connectToNetwork(cfg, container, netName, epConf.EndpointSettings, updateSettings); err != nil {
 			return err
 		}
 	}
@@ -578,7 +579,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
 	// create its network sandbox now if not present
 	if len(networks) == 0 {
 		if nil == daemon.getNetworkSandbox(container) {
-			sbOptions, err := daemon.buildSandboxOptions(container)
+			sbOptions, err := daemon.buildSandboxOptions(cfg, container)
 			if err != nil {
 				return err
 			}
@@ -722,13 +723,13 @@ func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libn
 	return nil
 }
 
-func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) {
+func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) {
 	start := time.Now()
 	if container.HostConfig.NetworkMode.IsContainer() {
 		return runconfig.ErrConflictSharedNetwork
 	}
 	if containertypes.NetworkMode(idOrName).IsBridge() &&
-		daemon.configStore.DisableBridge {
+		cfg.DisableBridge {
 		container.Config.NetworkDisabled = true
 		return nil
 	}
@@ -766,7 +767,7 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName
 
 	controller := daemon.netController
 	sb := daemon.getNetworkSandbox(container)
-	createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, daemon.configStore.DNS)
+	createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, cfg.DNS)
 	if err != nil {
 		return err
 	}
@@ -790,12 +791,12 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName
 
 	delete(container.NetworkSettings.Networks, n.ID())
 
-	if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
+	if err := daemon.updateEndpointNetworkSettings(cfg, container, n, ep); err != nil {
 		return err
 	}
 
 	if sb == nil {
-		sbOptions, err := daemon.buildSandboxOptions(container)
+		sbOptions, err := daemon.buildSandboxOptions(cfg, container)
 		if err != nil {
 			return err
 		}
@@ -946,7 +947,7 @@ func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Ne
 	daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes)
 }
 
-func (daemon *Daemon) initializeNetworking(container *container.Container) error {
+func (daemon *Daemon) initializeNetworking(cfg *config.Config, container *container.Container) error {
 	var err error
 
 	if container.HostConfig.NetworkMode.IsContainer() {
@@ -975,7 +976,7 @@ func (daemon *Daemon) initializeNetworking(container *container.Container) error
 		}
 	}
 
-	if err := daemon.allocateNetwork(container); err != nil {
+	if err := daemon.allocateNetwork(cfg, container); err != nil {
 		return err
 	}
 
@@ -1074,7 +1075,7 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
 			}
 		}
 	} else {
-		if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil {
+		if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, endpointConfig, true); err != nil {
 			return err
 		}
 	}

+ 3 - 2
daemon/container_operations_unix.go

@@ -10,6 +10,7 @@ import (
 	"syscall"
 
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/links"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/libnetwork"
@@ -380,7 +381,7 @@ func serviceDiscoveryOnDefaultNetwork() bool {
 	return false
 }
 
-func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
+func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
 	var err error
 
 	// Set the correct paths for /etc/hosts and /etc/resolv.conf, based on the
@@ -427,7 +428,7 @@ func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container
 		// Copy the host's resolv.conf for the container (/run/systemd/resolve/resolv.conf or /etc/resolv.conf)
 		*sboxOptions = append(
 			*sboxOptions,
-			libnetwork.OptionOriginResolvConfPath(daemon.configStore.GetResolvConf()),
+			libnetwork.OptionOriginResolvConfPath(cfg.GetResolvConf()),
 		)
 	}
 

+ 2 - 1
daemon/container_operations_windows.go

@@ -5,6 +5,7 @@ import (
 	"os"
 
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libnetwork"
 	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
@@ -161,7 +162,7 @@ func serviceDiscoveryOnDefaultNetwork() bool {
 	return true
 }
 
-func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
+func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
 	return nil
 }
 

+ 7 - 4
daemon/container_unix_test.go

@@ -31,10 +31,13 @@ func TestContainerWarningHostAndPublishPorts(t *testing.T) {
 			NetworkMode:  "host",
 			PortBindings: tc.ports,
 		}
-		cs := &config.Config{}
-		configureRuntimes(cs)
-		d := &Daemon{configStore: cs}
-		wrns, err := d.verifyContainerSettings(hostConfig, &containertypes.Config{}, false)
+		d := &Daemon{}
+		cfg, err := config.New()
+		assert.NilError(t, err)
+		runtimes, err := setupRuntimes(cfg)
+		assert.NilError(t, err)
+		daemonCfg := &configStore{Config: *cfg, Runtimes: runtimes}
+		wrns, err := d.verifyContainerSettings(daemonCfg, hostConfig, &containertypes.Config{}, false)
 		assert.NilError(t, err)
 		assert.DeepEqual(t, tc.warnings, wrns)
 	}

+ 1 - 2
daemon/containerd/resolver.go

@@ -16,9 +16,8 @@ import (
 
 func (i *ImageService) newResolverFromAuthConfig(ctx context.Context, authConfig *registrytypes.AuthConfig) (remotes.Resolver, docker.StatusTracker) {
 	tracker := docker.NewInMemoryTracker()
-	hostsFn := i.registryHosts.RegistryHosts()
 
-	hosts := hostsWrapper(hostsFn, authConfig, i.registryService)
+	hosts := hostsWrapper(i.registryHosts, authConfig, i.registryService)
 	headers := http.Header{}
 	headers.Set("User-Agent", dockerversion.DockerUserAgent(ctx))
 

+ 3 - 7
daemon/containerd/service.go

@@ -31,16 +31,12 @@ type ImageService struct {
 	client          *containerd.Client
 	containers      container.Store
 	snapshotter     string
-	registryHosts   RegistryHostsProvider
+	registryHosts   docker.RegistryHosts
 	registryService RegistryConfigProvider
 	eventsService   *daemonevents.Events
 	pruneRunning    atomic.Bool
 }
 
-type RegistryHostsProvider interface {
-	RegistryHosts() docker.RegistryHosts
-}
-
 type RegistryConfigProvider interface {
 	IsInsecureRegistry(host string) bool
 	ResolveRepository(name reference.Named) (*registry.RepositoryInfo, error)
@@ -50,7 +46,7 @@ type ImageServiceConfig struct {
 	Client        *containerd.Client
 	Containers    container.Store
 	Snapshotter   string
-	HostsProvider RegistryHostsProvider
+	RegistryHosts docker.RegistryHosts
 	Registry      RegistryConfigProvider
 	EventsService *daemonevents.Events
 }
@@ -61,7 +57,7 @@ func NewService(config ImageServiceConfig) *ImageService {
 		client:          config.Client,
 		containers:      config.Containers,
 		snapshotter:     config.Snapshotter,
-		registryHosts:   config.HostsProvider,
+		registryHosts:   config.RegistryHosts,
 		registryService: config.Registry,
 		eventsService:   config.EventsService,
 	}
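
`ImageServiceConfig` above now takes a `docker.RegistryHosts` function directly instead of a provider interface, which is what lets the daemon pass the bound method value `d.RegistryHosts`. A small sketch of the idiom (the `RegistryHosts` type here is a stand-in, not containerd's real resolver type):

```go
package main

import "fmt"

// RegistryHosts is a stand-in for a hosts-lookup function type: given a
// registry host name it returns the endpoints to try.
type RegistryHosts func(host string) ([]string, error)

type daemon struct{ mirrors []string }

// RegistryHosts has the same signature as the function type, so the bound
// method value d.RegistryHosts can be stored anywhere a RegistryHosts is
// expected; no adapter interface is needed.
func (d *daemon) RegistryHosts(host string) ([]string, error) {
	if host == "docker.io" && len(d.mirrors) > 0 {
		return d.mirrors, nil
	}
	return []string{"https://" + host}, nil
}

type imageService struct{ registryHosts RegistryHosts }

func main() {
	d := &daemon{mirrors: []string{"https://mirror.example.com"}}
	svc := imageService{registryHosts: d.RegistryHosts} // method value, not interface

	hosts, err := svc.registryHosts("docker.io")
	fmt.Println(hosts, err)
}
```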

+ 10 - 9
daemon/create.go

@@ -14,6 +14,7 @@ import (
 	imagetypes "github.com/docker/docker/api/types/image"
 	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/images"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
@@ -34,7 +35,7 @@ type createOpts struct {
 
 // CreateManagedContainer creates a container that is managed by a Service
 func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) {
-	return daemon.containerCreate(ctx, createOpts{
+	return daemon.containerCreate(ctx, daemon.config(), createOpts{
 		params:  params,
 		managed: true,
 	})
@@ -42,7 +43,7 @@ func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.C
 
 // ContainerCreate creates a regular container
 func (daemon *Daemon) ContainerCreate(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) {
-	return daemon.containerCreate(ctx, createOpts{
+	return daemon.containerCreate(ctx, daemon.config(), createOpts{
 		params: params,
 	})
 }
@@ -50,19 +51,19 @@ func (daemon *Daemon) ContainerCreate(ctx context.Context, params types.Containe
 // ContainerCreateIgnoreImagesArgsEscaped creates a regular container. This is called from the builder RUN case
 // and ensures that we do not take the images ArgsEscaped
 func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) {
-	return daemon.containerCreate(ctx, createOpts{
+	return daemon.containerCreate(ctx, daemon.config(), createOpts{
 		params:                  params,
 		ignoreImagesArgsEscaped: true,
 	})
 }
 
-func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (containertypes.CreateResponse, error) {
+func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *configStore, opts createOpts) (containertypes.CreateResponse, error) {
 	start := time.Now()
 	if opts.params.Config == nil {
 		return containertypes.CreateResponse{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container"))
 	}
 
-	warnings, err := daemon.verifyContainerSettings(opts.params.HostConfig, opts.params.Config, false)
+	warnings, err := daemon.verifyContainerSettings(daemonCfg, opts.params.HostConfig, opts.params.Config, false)
 	if err != nil {
 		return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err)
 	}
@@ -94,12 +95,12 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con
 	if opts.params.HostConfig == nil {
 		opts.params.HostConfig = &containertypes.HostConfig{}
 	}
-	err = daemon.adaptContainerSettings(opts.params.HostConfig, opts.params.AdjustCPUShares)
+	err = daemon.adaptContainerSettings(&daemonCfg.Config, opts.params.HostConfig, opts.params.AdjustCPUShares)
 	if err != nil {
 		return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err)
 	}
 
-	ctr, err := daemon.create(ctx, opts)
+	ctr, err := daemon.create(ctx, &daemonCfg.Config, opts)
 	if err != nil {
 		return containertypes.CreateResponse{Warnings: warnings}, err
 	}
@@ -113,7 +114,7 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con
 }
 
 // Create creates a new container from the given configuration with a given name.
-func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *container.Container, retErr error) {
+func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts createOpts) (retC *container.Container, retErr error) {
 	var (
 		ctr         *container.Container
 		img         *image.Image
@@ -175,7 +176,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai
 		}
 	}()
 
-	if err := daemon.setSecurityOptions(ctr, opts.params.HostConfig); err != nil {
+	if err := daemon.setSecurityOptions(daemonCfg, ctr, opts.params.HostConfig); err != nil {
 		return nil, err
 	}
 

+ 115 - 84
daemon/daemon.go

@@ -16,6 +16,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd"
@@ -76,6 +77,12 @@ import (
 	"resenje.org/singleflight"
 )
 
+type configStore struct {
+	config.Config
+
+	Runtimes runtimes
+}
+
 // Daemon holds information about the Docker daemon.
 type Daemon struct {
 	id                    string
@@ -84,7 +91,8 @@ type Daemon struct {
 	containersReplica     *container.ViewDB
 	execCommands          *container.ExecStore
 	imageService          ImageService
-	configStore           *config.Config
+	configStore           atomic.Pointer[configStore]
+	configReload          sync.Mutex
 	statsCollector        *stats.Collector
 	defaultLogConfig      containertypes.LogConfig
 	registryService       *registry.Service
@@ -148,14 +156,31 @@ func (daemon *Daemon) StoreHosts(hosts []string) {
 	}
 }
 
+// config returns an immutable snapshot of the current daemon configuration.
+// Multiple calls to this function will return the same pointer until the
+// configuration is reloaded so callers must take care not to modify the
+// returned value.
+//
+// To ensure that the configuration used remains consistent throughout the
+// lifetime of an operation, the configuration pointer should be passed down the
+// call stack, like one would a [context.Context] value. Only the entrypoints
+// for operations, the outermost functions, should call this function.
+func (daemon *Daemon) config() *configStore {
+	cfg := daemon.configStore.Load()
+	if cfg == nil {
+		return &configStore{}
+	}
+	return cfg
+}
+
 // HasExperimental returns whether the experimental features of the daemon are enabled or not
 func (daemon *Daemon) HasExperimental() bool {
-	return daemon.configStore != nil && daemon.configStore.Experimental
+	return daemon.config().Experimental
 }
 
 // Features returns the features map from configStore
-func (daemon *Daemon) Features() *map[string]bool {
-	return &daemon.configStore.Features
+func (daemon *Daemon) Features() map[string]bool {
+	return daemon.config().Features
 }
 
 // UsesSnapshotter returns true if feature flag to use containerd snapshotter is enabled
@@ -163,15 +188,17 @@ func (daemon *Daemon) UsesSnapshotter() bool {
 	return daemon.usesSnapshotter
 }
 
-// RegistryHosts returns registry configuration in containerd resolvers format
-func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
+// RegistryHosts returns the registry hosts configuration for the host component
+// of a distribution image reference.
+func (daemon *Daemon) RegistryHosts(host string) ([]docker.RegistryHost, error) {
 	var (
+		conf        = daemon.config()
 		registryKey = "docker.io"
-		mirrors     = make([]string, len(daemon.configStore.Mirrors))
+		mirrors     = make([]string, len(conf.Mirrors))
 		m           = map[string]resolverconfig.RegistryConfig{}
 	)
 	// must trim "https://" or "http://" prefix
-	for i, v := range daemon.configStore.Mirrors {
+	for i, v := range conf.Mirrors {
 		if uri, err := url.Parse(v); err == nil {
 			v = uri.Host
 		}
@@ -180,7 +207,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 	// set mirrors for default registry
 	m[registryKey] = resolverconfig.RegistryConfig{Mirrors: mirrors}
 
-	for _, v := range daemon.configStore.InsecureRegistries {
+	for _, v := range conf.InsecureRegistries {
 		u, err := url.Parse(v)
 		if err != nil && !strings.HasPrefix(v, "http://") && !strings.HasPrefix(v, "https://") {
 			originalErr := err
@@ -218,7 +245,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 		}
 	}
 
-	return resolver.NewRegistryConfig(m)
+	return resolver.NewRegistryConfig(m)(host)
 }
 
 // layerAccessor may be implemented by ImageService
@@ -226,7 +253,7 @@ type layerAccessor interface {
 	GetLayerByID(cid string) (layer.RWLayer, error)
 }
 
-func (daemon *Daemon) restore() error {
+func (daemon *Daemon) restore(cfg *configStore) error {
 	var mapLock sync.Mutex
 	containers := make(map[string]*container.Container)
 
@@ -366,7 +393,7 @@ func (daemon *Daemon) restore() error {
 							logger(c).WithError(err).Error("failed to delete task from containerd")
 							return
 						}
-					} else if !daemon.configStore.LiveRestoreEnabled {
+					} else if !cfg.LiveRestoreEnabled {
 						logger(c).Debug("shutting down container considered alive by containerd")
 						if err := daemon.shutdownContainer(c); err != nil && !errdefs.IsNotFound(err) {
 							log.WithError(err).Error("error shutting down container")
@@ -446,7 +473,7 @@ func (daemon *Daemon) restore() error {
 
 				c.ResetRestartManager(false)
 				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
-					options, err := daemon.buildSandboxOptions(c)
+					options, err := daemon.buildSandboxOptions(&cfg.Config, c)
 					if err != nil {
 						logger(c).WithError(err).Warn("failed to build sandbox option to restore container")
 					}
@@ -464,7 +491,7 @@ func (daemon *Daemon) restore() error {
 			// not initialized yet. We will start
 			// it after the cluster is
 			// initialized.
-			if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
+			if cfg.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
 				mapLock.Lock()
 				restartContainers[c] = make(chan struct{})
 				mapLock.Unlock()
@@ -502,7 +529,7 @@ func (daemon *Daemon) restore() error {
 	//
 	// Note that we cannot initialize the network controller earlier, as it
 	// needs to know if there's active sandboxes (running containers).
-	if err = daemon.initNetworkController(activeSandboxes); err != nil {
+	if err = daemon.initNetworkController(&cfg.Config, activeSandboxes); err != nil {
 		return fmt.Errorf("Error initializing network controller: %v", err)
 	}
 
@@ -549,7 +576,7 @@ func (daemon *Daemon) restore() error {
 			if err := daemon.prepareMountPoints(c); err != nil {
 				log.WithError(err).Error("failed to prepare mount points for container")
 			}
-			if err := daemon.containerStart(context.Background(), c, "", "", true); err != nil {
+			if err := daemon.containerStart(context.Background(), cfg, c, "", "", true); err != nil {
 				log.WithError(err).Error("failed to start container")
 			}
 			close(chNotify)
@@ -565,7 +592,7 @@ func (daemon *Daemon) restore() error {
 		go func(cid string) {
 			_ = sem.Acquire(context.Background(), 1)
 
-			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+			if err := daemon.containerRm(&cfg.Config, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
 				logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
 			}
 
@@ -614,8 +641,10 @@ func (daemon *Daemon) restore() error {
 // RestartSwarmContainers restarts any autostart container which has a
 // swarm endpoint.
 func (daemon *Daemon) RestartSwarmContainers() {
-	ctx := context.Background()
+	daemon.restartSwarmContainers(context.Background(), daemon.config())
+}
 
+func (daemon *Daemon) restartSwarmContainers(ctx context.Context, cfg *configStore) {
 	// parallelLimit is the maximum number of parallel startup jobs that we
 	// allow (this is the limit used for all startup semaphores). The multiplier
 	// (128) was chosen after some fairly significant benchmarking -- don't change
@@ -631,7 +660,7 @@ func (daemon *Daemon) RestartSwarmContainers() {
 			// Autostart all the containers which has a
 			// swarm endpoint now that the cluster is
 			// initialized.
-			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
+			if cfg.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
 				group.Add(1)
 				go func(c *container.Container) {
 					if err := sem.Acquire(ctx, 1); err != nil {
@@ -640,7 +669,7 @@ func (daemon *Daemon) RestartSwarmContainers() {
 						return
 					}
 
-					if err := daemon.containerStart(ctx, c, "", "", true); err != nil {
+					if err := daemon.containerStart(ctx, cfg, c, "", "", true); err != nil {
 						logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
 					}
 
@@ -724,10 +753,7 @@ func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
 // IsSwarmCompatible verifies if the current daemon
 // configuration is compatible with the swarm mode
 func (daemon *Daemon) IsSwarmCompatible() error {
-	if daemon.configStore == nil {
-		return nil
-	}
-	return daemon.configStore.IsSwarmCompatible()
+	return daemon.config().IsSwarmCompatible()
 }
 
 // NewDaemon sets up everything for the daemon to be able to service
@@ -788,11 +814,23 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		os.Setenv("TMPDIR", realTmp)
 	}
 
+	if err := initRuntimesDir(config); err != nil {
+		return nil, err
+	}
+	runtimes, err := setupRuntimes(config)
+	if err != nil {
+		return nil, err
+	}
+
 	d := &Daemon{
-		configStore: config,
 		PluginStore: pluginStore,
 		startupDone: make(chan struct{}),
 	}
+	configStore := &configStore{
+		Config:   *config,
+		Runtimes: runtimes,
+	}
+	d.configStore.Store(configStore)
 
 	// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
 	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
@@ -812,27 +850,27 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		}
 	}()
 
-	if err := d.setGenericResources(config); err != nil {
+	if err := d.setGenericResources(&configStore.Config); err != nil {
 		return nil, err
 	}
 	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
 	// on Windows to dump Go routine stacks
-	stackDumpDir := config.Root
-	if execRoot := config.GetExecRoot(); execRoot != "" {
+	stackDumpDir := configStore.Root
+	if execRoot := configStore.GetExecRoot(); execRoot != "" {
 		stackDumpDir = execRoot
 	}
 	d.setupDumpStackTrap(stackDumpDir)
 
-	if err := d.setupSeccompProfile(); err != nil {
+	if err := d.setupSeccompProfile(&configStore.Config); err != nil {
 		return nil, err
 	}
 
 	// Set the default isolation mode (only applicable on Windows)
-	if err := d.setDefaultIsolation(); err != nil {
+	if err := d.setDefaultIsolation(&configStore.Config); err != nil {
 		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
 	}
 
-	if err := configureMaxThreads(config); err != nil {
+	if err := configureMaxThreads(&configStore.Config); err != nil {
 		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
 	}
 
@@ -841,7 +879,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		logrus.Errorf(err.Error())
 	}
 
-	daemonRepo := filepath.Join(config.Root, "containers")
+	daemonRepo := filepath.Join(configStore.Root, "containers")
 	if err := idtools.MkdirAllAndChown(daemonRepo, 0o710, idtools.Identity{
 		UID: idtools.CurrentIdentity().UID,
 		GID: rootIDs.GID,
@@ -849,20 +887,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		return nil, err
 	}
 
-	// Create the directory where we'll store the runtime scripts (i.e. in
-	// order to support runtimeArgs)
-	if err = os.Mkdir(filepath.Join(config.Root, "runtimes"), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
-		return nil, err
-	}
-	if err := d.loadRuntimes(); err != nil {
-		return nil, err
-	}
-
 	if isWindows {
 		// Note that permissions (0o700) are ignored on Windows; passing them to
 		// show intent only. We could consider using idtools.MkdirAndChown here
 		// to apply an ACL.
-		if err = os.Mkdir(filepath.Join(config.Root, "credentialspecs"), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
+		if err = os.Mkdir(filepath.Join(configStore.Root, "credentialspecs"), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
 			return nil, err
 		}
 	}
@@ -870,7 +899,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	d.registryService = registryService
 	dlogger.RegisterPluginGetter(d.PluginStore)
 
-	metricsSockPath, err := d.listenMetricsSock()
+	metricsSockPath, err := d.listenMetricsSock(&configStore.Config)
 	if err != nil {
 		return nil, err
 	}
@@ -909,20 +938,20 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
 	}
 
-	if config.ContainerdAddr != "" {
-		d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
+	if configStore.ContainerdAddr != "" {
+		d.containerdCli, err = containerd.New(configStore.ContainerdAddr, containerd.WithDefaultNamespace(configStore.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
+			return nil, errors.Wrapf(err, "failed to dial %q", configStore.ContainerdAddr)
 		}
 	}
 
 	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
 		var pluginCli *containerd.Client
 
-		if config.ContainerdAddr != "" {
-			pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
+		if configStore.ContainerdAddr != "" {
+			pluginCli, err = containerd.New(configStore.ContainerdAddr, containerd.WithDefaultNamespace(configStore.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
+				return nil, errors.Wrapf(err, "failed to dial %q", configStore.ContainerdAddr)
 			}
 		}
 
@@ -931,22 +960,22 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 			shimOpts interface{}
 		)
 		if runtime.GOOS != "windows" {
-			shim, shimOpts, err = d.getRuntime(config.GetDefaultRuntimeName())
+			shim, shimOpts, err = runtimes.Get("")
 			if err != nil {
 				return nil, err
 			}
 		}
-		return pluginexec.New(ctx, getPluginExecRoot(config), pluginCli, config.ContainerdPluginNamespace, m, shim, shimOpts)
+		return pluginexec.New(ctx, getPluginExecRoot(&configStore.Config), pluginCli, configStore.ContainerdPluginNamespace, m, shim, shimOpts)
 	}
 
 	// Plugin system initialization should happen before restore. Do not change order.
 	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
-		Root:               filepath.Join(config.Root, "plugins"),
-		ExecRoot:           getPluginExecRoot(config),
+		Root:               filepath.Join(configStore.Root, "plugins"),
+		ExecRoot:           getPluginExecRoot(&configStore.Config),
 		Store:              d.PluginStore,
 		CreateExecutor:     createPluginExec,
 		RegistryService:    registryService,
-		LiveRestoreEnabled: config.LiveRestoreEnabled,
+		LiveRestoreEnabled: configStore.LiveRestoreEnabled,
 		LogPluginEvent:     d.LogPluginEvent, // todo: make private
 		AuthzMiddleware:    authzMiddleware,
 	})
@@ -954,11 +983,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		return nil, errors.Wrap(err, "couldn't create plugin manager")
 	}
 
-	if err := d.setupDefaultLogConfig(); err != nil {
-		return nil, err
+	d.defaultLogConfig, err = defaultLogConfig(&configStore.Config)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to set log opts")
 	}
+	logrus.Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
 
-	d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
+	d.volumes, err = volumesservice.NewVolumeService(configStore.Root, d.PluginStore, rootIDs, d)
 	if err != nil {
 		return nil, err
 	}
@@ -971,11 +1002,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	// at this point.
 	//
 	// TODO(thaJeztah) add a utility to only collect the CgroupDevicesEnabled information
-	if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(d).CgroupDevicesEnabled {
+	if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(&configStore.Config).CgroupDevicesEnabled {
 		return nil, errors.New("Devices cgroup isn't mounted")
 	}
 
-	d.id, err = loadOrCreateID(filepath.Join(config.Root, "engine-id"))
+	d.id, err = loadOrCreateID(filepath.Join(configStore.Root, "engine-id"))
 	if err != nil {
 		return nil, err
 	}
@@ -988,7 +1019,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	d.statsCollector = d.newStatsCollector(1 * time.Second)
 
 	d.EventsService = events.New()
-	d.root = config.Root
+	d.root = configStore.Root
 	d.idMapping = idMapping
 
 	d.linkIndex = newLinkIndex()
@@ -1003,7 +1034,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	} else if driverName != "" {
 		logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
 	} else {
-		driverName = config.GraphDriver
+		driverName = configStore.GraphDriver
 	}
 
 	if d.UsesSnapshotter() {
@@ -1019,26 +1050,26 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 		// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
 		// operation only, so it is safe to pass *just* the runtime OS graphdriver.
-		if err := configureKernelSecuritySupport(config, driverName); err != nil {
+		if err := configureKernelSecuritySupport(&configStore.Config, driverName); err != nil {
 			return nil, err
 		}
 		d.imageService = ctrd.NewService(ctrd.ImageServiceConfig{
 			Client:        d.containerdCli,
 			Containers:    d.containers,
 			Snapshotter:   driverName,
-			HostsProvider: d,
+			RegistryHosts: d.RegistryHosts,
 			Registry:      d.registryService,
 			EventsService: d.EventsService,
 		})
 	} else {
 		layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{
-			Root:                      config.Root,
-			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
+			Root:                      configStore.Root,
+			MetadataStorePathTemplate: filepath.Join(configStore.Root, "image", "%s", "layerdb"),
 			GraphDriver:               driverName,
-			GraphDriverOptions:        config.GraphOptions,
+			GraphDriverOptions:        configStore.GraphOptions,
 			IDMapping:                 idMapping,
 			PluginGetter:              d.PluginStore,
-			ExperimentalEnabled:       config.Experimental,
+			ExperimentalEnabled:       configStore.Experimental,
 		})
 		if err != nil {
 			return nil, err
@@ -1046,11 +1077,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 		// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
 		// operation only, so it is safe to pass *just* the runtime OS graphdriver.
-		if err := configureKernelSecuritySupport(config, layerStore.DriverName()); err != nil {
+		if err := configureKernelSecuritySupport(&configStore.Config, layerStore.DriverName()); err != nil {
 			return nil, err
 		}
 
-		imageRoot := filepath.Join(config.Root, "image", layerStore.DriverName())
+		imageRoot := filepath.Join(configStore.Root, "image", layerStore.DriverName())
 		ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
 		if err != nil {
 			return nil, err
@@ -1124,11 +1155,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	go d.execCommandGC()
 
-	if err := d.initLibcontainerd(ctx); err != nil {
+	if err := d.initLibcontainerd(ctx, &configStore.Config); err != nil {
 		return nil, err
 	}
 
-	if err := d.restore(); err != nil {
+	if err := d.restore(configStore); err != nil {
 		return nil, err
 	}
 	close(d.startupDone)
@@ -1190,7 +1221,11 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
 // A negative (-1) timeout means "indefinitely", which means that containers
 // are not forcibly killed, and the daemon shuts down after all containers exit.
 func (daemon *Daemon) ShutdownTimeout() int {
-	shutdownTimeout := daemon.configStore.ShutdownTimeout
+	return daemon.shutdownTimeout(&daemon.config().Config)
+}
+
+func (daemon *Daemon) shutdownTimeout(cfg *config.Config) int {
+	shutdownTimeout := cfg.ShutdownTimeout
 	if shutdownTimeout < 0 {
 		return -1
 	}
@@ -1217,7 +1252,8 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 	// Keep mounts and networking running on daemon shutdown if
 	// we are to keep containers running and restore them.
 
-	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
+	cfg := &daemon.config().Config
+	if cfg.LiveRestoreEnabled && daemon.containers != nil {
 		// check if there are any running containers; if none, we should do some cleanup
 		if ls, err := daemon.Containers(ctx, &types.ContainerListOptions{}); len(ls) != 0 || err != nil {
 			// metrics plugins still need some cleanup
@@ -1227,8 +1263,8 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 	}
 
 	if daemon.containers != nil {
-		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
-		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
+		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout)
+		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg))
 		daemon.containers.ApplyAll(func(c *container.Container) {
 			if !c.IsRunning() {
 				return
@@ -1282,7 +1318,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 		daemon.mdDB.Close()
 	}
 
-	return daemon.cleanupMounts()
+	return daemon.cleanupMounts(cfg)
 }
 
 // Mount sets container.BaseFS
@@ -1363,15 +1399,10 @@ func isBridgeNetworkDisabled(conf *config.Config) bool {
 	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
 }
 
-func (daemon *Daemon) networkOptions(pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
-	options := []nwconfig.Option{}
-	if daemon.configStore == nil {
-		return options, nil
-	}
-	conf := daemon.configStore
+func (daemon *Daemon) networkOptions(conf *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
 	dd := runconfig.DefaultDaemonNetworkMode()
 
-	options = []nwconfig.Option{
+	options := []nwconfig.Option{
 		nwconfig.OptionDataDir(conf.Root),
 		nwconfig.OptionExecRoot(conf.GetExecRoot()),
 		nwconfig.OptionDefaultDriver(string(dd)),
@@ -1503,7 +1534,7 @@ func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo {
 		// We check if sysInfo is not set here, to allow some test to
 		// override the actual sysInfo.
 		if daemon.sysInfo == nil {
-			daemon.sysInfo = getSysInfo(daemon)
+			daemon.sysInfo = getSysInfo(&daemon.config().Config)
 		}
 	})
 

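Throughout this diff, helpers stop reading daemon.configStore fields directly: an operation calls daemon.config() once, keeps the returned snapshot, and threads &cfg.Config into the functions it calls (the tests publish snapshots with d.configStore.Store). The following is a minimal, self-contained sketch of that read-copy-update idea in Go, assuming an atomic.Pointer-style holder and simplified types; it is illustrative only, not the moby implementation.

package main

import (
	"fmt"
	"sync/atomic"
)

// Config stands in for a couple of daemon settings.
type Config struct {
	Root            string
	ShutdownTimeout int
}

// configStore is the immutable snapshot type; readers never mutate it.
type configStore struct {
	Config
}

type Daemon struct {
	configStore atomic.Pointer[configStore]
}

// config returns the currently published snapshot.
func (d *Daemon) config() *configStore {
	if cfg := d.configStore.Load(); cfg != nil {
		return cfg
	}
	return &configStore{}
}

// reload copies the current snapshot, mutates the copy, and publishes it;
// readers still holding the old pointer are unaffected.
func (d *Daemon) reload(mutate func(*Config)) {
	next := *d.config()
	mutate(&next.Config)
	d.configStore.Store(&next)
}

func main() {
	d := &Daemon{}
	d.configStore.Store(&configStore{Config: Config{Root: "/var/lib/docker", ShutdownTimeout: 15}})

	snap := d.config() // one consistent view for the whole operation
	d.reload(func(c *Config) { c.ShutdownTimeout = 30 })

	fmt.Println(snap.ShutdownTimeout)       // 15: the captured snapshot is stable
	fmt.Println(d.config().ShutdownTimeout) // 30: new operations see the reload
}

Because each reload publishes a fresh copy, readers need no lock and a single API call always works against one coherent configuration.
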
+ 7 - 7
daemon/daemon_linux.go

@@ -76,7 +76,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 }
 
 // cleanupMounts unmounts the mounts used by container resources and the daemon root mount
-func (daemon *Daemon) cleanupMounts() error {
+func (daemon *Daemon) cleanupMounts(cfg *config.Config) error {
 	if err := daemon.cleanupMountsByID(""); err != nil {
 		return err
 	}
@@ -100,7 +100,7 @@ func (daemon *Daemon) cleanupMounts() error {
 		return nil
 	}
 
-	unmountFile := getUnmountOnShutdownPath(daemon.configStore)
+	unmountFile := getUnmountOnShutdownPath(cfg)
 	if _, err := os.Stat(unmountFile); err != nil {
 		return nil
 	}
@@ -239,18 +239,18 @@ func kernelSupportsRecursivelyReadOnly() error {
 	return kernelSupportsRROErr
 }
 
-func (daemon *Daemon) supportsRecursivelyReadOnly(runtime string) error {
+func supportsRecursivelyReadOnly(cfg *configStore, runtime string) error {
 	if err := kernelSupportsRecursivelyReadOnly(); err != nil {
 		return fmt.Errorf("rro is not supported: %w (kernel is older than 5.12?)", err)
 	}
 	if runtime == "" {
-		runtime = daemon.configStore.GetDefaultRuntimeName()
+		runtime = cfg.Runtimes.Default
 	}
-	rt := daemon.configStore.GetRuntime(runtime)
-	if rt.Features == nil {
+	features := cfg.Runtimes.Features(runtime)
+	if features == nil {
 		return fmt.Errorf("rro is not supported by runtime %q: OCI features struct is not available", runtime)
 	}
-	for _, s := range rt.Features.MountOptions {
+	for _, s := range features.MountOptions {
 		if s == "rro" {
 			return nil
 		}

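The supportsRecursivelyReadOnly change above keys off the runtime's advertised OCI features (cfg.Runtimes.Features(runtime)) and looks for "rro" among its mount options. A stripped-down sketch of that kind of check, using hypothetical types rather than the daemon's own, might look like this:

package main

import (
	"errors"
	"fmt"
)

// Features mirrors just the slice of an OCI features document that matters
// here: the mount options a runtime says it supports.
type Features struct {
	MountOptions []string
}

// supportsRRO reports whether the runtime advertises recursively read-only
// ("rro") mounts.
func supportsRRO(features *Features) error {
	if features == nil {
		return errors.New("OCI features struct is not available")
	}
	for _, opt := range features.MountOptions {
		if opt == "rro" {
			return nil
		}
	}
	return errors.New(`"rro" is not among the runtime's mount options`)
}

func main() {
	fmt.Println(supportsRRO(&Features{MountOptions: []string{"rbind", "rro"}})) // <nil>
	fmt.Println(supportsRRO(&Features{MountOptions: []string{"rbind"}}))        // error
}
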
+ 13 - 13
daemon/daemon_linux_test.go

@@ -10,7 +10,6 @@ import (
 	"testing"
 
 	containertypes "github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libnetwork/testutils"
 	"github.com/docker/docker/libnetwork/types"
 	"github.com/google/go-cmp/cmp/cmpopts"
@@ -178,7 +177,7 @@ func TestNotCleanupMounts(t *testing.T) {
 func TestValidateContainerIsolationLinux(t *testing.T) {
 	d := Daemon{}
 
-	_, err := d.verifyContainerSettings(&containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
+	_, err := d.verifyContainerSettings(&configStore{}, &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
 	assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux"))
 }
 
@@ -250,7 +249,7 @@ func TestRootMountCleanup(t *testing.T) {
 	testRoot, err := os.MkdirTemp("", t.Name())
 	assert.NilError(t, err)
 	defer os.RemoveAll(testRoot)
-	cfg := &config.Config{}
+	cfg := &configStore{}
 
 	err = mount.MakePrivate(testRoot)
 	assert.NilError(t, err)
@@ -264,17 +263,18 @@ func TestRootMountCleanup(t *testing.T) {
 	err = os.Mkdir(cfg.Root, 0755)
 	assert.NilError(t, err)
 
-	d := &Daemon{configStore: cfg, root: cfg.Root}
-	unmountFile := getUnmountOnShutdownPath(cfg)
+	d := &Daemon{root: cfg.Root}
+	d.configStore.Store(cfg)
+	unmountFile := getUnmountOnShutdownPath(&cfg.Config)
 
 	t.Run("regular dir no mountpoint", func(t *testing.T) {
-		err = setupDaemonRootPropagation(cfg)
+		err = setupDaemonRootPropagation(&cfg.Config)
 		assert.NilError(t, err)
 		_, err = os.Stat(unmountFile)
 		assert.NilError(t, err)
 		checkMounted(t, cfg.Root, true)
 
-		assert.Assert(t, d.cleanupMounts())
+		assert.Assert(t, d.cleanupMounts(&cfg.Config))
 		checkMounted(t, cfg.Root, false)
 
 		_, err = os.Stat(unmountFile)
@@ -286,13 +286,13 @@ func TestRootMountCleanup(t *testing.T) {
 		assert.NilError(t, err)
 		defer mount.Unmount(cfg.Root)
 
-		err = setupDaemonRootPropagation(cfg)
+		err = setupDaemonRootPropagation(&cfg.Config)
 		assert.NilError(t, err)
 		assert.Check(t, ensureShared(cfg.Root))
 
 		_, err = os.Stat(unmountFile)
 		assert.Assert(t, os.IsNotExist(err))
-		assert.Assert(t, d.cleanupMounts())
+		assert.Assert(t, d.cleanupMounts(&cfg.Config))
 		checkMounted(t, cfg.Root, true)
 	})
 
@@ -302,14 +302,14 @@ func TestRootMountCleanup(t *testing.T) {
 		assert.NilError(t, err)
 		defer mount.Unmount(cfg.Root)
 
-		err = setupDaemonRootPropagation(cfg)
+		err = setupDaemonRootPropagation(&cfg.Config)
 		assert.NilError(t, err)
 
 		if _, err := os.Stat(unmountFile); err == nil {
 			t.Fatal("unmount file should not exist")
 		}
 
-		assert.Assert(t, d.cleanupMounts())
+		assert.Assert(t, d.cleanupMounts(&cfg.Config))
 		checkMounted(t, cfg.Root, true)
 		assert.Assert(t, mount.Unmount(cfg.Root))
 	})
@@ -322,13 +322,13 @@ func TestRootMountCleanup(t *testing.T) {
 		err = os.WriteFile(unmountFile, nil, 0644)
 		assert.NilError(t, err)
 
-		err = setupDaemonRootPropagation(cfg)
+		err = setupDaemonRootPropagation(&cfg.Config)
 		assert.NilError(t, err)
 
 		_, err = os.Stat(unmountFile)
 		assert.Check(t, os.IsNotExist(err), err)
 		checkMounted(t, cfg.Root, false)
-		assert.Assert(t, d.cleanupMounts())
+		assert.Assert(t, d.cleanupMounts(&cfg.Config))
 	})
 }
 

+ 1 - 1
daemon/daemon_test.go

@@ -300,7 +300,7 @@ func TestMerge(t *testing.T) {
 func TestValidateContainerIsolation(t *testing.T) {
 	d := Daemon{}
 
-	_, err := d.verifyContainerSettings(&containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
+	_, err := d.verifyContainerSettings(&configStore{}, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
 	assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS))
 }
 

+ 33 - 42
daemon/daemon_unix.go

@@ -189,8 +189,8 @@ func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeight
 	return blkioWeightDevices, nil
 }
 
-func (daemon *Daemon) parseSecurityOpt(securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
-	securityOptions.NoNewPrivileges = daemon.configStore.NoNewPrivileges
+func (daemon *Daemon) parseSecurityOpt(cfg *config.Config, securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
+	securityOptions.NoNewPrivileges = cfg.NoNewPrivileges
 	return parseSecurityOpt(securityOptions, hostConfig)
 }
 
@@ -299,7 +299,7 @@ func adjustParallelLimit(n int, limit int) int {
 
 // adaptContainerSettings is called during container creation to modify any
 // settings necessary in the HostConfig structure.
-func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
+func (daemon *Daemon) adaptContainerSettings(daemonCfg *config.Config, hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
 	if adjustCPUShares && hostConfig.CPUShares > 0 {
 		// Handle unsupported CPUShares
 		if hostConfig.CPUShares < linuxMinCPUShares {
@@ -316,15 +316,15 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
 	}
 	if hostConfig.ShmSize == 0 {
 		hostConfig.ShmSize = config.DefaultShmSize
-		if daemon.configStore != nil {
-			hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
+		if daemonCfg != nil {
+			hostConfig.ShmSize = int64(daemonCfg.ShmSize)
 		}
 	}
 	// Set default IPC mode, if unset for container
 	if hostConfig.IpcMode.IsEmpty() {
 		m := config.DefaultIpcMode
-		if daemon.configStore != nil {
-			m = containertypes.IpcMode(daemon.configStore.IpcMode)
+		if daemonCfg != nil {
+			m = containertypes.IpcMode(daemonCfg.IpcMode)
 		}
 		hostConfig.IpcMode = m
 	}
@@ -340,8 +340,8 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
 			if cgroups.Mode() == cgroups.Unified {
 				m = containertypes.CgroupnsModePrivate
 			}
-			if daemon.configStore != nil {
-				m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode)
+			if daemonCfg != nil {
+				m = containertypes.CgroupnsMode(daemonCfg.CgroupNamespaceMode)
 			}
 			hostConfig.CgroupnsMode = m
 		}
@@ -566,11 +566,11 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, sysIn
 	return warnings, nil
 }
 
-func (daemon *Daemon) getCgroupDriver() string {
-	if UsingSystemd(daemon.configStore) {
+func cgroupDriver(cfg *config.Config) string {
+	if UsingSystemd(cfg) {
 		return cgroupSystemdDriver
 	}
-	if daemon.Rootless() {
+	if cfg.Rootless {
 		return cgroupNoneDriver
 	}
 	return cgroupFsDriver
@@ -639,7 +639,7 @@ func isRunningSystemd() bool {
 
 // verifyPlatformContainerSettings performs platform-specific validation of the
 // hostconfig and config structures.
-func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
+func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *configStore, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
 	if hostConfig == nil {
 		return nil, nil
 	}
@@ -680,7 +680,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
 	}
 
 	// check for various conflicting options with user namespaces
-	if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
+	if daemonCfg.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
 		if hostConfig.Privileged {
 			return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces.  You must run the container in the host namespace when running privileged mode")
 		}
@@ -691,17 +691,17 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
 			return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
 		}
 	}
-	if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
+	if hostConfig.CgroupParent != "" && UsingSystemd(&daemonCfg.Config) {
 		// CgroupParent for systemd cgroup should be named as "xxx.slice"
 		if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
 			return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
 		}
 	}
 	if hostConfig.Runtime == "" {
-		hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
+		hostConfig.Runtime = daemonCfg.Runtimes.Default
 	}
 
-	if _, _, err := daemon.getRuntime(hostConfig.Runtime); err != nil {
+	if _, _, err := daemonCfg.Runtimes.Get(hostConfig.Runtime); err != nil {
 		return warnings, err
 	}
 
@@ -754,15 +754,6 @@ func verifyDaemonSettings(conf *config.Config) error {
 	if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified {
 		return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode")
 	}
-
-	configureRuntimes(conf)
-	if rtName := conf.GetDefaultRuntimeName(); rtName != "" {
-		if conf.GetRuntime(rtName) == nil {
-			if !config.IsPermissibleC8dRuntimeName(rtName) {
-				return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
-			}
-		}
-	}
 	return nil
 }
 
@@ -837,8 +828,8 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er
 // initNetworkController initializes the libnetwork controller and configures
 // network settings. If there are active sandboxes, configuration changes will not
 // take effect.
-func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface{}) error {
-	netOptions, err := daemon.networkOptions(daemon.PluginStore, activeSandboxes)
+func (daemon *Daemon) initNetworkController(cfg *config.Config, activeSandboxes map[string]interface{}) error {
+	netOptions, err := daemon.networkOptions(cfg, daemon.PluginStore, activeSandboxes)
 	if err != nil {
 		return err
 	}
@@ -850,12 +841,12 @@ func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface
 
 	if len(activeSandboxes) > 0 {
 		logrus.Info("there are running containers, the updated network configuration will not take effect")
-	} else if err := configureNetworking(daemon.netController, daemon.configStore); err != nil {
+	} else if err := configureNetworking(daemon.netController, cfg); err != nil {
 		return err
 	}
 
 	// Set HostGatewayIP to the default bridge's IP if it is empty
-	setHostGatewayIP(daemon.netController, daemon.configStore)
+	setHostGatewayIP(daemon.netController, cfg)
 	return nil
 }
 
@@ -1410,7 +1401,7 @@ func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container
 
 // setDefaultIsolation determines the default isolation mode for the
 // daemon to run in. This is only applicable on Windows
-func (daemon *Daemon) setDefaultIsolation() error {
+func (daemon *Daemon) setDefaultIsolation(*config.Config) error {
 	return nil
 }
 
@@ -1443,14 +1434,14 @@ func setMayDetachMounts() error {
 	return err
 }
 
-func (daemon *Daemon) initCPURtController(mnt, path string) error {
+func (daemon *Daemon) initCPURtController(cfg *config.Config, mnt, path string) error {
 	if path == "/" || path == "." {
 		return nil
 	}
 
 	// Recursively create cgroup to ensure that the system and all parent cgroups have values set
 	// for the period and runtime as this limits what the children can be set to.
-	if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil {
+	if err := daemon.initCPURtController(cfg, mnt, filepath.Dir(path)); err != nil {
 		return err
 	}
 
@@ -1458,10 +1449,10 @@ func (daemon *Daemon) initCPURtController(mnt, path string) error {
 	if err := os.MkdirAll(path, 0755); err != nil {
 		return err
 	}
-	if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
+	if err := maybeCreateCPURealTimeFile(cfg.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
 		return err
 	}
-	return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
+	return maybeCreateCPURealTimeFile(cfg.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
 }
 
 func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error {
@@ -1471,8 +1462,8 @@ func maybeCreateCPURealTimeFile(configValue int64, file string, path string) err
 	return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700)
 }
 
-func (daemon *Daemon) setupSeccompProfile() error {
-	switch profile := daemon.configStore.SeccompProfile; profile {
+func (daemon *Daemon) setupSeccompProfile(cfg *config.Config) error {
+	switch profile := cfg.SeccompProfile; profile {
 	case "", config.SeccompProfileDefault:
 		daemon.seccompProfilePath = config.SeccompProfileDefault
 	case config.SeccompProfileUnconfined:
@@ -1488,9 +1479,9 @@ func (daemon *Daemon) setupSeccompProfile() error {
 	return nil
 }
 
-func getSysInfo(daemon *Daemon) *sysinfo.SysInfo {
+func getSysInfo(cfg *config.Config) *sysinfo.SysInfo {
 	var siOpts []sysinfo.Opt
-	if daemon.getCgroupDriver() == cgroupSystemdDriver {
+	if cgroupDriver(cfg) == cgroupSystemdDriver {
 		if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" {
 			siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice"))
 		}
@@ -1498,13 +1489,13 @@ func getSysInfo(daemon *Daemon) *sysinfo.SysInfo {
 	return sysinfo.New(siOpts...)
 }
 
-func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
+func (daemon *Daemon) initLibcontainerd(ctx context.Context, cfg *config.Config) error {
 	var err error
 	daemon.containerd, err = remote.NewClient(
 		ctx,
 		daemon.containerdCli,
-		filepath.Join(daemon.configStore.ExecRoot, "containerd"),
-		daemon.configStore.ContainerdNamespace,
+		filepath.Join(cfg.ExecRoot, "containerd"),
+		cfg.ContainerdNamespace,
 		daemon,
 	)
 	return err

+ 16 - 14
daemon/daemon_unix_test.go

@@ -68,30 +68,31 @@ func TestAdjustCPUShares(t *testing.T) {
 		repository: tmp,
 		root:       tmp,
 	}
+	cfg := &config.Config{}
 	muteLogs()
 
 	hostConfig := &containertypes.HostConfig{
 		Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
 	}
-	daemon.adaptContainerSettings(hostConfig, true)
+	daemon.adaptContainerSettings(cfg, hostConfig, true)
 	if hostConfig.CPUShares != linuxMinCPUShares {
 		t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares)
 	}
 
 	hostConfig.CPUShares = linuxMaxCPUShares + 1
-	daemon.adaptContainerSettings(hostConfig, true)
+	daemon.adaptContainerSettings(cfg, hostConfig, true)
 	if hostConfig.CPUShares != linuxMaxCPUShares {
 		t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares)
 	}
 
 	hostConfig.CPUShares = 0
-	daemon.adaptContainerSettings(hostConfig, true)
+	daemon.adaptContainerSettings(cfg, hostConfig, true)
 	if hostConfig.CPUShares != 0 {
 		t.Error("Expected CPUShares to be unchanged")
 	}
 
 	hostConfig.CPUShares = 1024
-	daemon.adaptContainerSettings(hostConfig, true)
+	daemon.adaptContainerSettings(cfg, hostConfig, true)
 	if hostConfig.CPUShares != 1024 {
 		t.Error("Expected CPUShares to be unchanged")
 	}
@@ -108,29 +109,30 @@ func TestAdjustCPUSharesNoAdjustment(t *testing.T) {
 		repository: tmp,
 		root:       tmp,
 	}
+	cfg := &config.Config{}
 
 	hostConfig := &containertypes.HostConfig{
 		Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
 	}
-	daemon.adaptContainerSettings(hostConfig, false)
+	daemon.adaptContainerSettings(cfg, hostConfig, false)
 	if hostConfig.CPUShares != linuxMinCPUShares-1 {
 		t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1)
 	}
 
 	hostConfig.CPUShares = linuxMaxCPUShares + 1
-	daemon.adaptContainerSettings(hostConfig, false)
+	daemon.adaptContainerSettings(cfg, hostConfig, false)
 	if hostConfig.CPUShares != linuxMaxCPUShares+1 {
 		t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1)
 	}
 
 	hostConfig.CPUShares = 0
-	daemon.adaptContainerSettings(hostConfig, false)
+	daemon.adaptContainerSettings(cfg, hostConfig, false)
 	if hostConfig.CPUShares != 0 {
 		t.Error("Expected CPUShares to be unchanged")
 	}
 
 	hostConfig.CPUShares = 1024
-	daemon.adaptContainerSettings(hostConfig, false)
+	daemon.adaptContainerSettings(cfg, hostConfig, false)
 	if hostConfig.CPUShares != 1024 {
 		t.Error("Expected CPUShares to be unchanged")
 	}
@@ -243,16 +245,16 @@ func TestParseSecurityOpt(t *testing.T) {
 }
 
 func TestParseNNPSecurityOptions(t *testing.T) {
-	daemon := &Daemon{
-		configStore: &config.Config{NoNewPrivileges: true},
-	}
+	daemonCfg := &configStore{Config: config.Config{NoNewPrivileges: true}}
+	daemon := &Daemon{}
+	daemon.configStore.Store(daemonCfg)
 	opts := &container.SecurityOptions{}
 	cfg := &containertypes.HostConfig{}
 
 	// test NNP when "daemon:true" and "no-new-privileges=false""
 	cfg.SecurityOpt = []string{"no-new-privileges=false"}
 
-	if err := daemon.parseSecurityOpt(opts, cfg); err != nil {
+	if err := daemon.parseSecurityOpt(&daemonCfg.Config, opts, cfg); err != nil {
 		t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
 	}
 	if opts.NoNewPrivileges {
@@ -260,10 +262,10 @@ func TestParseNNPSecurityOptions(t *testing.T) {
 	}
 
 	// test NNP when "daemon:false" and "no-new-privileges=true""
-	daemon.configStore.NoNewPrivileges = false
+	daemonCfg.NoNewPrivileges = false
 	cfg.SecurityOpt = []string{"no-new-privileges=true"}
 
-	if err := daemon.parseSecurityOpt(opts, cfg); err != nil {
+	if err := daemon.parseSecurityOpt(&daemonCfg.Config, opts, cfg); err != nil {
 		t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
 	}
 	if !opts.NoNewPrivileges {

+ 0 - 4
daemon/daemon_unsupported.go

@@ -17,7 +17,3 @@ func setupResolvConf(_ *interface{}) {}
 func getSysInfo(_ *Daemon) *sysinfo.SysInfo {
 	return sysinfo.New()
 }
-
-func (daemon *Daemon) supportsRecursivelyReadOnly(_ string) error {
-	return nil
-}

+ 20 - 28
daemon/daemon_windows.go

@@ -55,7 +55,7 @@ func getPluginExecRoot(cfg *config.Config) string {
 	return filepath.Join(cfg.Root, "plugins")
 }
 
-func (daemon *Daemon) parseSecurityOpt(securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
+func (daemon *Daemon) parseSecurityOpt(daemonCfg *config.Config, securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
 	return nil
 }
 
@@ -65,7 +65,7 @@ func setupInitLayer(idMapping idtools.IdentityMapping) func(string) error {
 
 // adaptContainerSettings is called during container creation to modify any
 // settings necessary in the HostConfig structure.
-func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
+func (daemon *Daemon) adaptContainerSettings(daemonCfg *config.Config, hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
 	return nil
 }
 
@@ -171,7 +171,7 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, isHyp
 
 // verifyPlatformContainerSettings performs platform-specific validation of the
 // hostconfig and config structures.
-func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
+func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *configStore, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
 	if hostConfig == nil {
 		return nil, nil
 	}
@@ -232,8 +232,8 @@ func configureMaxThreads(config *config.Config) error {
 	return nil
 }
 
-func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface{}) error {
-	netOptions, err := daemon.networkOptions(nil, nil)
+func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSandboxes map[string]interface{}) error {
+	netOptions, err := daemon.networkOptions(daemonCfg, nil, nil)
 	if err != nil {
 		return err
 	}
@@ -396,9 +396,9 @@ func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface
 		}
 	}
 
-	if !daemon.configStore.DisableBridge {
+	if !daemonCfg.DisableBridge {
 		// Initialize default driver "bridge"
-		if err := initBridgeDriver(daemon.netController, daemon.configStore); err != nil {
+		if err := initBridgeDriver(daemon.netController, daemonCfg); err != nil {
 			return err
 		}
 	}
@@ -452,7 +452,7 @@ func (daemon *Daemon) cleanupMountsByID(in string) error {
 	return nil
 }
 
-func (daemon *Daemon) cleanupMounts() error {
+func (daemon *Daemon) cleanupMounts(*config.Config) error {
 	return nil
 }
 
@@ -512,7 +512,7 @@ func driverOptions(_ *config.Config) nwconfig.Option {
 
 // setDefaultIsolation determines the default isolation mode for the
 // daemon to run in. This is only applicable on Windows
-func (daemon *Daemon) setDefaultIsolation() error {
+func (daemon *Daemon) setDefaultIsolation(config *config.Config) error {
 	// On client SKUs, default to Hyper-V. @engine maintainers: this
 	// should not be removed. Ping Microsoft folks if there are PRs
 	// to change this.
@@ -521,7 +521,7 @@ func (daemon *Daemon) setDefaultIsolation() error {
 	} else {
 		daemon.defaultIsolation = containertypes.IsolationProcess
 	}
-	for _, option := range daemon.configStore.ExecOptions {
+	for _, option := range config.ExecOptions {
 		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil {
 			return err
@@ -552,26 +552,22 @@ func setMayDetachMounts() error {
 	return nil
 }
 
-func (daemon *Daemon) setupSeccompProfile() error {
-	return nil
-}
-
-func (daemon *Daemon) loadRuntimes() error {
+func (daemon *Daemon) setupSeccompProfile(*config.Config) error {
 	return nil
 }
 
 func setupResolvConf(config *config.Config) {}
 
-func getSysInfo(daemon *Daemon) *sysinfo.SysInfo {
+func getSysInfo(*config.Config) *sysinfo.SysInfo {
 	return sysinfo.New()
 }
 
-func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
+func (daemon *Daemon) initLibcontainerd(ctx context.Context, cfg *config.Config) error {
 	var err error
 
-	rt := daemon.configStore.GetDefaultRuntimeName()
+	rt := cfg.DefaultRuntime
 	if rt == "" {
-		if daemon.configStore.ContainerdAddr == "" {
+		if cfg.ContainerdAddr == "" {
 			rt = windowsV1RuntimeName
 		} else {
 			rt = windowsV2RuntimeName
@@ -583,19 +579,19 @@ func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
 		daemon.containerd, err = local.NewClient(
 			ctx,
 			daemon.containerdCli,
-			filepath.Join(daemon.configStore.ExecRoot, "containerd"),
-			daemon.configStore.ContainerdNamespace,
+			filepath.Join(cfg.ExecRoot, "containerd"),
+			cfg.ContainerdNamespace,
 			daemon,
 		)
 	case windowsV2RuntimeName:
-		if daemon.configStore.ContainerdAddr == "" {
+		if cfg.ContainerdAddr == "" {
 			return fmt.Errorf("cannot use the specified runtime %q without containerd", rt)
 		}
 		daemon.containerd, err = remote.NewClient(
 			ctx,
 			daemon.containerdCli,
-			filepath.Join(daemon.configStore.ExecRoot, "containerd"),
-			daemon.configStore.ContainerdNamespace,
+			filepath.Join(cfg.ExecRoot, "containerd"),
+			cfg.ContainerdNamespace,
 			daemon,
 		)
 	default:
@@ -604,7 +600,3 @@ func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
 
 	return err
 }
-
-func (daemon *Daemon) supportsRecursivelyReadOnly(_ string) error {
-	return nil
-}

+ 10 - 5
daemon/delete.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/containerfs"
 	"github.com/opencontainers/selinux/go-selinux"
@@ -24,6 +25,10 @@ import (
 // fails. If the remove succeeds, the container name is released, and
 // network links are removed.
 func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
+	return daemon.containerRm(&daemon.config().Config, name, config)
+}
+
+func (daemon *Daemon) containerRm(cfg *config.Config, name string, opts *types.ContainerRmConfig) error {
 	start := time.Now()
 	ctr, err := daemon.GetContainer(name)
 	if err != nil {
@@ -42,17 +47,17 @@ func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig)
 		return nil
 	}
 
-	if config.RemoveLink {
-		return daemon.rmLink(ctr, name)
+	if opts.RemoveLink {
+		return daemon.rmLink(cfg, ctr, name)
 	}
 
-	err = daemon.cleanupContainer(ctr, *config)
+	err = daemon.cleanupContainer(ctr, *opts)
 	containerActions.WithValues("delete").UpdateSince(start)
 
 	return err
 }
 
-func (daemon *Daemon) rmLink(container *container.Container, name string) error {
+func (daemon *Daemon) rmLink(cfg *config.Config, container *container.Container, name string) error {
 	if name[0] != '/' {
 		name = "/" + name
 	}
@@ -71,7 +76,7 @@ func (daemon *Daemon) rmLink(container *container.Container, name string) error
 	parentContainer, _ := daemon.GetContainer(pe)
 	if parentContainer != nil {
 		daemon.linkIndex.unlink(name, container, parentContainer)
-		if err := daemon.updateNetwork(parentContainer); err != nil {
+		if err := daemon.updateNetwork(cfg, parentContainer); err != nil {
 			logrus.Debugf("Could not update network to remove link %s: %v", n, err)
 		}
 	}

+ 2 - 1
daemon/exec.go

@@ -252,7 +252,8 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
 		p.Cwd = "/"
 	}
 
-	if err := daemon.execSetPlatformOpt(ctx, ec, p); err != nil {
+	daemonCfg := &daemon.config().Config
+	if err := daemon.execSetPlatformOpt(ctx, daemonCfg, ec, p); err != nil {
 		return err
 	}
 

+ 3 - 2
daemon/exec_linux.go

@@ -9,6 +9,7 @@ import (
 	coci "github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/pkg/apparmor"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/oci/caps"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
@@ -50,7 +51,7 @@ func getUserFromContainerd(ctx context.Context, containerdCli *containerd.Client
 	return spec.Process.User, nil
 }
 
-func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.ExecConfig, p *specs.Process) error {
+func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, daemonCfg *config.Config, ec *container.ExecConfig, p *specs.Process) error {
 	if len(ec.User) > 0 {
 		var err error
 		if daemon.UsesSnapshotter() {
@@ -100,5 +101,5 @@ func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.Exec
 		p.ApparmorProfile = appArmorProfile
 	}
 	s := &specs.Spec{Process: p}
-	return WithRlimits(daemon, ec.Container)(ctx, nil, nil, s)
+	return withRlimits(daemon, daemonCfg, ec.Container)(ctx, nil, nil, s)
 }

+ 4 - 3
daemon/exec_linux_test.go

@@ -9,7 +9,6 @@ import (
 	"github.com/containerd/containerd/pkg/apparmor"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/container"
-	"github.com/docker/docker/daemon/config"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"gotest.tools/v3/assert"
 )
@@ -50,7 +49,9 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) {
 		},
 	}
 
-	d := &Daemon{configStore: &config.Config{}}
+	cfg := &configStore{}
+	d := &Daemon{}
+	d.configStore.Store(cfg)
 
 	// Currently, `docker exec --privileged` inherits the Privileged configuration
 	// of the container, and does not disable AppArmor.
@@ -81,7 +82,7 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) {
 				ec := &container.ExecConfig{Container: c, Privileged: execPrivileged}
 				p := &specs.Process{}
 
-				err := d.execSetPlatformOpt(context.Background(), ec, p)
+				err := d.execSetPlatformOpt(context.Background(), &cfg.Config, ec, p)
 				assert.NilError(t, err)
 				assert.Equal(t, p.ApparmorProfile, tc.expectedProfile)
 			})

+ 2 - 1
daemon/exec_windows.go

@@ -4,10 +4,11 @@ import (
 	"context"
 
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.ExecConfig, p *specs.Process) error {
+func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, daemonCfg *config.Config, ec *container.ExecConfig, p *specs.Process) error {
 	if ec.Container.OS == "windows" {
 		p.User.Username = ec.User
 	}

+ 26 - 25
daemon/info.go

@@ -30,6 +30,7 @@ func (daemon *Daemon) SystemInfo() *types.Info {
 	defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
 
 	sysInfo := daemon.RawSysInfo()
+	cfg := daemon.config()
 
 	v := &types.Info{
 		ID:                 daemon.id,
@@ -50,27 +51,27 @@ func (daemon *Daemon) SystemInfo() *types.Info {
 		NCPU:               sysinfo.NumCPU(),
 		MemTotal:           memInfo().MemTotal,
 		GenericResources:   daemon.genericResources,
-		DockerRootDir:      daemon.configStore.Root,
-		Labels:             daemon.configStore.Labels,
-		ExperimentalBuild:  daemon.configStore.Experimental,
+		DockerRootDir:      cfg.Root,
+		Labels:             cfg.Labels,
+		ExperimentalBuild:  cfg.Experimental,
 		ServerVersion:      dockerversion.Version,
-		HTTPProxy:          config.MaskCredentials(getConfigOrEnv(daemon.configStore.HTTPProxy, "HTTP_PROXY", "http_proxy")),
-		HTTPSProxy:         config.MaskCredentials(getConfigOrEnv(daemon.configStore.HTTPSProxy, "HTTPS_PROXY", "https_proxy")),
-		NoProxy:            getConfigOrEnv(daemon.configStore.NoProxy, "NO_PROXY", "no_proxy"),
-		LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
+		HTTPProxy:          config.MaskCredentials(getConfigOrEnv(cfg.HTTPProxy, "HTTP_PROXY", "http_proxy")),
+		HTTPSProxy:         config.MaskCredentials(getConfigOrEnv(cfg.HTTPSProxy, "HTTPS_PROXY", "https_proxy")),
+		NoProxy:            getConfigOrEnv(cfg.NoProxy, "NO_PROXY", "no_proxy"),
+		LiveRestoreEnabled: cfg.LiveRestoreEnabled,
 		Isolation:          daemon.defaultIsolation,
 	}
 
 	daemon.fillContainerStates(v)
 	daemon.fillDebugInfo(v)
-	daemon.fillAPIInfo(v)
+	daemon.fillAPIInfo(v, &cfg.Config)
 	// Retrieve platform specific info
-	daemon.fillPlatformInfo(v, sysInfo)
+	daemon.fillPlatformInfo(v, sysInfo, cfg)
 	daemon.fillDriverInfo(v)
-	daemon.fillPluginsInfo(v)
-	daemon.fillSecurityOptions(v, sysInfo)
+	daemon.fillPluginsInfo(v, &cfg.Config)
+	daemon.fillSecurityOptions(v, sysInfo, &cfg.Config)
 	daemon.fillLicense(v)
-	daemon.fillDefaultAddressPools(v)
+	daemon.fillDefaultAddressPools(v, &cfg.Config)
 
 	return v
 }
@@ -80,6 +81,7 @@ func (daemon *Daemon) SystemVersion() types.Version {
 	defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
 
 	kernelVersion := kernelVersion()
+	cfg := daemon.config()
 
 	v := types.Version{
 		Components: []types.ComponentVersion{
@@ -95,7 +97,7 @@ func (daemon *Daemon) SystemVersion() types.Version {
 					"Arch":          runtime.GOARCH,
 					"BuildTime":     dockerversion.BuildTime,
 					"KernelVersion": kernelVersion,
-					"Experimental":  fmt.Sprintf("%t", daemon.configStore.Experimental),
+					"Experimental":  fmt.Sprintf("%t", cfg.Experimental),
 				},
 			},
 		},
@@ -110,12 +112,12 @@ func (daemon *Daemon) SystemVersion() types.Version {
 		Arch:          runtime.GOARCH,
 		BuildTime:     dockerversion.BuildTime,
 		KernelVersion: kernelVersion,
-		Experimental:  daemon.configStore.Experimental,
+		Experimental:  cfg.Experimental,
 	}
 
 	v.Platform.Name = dockerversion.PlatformName
 
-	daemon.fillPlatformVersion(&v)
+	daemon.fillPlatformVersion(&v, cfg)
 	return v
 }
 
@@ -135,19 +137,19 @@ WARNING: The %s storage-driver is deprecated, and will be removed in a future re
 	fillDriverWarnings(v)
 }
 
-func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
+func (daemon *Daemon) fillPluginsInfo(v *types.Info, cfg *config.Config) {
 	v.Plugins = types.PluginsInfo{
 		Volume:  daemon.volumes.GetDriverList(),
 		Network: daemon.GetNetworkDriverList(),
 
 		// The authorization plugins are returned in the order they are
 		// used as they constitute a request/response modification chain.
-		Authorization: daemon.configStore.AuthorizationPlugins,
+		Authorization: cfg.AuthorizationPlugins,
 		Log:           logger.ListDrivers(),
 	}
 }
 
-func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
+func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo, cfg *config.Config) {
 	var securityOptions []string
 	if sysInfo.AppArmor {
 		securityOptions = append(securityOptions, "name=apparmor")
@@ -164,13 +166,13 @@ func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInf
 	if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
 		securityOptions = append(securityOptions, "name=userns")
 	}
-	if daemon.Rootless() {
+	if Rootless(cfg) {
 		securityOptions = append(securityOptions, "name=rootless")
 	}
-	if daemon.cgroupNamespacesEnabled(sysInfo) {
+	if cgroupNamespacesEnabled(sysInfo, cfg) {
 		securityOptions = append(securityOptions, "name=cgroupns")
 	}
-	if daemon.noNewPrivileges() {
+	if noNewPrivileges(cfg) {
 		securityOptions = append(securityOptions, "name=no-new-privileges")
 	}
 
@@ -200,13 +202,12 @@ func (daemon *Daemon) fillDebugInfo(v *types.Info) {
 	v.NEventsListener = daemon.EventsService.SubscribersCount()
 }
 
-func (daemon *Daemon) fillAPIInfo(v *types.Info) {
+func (daemon *Daemon) fillAPIInfo(v *types.Info, cfg *config.Config) {
 	const warn string = `
          Access to the remote API is equivalent to root access on the host. Refer
          to the 'Docker daemon attack surface' section in the documentation for
          more information: https://docs.docker.com/go/attack-surface/`
 
-	cfg := daemon.configStore
 	for _, host := range cfg.Hosts {
 		// cfg.Hosts is normalized during startup, so should always have a scheme/proto
 		proto, addr, _ := strings.Cut(host, "://")
@@ -224,8 +225,8 @@ func (daemon *Daemon) fillAPIInfo(v *types.Info) {
 	}
 }
 
-func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
-	for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
+func (daemon *Daemon) fillDefaultAddressPools(v *types.Info, cfg *config.Config) {
+	for _, pool := range cfg.DefaultAddressPools.Value() {
 		v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
 			Base: pool.Base,
 			Size: pool.Size,

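SystemInfo above runs the HTTP and HTTPS proxy settings through config.MaskCredentials before reporting them. As a rough illustration of that kind of scrubbing — a sketch built on net/url, not the actual moby helper — one could write:

package main

import (
	"fmt"
	"net/url"
)

// maskCredentials hides the password portion of a proxy URL so the value can
// be shown in diagnostic output. URLs that fail to parse or carry no
// userinfo are returned unchanged. Illustrative only.
func maskCredentials(rawURL string) string {
	u, err := url.Parse(rawURL)
	if err != nil || u.User == nil {
		return rawURL
	}
	u.User = url.UserPassword(u.User.Username(), "xxxxx")
	return u.String()
}

func main() {
	fmt.Println(maskCredentials("http://user:secret@proxy.example.com:3128"))
	// http://user:xxxxx@proxy.example.com:3128
	fmt.Println(maskCredentials("http://proxy.example.com:3128"))
	// http://proxy.example.com:3128
}
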
+ 64 - 42
daemon/info_unix.go

@@ -9,8 +9,10 @@ import (
 	"path/filepath"
 	"strings"
 
+	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/pkg/rootless"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/pkg/errors"
@@ -18,8 +20,8 @@ import (
 )
 
 // fillPlatformInfo fills the platform related info.
-func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
-	v.CgroupDriver = daemon.getCgroupDriver()
+func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo, cfg *configStore) {
+	v.CgroupDriver = cgroupDriver(&cfg.Config)
 	v.CgroupVersion = "1"
 	if sysInfo.CgroupUnified {
 		v.CgroupVersion = "2"
@@ -37,22 +39,25 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
 		v.CPUSet = sysInfo.Cpuset
 		v.PidsLimit = sysInfo.PidsLimit
 	}
-	v.Runtimes = daemon.configStore.GetAllRuntimes()
-	v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
+	v.Runtimes = make(map[string]types.Runtime)
+	for n, p := range stockRuntimes() {
+		v.Runtimes[n] = types.Runtime{Path: p}
+	}
+	for n, r := range cfg.Config.Runtimes {
+		v.Runtimes[n] = types.Runtime{
+			Path: r.Path,
+			Args: append([]string(nil), r.Args...),
+		}
+	}
+	v.DefaultRuntime = cfg.Runtimes.Default
 	v.RuncCommit.ID = "N/A"
 	v.ContainerdCommit.ID = "N/A"
 	v.InitCommit.ID = "N/A"
 
-	if rt := daemon.configStore.GetRuntime(v.DefaultRuntime); rt != nil {
-		if rv, err := exec.Command(rt.Path, "--version").Output(); err == nil {
-			if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
-				logrus.Warnf("failed to parse %s version: %v", rt.Path, err)
-			} else {
-				v.RuncCommit.ID = commit
-			}
-		} else {
-			logrus.Warnf("failed to retrieve %s version: %v", rt.Path, err)
-		}
+	if _, _, commit, err := parseDefaultRuntimeVersion(&cfg.Runtimes); err != nil {
+		logrus.Warnf(err.Error())
+	} else {
+		v.RuncCommit.ID = commit
 	}
 
 	if rv, err := daemon.containerd.Version(context.Background()); err == nil {
@@ -61,8 +66,8 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
 		logrus.Warnf("failed to retrieve containerd version: %v", err)
 	}
 
-	v.InitBinary = daemon.configStore.GetInitPath()
-	if initBinary, err := daemon.configStore.LookupInitPath(); err != nil {
+	v.InitBinary = cfg.GetInitPath()
+	if initBinary, err := cfg.LookupInitPath(); err != nil {
 		logrus.Warnf("failed to find docker-init: %s", err)
 	} else if rv, err := exec.Command(initBinary, "--version").Output(); err == nil {
 		if _, commit, err := parseInitVersion(string(rv)); err != nil {
@@ -165,7 +170,7 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
 	}
 }
 
-func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
+func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *configStore) {
 	if rv, err := daemon.containerd.Version(context.Background()); err == nil {
 		v.Components = append(v.Components, types.ComponentVersion{
 			Name:    "containerd",
@@ -176,26 +181,19 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
 		})
 	}
 
-	defaultRuntime := daemon.configStore.GetDefaultRuntimeName()
-	if rt := daemon.configStore.GetRuntime(defaultRuntime); rt != nil {
-		if rv, err := exec.Command(rt.Path, "--version").Output(); err == nil {
-			if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
-				logrus.Warnf("failed to parse %s version: %v", rt.Path, err)
-			} else {
-				v.Components = append(v.Components, types.ComponentVersion{
-					Name:    defaultRuntime,
-					Version: ver,
-					Details: map[string]string{
-						"GitCommit": commit,
-					},
-				})
-			}
-		} else {
-			logrus.Warnf("failed to retrieve %s version: %v", rt.Path, err)
-		}
+	if _, ver, commit, err := parseDefaultRuntimeVersion(&cfg.Runtimes); err != nil {
+		logrus.Warnf(err.Error())
+	} else {
+		v.Components = append(v.Components, types.ComponentVersion{
+			Name:    cfg.Runtimes.Default,
+			Version: ver,
+			Details: map[string]string{
+				"GitCommit": commit,
+			},
+		})
 	}
 
-	if initBinary, err := daemon.configStore.LookupInitPath(); err != nil {
+	if initBinary, err := cfg.LookupInitPath(); err != nil {
 		logrus.Warnf("failed to find docker-init: %s", err)
 	} else if rv, err := exec.Command(initBinary, "--version").Output(); err == nil {
 		if ver, commit, err := parseInitVersion(string(rv)); err != nil {
@@ -317,7 +315,7 @@ func parseInitVersion(v string) (version string, commit string, err error) {
 //	runc version 1.0.0-rc5+dev
 //	commit: 69663f0bd4b60df09991c08812a60108003fa340
 //	spec: 1.0.0
-func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {
+func parseRuntimeVersion(v string) (runtime, version, commit string, err error) {
 	lines := strings.Split(strings.TrimSpace(v), "\n")
 	for _, line := range lines {
 		if strings.Contains(line, "version") {
@@ -337,15 +335,39 @@ func parseRuntimeVersion(v string) (runtime string, version string, commit strin
 	return runtime, version, commit, err
 }
 
-func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
-	return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()
+func parseDefaultRuntimeVersion(rts *runtimes) (runtime, version, commit string, err error) {
+	shim, opts, err := rts.Get(rts.Default)
+	if err != nil {
+		return "", "", "", err
+	}
+	shimopts, ok := opts.(*v2runcoptions.Options)
+	if !ok {
+		return "", "", "", fmt.Errorf("%s: retrieving version not supported", shim)
+	}
+	rt := shimopts.BinaryName
+	if rt == "" {
+		rt = defaultRuntimeName
+	}
+	rv, err := exec.Command(rt, "--version").Output()
+	if err != nil {
+		return "", "", "", fmt.Errorf("failed to retrieve %s version: %w", rt, err)
+	}
+	runtime, version, commit, err = parseRuntimeVersion(string(rv))
+	if err != nil {
+		return "", "", "", fmt.Errorf("failed to parse %s version: %w", rt, err)
+	}
+	return runtime, version, commit, err
+}
+
+func cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo, cfg *config.Config) bool {
+	return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(cfg.CgroupNamespaceMode).IsPrivate()
 }
 
 // Rootless returns true if daemon is running in rootless mode
-func (daemon *Daemon) Rootless() bool {
-	return daemon.configStore.Rootless
+func Rootless(cfg *config.Config) bool {
+	return cfg.Rootless
 }
 
-func (daemon *Daemon) noNewPrivileges() bool {
-	return daemon.configStore.NoNewPrivileges
+func noNewPrivileges(cfg *config.Config) bool {
+	return cfg.NoNewPrivileges
 }

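parseRuntimeVersion above expects output shaped like the comment in this file shows: a "runc version ..." line followed by "commit: ..." and "spec: ..." lines. A standalone, simplified sketch of that parsing — not the daemon's exact implementation — could be:

package main

import (
	"fmt"
	"strings"
)

// parseRuntimeVersion pulls the runtime name, version, and commit out of
// output such as:
//
//	runc version 1.0.0-rc5+dev
//	commit: 69663f0bd4b60df09991c08812a60108003fa340
//	spec: 1.0.0
func parseRuntimeVersion(v string) (runtime, version, commit string) {
	for _, line := range strings.Split(strings.TrimSpace(v), "\n") {
		fields := strings.Fields(line)
		switch {
		case strings.Contains(line, "version") && len(fields) >= 3:
			runtime = fields[0]
			version = fields[len(fields)-1]
		case strings.HasPrefix(line, "commit:") && len(fields) == 2:
			commit = fields[1]
		}
	}
	return runtime, version, commit
}

func main() {
	out := "runc version 1.0.0-rc5+dev\ncommit: 69663f0bd4b60df09991c08812a60108003fa340\nspec: 1.0.0"
	rt, ver, commit := parseRuntimeVersion(out)
	fmt.Println(rt, ver, commit)
}
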
+ 6 - 5
daemon/info_windows.go

@@ -2,27 +2,28 @@ package daemon // import "github.com/docker/docker/daemon"
 
 import (
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/pkg/sysinfo"
 )
 
 // fillPlatformInfo fills the platform related info.
-func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
+func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo, cfg *configStore) {
 }
 
-func (daemon *Daemon) fillPlatformVersion(v *types.Version) {}
+func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *configStore) {}
 
 func fillDriverWarnings(v *types.Info) {
 }
 
-func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
+func cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo, cfg *config.Config) bool {
 	return false
 }
 
 // Rootless returns true if daemon is running in rootless mode
-func (daemon *Daemon) Rootless() bool {
+func Rootless(*config.Config) bool {
 	return false
 }
 
-func (daemon *Daemon) noNewPrivileges() bool {
+func noNewPrivileges(*config.Config) bool {
 	return false
 }

+ 5 - 4
daemon/inspect.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/api/types/versions/v1p20"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/go-connections/nat"
@@ -40,7 +41,7 @@ func (daemon *Daemon) ContainerInspectCurrent(ctx context.Context, name string,
 
 	ctr.Lock()
 
-	base, err := daemon.getInspectData(ctr)
+	base, err := daemon.getInspectData(&daemon.config().Config, ctr)
 	if err != nil {
 		ctr.Unlock()
 		return nil, err
@@ -105,7 +106,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
 	ctr.Lock()
 	defer ctr.Unlock()
 
-	base, err := daemon.getInspectData(ctr)
+	base, err := daemon.getInspectData(&daemon.config().Config, ctr)
 	if err != nil {
 		return nil, err
 	}
@@ -124,7 +125,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
 	}, nil
 }
 
-func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) {
+func (daemon *Daemon) getInspectData(daemonCfg *config.Config, container *container.Container) (*types.ContainerJSONBase, error) {
 	// make a copy to play with
 	hostConfig := *container.HostConfig
 
@@ -135,7 +136,7 @@ func (daemon *Daemon) getInspectData(container *container.Container) (*types.Con
 	}
 
 	// We merge the Ulimits from hostConfig with daemon default
-	daemon.mergeUlimits(&hostConfig)
+	daemon.mergeUlimits(&hostConfig, daemonCfg)
 
 	var containerHealth *types.Health
 	if container.State.Health != nil {

+ 1 - 1
daemon/inspect_linux.go

@@ -29,7 +29,7 @@ func (daemon *Daemon) containerInspectPre120(ctx context.Context, name string) (
 	ctr.Lock()
 	defer ctr.Unlock()
 
-	base, err := daemon.getInspectData(ctr)
+	base, err := daemon.getInspectData(&daemon.config().Config, ctr)
 	if err != nil {
 		return nil, err
 	}

+ 6 - 5
daemon/inspect_test.go

@@ -5,7 +5,6 @@ import (
 
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/container"
-	"github.com/docker/docker/daemon/config"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 )
@@ -19,16 +18,18 @@ func TestGetInspectData(t *testing.T) {
 	}
 
 	d := &Daemon{
-		linkIndex:   newLinkIndex(),
-		configStore: &config.Config{},
+		linkIndex: newLinkIndex(),
 	}
 	if d.UsesSnapshotter() {
 		t.Skip("does not apply to containerd snapshotters, which don't have RWLayer set")
 	}
-	_, err := d.getInspectData(c)
+	cfg := &configStore{}
+	d.configStore.Store(cfg)
+
+	_, err := d.getInspectData(&cfg.Config, c)
 	assert.Check(t, is.ErrorContains(err, "RWLayer of container inspect-me is unexpectedly nil"))
 
 	c.Dead = true
-	_, err = d.getInspectData(c)
+	_, err = d.getInspectData(&cfg.Config, c)
 	assert.Check(t, err)
 }

+ 9 - 12
daemon/logs.go

@@ -10,6 +10,7 @@ import (
 	containertypes "github.com/docker/docker/api/types/container"
 	timetypes "github.com/docker/docker/api/types/time"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/logger"
 	logcache "github.com/docker/docker/daemon/logger/loggerutils/cache"
 	"github.com/docker/docker/errdefs"
@@ -196,18 +197,14 @@ func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) err
 	return logger.ValidateLogOpts(cfg.Type, cfg.Config)
 }
 
-func (daemon *Daemon) setupDefaultLogConfig() error {
-	config := daemon.configStore
-	if len(config.LogConfig.Config) > 0 {
-		if err := logger.ValidateLogOpts(config.LogConfig.Type, config.LogConfig.Config); err != nil {
-			return errors.Wrap(err, "failed to set log opts")
+func defaultLogConfig(cfg *config.Config) (containertypes.LogConfig, error) {
+	if len(cfg.LogConfig.Config) > 0 {
+		if err := logger.ValidateLogOpts(cfg.LogConfig.Type, cfg.LogConfig.Config); err != nil {
+			return containertypes.LogConfig{}, errors.Wrap(err, "failed to set log opts")
 		}
 	}
-	daemon.defaultLogConfig = containertypes.LogConfig{
-		Type:   config.LogConfig.Type,
-		Config: config.LogConfig.Config,
-	}
-
-	logrus.Debugf("Using default logging driver %s", daemon.defaultLogConfig.Type)
-	return nil
+	return containertypes.LogConfig{
+		Type:   cfg.LogConfig.Type,
+		Config: cfg.LogConfig.Config,
+	}, nil
 }

+ 3 - 2
daemon/metrics_unix.go

@@ -9,6 +9,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/plugin"
@@ -19,8 +20,8 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-func (daemon *Daemon) listenMetricsSock() (string, error) {
-	path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock")
+func (daemon *Daemon) listenMetricsSock(cfg *config.Config) (string, error) {
+	path := filepath.Join(cfg.ExecRoot, "metrics.sock")
 	unix.Unlink(path)
 	l, err := net.Listen("unix", path)
 	if err != nil {

+ 5 - 2
daemon/metrics_unsupported.go

@@ -2,11 +2,14 @@
 
 package daemon // import "github.com/docker/docker/daemon"
 
-import "github.com/docker/docker/pkg/plugingetter"
+import (
+	"github.com/docker/docker/daemon/config"
+	"github.com/docker/docker/pkg/plugingetter"
+)
 
 func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) {
 }
 
-func (daemon *Daemon) listenMetricsSock() (string, error) {
+func (daemon *Daemon) listenMetricsSock(*config.Config) (string, error) {
 	return "", nil
 }

+ 9 - 5
daemon/monitor.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/errdefs"
 	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
 	"github.com/docker/docker/restartmanager"
@@ -29,6 +30,8 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 	var exitStatus container.ExitStatus
 	c.Lock()
 
+	cfg := daemon.config()
+
 	// Health checks will be automatically restarted if/when the
 	// container is started again.
 	daemon.stopHealthchecks(c)
@@ -99,7 +102,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 	} else {
 		c.SetStopped(&exitStatus)
 		if !c.HasBeenManuallyRestarted {
-			defer daemon.autoRemove(c)
+			defer daemon.autoRemove(&cfg.Config, c)
 		}
 	}
 	defer c.Unlock() // needs to be called before autoRemove
@@ -117,7 +120,8 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 				// But containerStart will use daemon.netController segment.
 				// So, to avoid a panic during the startup process, we must wait here until the daemon restore is done.
 				daemon.waitForStartupDone()
-				if err = daemon.containerStart(context.Background(), c, "", "", false); err != nil {
+				cfg := daemon.config() // Apply the most up-to-date daemon config to the restarted container.
+				if err = daemon.containerStart(context.Background(), cfg, c, "", "", false); err != nil {
 					logrus.Debugf("failed to restart container: %+v", err)
 				}
 			}
@@ -127,7 +131,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
 				daemon.setStateCounter(c)
 				c.CheckpointTo(daemon.containersReplica)
 				c.Unlock()
-				defer daemon.autoRemove(c)
+				defer daemon.autoRemove(&cfg.Config, c)
 				if err != restartmanager.ErrRestartCanceled {
 					logrus.Errorf("restartmanger wait error: %+v", err)
 				}
@@ -280,7 +284,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
 	return nil
 }
 
-func (daemon *Daemon) autoRemove(c *container.Container) {
+func (daemon *Daemon) autoRemove(cfg *config.Config, c *container.Container) {
 	c.Lock()
 	ar := c.HostConfig.AutoRemove
 	c.Unlock()
@@ -288,7 +292,7 @@ func (daemon *Daemon) autoRemove(c *container.Container) {
 		return
 	}
 
-	err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
+	err := daemon.containerRm(cfg, c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
 	if err == nil {
 		return
 	}

+ 8 - 7
daemon/network.go

@@ -15,6 +15,7 @@ import (
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/container"
 	clustertypes "github.com/docker/docker/daemon/cluster/provider"
+	"github.com/docker/docker/daemon/config"
 	internalnetwork "github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/libnetwork"
@@ -160,7 +161,7 @@ func (daemon *Daemon) startIngressWorker() {
 			select {
 			case r := <-ingressJobsChannel:
 				if r.create != nil {
-					daemon.setupIngress(r.create, r.ip, ingressID)
+					daemon.setupIngress(&daemon.config().Config, r.create, r.ip, ingressID)
 					ingressID = r.create.ID
 				} else {
 					daemon.releaseIngress(ingressID)
@@ -199,7 +200,7 @@ func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
 	return done, nil
 }
 
-func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
+func (daemon *Daemon) setupIngress(cfg *config.Config, create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
 	controller := daemon.netController
 	controller.AgentInitWait()
 
@@ -207,7 +208,7 @@ func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip
 		daemon.releaseIngress(staleID)
 	}
 
-	if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
+	if _, err := daemon.createNetwork(cfg, create.NetworkCreateRequest, create.ID, true); err != nil {
 		// If it is any error other than an already-exists
 		// error, log the error and return.
 		if _, ok := err.(libnetwork.NetworkNameError); !ok {
@@ -277,16 +278,16 @@ func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networ
 
 // CreateManagedNetwork creates an agent network.
 func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
-	_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
+	_, err := daemon.createNetwork(&daemon.config().Config, create.NetworkCreateRequest, create.ID, true)
 	return err
 }
 
 // CreateNetwork creates a network with the given name, driver and other optional parameters
 func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
-	return daemon.createNetwork(create, "", false)
+	return daemon.createNetwork(&daemon.config().Config, create, "", false)
 }
 
-func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
+func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
 	if runconfig.IsPreDefinedNetwork(create.Name) {
 		return nil, PredefinedNetworkError(create.Name)
 	}
@@ -319,7 +320,7 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string
 	for k, v := range create.Options {
 		networkOptions[k] = v
 	}
-	if defaultOpts, ok := daemon.configStore.DefaultNetworkOpts[driver]; create.ConfigFrom == nil && ok {
+	if defaultOpts, ok := cfg.DefaultNetworkOpts[driver]; create.ConfigFrom == nil && ok {
 		for k, v := range defaultOpts {
 			if _, ok := networkOptions[k]; !ok {
 				logrus.WithFields(logrus.Fields{"driver": driver, "network": id, k: v}).Debug("Applying network default option")
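
The default-option handling above boils down to a two-level map lookup: per-driver defaults from the daemon config are applied only where the create request did not set the key itself. A stand-alone sketch of that merge follows; the helper name is illustrative, and the map shape mirrors how DefaultNetworkOpts is indexed above.

// mergeDriverDefaults returns the user-supplied options plus any daemon-level
// defaults for the driver that the user did not override.
func mergeDriverDefaults(userOpts map[string]string, defaults map[string]map[string]string, driver string) map[string]string {
	merged := make(map[string]string, len(userOpts))
	for k, v := range userOpts {
		merged[k] = v
	}
	for k, v := range defaults[driver] { // a missing driver key yields an empty range
		if _, ok := merged[k]; !ok {
			merged[k] = v
		}
	}
	return merged
}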

+ 36 - 36
daemon/oci_linux.go

@@ -36,15 +36,15 @@ import (
 
 const inContainerInitPath = "/sbin/" + dconfig.DefaultInitBinary
 
-// WithRlimits sets the container's rlimits along with merging the daemon's rlimits
-func WithRlimits(daemon *Daemon, c *container.Container) coci.SpecOpts {
+// withRlimits sets the container's rlimits along with merging the daemon's rlimits
+func withRlimits(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
 	return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
 		var rlimits []specs.POSIXRlimit
 
 		// We want to leave the original HostConfig alone so make a copy here
 		hostConfig := *c.HostConfig
 		// Merge with the daemon defaults
-		daemon.mergeUlimits(&hostConfig)
+		daemon.mergeUlimits(&hostConfig, daemonCfg)
 		for _, ul := range hostConfig.Ulimits {
 			rlimits = append(rlimits, specs.POSIXRlimit{
 				Type: "RLIMIT_" + strings.ToUpper(ul.Name),
@@ -58,8 +58,8 @@ func WithRlimits(daemon *Daemon, c *container.Container) coci.SpecOpts {
 	}
 }
 
-// WithLibnetwork sets the libnetwork hook
-func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
+// withLibnetwork sets the libnetwork hook
+func withLibnetwork(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
 	return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
 		if s.Hooks == nil {
 			s.Hooks = &specs.Hooks{}
@@ -72,7 +72,7 @@ func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
 					Path: target,
 					Args: []string{
 						"libnetwork-setkey",
-						"-exec-root=" + daemon.configStore.GetExecRoot(),
+						"-exec-root=" + daemonCfg.GetExecRoot(),
 						c.ID,
 						shortNetCtlrID,
 					},
@@ -83,11 +83,11 @@ func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
 	}
 }
 
-// WithRootless sets the spec to the rootless configuration
-func WithRootless(daemon *Daemon) coci.SpecOpts {
+// withRootless sets the spec to the rootless configuration
+func withRootless(daemon *Daemon, daemonCfg *dconfig.Config) coci.SpecOpts {
 	return func(_ context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
 		var v2Controllers []string
-		if daemon.getCgroupDriver() == cgroupSystemdDriver {
+		if cgroupDriver(daemonCfg) == cgroupSystemdDriver {
 			if cdcgroups.Mode() != cdcgroups.Unified {
 				return errors.New("rootless systemd driver doesn't support cgroup v1")
 			}
@@ -488,8 +488,8 @@ func inSlice(slice []string, s string) bool {
 	return false
 }
 
-// WithMounts sets the container's mounts
-func WithMounts(daemon *Daemon, c *container.Container) coci.SpecOpts {
+// withMounts sets the container's mounts
+func withMounts(daemon *Daemon, daemonCfg *configStore, c *container.Container) coci.SpecOpts {
 	return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) (err error) {
 		if err := daemon.setupContainerMountsRoot(c); err != nil {
 			return err
@@ -652,7 +652,7 @@ func WithMounts(daemon *Daemon, c *container.Container) coci.SpecOpts {
 						return errors.New("mount options conflict: ReadOnlyNonRecursive && ReadOnlyForceRecursive")
 					}
 				}
-				if rroErr := daemon.supportsRecursivelyReadOnly(c.HostConfig.Runtime); rroErr != nil {
+				if rroErr := supportsRecursivelyReadOnly(daemonCfg, c.HostConfig.Runtime); rroErr != nil {
 					rro = false
 					if m.ReadOnlyForceRecursive {
 						return rroErr
@@ -673,7 +673,7 @@ func WithMounts(daemon *Daemon, c *container.Container) coci.SpecOpts {
 			// "mount" when we bind-mount. The reason for this is that at the point
 			// when runc sets up the root filesystem, it is already inside a user
 			// namespace, and thus cannot change any flags that are locked.
-			if daemon.configStore.RemappedRoot != "" || userns.RunningInUserNS() {
+			if daemonCfg.RemappedRoot != "" || userns.RunningInUserNS() {
 				unprivOpts, err := getUnprivilegedMountFlags(m.Source)
 				if err != nil {
 					return err
@@ -732,8 +732,8 @@ func sysctlExists(s string) bool {
 	return err == nil
 }
 
-// WithCommonOptions sets common docker options
-func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
+// withCommonOptions sets common docker options
+func withCommonOptions(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
 	return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
 		if c.BaseFS == "" && !daemon.UsesSnapshotter() {
 			return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly empty")
@@ -762,9 +762,9 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
 		// host namespace or another container's pid namespace where we already have an init
 		if c.HostConfig.PidMode.IsPrivate() {
 			if (c.HostConfig.Init != nil && *c.HostConfig.Init) ||
-				(c.HostConfig.Init == nil && daemon.configStore.Init) {
+				(c.HostConfig.Init == nil && daemonCfg.Init) {
 				s.Process.Args = append([]string{inContainerInitPath, "--", c.Path}, c.Args...)
-				path, err := daemon.configStore.LookupInitPath() // this will fall back to DefaultInitBinary and return an absolute path
+				path, err := daemonCfg.LookupInitPath() // this will fall back to DefaultInitBinary and return an absolute path
 				if err != nil {
 					return err
 				}
@@ -790,7 +790,7 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
 		// joining an existing namespace, only if we create a new net namespace.
 		if c.HostConfig.NetworkMode.IsPrivate() {
 			// We cannot set up ping socket support in a user namespace
-			userNS := daemon.configStore.RemappedRoot != "" && c.HostConfig.UsernsMode.IsPrivate()
+			userNS := daemonCfg.RemappedRoot != "" && c.HostConfig.UsernsMode.IsPrivate()
 			if !userNS && !userns.RunningInUserNS() && sysctlExists("net.ipv4.ping_group_range") {
 				// allow unprivileged ICMP echo sockets without CAP_NET_RAW
 				s.Linux.Sysctl["net.ipv4.ping_group_range"] = "0 2147483647"
@@ -805,24 +805,24 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
 	}
 }
 
-// WithCgroups sets the container's cgroups
-func WithCgroups(daemon *Daemon, c *container.Container) coci.SpecOpts {
+// withCgroups sets the container's cgroups
+func withCgroups(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
 	return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
 		var cgroupsPath string
 		scopePrefix := "docker"
 		parent := "/docker"
-		useSystemd := UsingSystemd(daemon.configStore)
+		useSystemd := UsingSystemd(daemonCfg)
 		if useSystemd {
 			parent = "system.slice"
-			if daemon.configStore.Rootless {
+			if daemonCfg.Rootless {
 				parent = "user.slice"
 			}
 		}
 
 		if c.HostConfig.CgroupParent != "" {
 			parent = c.HostConfig.CgroupParent
-		} else if daemon.configStore.CgroupParent != "" {
-			parent = daemon.configStore.CgroupParent
+		} else if daemonCfg.CgroupParent != "" {
+			parent = daemonCfg.CgroupParent
 		}
 
 		if useSystemd {
@@ -835,7 +835,7 @@ func WithCgroups(daemon *Daemon, c *container.Container) coci.SpecOpts {
 
 		// the rest is only needed for CPU RT controller
 
-		if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
+		if daemonCfg.CPURealtimePeriod == 0 && daemonCfg.CPURealtimeRuntime == 0 {
 			return nil
 		}
 
@@ -869,7 +869,7 @@ func WithCgroups(daemon *Daemon, c *container.Container) coci.SpecOpts {
 		}
 		mnt = filepath.Join(mnt, root)
 
-		if err := daemon.initCPURtController(mnt, parentPath); err != nil {
+		if err := daemon.initCPURtController(daemonCfg, mnt, parentPath); err != nil {
 			return errors.Wrap(err, "unable to init CPU RT controller")
 		}
 		return nil
@@ -1019,23 +1019,23 @@ func WithUser(c *container.Container) coci.SpecOpts {
 	}
 }
 
-func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (retSpec *specs.Spec, err error) {
+func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *configStore, c *container.Container) (retSpec *specs.Spec, err error) {
 	var (
 		opts []coci.SpecOpts
 		s    = oci.DefaultSpec()
 	)
 	opts = append(opts,
-		WithCommonOptions(daemon, c),
-		WithCgroups(daemon, c),
+		withCommonOptions(daemon, &daemonCfg.Config, c),
+		withCgroups(daemon, &daemonCfg.Config, c),
 		WithResources(c),
 		WithSysctls(c),
 		WithDevices(daemon, c),
-		WithRlimits(daemon, c),
+		withRlimits(daemon, &daemonCfg.Config, c),
 		WithNamespaces(daemon, c),
 		WithCapabilities(c),
 		WithSeccomp(daemon, c),
-		WithMounts(daemon, c),
-		WithLibnetwork(daemon, c),
+		withMounts(daemon, daemonCfg, c),
+		withLibnetwork(daemon, &daemonCfg.Config, c),
 		WithApparmor(c),
 		WithSelinux(c),
 		WithOOMScore(&c.HostConfig.OomScoreAdj),
@@ -1068,8 +1068,8 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r
 	if c.HostConfig.ReadonlyPaths != nil {
 		opts = append(opts, coci.WithReadonlyPaths(c.HostConfig.ReadonlyPaths))
 	}
-	if daemon.configStore.Rootless {
-		opts = append(opts, WithRootless(daemon))
+	if daemonCfg.Rootless {
+		opts = append(opts, withRootless(daemon, &daemonCfg.Config))
 	}
 
 	var snapshotter, snapshotKey string
@@ -1096,14 +1096,14 @@ func clearReadOnly(m *specs.Mount) {
 }
 
 // mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig
-func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
+func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig, daemonCfg *dconfig.Config) {
 	ulimits := c.Ulimits
 	// Merge ulimits with daemon defaults
 	ulIdx := make(map[string]struct{})
 	for _, ul := range ulimits {
 		ulIdx[ul.Name] = struct{}{}
 	}
-	for name, ul := range daemon.configStore.Ulimits {
+	for name, ul := range daemonCfg.Ulimits {
 		if _, exists := ulIdx[name]; !exists {
 			ulimits = append(ulimits, ul)
 		}
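
Since the hunk above only shows part of mergeUlimits, here is the full merge idea in isolation: per-container ulimits win, and daemon defaults are appended only for names the container did not set. The sketch below is written under that assumption; the helper name is made up, while the types mirror the ones used above.

import (
	containertypes "github.com/docker/docker/api/types/container"
	units "github.com/docker/go-units"
)

// mergeUlimitDefaults returns hc.Ulimits plus every daemon default whose name
// the container did not already configure.
func mergeUlimitDefaults(hc *containertypes.HostConfig, defaults map[string]*units.Ulimit) []*units.Ulimit {
	seen := make(map[string]struct{}, len(hc.Ulimits))
	for _, ul := range hc.Ulimits {
		seen[ul.Name] = struct{}{}
	}
	merged := append([]*units.Ulimit(nil), hc.Ulimits...)
	for name, ul := range defaults {
		if _, ok := seen[name]; !ok {
			merged = append(merged, ul)
		}
	}
	return merged
}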

+ 8 - 10
daemon/oci_linux_test.go

@@ -30,7 +30,6 @@ func setupFakeDaemon(t *testing.T, c *container.Container) *Daemon {
 	d := &Daemon{
 		// some empty structs to avoid getting a panic
 		// caused by a null pointer dereference
-		configStore:   &config.Config{},
 		linkIndex:     newLinkIndex(),
 		netController: netController,
 		imageService:  &fakeImageService{},
@@ -83,7 +82,7 @@ func TestTmpfsDevShmNoDupMount(t *testing.T) {
 	d := setupFakeDaemon(t, c)
 	defer cleanupFakeContainer(c)
 
-	_, err := d.createSpec(context.TODO(), c)
+	_, err := d.createSpec(context.TODO(), &configStore{}, c)
 	assert.Check(t, err)
 }
 
@@ -102,7 +101,7 @@ func TestIpcPrivateVsReadonly(t *testing.T) {
 	d := setupFakeDaemon(t, c)
 	defer cleanupFakeContainer(c)
 
-	s, err := d.createSpec(context.TODO(), c)
+	s, err := d.createSpec(context.TODO(), &configStore{}, c)
 	assert.Check(t, err)
 
 	// Find the /dev/shm mount in ms, check it does not have ro
@@ -132,7 +131,7 @@ func TestSysctlOverride(t *testing.T) {
 	defer cleanupFakeContainer(c)
 
 	// Ensure that the implicit sysctl is set correctly.
-	s, err := d.createSpec(context.TODO(), c)
+	s, err := d.createSpec(context.TODO(), &configStore{}, c)
 	assert.NilError(t, err)
 	assert.Equal(t, s.Hostname, "foobar")
 	assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.Config.Domainname)
@@ -148,15 +147,14 @@ func TestSysctlOverride(t *testing.T) {
 	assert.Assert(t, c.HostConfig.Sysctls["kernel.domainname"] != c.Config.Domainname)
 	c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024"
 
-	s, err = d.createSpec(context.TODO(), c)
+	s, err = d.createSpec(context.TODO(), &configStore{}, c)
 	assert.NilError(t, err)
 	assert.Equal(t, s.Hostname, "foobar")
 	assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.HostConfig.Sysctls["kernel.domainname"])
 	assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"])
 
 	// Ensure the ping_group_range is not set on a daemon with user-namespaces enabled
-	d.configStore.RemappedRoot = "dummy:dummy"
-	s, err = d.createSpec(context.TODO(), c)
+	s, err = d.createSpec(context.TODO(), &configStore{Config: config.Config{RemappedRoot: "dummy:dummy"}}, c)
 	assert.NilError(t, err)
 	_, ok := s.Linux.Sysctl["net.ipv4.ping_group_range"]
 	assert.Assert(t, !ok)
@@ -164,7 +162,7 @@ func TestSysctlOverride(t *testing.T) {
 	// Ensure the ping_group_range is set on a container in "host" userns mode
 	// on a daemon with user-namespaces enabled
 	c.HostConfig.UsernsMode = "host"
-	s, err = d.createSpec(context.TODO(), c)
+	s, err = d.createSpec(context.TODO(), &configStore{Config: config.Config{RemappedRoot: "dummy:dummy"}}, c)
 	assert.NilError(t, err)
 	assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "0 2147483647")
 }
@@ -184,7 +182,7 @@ func TestSysctlOverrideHost(t *testing.T) {
 	defer cleanupFakeContainer(c)
 
 	// Ensure that the implicit sysctl is not set
-	s, err := d.createSpec(context.TODO(), c)
+	s, err := d.createSpec(context.TODO(), &configStore{}, c)
 	assert.NilError(t, err)
 	assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "")
 	assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "")
@@ -192,7 +190,7 @@ func TestSysctlOverrideHost(t *testing.T) {
 	// Set an explicit sysctl.
 	c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024"
 
-	s, err = d.createSpec(context.TODO(), c)
+	s, err = d.createSpec(context.TODO(), &configStore{}, c)
 	assert.NilError(t, err)
 	assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"])
 }

+ 4 - 3
daemon/oci_windows.go

@@ -12,6 +12,7 @@ import (
 	containertypes "github.com/docker/docker/api/types/container"
 	imagetypes "github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/oci"
 	"github.com/docker/docker/pkg/sysinfo"
@@ -27,7 +28,7 @@ const (
 	credentialSpecFileLocation     = "CredentialSpecs"
 )
 
-func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (*specs.Spec, error) {
+func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *configStore, c *container.Container) (*specs.Spec, error) {
 	img, err := daemon.imageService.GetImage(ctx, string(c.ImageID), imagetypes.GetImageOpts{})
 	if err != nil {
 		return nil, err
@@ -142,7 +143,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (*
 		return nil, errors.Wrapf(err, "container %s", c.ID)
 	}
 
-	dnsSearch := daemon.getDNSSearchSettings(c)
+	dnsSearch := daemon.getDNSSearchSettings(&daemonCfg.Config, c)
 
 	// Get endpoints for the libnetwork allocated networks to the container
 	var epList []string
@@ -404,7 +405,7 @@ func setResourcesInSpec(c *container.Container, s *specs.Spec, isHyperV bool) {
 
 // mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig
 // It does nothing on non-Linux platforms
-func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
+func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig, daemonCfg *config.Config) {
 	return
 }
 

+ 2 - 1
daemon/prune.go

@@ -56,6 +56,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
 		return nil, err
 	}
 
+	cfg := &daemon.config().Config
 	allContainers := daemon.List()
 	for _, c := range allContainers {
 		select {
@@ -77,7 +78,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
 				return nil, err
 			}
 			// TODO: sets RmLink to true?
-			err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{})
+			err = daemon.containerRm(cfg, c.ID, &types.ContainerRmConfig{})
 			if err != nil {
 				logrus.Warnf("failed to prune container %s: %v", c.ID, err)
 				continue

+ 179 - 160
daemon/reload.go

@@ -5,12 +5,58 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/docker/docker/daemon/config"
+	"github.com/hashicorp/go-multierror"
+	"github.com/mitchellh/copystructure"
 	"github.com/sirupsen/logrus"
+
+	"github.com/docker/docker/daemon/config"
 )
 
-// Reload reads configuration changes and modifies the
-// daemon according to those changes.
+// reloadTxn is used to defer side effects of a config reload.
+type reloadTxn struct {
+	onCommit, onRollback []func() error
+}
+
+// OnCommit defers a function to be called when a config reload is being finalized.
+// The error returned from cb is purely informational.
+func (tx *reloadTxn) OnCommit(cb func() error) {
+	tx.onCommit = append(tx.onCommit, cb)
+}
+
+// OnRollback defers a function to be called when a config reload is aborted.
+// The error returned from cb is purely informational.
+func (tx *reloadTxn) OnRollback(cb func() error) {
+	tx.onRollback = append(tx.onRollback, cb)
+}
+
+func (tx *reloadTxn) run(cbs []func() error) error {
+	tx.onCommit = nil
+	tx.onRollback = nil
+
+	var res *multierror.Error
+	for _, cb := range cbs {
+		res = multierror.Append(res, cb())
+	}
+	return res.ErrorOrNil()
+}
+
+// Commit calls all functions registered with OnCommit.
+// Any errors returned by the functions are collated into a
+// *github.com/hashicorp/go-multierror.Error value.
+func (tx *reloadTxn) Commit() error {
+	return tx.run(tx.onCommit)
+}
+
+// Rollback calls all functions registered with OnRollback.
+// Any errors returned by the functions are collated into a
+// *github.com/hashicorp/go-multierror.Error value.
+func (tx *reloadTxn) Rollback() error {
+	return tx.run(tx.onRollback)
+}
+
+// Reload modifies the live daemon configuration from conf.
+// conf is assumed to be a validated configuration.
+//
 // These are the settings that Reload changes:
 // - Platform runtime
 // - Daemon debug log level
@@ -23,256 +69,229 @@ import (
 // - Insecure registries
 // - Registry mirrors
 // - Daemon live restore
-func (daemon *Daemon) Reload(conf *config.Config) (err error) {
-	daemon.configStore.Lock()
+func (daemon *Daemon) Reload(conf *config.Config) error {
+	daemon.configReload.Lock()
+	defer daemon.configReload.Unlock()
+	copied, err := copystructure.Copy(daemon.config().Config)
+	if err != nil {
+		return err
+	}
+	newCfg := &configStore{
+		Config: copied.(config.Config),
+	}
+
 	attributes := map[string]string{}
 
-	defer func() {
-		if err == nil {
-			jsonString, _ := json.Marshal(&struct {
-				*config.Config
-				config.Proxies `json:"proxies"`
-			}{
-				Config: daemon.configStore,
-				Proxies: config.Proxies{
-					HTTPProxy:  config.MaskCredentials(daemon.configStore.HTTPProxy),
-					HTTPSProxy: config.MaskCredentials(daemon.configStore.HTTPSProxy),
-					NoProxy:    config.MaskCredentials(daemon.configStore.NoProxy),
-				},
-			})
-			logrus.Infof("Reloaded configuration: %s", jsonString)
-		}
-		daemon.configStore.Unlock()
-		if err == nil {
-			daemon.LogDaemonEventWithAttributes("reload", attributes)
+	// Ideally reloading should be transactional: the reload either completes
+	// successfully, or the daemon config and state are left untouched. We use a
+	// two-phase commit protocol to achieve this. Any fallible reload operation is
+	// split into two phases. The first phase performs all the fallible operations
+	// and mutates the newCfg copy. The second phase atomically swaps newCfg into
+	// the live daemon configuration and executes any commit functions the first
+	// phase registered to apply the side effects. If any first-phase operation
+	// returns an error, the reload transaction is rolled back by discarding newCfg
+	// and executing any registered rollback functions.
+
+	var txn reloadTxn
+	for _, reload := range []func(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error{
+		daemon.reloadPlatform,
+		daemon.reloadDebug,
+		daemon.reloadMaxConcurrentDownloadsAndUploads,
+		daemon.reloadMaxDownloadAttempts,
+		daemon.reloadShutdownTimeout,
+		daemon.reloadFeatures,
+		daemon.reloadLabels,
+		daemon.reloadRegistryConfig,
+		daemon.reloadLiveRestore,
+		daemon.reloadNetworkDiagnosticPort,
+	} {
+		if err := reload(&txn, newCfg, conf, attributes); err != nil {
+			if rollbackErr := txn.Rollback(); rollbackErr != nil {
+				return multierror.Append(nil, err, rollbackErr)
+			}
+			return err
 		}
-	}()
-
-	if err := daemon.reloadPlatform(conf, attributes); err != nil {
-		return err
 	}
-	daemon.reloadDebug(conf, attributes)
-	daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes)
-	daemon.reloadMaxDownloadAttempts(conf, attributes)
-	daemon.reloadShutdownTimeout(conf, attributes)
-	daemon.reloadFeatures(conf, attributes)
 
-	if err := daemon.reloadLabels(conf, attributes); err != nil {
-		return err
-	}
-	if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil {
-		return err
-	}
-	if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil {
-		return err
-	}
-	if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil {
-		return err
+	jsonString, _ := json.Marshal(&struct {
+		*config.Config
+		config.Proxies `json:"proxies"`
+	}{
+		Config: &newCfg.Config,
+		Proxies: config.Proxies{
+			HTTPProxy:  config.MaskCredentials(newCfg.HTTPProxy),
+			HTTPSProxy: config.MaskCredentials(newCfg.HTTPSProxy),
+			NoProxy:    config.MaskCredentials(newCfg.NoProxy),
+		},
+	})
+	logrus.Infof("Reloaded configuration: %s", jsonString)
+	daemon.configStore.Store(newCfg)
+	daemon.LogDaemonEventWithAttributes("reload", attributes)
+	return txn.Commit()
+}
+
+func marshalAttributeSlice(v []string) string {
+	if v == nil {
+		return "[]"
 	}
-	if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
-		return err
+	b, err := json.Marshal(v)
+	if err != nil {
+		panic(err) // Should never happen as the input type is fixed.
 	}
-	return daemon.reloadNetworkDiagnosticPort(conf, attributes)
+	return string(b)
 }
 
 // reloadDebug updates configuration with Debug option
 // and updates the passed attributes
-func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) {
+func (daemon *Daemon) reloadDebug(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// update corresponding configuration
 	if conf.IsValueSet("debug") {
-		daemon.configStore.Debug = conf.Debug
+		newCfg.Debug = conf.Debug
 	}
 	// prepare reload event attributes with updatable configurations
-	attributes["debug"] = strconv.FormatBool(daemon.configStore.Debug)
+	attributes["debug"] = strconv.FormatBool(newCfg.Debug)
+	return nil
 }
 
 // reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
 // download and upload options and updates the passed attributes
-func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) {
+func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// We always "reset" as the cost is lightweight and easy to maintain.
-	daemon.configStore.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
-	daemon.configStore.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads
+	newCfg.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
+	newCfg.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads
 
 	if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != 0 {
-		daemon.configStore.MaxConcurrentDownloads = conf.MaxConcurrentDownloads
+		newCfg.MaxConcurrentDownloads = conf.MaxConcurrentDownloads
 	}
 	if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != 0 {
-		daemon.configStore.MaxConcurrentUploads = conf.MaxConcurrentUploads
-	}
-	if daemon.imageService != nil {
-		daemon.imageService.UpdateConfig(
-			daemon.configStore.MaxConcurrentDownloads,
-			daemon.configStore.MaxConcurrentUploads,
-		)
+		newCfg.MaxConcurrentUploads = conf.MaxConcurrentUploads
 	}
+	txn.OnCommit(func() error {
+		if daemon.imageService != nil {
+			daemon.imageService.UpdateConfig(
+				newCfg.MaxConcurrentDownloads,
+				newCfg.MaxConcurrentUploads,
+			)
+		}
+		return nil
+	})
 
 	// prepare reload event attributes with updatable configurations
-	attributes["max-concurrent-downloads"] = strconv.Itoa(daemon.configStore.MaxConcurrentDownloads)
-	attributes["max-concurrent-uploads"] = strconv.Itoa(daemon.configStore.MaxConcurrentUploads)
+	attributes["max-concurrent-downloads"] = strconv.Itoa(newCfg.MaxConcurrentDownloads)
+	attributes["max-concurrent-uploads"] = strconv.Itoa(newCfg.MaxConcurrentUploads)
 	logrus.Debug("Reset Max Concurrent Downloads: ", attributes["max-concurrent-downloads"])
 	logrus.Debug("Reset Max Concurrent Uploads: ", attributes["max-concurrent-uploads"])
+	return nil
 }
 
 // reloadMaxDownloadAttempts updates configuration with max concurrent
 // download attempts when a connection is lost and updates the passed attributes
-func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) {
+func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// We always "reset" as the cost is lightweight and easy to maintain.
-	daemon.configStore.MaxDownloadAttempts = config.DefaultDownloadAttempts
+	newCfg.MaxDownloadAttempts = config.DefaultDownloadAttempts
 	if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != 0 {
-		daemon.configStore.MaxDownloadAttempts = conf.MaxDownloadAttempts
+		newCfg.MaxDownloadAttempts = conf.MaxDownloadAttempts
 	}
 
 	// prepare reload event attributes with updatable configurations
-	attributes["max-download-attempts"] = strconv.Itoa(daemon.configStore.MaxDownloadAttempts)
+	attributes["max-download-attempts"] = strconv.Itoa(newCfg.MaxDownloadAttempts)
 	logrus.Debug("Reset Max Download Attempts: ", attributes["max-download-attempts"])
+	return nil
 }
 
 // reloadShutdownTimeout updates configuration with daemon shutdown timeout option
 // and updates the passed attributes
-func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) {
+func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// update corresponding configuration
 	if conf.IsValueSet("shutdown-timeout") {
-		daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout
-		logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
+		newCfg.ShutdownTimeout = conf.ShutdownTimeout
+		logrus.Debugf("Reset Shutdown Timeout: %d", newCfg.ShutdownTimeout)
 	}
 
 	// prepare reload event attributes with updatable configurations
-	attributes["shutdown-timeout"] = strconv.Itoa(daemon.configStore.ShutdownTimeout)
+	attributes["shutdown-timeout"] = strconv.Itoa(newCfg.ShutdownTimeout)
+	return nil
 }
 
 // reloadLabels updates configuration with engine labels
 // and updates the passed attributes
-func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error {
+func (daemon *Daemon) reloadLabels(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// update corresponding configuration
 	if conf.IsValueSet("labels") {
-		daemon.configStore.Labels = conf.Labels
+		newCfg.Labels = conf.Labels
 	}
 
 	// prepare reload event attributes with updatable configurations
-	if daemon.configStore.Labels != nil {
-		labels, err := json.Marshal(daemon.configStore.Labels)
-		if err != nil {
-			return err
-		}
-		attributes["labels"] = string(labels)
-	} else {
-		attributes["labels"] = "[]"
-	}
-
+	attributes["labels"] = marshalAttributeSlice(newCfg.Labels)
 	return nil
 }
 
-// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options
+// reloadRegistryConfig updates the configuration with registry options
 // and updates the passed attributes.
-func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error {
+func (daemon *Daemon) reloadRegistryConfig(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// Update corresponding configuration.
 	if conf.IsValueSet("allow-nondistributable-artifacts") {
-		daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
-		if err := daemon.registryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil {
-			return err
-		}
-	}
-
-	// Prepare reload event attributes with updatable configurations.
-	if daemon.configStore.AllowNondistributableArtifacts != nil {
-		v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts)
-		if err != nil {
-			return err
-		}
-		attributes["allow-nondistributable-artifacts"] = string(v)
-	} else {
-		attributes["allow-nondistributable-artifacts"] = "[]"
+		newCfg.ServiceOptions.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
 	}
-
-	return nil
-}
-
-// reloadInsecureRegistries updates configuration with insecure registry option
-// and updates the passed attributes
-func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error {
-	// update corresponding configuration
 	if conf.IsValueSet("insecure-registries") {
-		daemon.configStore.InsecureRegistries = conf.InsecureRegistries
-		if err := daemon.registryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil {
-			return err
-		}
-	}
-
-	// prepare reload event attributes with updatable configurations
-	if daemon.configStore.InsecureRegistries != nil {
-		insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
-		if err != nil {
-			return err
-		}
-		attributes["insecure-registries"] = string(insecureRegistries)
-	} else {
-		attributes["insecure-registries"] = "[]"
+		newCfg.ServiceOptions.InsecureRegistries = conf.InsecureRegistries
 	}
-
-	return nil
-}
-
-// reloadRegistryMirrors updates configuration with registry mirror options
-// and updates the passed attributes
-func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error {
-	// update corresponding configuration
 	if conf.IsValueSet("registry-mirrors") {
-		daemon.configStore.Mirrors = conf.Mirrors
-		if err := daemon.registryService.LoadMirrors(conf.Mirrors); err != nil {
-			return err
-		}
+		newCfg.ServiceOptions.Mirrors = conf.Mirrors
 	}
 
-	// prepare reload event attributes with updatable configurations
-	if daemon.configStore.Mirrors != nil {
-		mirrors, err := json.Marshal(daemon.configStore.Mirrors)
-		if err != nil {
-			return err
-		}
-		attributes["registry-mirrors"] = string(mirrors)
-	} else {
-		attributes["registry-mirrors"] = "[]"
+	commit, err := daemon.registryService.ReplaceConfig(newCfg.ServiceOptions)
+	if err != nil {
+		return err
 	}
+	txn.OnCommit(func() error { commit(); return nil })
+
+	attributes["allow-nondistributable-artifacts"] = marshalAttributeSlice(newCfg.ServiceOptions.AllowNondistributableArtifacts)
+	attributes["insecure-registries"] = marshalAttributeSlice(newCfg.ServiceOptions.InsecureRegistries)
+	attributes["registry-mirrors"] = marshalAttributeSlice(newCfg.ServiceOptions.Mirrors)
 
 	return nil
 }
 
 // reloadLiveRestore updates configuration with live restore option
 // and updates the passed attributes
-func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error {
+func (daemon *Daemon) reloadLiveRestore(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// update corresponding configuration
 	if conf.IsValueSet("live-restore") {
-		daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled
+		newCfg.LiveRestoreEnabled = conf.LiveRestoreEnabled
 	}
 
 	// prepare reload event attributes with updatable configurations
-	attributes["live-restore"] = strconv.FormatBool(daemon.configStore.LiveRestoreEnabled)
+	attributes["live-restore"] = strconv.FormatBool(newCfg.LiveRestoreEnabled)
 	return nil
 }
 
 // reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid
-func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error {
-	if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") ||
-		conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 {
-		// If there is no config make sure that the diagnostic is off
-		if daemon.netController != nil {
-			daemon.netController.StopDiagnostic()
+func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
+	txn.OnCommit(func() error {
+		if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") ||
+			conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 {
+			// If there is no config make sure that the diagnostic is off
+			if daemon.netController != nil {
+				daemon.netController.StopDiagnostic()
+			}
+			return nil
 		}
+		// Enable the network diagnostic if the flag is set with a valid port within the range
+		logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server")
+		daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort)
 		return nil
-	}
-	// Enable the network diagnostic if the flag is set with a valid port within the range
-	logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server")
-	daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort)
-
+	})
 	return nil
 }
 
 // reloadFeatures updates configuration with enabled/disabled features
-func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) {
+func (daemon *Daemon) reloadFeatures(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	// update corresponding configuration
 	// note that we allow features option to be entirely unset
-	daemon.configStore.Features = conf.Features
+	newCfg.Features = conf.Features
 
 	// prepare reload event attributes with updatable configurations
-	attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features)
+	attributes["features"] = fmt.Sprintf("%v", newCfg.Features)
+	return nil
 }
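
To make the two-phase protocol concrete, a single reload step looks like this from the outside: phase one validates and mutates only the newCfg copy, any side effect is deferred with OnCommit, and Reload either stores newCfg and commits or discards it and rolls back. The step below is hypothetical; the daemon does not reload the log level through this path in this diff, it is shown only to illustrate the flow inside the daemon package.

import (
	"github.com/sirupsen/logrus"

	"github.com/docker/docker/daemon/config"
)

func (daemon *Daemon) reloadLogLevel(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
	if conf.IsValueSet("log-level") {
		if _, err := logrus.ParseLevel(conf.LogLevel); err != nil {
			return err // phase one failed: Reload rolls back and the live config is untouched
		}
		newCfg.LogLevel = conf.LogLevel // mutate only the copy
	}
	txn.OnCommit(func() error {
		// phase two: runs only after newCfg has become the live configuration
		if newCfg.LogLevel == "" {
			return nil
		}
		lvl, err := logrus.ParseLevel(newCfg.LogLevel)
		if err != nil {
			return err // purely informational; Commit collates errors
		}
		logrus.SetLevel(lvl)
		return nil
	})
	attributes["log-level"] = newCfg.LogLevel
	return nil
}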

+ 24 - 29
daemon/reload_test.go

@@ -19,15 +19,24 @@ func muteLogs() {
 	logrus.SetLevel(logrus.ErrorLevel)
 }
 
-func TestDaemonReloadLabels(t *testing.T) {
+func newDaemonForReloadT(t *testing.T, cfg *config.Config) *Daemon {
+	t.Helper()
 	daemon := &Daemon{
-		configStore: &config.Config{
-			CommonConfig: config.CommonConfig{
-				Labels: []string{"foo:bar"},
-			},
-		},
 		imageService: images.NewImageService(images.ImageServiceConfig{}),
 	}
+	var err error
+	daemon.registryService, err = registry.NewService(registry.ServiceOptions{})
+	assert.Assert(t, err)
+	daemon.configStore.Store(&configStore{Config: *cfg})
+	return daemon
+}
+
+func TestDaemonReloadLabels(t *testing.T) {
+	daemon := newDaemonForReloadT(t, &config.Config{
+		CommonConfig: config.CommonConfig{
+			Labels: []string{"foo:bar"},
+		},
+	})
 	muteLogs()
 
 	valuesSets := make(map[string]interface{})
@@ -43,17 +52,14 @@ func TestDaemonReloadLabels(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	label := daemon.configStore.Labels[0]
+	label := daemon.config().Labels[0]
 	if label != "foo:baz" {
 		t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
 	}
 }
 
 func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) {
-	daemon := &Daemon{
-		configStore:  &config.Config{},
-		imageService: images.NewImageService(images.ImageServiceConfig{}),
-	}
+	daemon := newDaemonForReloadT(t, &config.Config{})
 	muteLogs()
 
 	var err error
@@ -125,8 +131,6 @@ func TestDaemonReloadMirrors(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	daemon.configStore = &config.Config{}
-
 	type pair struct {
 		valid   bool
 		mirrors []string
@@ -228,8 +232,6 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	daemon.configStore = &config.Config{}
-
 	insecureRegistries := []string{
 		"127.0.0.0/8",         // this will be kept
 		"10.10.1.11:5000",     // this will be kept
@@ -308,17 +310,13 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) {
 }
 
 func TestDaemonReloadNotAffectOthers(t *testing.T) {
-	daemon := &Daemon{
-		imageService: images.NewImageService(images.ImageServiceConfig{}),
-	}
-	muteLogs()
-
-	daemon.configStore = &config.Config{
+	daemon := newDaemonForReloadT(t, &config.Config{
 		CommonConfig: config.CommonConfig{
 			Labels: []string{"foo:bar"},
 			Debug:  true,
 		},
-	}
+	})
+	muteLogs()
 
 	valuesSets := make(map[string]interface{})
 	valuesSets["labels"] = "foo:baz"
@@ -333,11 +331,11 @@ func TestDaemonReloadNotAffectOthers(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	label := daemon.configStore.Labels[0]
+	label := daemon.config().Labels[0]
 	if label != "foo:baz" {
 		t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
 	}
-	debug := daemon.configStore.Debug
+	debug := daemon.config().Debug
 	if !debug {
 		t.Fatal("Expected debug 'enabled', got 'disabled'")
 	}
@@ -347,10 +345,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
 	if os.Getuid() != 0 {
 		t.Skip("root required")
 	}
-	daemon := &Daemon{
-		imageService: images.NewImageService(images.ImageServiceConfig{}),
-		configStore:  &config.Config{},
-	}
+	daemon := newDaemonForReloadT(t, &config.Config{})
 
 	enableConfig := &config.Config{
 		CommonConfig: config.CommonConfig{
@@ -361,7 +356,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
 		},
 	}
 
-	netOptions, err := daemon.networkOptions(nil, nil)
+	netOptions, err := daemon.networkOptions(&config.Config{}, nil, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

+ 16 - 23
daemon/reload_unix.go

@@ -6,45 +6,39 @@ import (
 	"bytes"
 	"strconv"
 
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/daemon/config"
 )
 
 // reloadPlatform updates configuration with platform specific options
 // and updates the passed attributes
-func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) error {
-	if err := conf.ValidatePlatformConfig(); err != nil {
-		return err
+func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
+	if conf.DefaultRuntime != "" {
+		newCfg.DefaultRuntime = conf.DefaultRuntime
 	}
-
 	if conf.IsValueSet("runtimes") {
-		// Always set the default one
-		conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: config.DefaultRuntimeBinary}
-		if err := daemon.initRuntimes(conf.Runtimes); err != nil {
-			return err
-		}
-		daemon.configStore.Runtimes = conf.Runtimes
+		newCfg.Config.Runtimes = conf.Runtimes
 	}
-
-	if conf.DefaultRuntime != "" {
-		daemon.configStore.DefaultRuntime = conf.DefaultRuntime
+	var err error
+	newCfg.Runtimes, err = setupRuntimes(&newCfg.Config)
+	if err != nil {
+		return err
 	}
 
 	if conf.IsValueSet("default-shm-size") {
-		daemon.configStore.ShmSize = conf.ShmSize
+		newCfg.ShmSize = conf.ShmSize
 	}
 
 	if conf.CgroupNamespaceMode != "" {
-		daemon.configStore.CgroupNamespaceMode = conf.CgroupNamespaceMode
+		newCfg.CgroupNamespaceMode = conf.CgroupNamespaceMode
 	}
 
 	if conf.IpcMode != "" {
-		daemon.configStore.IpcMode = conf.IpcMode
+		newCfg.IpcMode = conf.IpcMode
 	}
 
 	// Update attributes
 	var runtimeList bytes.Buffer
-	for name, rt := range daemon.configStore.Runtimes {
+	for name, rt := range newCfg.Config.Runtimes {
 		if runtimeList.Len() > 0 {
 			runtimeList.WriteRune(' ')
 		}
@@ -52,10 +46,9 @@ func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]
 	}
 
 	attributes["runtimes"] = runtimeList.String()
-	attributes["default-runtime"] = daemon.configStore.DefaultRuntime
-	attributes["default-shm-size"] = strconv.FormatInt(int64(daemon.configStore.ShmSize), 10)
-	attributes["default-ipc-mode"] = daemon.configStore.IpcMode
-	attributes["default-cgroupns-mode"] = daemon.configStore.CgroupNamespaceMode
-
+	attributes["default-runtime"] = newCfg.DefaultRuntime
+	attributes["default-shm-size"] = strconv.FormatInt(int64(newCfg.ShmSize), 10)
+	attributes["default-ipc-mode"] = newCfg.IpcMode
+	attributes["default-cgroupns-mode"] = newCfg.CgroupNamespaceMode
 	return nil
 }
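
One detail worth isolating from reloadPlatform above: derived state such as the runtimes table is recomputed from the copied config during phase one rather than patched in place, so a validation failure leaves the running daemon untouched. A short sketch of that shape, assuming the configStore.Runtimes field shown elsewhere in this diff (the helper name is illustrative):

// rebuildRuntimes derives the shim table purely from newCfg and fails before
// anything is committed.
func rebuildRuntimes(newCfg *configStore) error {
	rts, err := setupRuntimes(&newCfg.Config)
	if err != nil {
		return err // Reload rolls back; daemon.configStore still holds the old snapshot
	}
	newCfg.Runtimes = rts
	return nil
}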

+ 1 - 1
daemon/reload_windows.go

@@ -4,6 +4,6 @@ import "github.com/docker/docker/daemon/config"
 
 // reloadPlatform updates configuration with platform specific options
 // and updates the passed attributes
-func (daemon *Daemon) reloadPlatform(config *config.Config, attributes map[string]string) error {
+func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
 	return nil
 }

+ 3 - 3
daemon/restart.go

@@ -19,7 +19,7 @@ func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, options
 	if err != nil {
 		return err
 	}
-	err = daemon.containerRestart(ctx, ctr, options)
+	err = daemon.containerRestart(ctx, daemon.config(), ctr, options)
 	if err != nil {
 		return fmt.Errorf("Cannot restart container %s: %v", name, err)
 	}
@@ -30,7 +30,7 @@ func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, options
 // container. When stopping, wait for the given duration in seconds to
 // gracefully stop, before forcefully terminating the container. If
 // given a negative duration, wait forever for a graceful stop.
-func (daemon *Daemon) containerRestart(ctx context.Context, container *container.Container, options containertypes.StopOptions) error {
+func (daemon *Daemon) containerRestart(ctx context.Context, daemonCfg *configStore, container *container.Container, options containertypes.StopOptions) error {
 	// Determine isolation. If not specified in the hostconfig, use daemon default.
 	actualIsolation := container.HostConfig.Isolation
 	if containertypes.Isolation.IsDefault(actualIsolation) {
@@ -61,7 +61,7 @@ func (daemon *Daemon) containerRestart(ctx context.Context, container *container
 		}
 	}
 
-	if err := daemon.containerStart(ctx, container, "", "", true); err != nil {
+	if err := daemon.containerStart(ctx, daemonCfg, container, "", "", true); err != nil {
 		return err
 	}
 

+ 188 - 104
daemon/runtime_unix.go

@@ -4,18 +4,24 @@ package daemon
 
 import (
 	"bytes"
+	"crypto/sha256"
+	"encoding/base32"
 	"encoding/json"
 	"fmt"
+	"io"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strings"
 
+	"github.com/containerd/containerd/plugin"
 	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
-	"github.com/docker/docker/api/types"
+	"github.com/containerd/containerd/runtime/v2/shim"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/libcontainerd/shimopts"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/system"
 	"github.com/opencontainers/runtime-spec/specs-go/features"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -24,23 +30,34 @@ import (
 const (
 	defaultRuntimeName = "runc"
 
-	linuxShimV2 = "io.containerd.runc.v2"
+	// The runtime used to specify the containerd v2 runc shim
+	linuxV2RuntimeName = "io.containerd.runc.v2"
 )
 
-func configureRuntimes(conf *config.Config) {
-	if conf.DefaultRuntime == "" {
-		conf.DefaultRuntime = config.StockRuntimeName
-	}
-	if conf.Runtimes == nil {
-		conf.Runtimes = make(map[string]types.Runtime)
+type shimConfig struct {
+	Shim     string
+	Opts     interface{}
+	Features *features.Features
+
+	// Check whether the shimConfig is valid given the current state of the system.
+	PreflightCheck func() error
+}
+
+type runtimes struct {
+	Default    string
+	configured map[string]*shimConfig
+}
+
+func stockRuntimes() map[string]string {
+	return map[string]string{
+		linuxV2RuntimeName:      defaultRuntimeName,
+		config.StockRuntimeName: defaultRuntimeName,
 	}
-	conf.Runtimes[config.LinuxV2RuntimeName] = types.Runtime{Path: defaultRuntimeName, ShimConfig: defaultV2ShimConfig(conf, defaultRuntimeName)}
-	conf.Runtimes[config.StockRuntimeName] = conf.Runtimes[config.LinuxV2RuntimeName]
 }
 
-func defaultV2ShimConfig(conf *config.Config, runtimePath string) *types.ShimConfig {
-	return &types.ShimConfig{
-		Binary: linuxShimV2,
+func defaultV2ShimConfig(conf *config.Config, runtimePath string) *shimConfig {
+	shim := &shimConfig{
+		Shim: plugin.RuntimeRuncV2,
 		Opts: &v2runcoptions.Options{
 			BinaryName:    runtimePath,
 			Root:          filepath.Join(conf.ExecRoot, "runtime-"+defaultRuntimeName),
@@ -48,138 +65,205 @@ func defaultV2ShimConfig(conf *config.Config, runtimePath string) *types.ShimCon
 			NoPivotRoot:   os.Getenv("DOCKER_RAMDISK") != "",
 		},
 	}
+
+	var featuresStderr bytes.Buffer
+	featuresCmd := exec.Command(runtimePath, "features")
+	featuresCmd.Stderr = &featuresStderr
+	if featuresB, err := featuresCmd.Output(); err != nil {
+		logrus.WithError(err).Warnf("Failed to run %v: %q", featuresCmd.Args, featuresStderr.String())
+	} else {
+		var features features.Features
+		if jsonErr := json.Unmarshal(featuresB, &features); jsonErr != nil {
+			logrus.WithError(jsonErr).Warnf("Failed to unmarshal the output of %v as JSON", featuresCmd.Args)
+		} else {
+			shim.Features = &features
+		}
+	}
+
+	return shim
 }
 
-func (daemon *Daemon) loadRuntimes() error {
-	return daemon.initRuntimes(daemon.configStore.Runtimes)
+func runtimeScriptsDir(cfg *config.Config) string {
+	return filepath.Join(cfg.Root, "runtimes")
 }
 
-func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) {
-	runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
-	runtimeOldDir := runtimeDir + "-old"
-	// Remove old temp directory if any
-	os.RemoveAll(runtimeOldDir)
-	tmpDir, err := os.MkdirTemp(daemon.configStore.Root, "gen-runtimes")
-	if err != nil {
-		return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
+// initRuntimesDir creates a fresh directory where we'll store the runtime
+// scripts (i.e. in order to support runtimeArgs).
+func initRuntimesDir(cfg *config.Config) error {
+	runtimeDir := runtimeScriptsDir(cfg)
+	if err := os.RemoveAll(runtimeDir); err != nil {
+		return err
 	}
-	defer func() {
-		if err != nil {
-			if err1 := os.RemoveAll(tmpDir); err1 != nil {
-				logrus.WithError(err1).WithField("dir", tmpDir).
-					Warn("failed to remove tmp dir")
-			}
-			return
-		}
+	return system.MkdirAll(runtimeDir, 0700)
+}
 
-		if err = os.Rename(runtimeDir, runtimeOldDir); err != nil {
-			logrus.WithError(err).WithField("dir", runtimeDir).
-				Warn("failed to rename runtimes dir to old. Will try to removing it")
-			if err = os.RemoveAll(runtimeDir); err != nil {
-				logrus.WithError(err).WithField("dir", runtimeDir).
-					Warn("failed to remove old runtimes dir")
-				return
-			}
-		}
-		if err = os.Rename(tmpDir, runtimeDir); err != nil {
-			err = errors.Wrap(err, "failed to setup runtimes dir, new containers may not start")
-			return
-		}
-		if err = os.RemoveAll(runtimeOldDir); err != nil {
-			logrus.WithError(err).WithField("dir", runtimeOldDir).
-				Warn("failed to remove old runtimes dir")
+func setupRuntimes(cfg *config.Config) (runtimes, error) {
+	if _, ok := cfg.Runtimes[config.StockRuntimeName]; ok {
+		return runtimes{}, errors.Errorf("runtime name '%s' is reserved", config.StockRuntimeName)
+	}
+
+	newrt := runtimes{
+		Default:    cfg.DefaultRuntime,
+		configured: make(map[string]*shimConfig),
+	}
+	for name, path := range stockRuntimes() {
+		newrt.configured[name] = defaultV2ShimConfig(cfg, path)
+	}
+
+	if newrt.Default != "" {
+		_, isStock := newrt.configured[newrt.Default]
+		_, isConfigured := cfg.Runtimes[newrt.Default]
+		if !isStock && !isConfigured && !isPermissibleC8dRuntimeName(newrt.Default) {
+			return runtimes{}, errors.Errorf("specified default runtime '%s' does not exist", newrt.Default)
 		}
-	}()
+	} else {
+		newrt.Default = config.StockRuntimeName
+	}
 
-	for name := range runtimes {
-		rt := runtimes[name]
+	dir := runtimeScriptsDir(cfg)
+	for name, rt := range cfg.Runtimes {
+		var c *shimConfig
 		if rt.Path == "" && rt.Type == "" {
-			return errors.Errorf("runtime %s: either a runtimeType or a path must be configured", name)
+			return runtimes{}, errors.Errorf("runtime %s: either a runtimeType or a path must be configured", name)
 		}
 		if rt.Path != "" {
 			if rt.Type != "" {
-				return errors.Errorf("runtime %s: cannot configure both path and runtimeType for the same runtime", name)
+				return runtimes{}, errors.Errorf("runtime %s: cannot configure both path and runtimeType for the same runtime", name)
 			}
 			if len(rt.Options) > 0 {
-				return errors.Errorf("runtime %s: options cannot be used with a path runtime", name)
+				return runtimes{}, errors.Errorf("runtime %s: options cannot be used with a path runtime", name)
 			}
 
-			if len(rt.Args) > 0 {
-				script := filepath.Join(tmpDir, name)
-				content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " "))
-				if err := os.WriteFile(script, []byte(content), 0700); err != nil {
-					return err
+			binaryName := rt.Path
+			needsWrapper := len(rt.Args) > 0
+			if needsWrapper {
+				var err error
+				binaryName, err = wrapRuntime(dir, name, rt.Path, rt.Args)
+				if err != nil {
+					return runtimes{}, err
 				}
 			}
-			rt.ShimConfig = defaultV2ShimConfig(daemon.configStore, daemon.rewriteRuntimePath(name, rt.Path, rt.Args))
-			var featuresStderr bytes.Buffer
-			featuresCmd := exec.Command(rt.Path, append(rt.Args, "features")...)
-			featuresCmd.Stderr = &featuresStderr
-			if featuresB, err := featuresCmd.Output(); err != nil {
-				logrus.WithError(err).Warnf("Failed to run %v: %q", featuresCmd.Args, featuresStderr.String())
-			} else {
-				var features features.Features
-				if jsonErr := json.Unmarshal(featuresB, &features); jsonErr != nil {
-					logrus.WithError(err).Warnf("Failed to unmarshal the output of %v as a JSON", featuresCmd.Args)
-				} else {
-					rt.Features = &features
+			c = defaultV2ShimConfig(cfg, binaryName)
+			if needsWrapper {
+				path := rt.Path
+				c.PreflightCheck = func() error {
+					// Check that the runtime path actually exists so that we can return a well known error.
+					_, err := exec.LookPath(path)
+					return errors.Wrap(err, "error while looking up the specified runtime path")
 				}
 			}
 		} else {
 			if len(rt.Args) > 0 {
-				return errors.Errorf("runtime %s: args cannot be used with a runtimeType runtime", name)
+				return runtimes{}, errors.Errorf("runtime %s: args cannot be used with a runtimeType runtime", name)
 			}
 			// Unlike implicit runtimes, there is no restriction on configuring a shim by path.
-			rt.ShimConfig = &types.ShimConfig{Binary: rt.Type}
+			c = &shimConfig{Shim: rt.Type}
 			if len(rt.Options) > 0 {
 				// It has to be a pointer type or there'll be a panic in containerd/typeurl when we try to start the container.
-				rt.ShimConfig.Opts, err = shimopts.Generate(rt.Type, rt.Options)
+				var err error
+				c.Opts, err = shimopts.Generate(rt.Type, rt.Options)
 				if err != nil {
-					return errors.Wrapf(err, "runtime %v", name)
+					return runtimes{}, errors.Wrapf(err, "runtime %v", name)
 				}
 			}
 		}
-		runtimes[name] = rt
+		newrt.configured[name] = c
 	}
-	return nil
+
+	return newrt, nil
 }
 
-// rewriteRuntimePath is used for runtimes which have custom arguments supplied.
-// This is needed because the containerd API only calls the OCI runtime binary, there is no options for extra arguments.
-// To support this case, the daemon wraps the specified runtime in a script that passes through those arguments.
-func (daemon *Daemon) rewriteRuntimePath(name, p string, args []string) string {
-	if len(args) == 0 {
-		return p
-	}
+// A non-standard Base32 encoding which lacks vowels to avoid accidentally
+// spelling naughty words. Don't use this to encode any data which requires
+// compatibility with anything outside of the currently-running process.
+var base32Disemvoweled = base32.NewEncoding("0123456789BCDFGHJKLMNPQRSTVWXYZ-")
 
-	return filepath.Join(daemon.configStore.Root, "runtimes", name)
+// wrapRuntime writes a shell script to dir which will execute binary with args
+// concatenated to the script's argv. This is needed because the
+// io.containerd.runc.v2 shim has no options for passing extra arguments to the
+// runtime binary.
+func wrapRuntime(dir, name, binary string, args []string) (string, error) {
+	var wrapper bytes.Buffer
+	sum := sha256.New()
+	_, _ = fmt.Fprintf(io.MultiWriter(&wrapper, sum), "#!/bin/sh\n%s %s $@\n", binary, strings.Join(args, " "))
+	// Generate a consistent name for the wrapper script derived from the
+	// contents so that multiple wrapper scripts can coexist with the same
+	// base name. The existing scripts might still be referenced by running
+	// containers.
+	suffix := base32Disemvoweled.EncodeToString(sum.Sum(nil))
+	scriptPath := filepath.Join(dir, name+"."+suffix)
+	if err := ioutils.AtomicWriteFile(scriptPath, wrapper.Bytes(), 0700); err != nil {
+		return "", err
+	}
+	return scriptPath, nil
 }
 
-func (daemon *Daemon) getRuntime(name string) (shim string, opts interface{}, err error) {
-	rt := daemon.configStore.GetRuntime(name)
-	if rt == nil {
-		if !config.IsPermissibleC8dRuntimeName(name) {
-			return "", nil, errdefs.InvalidParameter(errors.Errorf("unknown or invalid runtime name: %s", name))
-		}
-		return name, nil, nil
+// Get returns the containerd runtime and options for name, suitable to pass
+// into containerd.WithRuntime(). The runtime and options for the default
+// runtime are returned when name is the empty string.
+func (r *runtimes) Get(name string) (string, interface{}, error) {
+	if name == "" {
+		name = r.Default
 	}
 
-	if len(rt.Args) > 0 {
-		// Check that the path of the runtime which the script wraps actually exists so
-		// that we can return a well known error which references the configured path
-		// instead of the wrapper script's.
-		if _, err := exec.LookPath(rt.Path); err != nil {
-			return "", nil, errors.Wrap(err, "error while looking up the specified runtime path")
+	rt := r.configured[name]
+	if rt != nil {
+		if rt.PreflightCheck != nil {
+			if err := rt.PreflightCheck(); err != nil {
+				return "", nil, err
+			}
 		}
+		return rt.Shim, rt.Opts, nil
 	}
 
-	if rt.ShimConfig == nil {
-		// Should never happen as daemon.initRuntimes always sets
-		// ShimConfig and config reloading is synchronized.
-		err := errdefs.System(errors.Errorf("BUG: runtime %s: rt.ShimConfig == nil", name))
-		logrus.Error(err)
-		return "", nil, err
+	if !isPermissibleC8dRuntimeName(name) {
+		return "", nil, errdefs.InvalidParameter(errors.Errorf("unknown or invalid runtime name: %s", name))
+	}
+	return name, nil, nil
+}
+
+func (r *runtimes) Features(name string) *features.Features {
+	if name == "" {
+		name = r.Default
 	}
 
-	return rt.ShimConfig.Binary, rt.ShimConfig.Opts, nil
+	rt := r.configured[name]
+	if rt != nil {
+		return rt.Features
+	}
+	return nil
+}
+
+// isPermissibleC8dRuntimeName tests whether name is safe to pass into
+// containerd as a runtime name, and whether the name is well-formed.
+// It does not check if the runtime is installed.
+//
+// A runtime name containing slash characters is interpreted by containerd as
+// the path to a runtime binary. If we allowed this, anyone with Engine API
+// access could get containerd to execute an arbitrary binary as root. Although
+// Engine API access is already equivalent to root on the host, the runtime name
+// has not historically been a vector to run arbitrary code as root, so users are
+// not expecting it to become one.
+//
+// This restriction is not configurable. There are viable workarounds for
+// legitimate use cases: administrators and runtime developers can make runtimes
+// available for use with Docker by installing them onto PATH following the
+// [binary naming convention] for containerd Runtime v2.
+//
+// [binary naming convention]: https://github.com/containerd/containerd/blob/main/runtime/v2/README.md#binary-naming
+func isPermissibleC8dRuntimeName(name string) bool {
+	// containerd uses a rather permissive test to validate runtime names:
+	//
+	//   - Any name for which filepath.IsAbs(name) returns true is interpreted as
+	//     the absolute path to a shim binary. We want to block this behaviour.
+	//   - Any name which contains at least one '.' character and no '/' characters
+	//     and does not begin with a '.' character is a valid runtime name. The shim
+	//     binary name is derived from the final two components of the name and
+	//     searched for on the PATH. The name "a.." is technically valid per
+	//     containerd's implementation: it would resolve to a binary named
+	//     "containerd-shim---".
+	//
+	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/manager.go#L297-L317
+	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/shim/util.go#L83-L93
+	return !filepath.IsAbs(name) && !strings.ContainsRune(name, '/') && shim.BinaryName(name) != ""
 }
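
A concrete illustration of the wrapper naming scheme above: the following standalone sketch (not part of this change; the runtime name, binary path, and arguments are invented) derives a wrapper file name the same way wrapRuntime does, by hashing the script body and appending the vowel-free Base32 digest. Distinct path/args combinations therefore get distinct, stable file names, and wrappers referenced by already-running containers are never overwritten.

	package main

	import (
		"crypto/sha256"
		"encoding/base32"
		"fmt"
		"path/filepath"
		"strings"
	)

	// Same 32-character, vowel-free alphabet as base32Disemvoweled above.
	var alphabet = base32.NewEncoding("0123456789BCDFGHJKLMNPQRSTVWXYZ-")

	func main() {
		binary := "/usr/local/bin/my-runc"   // illustrative
		args := []string{"--systemd-cgroup"} // illustrative

		// The wrapper body: run the configured binary with the configured
		// args plus whatever containerd appends at invocation time.
		body := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", binary, strings.Join(args, " "))

		sum := sha256.Sum256([]byte(body))
		suffix := alphabet.EncodeToString(sum[:])

		// e.g. /var/lib/docker/runtimes/my-runtime.<digest>
		fmt.Println(filepath.Join("/var/lib/docker/runtimes", "my-runtime."+suffix))
	}

Changing the configured path or args changes the digest and therefore the file name, which is what lets TestRuntimeWrapping further down in this diff verify that a config reload leaves existing wrapper scripts untouched.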

+ 281 - 85
daemon/runtime_unix_test.go

@@ -3,12 +3,14 @@
 package daemon
 
 import (
+	"io/fs"
 	"os"
-	"path/filepath"
+	"strings"
 	"testing"
 
 	"github.com/containerd/containerd/plugin"
 	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
+	"github.com/imdario/mergo"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 
@@ -17,81 +19,169 @@ import (
 	"github.com/docker/docker/errdefs"
 )
 
-func TestInitRuntimes_InvalidConfigs(t *testing.T) {
+func TestSetupRuntimes(t *testing.T) {
 	cases := []struct {
 		name      string
-		runtime   types.Runtime
+		config    *config.Config
 		expectErr string
 	}{
 		{
-			name:      "Empty",
+			name: "Empty",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {},
+				},
+			},
 			expectErr: "either a runtimeType or a path must be configured",
 		},
 		{
-			name:      "ArgsOnly",
-			runtime:   types.Runtime{Args: []string{"foo", "bar"}},
+			name: "ArgsOnly",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {Args: []string{"foo", "bar"}},
+				},
+			},
 			expectErr: "either a runtimeType or a path must be configured",
 		},
 		{
-			name:      "OptionsOnly",
-			runtime:   types.Runtime{Options: map[string]interface{}{"hello": "world"}},
+			name: "OptionsOnly",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {Options: map[string]interface{}{"hello": "world"}},
+				},
+			},
 			expectErr: "either a runtimeType or a path must be configured",
 		},
 		{
-			name:      "PathAndType",
-			runtime:   types.Runtime{Path: "/bin/true", Type: "io.containerd.runsc.v1"},
+			name: "PathAndType",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {Path: "/bin/true", Type: "io.containerd.runsc.v1"},
+				},
+			},
 			expectErr: "cannot configure both",
 		},
 		{
-			name:      "PathAndOptions",
-			runtime:   types.Runtime{Path: "/bin/true", Options: map[string]interface{}{"a": "b"}},
+			name: "PathAndOptions",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {Path: "/bin/true", Options: map[string]interface{}{"a": "b"}},
+				},
+			},
 			expectErr: "options cannot be used with a path runtime",
 		},
 		{
-			name:      "TypeAndArgs",
-			runtime:   types.Runtime{Type: "io.containerd.runsc.v1", Args: []string{"--version"}},
+			name: "TypeAndArgs",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {Type: "io.containerd.runsc.v1", Args: []string{"--version"}},
+				},
+			},
 			expectErr: "args cannot be used with a runtimeType runtime",
 		},
 		{
 			name: "PathArgsOptions",
-			runtime: types.Runtime{
-				Path:    "/bin/true",
-				Args:    []string{"--version"},
-				Options: map[string]interface{}{"hmm": 3},
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {
+						Path:    "/bin/true",
+						Args:    []string{"--version"},
+						Options: map[string]interface{}{"hmm": 3},
+					},
+				},
 			},
 			expectErr: "options cannot be used with a path runtime",
 		},
 		{
 			name: "TypeOptionsArgs",
-			runtime: types.Runtime{
-				Type:    "io.containerd.kata.v2",
-				Options: map[string]interface{}{"a": "b"},
-				Args:    []string{"--help"},
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {
+						Type:    "io.containerd.kata.v2",
+						Options: map[string]interface{}{"a": "b"},
+						Args:    []string{"--help"},
+					},
+				},
 			},
 			expectErr: "args cannot be used with a runtimeType runtime",
 		},
 		{
 			name: "PathArgsTypeOptions",
-			runtime: types.Runtime{
-				Path:    "/bin/true",
-				Args:    []string{"foo"},
-				Type:    "io.containerd.runsc.v1",
-				Options: map[string]interface{}{"a": "b"},
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"myruntime": {
+						Path:    "/bin/true",
+						Args:    []string{"foo"},
+						Type:    "io.containerd.runsc.v1",
+						Options: map[string]interface{}{"a": "b"},
+					},
+				},
 			},
 			expectErr: "cannot configure both",
 		},
+		{
+			name: "CannotOverrideStockRuntime",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					config.StockRuntimeName: {},
+				},
+			},
+			expectErr: `runtime name 'runc' is reserved`,
+		},
+		{
+			name: "SetStockRuntimeAsDefault",
+			config: &config.Config{
+				CommonConfig: config.CommonConfig{
+					DefaultRuntime: config.StockRuntimeName,
+				},
+			},
+		},
+		{
+			name: "SetLinuxRuntimeAsDefault",
+			config: &config.Config{
+				CommonConfig: config.CommonConfig{
+					DefaultRuntime: linuxV2RuntimeName,
+				},
+			},
+		},
+		{
+			name: "CannotSetBogusRuntimeAsDefault",
+			config: &config.Config{
+				CommonConfig: config.CommonConfig{
+					DefaultRuntime: "notdefined",
+				},
+			},
+			expectErr: "specified default runtime 'notdefined' does not exist",
+		},
+		{
+			name: "SetDefinedRuntimeAsDefault",
+			config: &config.Config{
+				Runtimes: map[string]types.Runtime{
+					"some-runtime": {
+						Path: "/usr/local/bin/file-not-found",
+					},
+				},
+				CommonConfig: config.CommonConfig{
+					DefaultRuntime: "some-runtime",
+				},
+			},
+		},
 	}
-
-	for _, tt := range cases {
-		t.Run(tt.name, func(t *testing.T) {
+	for _, tc := range cases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
 			cfg, err := config.New()
 			assert.NilError(t, err)
-			d := &Daemon{configStore: cfg}
-			d.configStore.Root = t.TempDir()
-			assert.Assert(t, os.Mkdir(filepath.Join(d.configStore.Root, "runtimes"), 0700))
+			cfg.Root = t.TempDir()
+			assert.NilError(t, mergo.Merge(cfg, tc.config, mergo.WithOverride))
+			assert.Assert(t, initRuntimesDir(cfg))
 
-			err = d.initRuntimes(map[string]types.Runtime{"myruntime": tt.runtime})
-			assert.Check(t, is.ErrorContains(err, tt.expectErr))
+			_, err = setupRuntimes(cfg)
+			if tc.expectErr == "" {
+				assert.NilError(t, err)
+			} else {
+				assert.ErrorContains(t, err, tc.expectErr)
+			}
 		})
 	}
 }
@@ -124,49 +214,51 @@ func TestGetRuntime(t *testing.T) {
 	cfg, err := config.New()
 	assert.NilError(t, err)
 
-	d := &Daemon{configStore: cfg}
-	d.configStore.Root = t.TempDir()
-	assert.Assert(t, os.Mkdir(filepath.Join(d.configStore.Root, "runtimes"), 0700))
-	d.configStore.Runtimes = map[string]types.Runtime{
+	cfg.Root = t.TempDir()
+	cfg.Runtimes = map[string]types.Runtime{
 		configuredRtName:         configuredRuntime,
 		rtWithArgsName:           rtWithArgs,
 		shimWithOptsName:         shimWithOpts,
 		shimAliasName:            shimAlias,
 		configuredShimByPathName: configuredShimByPath,
 	}
-	configureRuntimes(d.configStore)
-	assert.Assert(t, d.loadRuntimes())
+	assert.NilError(t, initRuntimesDir(cfg))
+	runtimes, err := setupRuntimes(cfg)
+	assert.NilError(t, err)
 
-	stockRuntime, ok := d.configStore.Runtimes[config.StockRuntimeName]
+	stockRuntime, ok := runtimes.configured[config.StockRuntimeName]
 	assert.Assert(t, ok, "stock runtime could not be found (test needs to be updated)")
+	stockRuntime.Features = nil
 
-	configdOpts := *stockRuntime.ShimConfig.Opts.(*v2runcoptions.Options)
+	configdOpts := *stockRuntime.Opts.(*v2runcoptions.Options)
 	configdOpts.BinaryName = configuredRuntime.Path
+	wantConfigdRuntime := &shimConfig{
+		Shim: stockRuntime.Shim,
+		Opts: &configdOpts,
+	}
 
 	for _, tt := range []struct {
 		name, runtime string
-		wantShim      string
-		wantOpts      interface{}
+		want          *shimConfig
 	}{
 		{
-			name:     "StockRuntime",
-			runtime:  config.StockRuntimeName,
-			wantShim: stockRuntime.ShimConfig.Binary,
-			wantOpts: stockRuntime.ShimConfig.Opts,
+			name:    "StockRuntime",
+			runtime: config.StockRuntimeName,
+			want:    stockRuntime,
 		},
 		{
-			name:     "ShimName",
-			runtime:  "io.containerd.my-shim.v42",
-			wantShim: "io.containerd.my-shim.v42",
+			name:    "ShimName",
+			runtime: "io.containerd.my-shim.v42",
+			want:    &shimConfig{Shim: "io.containerd.my-shim.v42"},
 		},
 		{
 			// containerd is pretty loose about the format of runtime names. Perhaps too
 			// loose. The only requirements are that the name contain a dot and (depending
 			// on the containerd version) not start with a dot. It does not enforce any
 			// particular format of the dot-delimited components of the name.
-			name:     "VersionlessShimName",
-			runtime:  "io.containerd.my-shim",
-			wantShim: "io.containerd.my-shim",
+			name:    "VersionlessShimName",
+			runtime: "io.containerd.my-shim",
+			want:    &shimConfig{Shim: "io.containerd.my-shim"},
 		},
 		{
 			name:    "IllformedShimName",
@@ -175,6 +267,7 @@ func TestGetRuntime(t *testing.T) {
 		{
 			name:    "EmptyString",
 			runtime: "",
+			want:    stockRuntime,
 		},
 		{
 			name:    "PathToShim",
@@ -189,49 +282,152 @@ func TestGetRuntime(t *testing.T) {
 			runtime: "my/io.containerd.runc.v2",
 		},
 		{
-			name:     "ConfiguredRuntime",
-			runtime:  configuredRtName,
-			wantShim: stockRuntime.ShimConfig.Binary,
-			wantOpts: &configdOpts,
+			name:    "ConfiguredRuntime",
+			runtime: configuredRtName,
+			want:    wantConfigdRuntime,
 		},
 		{
-			name:     "RuntimeWithArgs",
-			runtime:  rtWithArgsName,
-			wantShim: stockRuntime.ShimConfig.Binary,
-			wantOpts: defaultV2ShimConfig(
-				d.configStore,
-				d.rewriteRuntimePath(
-					rtWithArgsName,
-					rtWithArgs.Path,
-					rtWithArgs.Args)).Opts,
-		},
-		{
-			name:     "ShimWithOpts",
-			runtime:  shimWithOptsName,
-			wantShim: shimWithOpts.Type,
-			wantOpts: &v2runcoptions.Options{IoUid: 42},
+			name:    "ShimWithOpts",
+			runtime: shimWithOptsName,
+			want: &shimConfig{
+				Shim: shimWithOpts.Type,
+				Opts: &v2runcoptions.Options{IoUid: 42},
+			},
 		},
 		{
-			name:     "ShimAlias",
-			runtime:  shimAliasName,
-			wantShim: shimAlias.Type,
+			name:    "ShimAlias",
+			runtime: shimAliasName,
+			want:    &shimConfig{Shim: shimAlias.Type},
 		},
 		{
-			name:     "ConfiguredShimByPath",
-			runtime:  configuredShimByPathName,
-			wantShim: configuredShimByPath.Type,
+			name:    "ConfiguredShimByPath",
+			runtime: configuredShimByPathName,
+			want:    &shimConfig{Shim: configuredShimByPath.Type},
 		},
 	} {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			gotShim, gotOpts, err := d.getRuntime(tt.runtime)
-			assert.Check(t, is.Equal(gotShim, tt.wantShim))
-			assert.Check(t, is.DeepEqual(gotOpts, tt.wantOpts))
-			if tt.wantShim != "" {
+			shim, opts, err := runtimes.Get(tt.runtime)
+			if tt.want != nil {
 				assert.Check(t, err)
+				got := &shimConfig{Shim: shim, Opts: opts}
+				assert.Check(t, is.DeepEqual(got, tt.want))
 			} else {
-				assert.Check(t, errdefs.IsInvalidParameter(err))
+				assert.Check(t, is.Equal(shim, ""))
+				assert.Check(t, is.Nil(opts))
+				assert.Check(t, errdefs.IsInvalidParameter(err), "[%T] %[1]v", err)
+			}
+		})
+	}
+	t.Run("RuntimeWithArgs", func(t *testing.T) {
+		shim, opts, err := runtimes.Get(rtWithArgsName)
+		assert.Check(t, err)
+		assert.Check(t, is.Equal(shim, stockRuntime.Shim))
+		runcopts, ok := opts.(*v2runcoptions.Options)
+		if assert.Check(t, ok, "runtimes.Get() opts = type %T, want *v2runcoptions.Options", opts) {
+			wrapper, err := os.ReadFile(runcopts.BinaryName)
+			if assert.Check(t, err) {
+				assert.Check(t, is.Contains(string(wrapper),
+					strings.Join(append([]string{rtWithArgs.Path}, rtWithArgs.Args...), " ")))
 			}
+		}
+	})
+}
+
+func TestGetRuntime_PreflightCheck(t *testing.T) {
+	cfg, err := config.New()
+	assert.NilError(t, err)
+
+	cfg.Root = t.TempDir()
+	cfg.Runtimes = map[string]types.Runtime{
+		"path-only": {
+			Path: "/usr/local/bin/file-not-found",
+		},
+		"with-args": {
+			Path: "/usr/local/bin/file-not-found",
+			Args: []string{"--arg"},
+		},
+	}
+	assert.NilError(t, initRuntimesDir(cfg))
+	runtimes, err := setupRuntimes(cfg)
+	assert.NilError(t, err, "runtime paths should not be validated during setupRuntimes()")
+
+	t.Run("PathOnly", func(t *testing.T) {
+		_, _, err := runtimes.Get("path-only")
+		assert.NilError(t, err, "custom runtimes without wrapper scripts should not have pre-flight checks")
+	})
+	t.Run("WithArgs", func(t *testing.T) {
+		_, _, err := runtimes.Get("with-args")
+		assert.ErrorIs(t, err, fs.ErrNotExist)
+	})
+}
+
+// TestRuntimeWrapping checks that reloading runtime config does not delete or
+// modify existing wrapper scripts, which could break lifecycle management of
+// existing containers.
+func TestRuntimeWrapping(t *testing.T) {
+	cfg, err := config.New()
+	assert.NilError(t, err)
+	cfg.Root = t.TempDir()
+	cfg.Runtimes = map[string]types.Runtime{
+		"change-args": {
+			Path: "/bin/true",
+			Args: []string{"foo", "bar"},
+		},
+		"dupe": {
+			Path: "/bin/true",
+			Args: []string{"foo", "bar"},
+		},
+		"change-path": {
+			Path: "/bin/true",
+			Args: []string{"baz"},
+		},
+		"drop-args": {
+			Path: "/bin/true",
+			Args: []string{"some", "arguments"},
+		},
+		"goes-away": {
+			Path: "/bin/true",
+			Args: []string{"bye"},
+		},
+	}
+	assert.NilError(t, initRuntimesDir(cfg))
+	rt, err := setupRuntimes(cfg)
+	assert.Check(t, err)
+
+	type WrapperInfo struct{ BinaryName, Content string }
+	wrappers := make(map[string]WrapperInfo)
+	for name := range cfg.Runtimes {
+		_, opts, err := rt.Get(name)
+		if assert.Check(t, err, "rt.Get(%q)", name) {
+			binary := opts.(*v2runcoptions.Options).BinaryName
+			content, err := os.ReadFile(binary)
+			assert.Check(t, err, "could not read wrapper script contents for runtime %q", binary)
+			wrappers[name] = WrapperInfo{BinaryName: binary, Content: string(content)}
+		}
+	}
+
+	cfg.Runtimes["change-args"] = types.Runtime{
+		Path: cfg.Runtimes["change-args"].Path,
+		Args: []string{"baz", "quux"},
+	}
+	cfg.Runtimes["change-path"] = types.Runtime{
+		Path: "/bin/false",
+		Args: cfg.Runtimes["change-path"].Args,
+	}
+	cfg.Runtimes["drop-args"] = types.Runtime{
+		Path: cfg.Runtimes["drop-args"].Path,
+	}
+	delete(cfg.Runtimes, "goes-away")
+
+	_, err = setupRuntimes(cfg)
+	assert.Check(t, err)
+
+	for name, info := range wrappers {
+		t.Run(name, func(t *testing.T) {
+			content, err := os.ReadFile(info.BinaryName)
+			assert.NilError(t, err)
+			assert.DeepEqual(t, info.Content, string(content))
 		})
 	}
 }

+ 13 - 1
daemon/runtime_windows.go

@@ -2,8 +2,20 @@ package daemon
 
 import (
 	"errors"
+
+	"github.com/docker/docker/daemon/config"
 )
 
-func (daemon *Daemon) getRuntime(name string) (shim string, opts interface{}, err error) {
+type runtimes struct{}
+
+func (r *runtimes) Get(name string) (string, interface{}, error) {
 	return "", nil, errors.New("not implemented")
 }
+
+func initRuntimesDir(*config.Config) error {
+	return nil
+}
+
+func setupRuntimes(*config.Config) (runtimes, error) {
+	return runtimes{}, nil
+}

+ 11 - 10
daemon/start.go

@@ -17,7 +17,8 @@ import (
 
 // ContainerStart starts a container.
 func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
-	if checkpoint != "" && !daemon.HasExperimental() {
+	daemonCfg := daemon.config()
+	if checkpoint != "" && !daemonCfg.Experimental {
 		return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
 	}
 
@@ -55,7 +56,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
 		if hostConfig != nil {
 			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
 			oldNetworkMode := ctr.HostConfig.NetworkMode
-			if err := daemon.setSecurityOptions(ctr, hostConfig); err != nil {
+			if err := daemon.setSecurityOptions(&daemonCfg.Config, ctr, hostConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
 			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
@@ -83,24 +84,24 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
 
 	// check if hostConfig is in line with the current system settings.
 	// It may happen cgroups are umounted or the like.
-	if _, err = daemon.verifyContainerSettings(ctr.HostConfig, nil, false); err != nil {
+	if _, err = daemon.verifyContainerSettings(daemonCfg, ctr.HostConfig, nil, false); err != nil {
 		return errdefs.InvalidParameter(err)
 	}
 	// Adapt for old containers in case we have updates in this function and
 	// old containers never have chance to call the new function in create stage.
 	if hostConfig != nil {
-		if err := daemon.adaptContainerSettings(ctr.HostConfig, false); err != nil {
+		if err := daemon.adaptContainerSettings(&daemonCfg.Config, ctr.HostConfig, false); err != nil {
 			return errdefs.InvalidParameter(err)
 		}
 	}
-	return daemon.containerStart(ctx, ctr, checkpoint, checkpointDir, true)
+	return daemon.containerStart(ctx, daemonCfg, ctr, checkpoint, checkpointDir, true)
 }
 
 // containerStart prepares the container to run by setting up everything the
 // container needs, such as storage and networking, as well as links
 // between containers. The container is left waiting for a signal to
 // begin running.
-func (daemon *Daemon) containerStart(ctx context.Context, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
+func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
 	start := time.Now()
 	container.Lock()
 	defer container.Unlock()
@@ -136,7 +137,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
 			// if containers AutoRemove flag is set, remove it after clean up
 			if container.HostConfig.AutoRemove {
 				container.Unlock()
-				if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+				if err := daemon.containerRm(&daemonCfg.Config, container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
 					logrus.Errorf("can't remove container %s: %v", container.ID, err)
 				}
 				container.Lock()
@@ -148,11 +149,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
 		return err
 	}
 
-	if err := daemon.initializeNetworking(container); err != nil {
+	if err := daemon.initializeNetworking(&daemonCfg.Config, container); err != nil {
 		return err
 	}
 
-	spec, err := daemon.createSpec(ctx, container)
+	spec, err := daemon.createSpec(ctx, daemonCfg, container)
 	if err != nil {
 		return errdefs.System(err)
 	}
@@ -173,7 +174,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
 		}
 	}
 
-	shim, createOptions, err := daemon.getLibcontainerdCreateOptions(container)
+	shim, createOptions, err := daemon.getLibcontainerdCreateOptions(daemonCfg, container)
 	if err != nil {
 		return err
 	}

+ 4 - 4
daemon/start_unix.go

@@ -7,17 +7,17 @@ import (
 )
 
 // getLibcontainerdCreateOptions callers must hold a lock on the container
-func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (string, interface{}, error) {
+func (daemon *Daemon) getLibcontainerdCreateOptions(daemonCfg *configStore, container *container.Container) (string, interface{}, error) {
 	// Ensure a runtime has been assigned to this container
 	if container.HostConfig.Runtime == "" {
-		container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
+		container.HostConfig.Runtime = daemonCfg.Runtimes.Default
 		container.CheckpointTo(daemon.containersReplica)
 	}
 
-	binary, opts, err := daemon.getRuntime(container.HostConfig.Runtime)
+	shim, opts, err := daemonCfg.Runtimes.Get(container.HostConfig.Runtime)
 	if err != nil {
 		return "", nil, setExitCodeFromError(container.SetExitCode, err)
 	}
 
-	return binary, opts, nil
+	return shim, opts, nil
 }

+ 1 - 1
daemon/start_windows.go

@@ -6,7 +6,7 @@ import (
 	"github.com/docker/docker/pkg/system"
 )
 
-func (daemon *Daemon) getLibcontainerdCreateOptions(_ *container.Container) (string, interface{}, error) {
+func (daemon *Daemon) getLibcontainerdCreateOptions(*configStore, *container.Container) (string, interface{}, error) {
 	if system.ContainerdRuntimeSupported() {
 		opts := &options.Options{}
 		return "io.containerd.runhcs.v1", opts, nil

+ 2 - 1
daemon/update.go

@@ -13,7 +13,8 @@ import (
 func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
 	var warnings []string
 
-	warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
+	daemonCfg := daemon.config()
+	warnings, err := daemon.verifyContainerSettings(daemonCfg, hostConfig, nil, true)
 	if err != nil {
 		return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err)
 	}

+ 1 - 1
integration-cli/docker_cli_daemon_test.go

@@ -2275,7 +2275,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *testing.T) {
 
 	content, err := s.d.ReadLogFile()
 	assert.NilError(c, err)
-	assert.Assert(c, is.Contains(string(content), `file configuration validation failed: runtime name 'runc' is reserved`))
+	assert.Assert(c, is.Contains(string(content), `runtime name 'runc' is reserved`))
 	// Check that we can select a default runtime
 	config = `
 {

+ 12 - 22
registry/service.go

@@ -35,28 +35,18 @@ func (s *Service) ServiceConfig() *registry.ServiceConfig {
 	return s.config.copy()
 }
 
-// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service.
-func (s *Service) LoadAllowNondistributableArtifacts(registries []string) error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	return s.config.loadAllowNondistributableArtifacts(registries)
-}
-
-// LoadMirrors loads registry mirrors for Service
-func (s *Service) LoadMirrors(mirrors []string) error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	return s.config.loadMirrors(mirrors)
-}
-
-// LoadInsecureRegistries loads insecure registries for Service
-func (s *Service) LoadInsecureRegistries(registries []string) error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	return s.config.loadInsecureRegistries(registries)
+// ReplaceConfig prepares a transaction which will atomically replace the
+// registry service's configuration when the returned commit function is called.
+func (s *Service) ReplaceConfig(options ServiceOptions) (commit func(), err error) {
+	config, err := newServiceConfig(options)
+	if err != nil {
+		return nil, err
+	}
+	return func() {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+		s.config = config
+	}, nil
 }
 
 // Auth contacts the public registry with the provided credentials,
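
The prepare/commit split in ReplaceConfig is worth calling out, since the rest of this change applies the same idea to the daemon config: all fallible work happens up front, and the returned commit closure only performs the final swap under the lock, so it cannot fail. A minimal standalone sketch of the pattern (the types and fields below are illustrative, not the registry package's):

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	type service struct {
		mu     sync.RWMutex
		config map[string]bool // stand-in for the real service config
	}

	// replaceConfig validates and builds the replacement configuration, then
	// returns a commit func that installs it. Readers see no change until
	// commit is called, and commit itself cannot fail.
	func (s *service) replaceConfig(insecure []string) (commit func(), err error) {
		cfg := make(map[string]bool, len(insecure))
		for _, reg := range insecure {
			if reg == "" {
				return nil, errors.New("empty registry name")
			}
			cfg[reg] = true
		}
		return func() {
			s.mu.Lock()
			defer s.mu.Unlock()
			s.config = cfg
		}, nil
	}

	func main() {
		s := &service{config: map[string]bool{"old.example.test": true}}
		commit, err := s.replaceConfig([]string{"registry.example.test:5000"})
		if err != nil {
			panic(err) // nothing has been applied; the old config is intact
		}
		// ...the caller can validate the rest of the reload here...
		commit()
		fmt.Println(s.config)
	}

Because every subsystem can validate before anything commits, a reload that fails partway through cannot leave one subsystem updated and another still on the old configuration.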

+ 4 - 0
vendor.mod

@@ -58,6 +58,7 @@ require (
 	github.com/klauspost/compress v1.16.3
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs/v3 v3.0.1
+	github.com/mitchellh/copystructure v1.2.0
 	github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f
 	github.com/moby/ipvs v1.1.0
 	github.com/moby/locker v1.0.1
@@ -154,6 +155,9 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.1.4 // indirect
+	github.com/onsi/gomega v1.20.1 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect
 	github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect
 	github.com/philhofer/fwd v1.1.2 // indirect

+ 8 - 2
vendor.sum

@@ -1034,6 +1034,8 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go
 github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
 github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
@@ -1048,6 +1050,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
 github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
 github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f h1:9wobL03Y6U8azuDLUqYblbUdVU9jpjqecDdW7w4wZtI=
@@ -1121,8 +1125,9 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1133,8 +1138,9 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=

+ 21 - 0
vendor/github.com/mitchellh/copystructure/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 21 - 0
vendor/github.com/mitchellh/copystructure/README.md

@@ -0,0 +1,21 @@
+# copystructure
+
+copystructure is a Go library for deep copying values in Go.
+
+This allows you to copy Go values that may contain reference values
+such as maps, slices, or pointers, and copy their data as well instead
+of just their references.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/copystructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
+
+The `Copy` function has examples associated with it there.

+ 15 - 0
vendor/github.com/mitchellh/copystructure/copier_time.go

@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+	"reflect"
+	"time"
+)
+
+func init() {
+	Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+	// Just... copy it.
+	return v.(time.Time), nil
+}

+ 631 - 0
vendor/github.com/mitchellh/copystructure/copystructure.go

@@ -0,0 +1,631 @@
+package copystructure
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mitchellh/reflectwalk"
+)
+
+const tagKey = "copy"
+
+// Copy returns a deep copy of v.
+//
+// Copy is unable to copy unexported fields in a struct (lowercase field names).
+// Unexported fields can't be reflected by the Go runtime and therefore
+// copystructure can't perform any data copies.
+//
+// For structs, copy behavior can be controlled with struct tags. For example:
+//
+//   struct {
+//     Name string
+//     Data *bytes.Buffer `copy:"shallow"`
+//   }
+//
+// The available tag values are:
+//
+// * "ignore" - The field will be ignored, effectively resulting in it being
+//   assigned the zero value in the copy.
+//
+// * "shallow" - The field will be be shallow copied. This means that references
+//   values such as pointers, maps, slices, etc. will be directly assigned
+//   versus deep copied.
+//
+func Copy(v interface{}) (interface{}, error) {
+	return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// ShallowCopiers is a map of pointer types that behave specially
+// when they are copied.  If a type is found in this map while deep
+// copying, the pointer value will be shallow copied and not walked
+// into.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value
+// with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
+func Must(v interface{}, err error) interface{} {
+	if err != nil {
+		panic("copy error: " + err.Error())
+	}
+
+	return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+	// Lock any types that are a sync.Locker and are not a mutex while copying.
+	// If there is an RLocker method, use that to get the sync.Locker.
+	Lock bool
+
+	// Copiers is a map of types associated with a CopierFunc. Use the global
+	// Copiers map if this is nil.
+	Copiers map[reflect.Type]CopierFunc
+
+	// ShallowCopiers is a map of pointer types that are shallow copied
+	// no matter where they are encountered. Use the
+	// global ShallowCopiers if this is nil.
+	ShallowCopiers map[reflect.Type]struct{}
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+	if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+		return nil, errPointerRequired
+	}
+
+	w := new(walker)
+	if c.Lock {
+		w.useLocks = true
+	}
+
+	if c.Copiers == nil {
+		c.Copiers = Copiers
+	}
+	w.copiers = c.Copiers
+
+	if c.ShallowCopiers == nil {
+		c.ShallowCopiers = ShallowCopiers
+	}
+	w.shallowCopiers = c.ShallowCopiers
+
+	err := reflectwalk.Walk(v, w)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the result. If the result is nil, then we want to turn it
+	// into a typed nil if we can.
+	result := w.Result
+	if result == nil {
+		val := reflect.ValueOf(v)
+		result = reflect.Indirect(reflect.New(val.Type())).Interface()
+	}
+
+	return result, nil
+}
+
+// Return the key used to index interface types we've seen. Store the number
+// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
+// easy to calculate, easy to match a key with our current depth, and we don't
+// need to deal with initializing and cleaning up nested maps or slices.
+func ifaceKey(pointers, depth int) uint64 {
+	return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+	Result interface{}
+
+	copiers        map[reflect.Type]CopierFunc
+	shallowCopiers map[reflect.Type]struct{}
+	depth          int
+	ignoreDepth    int
+	vals           []reflect.Value
+	cs             []reflect.Value
+
+	// This stores the number of pointers we've walked over, indexed by depth.
+	ps []int
+
+	// If an interface is indirected by a pointer, we need to know the type of
+	// interface to create when creating the new value.  Store the interface
+	// types here, indexed by both the walk depth and the number of pointers
+	// already seen at that depth. Use ifaceKey to calculate the proper uint64
+	// value.
+	ifaceTypes map[uint64]reflect.Type
+
+	// any locks we've taken, indexed by depth
+	locks []sync.Locker
+	// take locks while walking the structure
+	useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+	w.depth++
+
+	// ensure we have enough elements to index via w.depth
+	for w.depth >= len(w.locks) {
+		w.locks = append(w.locks, nil)
+	}
+
+	for len(w.ps) < w.depth+1 {
+		w.ps = append(w.ps, 0)
+	}
+
+	return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+	locker := w.locks[w.depth]
+	w.locks[w.depth] = nil
+	if locker != nil {
+		defer locker.Unlock()
+	}
+
+	// clear out pointers and interfaces as we exit the stack
+	w.ps[w.depth] = 0
+
+	for k := range w.ifaceTypes {
+		mask := uint64(^uint32(0))
+		if k&mask == uint64(w.depth) {
+			delete(w.ifaceTypes, k)
+		}
+	}
+
+	w.depth--
+	if w.ignoreDepth > w.depth {
+		w.ignoreDepth = 0
+	}
+
+	if w.ignoring() {
+		return nil
+	}
+
+	switch l {
+	case reflectwalk.Array:
+		fallthrough
+	case reflectwalk.Map:
+		fallthrough
+	case reflectwalk.Slice:
+		w.replacePointerMaybe()
+
+		// Pop map off our container
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		// Pop off the key and value
+		mv := w.valPop()
+		mk := w.valPop()
+		m := w.cs[len(w.cs)-1]
+
+		// If mv is the zero value, SetMapIndex deletes the key from the map,
+		// or in this case never adds it. We need to create a properly typed
+		// zero value so that this key can be set.
+		if !mv.IsValid() {
+			mv = reflect.Zero(m.Elem().Type().Elem())
+		}
+		m.Elem().SetMapIndex(mk, mv)
+	case reflectwalk.ArrayElem:
+		// Pop off the value and the index and set it on the array
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			a := w.cs[len(w.cs)-1]
+			ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+			if ae.CanSet() {
+				ae.Set(v)
+			}
+		}
+	case reflectwalk.SliceElem:
+		// Pop off the value and the index and set it on the slice
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			se := s.Elem().Index(i)
+			if se.CanSet() {
+				se.Set(v)
+			}
+		}
+	case reflectwalk.Struct:
+		w.replacePointerMaybe()
+
+		// Remove the struct from the container stack
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.StructField:
+		// Pop off the value and the field
+		v := w.valPop()
+		f := w.valPop().Interface().(reflect.StructField)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			sf := reflect.Indirect(s).FieldByName(f.Name)
+
+			if sf.CanSet() {
+				sf.Set(v)
+			}
+		}
+	case reflectwalk.WalkLoc:
+		// Clear out the slices for GC
+		w.cs = nil
+		w.vals = nil
+	}
+
+	return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(m)
+
+	// Create the map. If the map itself is nil, then just make a nil map
+	var newMap reflect.Value
+	if m.IsNil() {
+		newMap = reflect.New(m.Type())
+	} else {
+		newMap = wrapPtr(reflect.MakeMap(m.Type()))
+	}
+
+	w.cs = append(w.cs, newMap)
+	w.valPush(newMap)
+	return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+	return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+	if v {
+		w.ps[w.depth]++
+	}
+	return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+	if v {
+		w.ps[w.depth]--
+	}
+	return nil
+}
+
+func (w *walker) Pointer(v reflect.Value) error {
+	if _, ok := w.shallowCopiers[v.Type()]; ok {
+		// Shallow copy this value. Use the same logic as primitive, then
+		// return skip.
+		if err := w.Primitive(v); err != nil {
+			return err
+		}
+
+		return reflectwalk.SkipEntry
+	}
+
+	return nil
+}
+
+func (w *walker) Interface(v reflect.Value) error {
+	if !v.IsValid() {
+		return nil
+	}
+	if w.ifaceTypes == nil {
+		w.ifaceTypes = make(map[uint64]reflect.Type)
+	}
+
+	w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
+	return nil
+}
+
+func (w *walker) Primitive(v reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(v)
+
+	// IsValid verifies the v is non-zero and CanInterface verifies
+	// that we're allowed to read this value (unexported fields).
+	var newV reflect.Value
+	if v.IsValid() && v.CanInterface() {
+		newV = reflect.New(v.Type())
+		newV.Elem().Set(v)
+	}
+
+	w.valPush(newV)
+	w.replacePointerMaybe()
+	return nil
+}
+
+func (w *walker) Slice(s reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(s)
+
+	var newS reflect.Value
+	if s.IsNil() {
+		newS = reflect.New(s.Type())
+	} else {
+		newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
+	}
+
+	w.cs = append(w.cs, newS)
+	w.valPush(newS)
+	return nil
+}
+
+func (w *walker) SliceElem(i int, elem reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+
+	// We don't write the slice here because elem might still be
+	// arbitrarily complex. Just record the index and continue on.
+	w.valPush(reflect.ValueOf(i))
+
+	return nil
+}
+
+func (w *walker) Array(a reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(a)
+
+	newA := reflect.New(a.Type())
+
+	w.cs = append(w.cs, newA)
+	w.valPush(newA)
+	return nil
+}
+
+func (w *walker) ArrayElem(i int, elem reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+
+	// We don't write the array here because elem might still be
+	// arbitrarily complex. Just record the index and continue on.
+	w.valPush(reflect.ValueOf(i))
+
+	return nil
+}
+
+func (w *walker) Struct(s reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(s)
+
+	var v reflect.Value
+	if c, ok := w.copiers[s.Type()]; ok {
+		// We have a Copier for this struct, so we use that copier to
+		// get the copy, and we ignore anything deeper than this.
+		w.ignoreDepth = w.depth
+
+		dup, err := c(s.Interface())
+		if err != nil {
+			return err
+		}
+
+		// We need to put a pointer to the value on the value stack,
+		// so allocate a new pointer and set it.
+		v = reflect.New(s.Type())
+		reflect.Indirect(v).Set(reflect.ValueOf(dup))
+	} else {
+		// No copier, we copy ourselves and allow reflectwalk to guide
+		// us deeper into the structure for copying.
+		v = reflect.New(s.Type())
+	}
+
+	// Push the value onto the value stack for setting the struct field,
+	// and add the struct itself to the containers stack in case we walk
+	// deeper so that its own fields can be modified.
+	w.valPush(v)
+	w.cs = append(w.cs, v)
+
+	return nil
+}
+
+func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+
+	// If PkgPath is non-empty, this is a private (unexported) field.
+	// We do not set this unexported since the Go runtime doesn't allow us.
+	if f.PkgPath != "" {
+		return reflectwalk.SkipEntry
+	}
+
+	switch f.Tag.Get(tagKey) {
+	case "shallow":
+		// If we're shallow copying then assign the value directly to the
+		// struct and skip the entry.
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			sf := reflect.Indirect(s).FieldByName(f.Name)
+			if sf.CanSet() {
+				sf.Set(v)
+			}
+		}
+
+		return reflectwalk.SkipEntry
+
+	case "ignore":
+		// Do nothing
+		return reflectwalk.SkipEntry
+	}
+
+	// Push the field onto the stack, we'll handle it when we exit
+	// the struct field in Exit...
+	w.valPush(reflect.ValueOf(f))
+
+	return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit this one
+func (w *walker) ignore() {
+	w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+	return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+	result := w.vals[len(w.vals)-1]
+	w.vals = w.vals[:len(w.vals)-1]
+
+	// If we're out of values, that means we popped everything off. In
+	// this case, we reset the result so the next pushed value becomes
+	// the result.
+	if len(w.vals) == 0 {
+		w.Result = nil
+	}
+
+	return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+	w.vals = append(w.vals, v)
+
+	// If we haven't set the result yet, then this is the result since
+	// it is the first (outermost) value we're seeing.
+	if w.Result == nil && v.IsValid() {
+		w.Result = v.Interface()
+	}
+}
+
+func (w *walker) replacePointerMaybe() {
+	// Determine the last pointer value. If it is NOT a pointer, then
+	// we need to push that onto the stack.
+	if !w.pointerPeek() {
+		w.valPush(reflect.Indirect(w.valPop()))
+		return
+	}
+
+	v := w.valPop()
+
+	// If the expected type is a pointer to an interface of any depth,
+	// such as *interface{}, **interface{}, etc., then we need to convert
+	// the value "v" from *CONCRETE to *interface{} so types match for
+	// Set.
+	//
+	// Example if v is type *Foo where Foo is a struct, v would become
+	// *interface{} instead. This only happens if we have an interface expectation
+	// at this depth.
+	//
+	// For more info, see GH-16
+	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
+		y := reflect.New(iType)           // Create *interface{}
+		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
+		v = y                             // v is now typed *interface{} (where *v = Foo)
+	}
+
+	for i := 1; i < w.ps[w.depth]; i++ {
+		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+			iface := reflect.New(iType).Elem()
+			iface.Set(v)
+			v = iface
+		}
+
+		p := reflect.New(v.Type())
+		p.Elem().Set(v)
+		v = p
+	}
+
+	w.valPush(v)
+}
+
+// if this value is a Locker, lock it and add it to the locks slice
+func (w *walker) lock(v reflect.Value) {
+	if !w.useLocks {
+		return
+	}
+
+	if !v.IsValid() || !v.CanInterface() {
+		return
+	}
+
+	type rlocker interface {
+		RLocker() sync.Locker
+	}
+
+	var locker sync.Locker
+
+	// We can't call Interface() on a value directly, since that requires
+	// a copy. This is OK, since the pointer to a value which is a sync.Locker
+	// is also a sync.Locker.
+	if v.Kind() == reflect.Ptr {
+		switch l := v.Interface().(type) {
+		case rlocker:
+			// don't lock a mutex directly
+			if _, ok := l.(*sync.RWMutex); !ok {
+				locker = l.RLocker()
+			}
+		case sync.Locker:
+			locker = l
+		}
+	} else if v.CanAddr() {
+		switch l := v.Addr().Interface().(type) {
+		case rlocker:
+			// don't lock a mutex directly
+			if _, ok := l.(*sync.RWMutex); !ok {
+				locker = l.RLocker()
+			}
+		case sync.Locker:
+			locker = l
+		}
+	}
+
+	// still no callable locker
+	if locker == nil {
+		return
+	}
+
+	// don't lock a mutex directly
+	switch locker.(type) {
+	case *sync.Mutex, *sync.RWMutex:
+		return
+	}
+
+	locker.Lock()
+	w.locks[w.depth] = locker
+}
+
+// wrapPtr is a helper that takes v and always makes it *v. copystructure
+// stores things internally as pointers until the last moment before unwrapping
+func wrapPtr(v reflect.Value) reflect.Value {
+	if !v.IsValid() {
+		return v
+	}
+	vPtr := reflect.New(v.Type())
+	vPtr.Elem().Set(v)
+	return vPtr
+}
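
A short usage sketch for the newly vendored copystructure package, the deep-copy helper this change pulls in: Copy produces an independent deep copy, so mutating the copy never affects the original value. The Settings type below is invented for illustration.

	package main

	import (
		"fmt"

		"github.com/mitchellh/copystructure"
	)

	type Settings struct {
		Labels  []string
		Options map[string]string
	}

	func main() {
		orig := &Settings{
			Labels:  []string{"env=prod"},
			Options: map[string]string{"debug": "false"},
		}

		// Copy walks the value via reflectwalk and duplicates every map,
		// slice, and pointer it encounters; Must panics if copying fails.
		dup := copystructure.Must(copystructure.Copy(orig)).(*Settings)

		dup.Options["debug"] = "true" // the original is unchanged
		fmt.Println(orig.Options["debug"], dup.Options["debug"])
	}

As the package doc above notes, unexported struct fields are not copied, so this approach fits plain data-carrying configuration structs best.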

+ 1 - 0
vendor/github.com/mitchellh/reflectwalk/.travis.yml

@@ -0,0 +1 @@
+language: go

+ 21 - 0
vendor/github.com/mitchellh/reflectwalk/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 6 - 0
vendor/github.com/mitchellh/reflectwalk/README.md

@@ -0,0 +1,6 @@
+# reflectwalk
+
+reflectwalk is a Go library for "walking" a value in Go using reflection,
+in the same way a directory tree can be "walked" on the filesystem. Walking
+a complex structure can allow you to do manipulations on unknown structures
+such as those decoded from JSON.

+ 19 - 0
vendor/github.com/mitchellh/reflectwalk/location.go

@@ -0,0 +1,19 @@
+package reflectwalk
+
+//go:generate stringer -type=Location location.go
+
+type Location uint
+
+const (
+	None Location = iota
+	Map
+	MapKey
+	MapValue
+	Slice
+	SliceElem
+	Array
+	ArrayElem
+	Struct
+	StructField
+	WalkLoc
+)

+ 16 - 0
vendor/github.com/mitchellh/reflectwalk/location_string.go

@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.
+
+package reflectwalk
+
+import "fmt"
+
+const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"
+
+var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}
+
+func (i Location) String() string {
+	if i >= Location(len(_Location_index)-1) {
+		return fmt.Sprintf("Location(%d)", i)
+	}
+	return _Location_name[_Location_index[i]:_Location_index[i+1]]
+}

+ 420 - 0
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go

@@ -0,0 +1,420 @@
+// reflectwalk is a package that allows you to "walk" complex structures
+// similar to how you may "walk" a filesystem: visiting every element one
+// by one and calling callback functions allowing you to handle and manipulate
+// those elements.
+package reflectwalk
+
+import (
+	"errors"
+	"reflect"
+)
+
+// PrimitiveWalker implementations are able to handle primitive values
+// within complex structures. Primitive values are numbers, strings,
+// booleans, funcs, chans.
+//
+// These primitive values are often members of more complex
+// structures (slices, maps, etc.) that are walkable by other interfaces.
+type PrimitiveWalker interface {
+	Primitive(reflect.Value) error
+}
+
+// InterfaceWalker implementations are able to handle interface values as they
+// are encountered during the walk.
+type InterfaceWalker interface {
+	Interface(reflect.Value) error
+}
+
+// MapWalker implementations are able to handle individual elements
+// found within a map structure.
+type MapWalker interface {
+	Map(m reflect.Value) error
+	MapElem(m, k, v reflect.Value) error
+}
+
+// SliceWalker implementations are able to handle slice elements found
+// within complex structures.
+type SliceWalker interface {
+	Slice(reflect.Value) error
+	SliceElem(int, reflect.Value) error
+}
+
+// ArrayWalker implementations are able to handle array elements found
+// within complex structures.
+type ArrayWalker interface {
+	Array(reflect.Value) error
+	ArrayElem(int, reflect.Value) error
+}
+
+// StructWalker is an interface that has methods that are called for
+// structs when a Walk is done.
+type StructWalker interface {
+	Struct(reflect.Value) error
+	StructField(reflect.StructField, reflect.Value) error
+}
+
+// EnterExitWalker implementations are notified before and after
+// they walk deeper into complex structures (into struct fields,
+// into slice elements, etc.)
+type EnterExitWalker interface {
+	Enter(Location) error
+	Exit(Location) error
+}
+
+// PointerWalker implementations are notified when the value they're
+// walking is a pointer or not. Pointer is called for _every_ value whether
+// it is a pointer or not.
+type PointerWalker interface {
+	PointerEnter(bool) error
+	PointerExit(bool) error
+}
+
+// PointerValueWalker implementations are notified with the value of
+// a particular pointer when a pointer is walked. Pointer is called
+// right before PointerEnter.
+type PointerValueWalker interface {
+	Pointer(reflect.Value) error
+}
+
+// SkipEntry can be returned from walk functions to skip walking
+// the value of this field. This is only valid in the following functions:
+//
+//   - Struct: skips all fields from being walked
+//   - StructField: skips walking the struct value
+//
+var SkipEntry = errors.New("skip this entry")
+
+// Walk takes an arbitrary value and an interface and traverses the
+// value, calling callbacks on the interface if they are supported.
+// The interface should implement one or more of the walker interfaces
+// in this package, such as PrimitiveWalker, StructWalker, etc.
+func Walk(data, walker interface{}) (err error) {
+	v := reflect.ValueOf(data)
+	ew, ok := walker.(EnterExitWalker)
+	if ok {
+		err = ew.Enter(WalkLoc)
+	}
+
+	if err == nil {
+		err = walk(v, walker)
+	}
+
+	if ok && err == nil {
+		err = ew.Exit(WalkLoc)
+	}
+
+	return
+}
+
+func walk(v reflect.Value, w interface{}) (err error) {
+	// Determine if we're receiving a pointer and if so notify the walker.
+	// The logic here is convoluted but very important (tests will fail if
+	// almost any part is changed). I will try to explain here.
+	//
+	// First, we check if the value is an interface, if so, we really need
+	// to check the interface's VALUE to see whether it is a pointer.
+	//
+	// Check whether the value is then a pointer. If so, then set pointer
+	// to true to notify the user.
+	//
+	// If we still have a pointer or an interface after the indirections, then
+	// we unwrap another level
+	//
+	// At this time, we also set "v" to be the dereferenced value. This is
+	// because once we've unwrapped the pointer we want to use that value.
+	pointer := false
+	pointerV := v
+
+	for {
+		if pointerV.Kind() == reflect.Interface {
+			if iw, ok := w.(InterfaceWalker); ok {
+				if err = iw.Interface(pointerV); err != nil {
+					return
+				}
+			}
+
+			pointerV = pointerV.Elem()
+		}
+
+		if pointerV.Kind() == reflect.Ptr {
+			if pw, ok := w.(PointerValueWalker); ok {
+				if err = pw.Pointer(pointerV); err != nil {
+					if err == SkipEntry {
+						// Skip the rest of this entry but clear the error
+						return nil
+					}
+
+					return
+				}
+			}
+
+			pointer = true
+			v = reflect.Indirect(pointerV)
+		}
+		if pw, ok := w.(PointerWalker); ok {
+			if err = pw.PointerEnter(pointer); err != nil {
+				return
+			}
+
+			defer func(pointer bool) {
+				if err != nil {
+					return
+				}
+
+				err = pw.PointerExit(pointer)
+			}(pointer)
+		}
+
+		if pointer {
+			pointerV = v
+		}
+		pointer = false
+
+		// If we still have a pointer or interface we have to indirect another level.
+		switch pointerV.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			continue
+		}
+		break
+	}
+
+	// We preserve the original value here because if it is an interface
+	// type, we want to pass that directly into the walkPrimitive, so that
+	// we can set it.
+	originalV := v
+	if v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+
+	k := v.Kind()
+	if k >= reflect.Int && k <= reflect.Complex128 {
+		k = reflect.Int
+	}
+
+	switch k {
+	// Primitives
+	case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
+		err = walkPrimitive(originalV, w)
+		return
+	case reflect.Map:
+		err = walkMap(v, w)
+		return
+	case reflect.Slice:
+		err = walkSlice(v, w)
+		return
+	case reflect.Struct:
+		err = walkStruct(v, w)
+		return
+	case reflect.Array:
+		err = walkArray(v, w)
+		return
+	default:
+		panic("unsupported type: " + k.String())
+	}
+}
+
+func walkMap(v reflect.Value, w interface{}) error {
+	ew, ewok := w.(EnterExitWalker)
+	if ewok {
+		ew.Enter(Map)
+	}
+
+	if mw, ok := w.(MapWalker); ok {
+		if err := mw.Map(v); err != nil {
+			return err
+		}
+	}
+
+	for _, k := range v.MapKeys() {
+		kv := v.MapIndex(k)
+
+		if mw, ok := w.(MapWalker); ok {
+			if err := mw.MapElem(v, k, kv); err != nil {
+				return err
+			}
+		}
+
+		ew, ok := w.(EnterExitWalker)
+		if ok {
+			ew.Enter(MapKey)
+		}
+
+		if err := walk(k, w); err != nil {
+			return err
+		}
+
+		if ok {
+			ew.Exit(MapKey)
+			ew.Enter(MapValue)
+		}
+
+		// get the map value again as it may have changed in the MapElem call
+		if err := walk(v.MapIndex(k), w); err != nil {
+			return err
+		}
+
+		if ok {
+			ew.Exit(MapValue)
+		}
+	}
+
+	if ewok {
+		ew.Exit(Map)
+	}
+
+	return nil
+}
+
+func walkPrimitive(v reflect.Value, w interface{}) error {
+	if pw, ok := w.(PrimitiveWalker); ok {
+		return pw.Primitive(v)
+	}
+
+	return nil
+}
+
+func walkSlice(v reflect.Value, w interface{}) (err error) {
+	ew, ok := w.(EnterExitWalker)
+	if ok {
+		ew.Enter(Slice)
+	}
+
+	if sw, ok := w.(SliceWalker); ok {
+		if err := sw.Slice(v); err != nil {
+			return err
+		}
+	}
+
+	for i := 0; i < v.Len(); i++ {
+		elem := v.Index(i)
+
+		if sw, ok := w.(SliceWalker); ok {
+			if err := sw.SliceElem(i, elem); err != nil {
+				return err
+			}
+		}
+
+		ew, ok := w.(EnterExitWalker)
+		if ok {
+			ew.Enter(SliceElem)
+		}
+
+		if err := walk(elem, w); err != nil {
+			return err
+		}
+
+		if ok {
+			ew.Exit(SliceElem)
+		}
+	}
+
+	ew, ok = w.(EnterExitWalker)
+	if ok {
+		ew.Exit(Slice)
+	}
+
+	return nil
+}
+
+func walkArray(v reflect.Value, w interface{}) (err error) {
+	ew, ok := w.(EnterExitWalker)
+	if ok {
+		ew.Enter(Array)
+	}
+
+	if aw, ok := w.(ArrayWalker); ok {
+		if err := aw.Array(v); err != nil {
+			return err
+		}
+	}
+
+	for i := 0; i < v.Len(); i++ {
+		elem := v.Index(i)
+
+		if aw, ok := w.(ArrayWalker); ok {
+			if err := aw.ArrayElem(i, elem); err != nil {
+				return err
+			}
+		}
+
+		ew, ok := w.(EnterExitWalker)
+		if ok {
+			ew.Enter(ArrayElem)
+		}
+
+		if err := walk(elem, w); err != nil {
+			return err
+		}
+
+		if ok {
+			ew.Exit(ArrayElem)
+		}
+	}
+
+	ew, ok = w.(EnterExitWalker)
+	if ok {
+		ew.Exit(Array)
+	}
+
+	return nil
+}
+
+func walkStruct(v reflect.Value, w interface{}) (err error) {
+	ew, ewok := w.(EnterExitWalker)
+	if ewok {
+		ew.Enter(Struct)
+	}
+
+	skip := false
+	if sw, ok := w.(StructWalker); ok {
+		err = sw.Struct(v)
+		if err == SkipEntry {
+			skip = true
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+
+	if !skip {
+		vt := v.Type()
+		for i := 0; i < vt.NumField(); i++ {
+			sf := vt.Field(i)
+			f := v.FieldByIndex([]int{i})
+
+			if sw, ok := w.(StructWalker); ok {
+				err = sw.StructField(sf, f)
+
+				// SkipEntry just pretends this field doesn't even exist
+				if err == SkipEntry {
+					continue
+				}
+
+				if err != nil {
+					return
+				}
+			}
+
+			ew, ok := w.(EnterExitWalker)
+			if ok {
+				ew.Enter(StructField)
+			}
+
+			err = walk(f, w)
+			if err != nil {
+				return
+			}
+
+			if ok {
+				ew.Exit(StructField)
+			}
+		}
+	}
+
+	if ewok {
+		ew.Exit(Struct)
+	}
+
+	return nil
+}
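
The struct, map, slice, and array walkers above all bracket their recursion with EnterExitWalker callbacks and honor SkipEntry returned from StructWalker methods. Below is a rough sketch of how a consumer might combine the two, assuming the package is imported as "github.com/mitchellh/reflectwalk"; the walker type fieldLister and the sample config structs are illustrative names, not taken from this repository.

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// fieldLister prints every exported struct field it visits, indented by
// nesting depth, and prunes unexported fields by returning SkipEntry.
type fieldLister struct{ depth int }

// Enter/Exit implement reflectwalk.EnterExitWalker and track nesting depth.
func (l *fieldLister) Enter(reflectwalk.Location) error { l.depth++; return nil }
func (l *fieldLister) Exit(reflectwalk.Location) error  { l.depth--; return nil }

// Struct/StructField implement reflectwalk.StructWalker.
func (l *fieldLister) Struct(reflect.Value) error { return nil }

func (l *fieldLister) StructField(sf reflect.StructField, _ reflect.Value) error {
	if sf.PkgPath != "" { // unexported field: skip it entirely
		return reflectwalk.SkipEntry
	}
	fmt.Printf("%*s%s\n", l.depth, "", sf.Name)
	return nil
}

func main() {
	type tlsConfig struct {
		CertFile string
		KeyFile  string
	}
	type daemonConfig struct {
		Hosts []string
		TLS   tlsConfig
		debug bool // unexported: pruned via SkipEntry
	}
	if err := reflectwalk.Walk(daemonConfig{}, &fieldLister{}); err != nil {
		panic(err)
	}
}

Because Enter and Exit are emitted symmetrically around each container type, the depth counter stays balanced even when SkipEntry prunes a subtree.
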

+ 10 - 0
vendor/modules.txt

@@ -611,9 +611,15 @@ github.com/miekg/dns
 # github.com/mistifyio/go-zfs/v3 v3.0.1
 ## explicit; go 1.14
 github.com/mistifyio/go-zfs/v3
+# github.com/mitchellh/copystructure v1.2.0
+## explicit; go 1.15
+github.com/mitchellh/copystructure
 # github.com/mitchellh/hashstructure/v2 v2.0.2
 ## explicit; go 1.14
 github.com/mitchellh/hashstructure/v2
+# github.com/mitchellh/reflectwalk v1.0.2
+## explicit
+github.com/mitchellh/reflectwalk
 # github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f
 ## explicit; go 1.18
 github.com/moby/buildkit/api/services/control
@@ -864,6 +870,10 @@ github.com/moby/term/windows
 # github.com/morikuni/aec v1.0.0
 ## explicit
 github.com/morikuni/aec
+# github.com/onsi/ginkgo/v2 v2.1.4
+## explicit; go 1.18
+# github.com/onsi/gomega v1.20.1
+## explicit; go 1.18
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest