daemon: reload runtimes w/o breaking containers

The existing runtimes reload logic went to great lengths to replace the
directory containing runtime wrapper scripts as atomically as possible
within the limitations of the Linux filesystem ABI. Trouble is,
atomically swapping the wrapper scripts directory solves the wrong
problem! The runtime configuration is "locked in" when a container is
started, including the path to the runC binary. If a container is
started with a runtime which requires a daemon-managed wrapper script
and then the daemon is reloaded with a config which no longer requires
the wrapper script (i.e. some args -> no args, or the runtime is dropped
from the config), that container would become unmanageable. Any attempts
to stop, exec or otherwise perform lifecycle management operations on
the container are likely to fail due to the wrapper script no longer
existing at its original path.

Atomically swapping the wrapper scripts is also incompatible with the
read-copy-update paradigm for reloading configuration. A handler in the
daemon could retain a reference to the pre-reload configuration for an
indeterminate amount of time after the daemon configuration has been
reloaded and updated. It is possible for the daemon to attempt to start
a container using a deleted wrapper script if a request to run a
container races a reload.

Solve the problem of deleting referenced wrapper scripts by ensuring
that all wrapper scripts are *immutable* for the lifetime of the daemon
process. Any given runtime wrapper script must always exist with the
same contents, no matter how many times the daemon config is reloaded,
or what changes are made to the config. This is accomplished by using
everyone's favourite design pattern: content-addressable storage. Each
wrapper script file name is suffixed with the SHA-256 digest of its
contents to (probabilistically) guarantee immutability without needing
any concurrency control. Stale runtime wrapper scripts are only cleaned
up on the next daemon restart.
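
As a rough sketch of the naming scheme (the helper name and the exact layout
are illustrative, not the precise code in this change; the digest/encoding
choice follows the crypto/sha256 and encoding/base32 imports added in the diff
below), a wrapper script's path is derived from a digest of its body, so the
same runtime configuration always resolves to the same immutable file:

    import (
        "crypto/sha256"
        "encoding/base32"
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // writeWrapperScript is a hypothetical sketch of content-addressed naming:
    // the file name carries a digest of the script body, so a reload can never
    // change or delete a script that a running container still references
    // under its original path.
    func writeWrapperScript(dir, name, binary string, args []string) (string, error) {
        body := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", binary, strings.Join(args, " "))
        sum := sha256.Sum256([]byte(body))
        suffix := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(sum[:])
        path := filepath.Join(dir, name+"."+suffix)
        // Re-writing is harmless: identical contents produce an identical
        // name, so no locking is needed to keep an existing path valid.
        if err := os.WriteFile(path, []byte(body), 0o700); err != nil {
            return "", err
        }
        return path, nil
    }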

Split the derived runtimes configuration from the user-supplied
configuration to have a place to store derived state without mutating
the user-supplied configuration or exposing daemon internals in API
struct types. Hold the derived state and the user-supplied configuration
in a single struct value so that they can be updated as an atomic unit.
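
In shape, that looks roughly like the following (a simplified sketch of the
configStore introduced in the diff below; the reload helper and the stubbed
runtimes type are illustrative):

    import (
        "sync/atomic"

        "github.com/docker/docker/daemon/config"
    )

    // runtimes stands in for the derived runtime state (resolved shims,
    // wrapper-script paths, features); its real definition is in the diff.
    type runtimes struct{}

    // configStore pairs the user-supplied configuration with the state
    // derived from it, so both are read and replaced as one unit.
    type configStore struct {
        config.Config
        Runtimes runtimes
    }

    type Daemon struct {
        configStore atomic.Pointer[configStore]
    }

    // Entrypoints load the snapshot once and pass it down the call stack;
    // a concurrent reload swaps the pointer but cannot change the snapshot
    // an in-flight operation already holds.
    func (d *Daemon) config() *configStore {
        if cfg := d.configStore.Load(); cfg != nil {
            return cfg
        }
        return &configStore{}
    }

    // A reload builds a complete replacement and publishes it atomically;
    // previously published values are never mutated.
    func (d *Daemon) reload(newCfg *configStore) {
        d.configStore.Store(newCfg)
    }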

Signed-off-by: Cory Snider <csnider@mirantis.com>

Cory Snider, 2022-08-31 16:12:30 -04:00
commit d222bf097c (parent 0b592467d9)
40 changed files with 518 additions and 395 deletions


@ -16,7 +16,6 @@ import (
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
"github.com/docker/go-connections/nat"
"github.com/opencontainers/runtime-spec/specs-go/features"
)
const (
@ -657,16 +656,6 @@ type Runtime struct {
Type string `json:"runtimeType,omitempty"`
Options map[string]interface{} `json:"options,omitempty"`
// This is exposed here only for internal use
ShimConfig *ShimConfig `json:"-"`
Features *features.Features `json:"-"`
}
// ShimConfig is used by runtime to configure containerd shims
type ShimConfig struct {
Binary string
Opts interface{}
}
// DiskUsageObject represents an object type used for disk usage query filtering.


@ -268,7 +268,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
// Restart all autostart containers which has a swarm endpoint
// and is not yet running now that we have successfully
// initialized the cluster.
d.RestartSwarmContainers(cli.Config)
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")


@ -81,15 +81,6 @@ type Config struct {
Rootless bool `json:"rootless,omitempty"`
}
// GetRuntime returns the runtime path and arguments for a given
// runtime name
func (conf *Config) GetRuntime(name string) *types.Runtime {
if rt, ok := conf.Runtimes[name]; ok {
return &rt
}
return nil
}
// GetAllRuntimes returns a copy of the runtimes map
func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
return conf.Runtimes


@ -30,12 +30,6 @@ type Config struct {
// for the Windows daemon.)
}
// GetRuntime returns the runtime path and arguments for a given
// runtime name
func (conf *Config) GetRuntime(name string) *types.Runtime {
return nil
}
// GetAllRuntimes returns a copy of the runtimes map
func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
return map[string]types.Runtime{}


@ -235,7 +235,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(daemonCfg *config.Config, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) {
func (daemon *Daemon) verifyContainerSettings(daemonCfg *configStore, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) {
// First perform verification of settings common across all platforms.
if err = validateContainerConfig(config); err != nil {
return warnings, err


@ -1075,7 +1075,7 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
}
}
} else {
if err := daemon.connectToNetwork(daemon.config(), container, idOrName, endpointConfig, true); err != nil {
if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, endpointConfig, true); err != nil {
return err
}
}


@ -31,11 +31,14 @@ func TestContainerWarningHostAndPublishPorts(t *testing.T) {
NetworkMode: "host",
PortBindings: tc.ports,
}
cs := &config.Config{}
configureRuntimes(cs)
d := &Daemon{}
d.configStore.Store(cs)
wrns, err := d.verifyContainerSettings(cs, hostConfig, &containertypes.Config{}, false)
cfg, err := config.New()
assert.NilError(t, err)
configureRuntimes(cfg)
runtimes, err := setupRuntimes(cfg)
assert.NilError(t, err)
daemonCfg := &configStore{Config: *cfg, Runtimes: runtimes}
wrns, err := d.verifyContainerSettings(daemonCfg, hostConfig, &containertypes.Config{}, false)
assert.NilError(t, err)
assert.DeepEqual(t, tc.warnings, wrns)
}


@ -57,7 +57,7 @@ func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context
})
}
func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *config.Config, opts createOpts) (containertypes.CreateResponse, error) {
func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *configStore, opts createOpts) (containertypes.CreateResponse, error) {
start := time.Now()
if opts.params.Config == nil {
return containertypes.CreateResponse{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container"))
@ -95,12 +95,12 @@ func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *config.Con
if opts.params.HostConfig == nil {
opts.params.HostConfig = &containertypes.HostConfig{}
}
err = daemon.adaptContainerSettings(daemonCfg, opts.params.HostConfig, opts.params.AdjustCPUShares)
err = daemon.adaptContainerSettings(&daemonCfg.Config, opts.params.HostConfig, opts.params.AdjustCPUShares)
if err != nil {
return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err)
}
ctr, err := daemon.create(ctx, daemonCfg, opts)
ctr, err := daemon.create(ctx, &daemonCfg.Config, opts)
if err != nil {
return containertypes.CreateResponse{Warnings: warnings}, err
}


@ -77,6 +77,12 @@ import (
"resenje.org/singleflight"
)
type configStore struct {
config.Config
Runtimes runtimes
}
// Daemon holds information about the Docker daemon.
type Daemon struct {
id string
@ -85,7 +91,7 @@ type Daemon struct {
containersReplica *container.ViewDB
execCommands *container.ExecStore
imageService ImageService
configStore atomic.Pointer[config.Config]
configStore atomic.Pointer[configStore]
configReload sync.Mutex
statsCollector *stats.Collector
defaultLogConfig containertypes.LogConfig
@ -159,10 +165,10 @@ func (daemon *Daemon) StoreHosts(hosts []string) {
// lifetime of an operation, the configuration pointer should be passed down the
// call stack, like one would a [context.Context] value. Only the entrypoints
// for operations, the outermost functions, should call this function.
func (daemon *Daemon) config() *config.Config {
func (daemon *Daemon) config() *configStore {
cfg := daemon.configStore.Load()
if cfg == nil {
return &config.Config{}
return &configStore{}
}
return cfg
}
@ -247,7 +253,7 @@ type layerAccessor interface {
GetLayerByID(cid string) (layer.RWLayer, error)
}
func (daemon *Daemon) restore(cfg *config.Config) error {
func (daemon *Daemon) restore(cfg *configStore) error {
var mapLock sync.Mutex
containers := make(map[string]*container.Container)
@ -467,7 +473,7 @@ func (daemon *Daemon) restore(cfg *config.Config) error {
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(cfg, c)
options, err := daemon.buildSandboxOptions(&cfg.Config, c)
if err != nil {
logger(c).WithError(err).Warn("failed to build sandbox option to restore container")
}
@ -523,7 +529,7 @@ func (daemon *Daemon) restore(cfg *config.Config) error {
//
// Note that we cannot initialize the network controller earlier, as it
// needs to know if there's active sandboxes (running containers).
if err = daemon.initNetworkController(cfg, activeSandboxes); err != nil {
if err = daemon.initNetworkController(&cfg.Config, activeSandboxes); err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
@ -586,7 +592,7 @@ func (daemon *Daemon) restore(cfg *config.Config) error {
go func(cid string) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.containerRm(cfg, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
if err := daemon.containerRm(&cfg.Config, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
}
@ -634,9 +640,11 @@ func (daemon *Daemon) restore(cfg *config.Config) error {
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers(cfg *config.Config) {
ctx := context.Background()
func (daemon *Daemon) RestartSwarmContainers() {
daemon.restartSwarmContainers(context.Background(), daemon.config())
}
func (daemon *Daemon) restartSwarmContainers(ctx context.Context, cfg *configStore) {
// parallelLimit is the maximum number of parallel startup jobs that we
// allow (this is the limited used for all startup semaphores). The multipler
// (128) was chosen after some fairly significant benchmarking -- don't change
@ -806,11 +814,23 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
os.Setenv("TMPDIR", realTmp)
}
if err := initRuntimesDir(config); err != nil {
return nil, err
}
runtimes, err := setupRuntimes(config)
if err != nil {
return nil, err
}
d := &Daemon{
PluginStore: pluginStore,
startupDone: make(chan struct{}),
}
d.configStore.Store(config)
configStore := &configStore{
Config: *config,
Runtimes: runtimes,
}
d.configStore.Store(configStore)
// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
@ -830,27 +850,27 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
}
}()
if err := d.setGenericResources(config); err != nil {
if err := d.setGenericResources(&configStore.Config); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir := configStore.Root
if execRoot := configStore.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
if err := d.setupSeccompProfile(config); err != nil {
if err := d.setupSeccompProfile(&configStore.Config); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(config); err != nil {
if err := d.setDefaultIsolation(&configStore.Config); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
if err := configureMaxThreads(config); err != nil {
if err := configureMaxThreads(&configStore.Config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
@ -859,7 +879,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
daemonRepo := filepath.Join(configStore.Root, "containers")
if err := idtools.MkdirAllAndChown(daemonRepo, 0o710, idtools.Identity{
UID: idtools.CurrentIdentity().UID,
GID: rootIDs.GID,
@ -867,20 +887,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
return nil, err
}
// Create the directory where we'll store the runtime scripts (i.e. in
// order to support runtimeArgs)
if err = os.Mkdir(filepath.Join(config.Root, "runtimes"), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
return nil, err
}
if err := d.loadRuntimes(); err != nil {
return nil, err
}
if isWindows {
// Note that permissions (0o700) are ignored on Windows; passing them to
// show intent only. We could consider using idtools.MkdirAndChown here
// to apply an ACL.
if err = os.Mkdir(filepath.Join(config.Root, "credentialspecs"), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
if err = os.Mkdir(filepath.Join(configStore.Root, "credentialspecs"), 0o700); err != nil && !errors.Is(err, os.ErrExist) {
return nil, err
}
}
@ -888,7 +899,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
d.registryService = registryService
dlogger.RegisterPluginGetter(d.PluginStore)
metricsSockPath, err := d.listenMetricsSock(config)
metricsSockPath, err := d.listenMetricsSock(&configStore.Config)
if err != nil {
return nil, err
}
@ -927,20 +938,20 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
}
if config.ContainerdAddr != "" {
d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if configStore.ContainerdAddr != "" {
d.containerdCli, err = containerd.New(configStore.ContainerdAddr, containerd.WithDefaultNamespace(configStore.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
return nil, errors.Wrapf(err, "failed to dial %q", configStore.ContainerdAddr)
}
}
createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
var pluginCli *containerd.Client
if config.ContainerdAddr != "" {
pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if configStore.ContainerdAddr != "" {
pluginCli, err = containerd.New(configStore.ContainerdAddr, containerd.WithDefaultNamespace(configStore.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
return nil, errors.Wrapf(err, "failed to dial %q", configStore.ContainerdAddr)
}
}
@ -949,22 +960,22 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
shimOpts interface{}
)
if runtime.GOOS != "windows" {
shim, shimOpts, err = d.getRuntime(config, config.DefaultRuntime)
shim, shimOpts, err = runtimes.Get(configStore.DefaultRuntime)
if err != nil {
return nil, err
}
}
return pluginexec.New(ctx, getPluginExecRoot(config), pluginCli, config.ContainerdPluginNamespace, m, shim, shimOpts)
return pluginexec.New(ctx, getPluginExecRoot(&configStore.Config), pluginCli, configStore.ContainerdPluginNamespace, m, shim, shimOpts)
}
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: getPluginExecRoot(config),
Root: filepath.Join(configStore.Root, "plugins"),
ExecRoot: getPluginExecRoot(&configStore.Config),
Store: d.PluginStore,
CreateExecutor: createPluginExec,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LiveRestoreEnabled: configStore.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
AuthzMiddleware: authzMiddleware,
})
@ -972,13 +983,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
d.defaultLogConfig, err = defaultLogConfig(config)
d.defaultLogConfig, err = defaultLogConfig(&configStore.Config)
if err != nil {
return nil, errors.Wrap(err, "failed to set log opts")
}
logrus.Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
d.volumes, err = volumesservice.NewVolumeService(configStore.Root, d.PluginStore, rootIDs, d)
if err != nil {
return nil, err
}
@ -991,11 +1002,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
// at this point.
//
// TODO(thaJeztah) add a utility to only collect the CgroupDevicesEnabled information
if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(config).CgroupDevicesEnabled {
if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(&configStore.Config).CgroupDevicesEnabled {
return nil, errors.New("Devices cgroup isn't mounted")
}
d.id, err = loadOrCreateID(filepath.Join(config.Root, "engine-id"))
d.id, err = loadOrCreateID(filepath.Join(configStore.Root, "engine-id"))
if err != nil {
return nil, err
}
@ -1008,7 +1019,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.EventsService = events.New()
d.root = config.Root
d.root = configStore.Root
d.idMapping = idMapping
d.linkIndex = newLinkIndex()
@ -1023,7 +1034,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
} else if driverName != "" {
logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
} else {
driverName = config.GraphDriver
driverName = configStore.GraphDriver
}
if d.UsesSnapshotter() {
@ -1039,7 +1050,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
// operation only, so it is safe to pass *just* the runtime OS graphdriver.
if err := configureKernelSecuritySupport(config, driverName); err != nil {
if err := configureKernelSecuritySupport(&configStore.Config, driverName); err != nil {
return nil, err
}
d.imageService = ctrd.NewService(ctrd.ImageServiceConfig{
@ -1052,13 +1063,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
})
} else {
layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{
Root: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
Root: configStore.Root,
MetadataStorePathTemplate: filepath.Join(configStore.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
GraphDriverOptions: configStore.GraphOptions,
IDMapping: idMapping,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
ExperimentalEnabled: configStore.Experimental,
})
if err != nil {
return nil, err
@ -1066,11 +1077,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
// operation only, so it is safe to pass *just* the runtime OS graphdriver.
if err := configureKernelSecuritySupport(config, layerStore.DriverName()); err != nil {
if err := configureKernelSecuritySupport(&configStore.Config, layerStore.DriverName()); err != nil {
return nil, err
}
imageRoot := filepath.Join(config.Root, "image", layerStore.DriverName())
imageRoot := filepath.Join(configStore.Root, "image", layerStore.DriverName())
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
@ -1144,11 +1155,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
go d.execCommandGC()
if err := d.initLibcontainerd(ctx, config); err != nil {
if err := d.initLibcontainerd(ctx, &configStore.Config); err != nil {
return nil, err
}
if err := d.restore(config); err != nil {
if err := d.restore(configStore); err != nil {
return nil, err
}
close(d.startupDone)
@ -1210,7 +1221,7 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
// A negative (-1) timeout means "indefinitely", which means that containers
// are not forcibly killed, and the daemon shuts down after all containers exit.
func (daemon *Daemon) ShutdownTimeout() int {
return daemon.shutdownTimeout(daemon.config())
return daemon.shutdownTimeout(&daemon.config().Config)
}
func (daemon *Daemon) shutdownTimeout(cfg *config.Config) int {
@ -1241,7 +1252,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
cfg := daemon.config()
cfg := &daemon.config().Config
if cfg.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(ctx, &types.ContainerListOptions{}); len(ls) != 0 || err != nil {
@ -1523,7 +1534,7 @@ func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo {
// We check if sysInfo is not set here, to allow some test to
// override the actual sysInfo.
if daemon.sysInfo == nil {
daemon.sysInfo = getSysInfo(daemon.config())
daemon.sysInfo = getSysInfo(&daemon.config().Config)
}
})


@ -239,18 +239,18 @@ func kernelSupportsRecursivelyReadOnly() error {
return kernelSupportsRROErr
}
func supportsRecursivelyReadOnly(cfg *config.Config, runtime string) error {
func supportsRecursivelyReadOnly(cfg *configStore, runtime string) error {
if err := kernelSupportsRecursivelyReadOnly(); err != nil {
return fmt.Errorf("rro is not supported: %w (kernel is older than 5.12?)", err)
}
if runtime == "" {
runtime = cfg.DefaultRuntime
}
rt := cfg.GetRuntime(runtime)
if rt.Features == nil {
features := cfg.Runtimes.Features(runtime)
if features == nil {
return fmt.Errorf("rro is not supported by runtime %q: OCI features struct is not available", runtime)
}
for _, s := range rt.Features.MountOptions {
for _, s := range features.MountOptions {
if s == "rro" {
return nil
}


@ -10,7 +10,6 @@ import (
"testing"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/libnetwork/testutils"
"github.com/docker/docker/libnetwork/types"
"github.com/google/go-cmp/cmp/cmpopts"
@ -178,7 +177,7 @@ func TestNotCleanupMounts(t *testing.T) {
func TestValidateContainerIsolationLinux(t *testing.T) {
d := Daemon{}
_, err := d.verifyContainerSettings(&config.Config{}, &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
_, err := d.verifyContainerSettings(&configStore{}, &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux"))
}
@ -250,7 +249,7 @@ func TestRootMountCleanup(t *testing.T) {
testRoot, err := os.MkdirTemp("", t.Name())
assert.NilError(t, err)
defer os.RemoveAll(testRoot)
cfg := &config.Config{}
cfg := &configStore{}
err = mount.MakePrivate(testRoot)
assert.NilError(t, err)
@ -266,16 +265,16 @@ func TestRootMountCleanup(t *testing.T) {
d := &Daemon{root: cfg.Root}
d.configStore.Store(cfg)
unmountFile := getUnmountOnShutdownPath(cfg)
unmountFile := getUnmountOnShutdownPath(&cfg.Config)
t.Run("regular dir no mountpoint", func(t *testing.T) {
err = setupDaemonRootPropagation(cfg)
err = setupDaemonRootPropagation(&cfg.Config)
assert.NilError(t, err)
_, err = os.Stat(unmountFile)
assert.NilError(t, err)
checkMounted(t, cfg.Root, true)
assert.Assert(t, d.cleanupMounts(cfg))
assert.Assert(t, d.cleanupMounts(&cfg.Config))
checkMounted(t, cfg.Root, false)
_, err = os.Stat(unmountFile)
@ -287,13 +286,13 @@ func TestRootMountCleanup(t *testing.T) {
assert.NilError(t, err)
defer mount.Unmount(cfg.Root)
err = setupDaemonRootPropagation(cfg)
err = setupDaemonRootPropagation(&cfg.Config)
assert.NilError(t, err)
assert.Check(t, ensureShared(cfg.Root))
_, err = os.Stat(unmountFile)
assert.Assert(t, os.IsNotExist(err))
assert.Assert(t, d.cleanupMounts(cfg))
assert.Assert(t, d.cleanupMounts(&cfg.Config))
checkMounted(t, cfg.Root, true)
})
@ -303,14 +302,14 @@ func TestRootMountCleanup(t *testing.T) {
assert.NilError(t, err)
defer mount.Unmount(cfg.Root)
err = setupDaemonRootPropagation(cfg)
err = setupDaemonRootPropagation(&cfg.Config)
assert.NilError(t, err)
if _, err := os.Stat(unmountFile); err == nil {
t.Fatal("unmount file should not exist")
}
assert.Assert(t, d.cleanupMounts(cfg))
assert.Assert(t, d.cleanupMounts(&cfg.Config))
checkMounted(t, cfg.Root, true)
assert.Assert(t, mount.Unmount(cfg.Root))
})
@ -323,13 +322,13 @@ func TestRootMountCleanup(t *testing.T) {
err = os.WriteFile(unmountFile, nil, 0644)
assert.NilError(t, err)
err = setupDaemonRootPropagation(cfg)
err = setupDaemonRootPropagation(&cfg.Config)
assert.NilError(t, err)
_, err = os.Stat(unmountFile)
assert.Check(t, os.IsNotExist(err), err)
checkMounted(t, cfg.Root, false)
assert.Assert(t, d.cleanupMounts(cfg))
assert.Assert(t, d.cleanupMounts(&cfg.Config))
})
}


@ -8,7 +8,6 @@ import (
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
"github.com/docker/docker/pkg/idtools"
@ -301,7 +300,7 @@ func TestMerge(t *testing.T) {
func TestValidateContainerIsolation(t *testing.T) {
d := Daemon{}
_, err := d.verifyContainerSettings(&config.Config{}, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
_, err := d.verifyContainerSettings(&configStore{}, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS))
}


@ -639,7 +639,7 @@ func isRunningSystemd() bool {
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *config.Config, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *configStore, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
if hostConfig == nil {
return nil, nil
}
@ -691,7 +691,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *config.Config, h
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && UsingSystemd(daemonCfg) {
if hostConfig.CgroupParent != "" && UsingSystemd(&daemonCfg.Config) {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
@ -701,7 +701,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *config.Config, h
hostConfig.Runtime = daemonCfg.DefaultRuntime
}
if _, _, err := daemon.getRuntime(daemonCfg, hostConfig.Runtime); err != nil {
if _, _, err := daemonCfg.Runtimes.Get(hostConfig.Runtime); err != nil {
return warnings, err
}
@ -757,7 +757,7 @@ func verifyDaemonSettings(conf *config.Config) error {
configureRuntimes(conf)
if rtName := conf.DefaultRuntime; rtName != "" {
if conf.GetRuntime(rtName) == nil {
if _, ok := conf.Runtimes[rtName]; !ok {
if !config.IsPermissibleC8dRuntimeName(rtName) {
return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
}


@ -245,7 +245,7 @@ func TestParseSecurityOpt(t *testing.T) {
}
func TestParseNNPSecurityOptions(t *testing.T) {
daemonCfg := &config.Config{NoNewPrivileges: true}
daemonCfg := &configStore{Config: config.Config{NoNewPrivileges: true}}
daemon := &Daemon{}
daemon.configStore.Store(daemonCfg)
opts := &container.SecurityOptions{}
@ -254,7 +254,7 @@ func TestParseNNPSecurityOptions(t *testing.T) {
// test NNP when "daemon:true" and "no-new-privileges=false""
cfg.SecurityOpt = []string{"no-new-privileges=false"}
if err := daemon.parseSecurityOpt(daemonCfg, opts, cfg); err != nil {
if err := daemon.parseSecurityOpt(&daemonCfg.Config, opts, cfg); err != nil {
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
}
if opts.NoNewPrivileges {
@ -265,7 +265,7 @@ func TestParseNNPSecurityOptions(t *testing.T) {
daemonCfg.NoNewPrivileges = false
cfg.SecurityOpt = []string{"no-new-privileges=true"}
if err := daemon.parseSecurityOpt(daemonCfg, opts, cfg); err != nil {
if err := daemon.parseSecurityOpt(&daemonCfg.Config, opts, cfg); err != nil {
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
}
if !opts.NoNewPrivileges {


@ -171,7 +171,7 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, isHyp
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *config.Config, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *configStore, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
if hostConfig == nil {
return nil, nil
}
@ -556,10 +556,6 @@ func (daemon *Daemon) setupSeccompProfile(*config.Config) error {
return nil
}
func (daemon *Daemon) loadRuntimes() error {
return nil
}
func setupResolvConf(config *config.Config) {}
func getSysInfo(*config.Config) *sysinfo.SysInfo {


@ -25,7 +25,7 @@ import (
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
return daemon.containerRm(daemon.config(), name, config)
return daemon.containerRm(&daemon.config().Config, name, config)
}
func (daemon *Daemon) containerRm(cfg *config.Config, name string, opts *types.ContainerRmConfig) error {


@ -252,7 +252,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
p.Cwd = "/"
}
daemonCfg := daemon.config()
daemonCfg := &daemon.config().Config
if err := daemon.execSetPlatformOpt(ctx, daemonCfg, ec, p); err != nil {
return err
}


@ -9,7 +9,6 @@ import (
"github.com/containerd/containerd/pkg/apparmor"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gotest.tools/v3/assert"
)
@ -50,7 +49,7 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) {
},
}
cfg := &config.Config{}
cfg := &configStore{}
d := &Daemon{}
d.configStore.Store(cfg)
@ -83,7 +82,7 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) {
ec := &container.ExecConfig{Container: c, Privileged: execPrivileged}
p := &specs.Process{}
err := d.execSetPlatformOpt(context.Background(), cfg, ec, p)
err := d.execSetPlatformOpt(context.Background(), &cfg.Config, ec, p)
assert.NilError(t, err)
assert.Equal(t, p.ApparmorProfile, tc.expectedProfile)
})


@ -64,14 +64,14 @@ func (daemon *Daemon) SystemInfo() *types.Info {
daemon.fillContainerStates(v)
daemon.fillDebugInfo(v)
daemon.fillAPIInfo(v, cfg)
daemon.fillAPIInfo(v, &cfg.Config)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo, cfg)
daemon.fillPlatformInfo(v, sysInfo, &cfg.Config)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v, cfg)
daemon.fillSecurityOptions(v, sysInfo, cfg)
daemon.fillPluginsInfo(v, &cfg.Config)
daemon.fillSecurityOptions(v, sysInfo, &cfg.Config)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v, cfg)
daemon.fillDefaultAddressPools(v, &cfg.Config)
return v
}
@ -117,7 +117,7 @@ func (daemon *Daemon) SystemVersion() types.Version {
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v, cfg)
daemon.fillPlatformVersion(&v, &cfg.Config)
return v
}


@ -38,23 +38,23 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo,
v.CPUSet = sysInfo.Cpuset
v.PidsLimit = sysInfo.PidsLimit
}
v.Runtimes = cfg.GetAllRuntimes()
v.Runtimes = make(map[string]types.Runtime)
for n, r := range cfg.Runtimes {
v.Runtimes[n] = types.Runtime{
Path: r.Path,
Args: append([]string(nil), r.Args...),
}
}
v.DefaultRuntime = cfg.DefaultRuntime
v.RuncCommit.ID = "N/A"
v.ContainerdCommit.ID = "N/A"
v.InitCommit.ID = "N/A"
if rt := cfg.GetRuntime(v.DefaultRuntime); rt != nil {
if rv, err := exec.Command(rt.Path, "--version").Output(); err == nil {
if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", rt.Path, err)
if _, _, commit, err := parseDefaultRuntimeVersion(cfg); err != nil {
logrus.Warnf(err.Error())
} else {
v.RuncCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", rt.Path, err)
}
}
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.ContainerdCommit.ID = rv.Revision
@ -177,24 +177,17 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *config.Config)
})
}
defaultRuntime := cfg.DefaultRuntime
if rt := cfg.GetRuntime(defaultRuntime); rt != nil {
if rv, err := exec.Command(rt.Path, "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", rt.Path, err)
if _, ver, commit, err := parseDefaultRuntimeVersion(cfg); err != nil {
logrus.Warnf(err.Error())
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: defaultRuntime,
Name: cfg.DefaultRuntime,
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", rt.Path, err)
}
}
if initBinary, err := cfg.LookupInitPath(); err != nil {
logrus.Warnf("failed to find docker-init: %s", err)
@ -318,7 +311,7 @@ func parseInitVersion(v string) (version string, commit string, err error) {
// runc version 1.0.0-rc5+dev
// commit: 69663f0bd4b60df09991c08812a60108003fa340
// spec: 1.0.0
func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {
func parseRuntimeVersion(v string) (runtime, version, commit string, err error) {
lines := strings.Split(strings.TrimSpace(v), "\n")
for _, line := range lines {
if strings.Contains(line, "version") {
@ -338,6 +331,21 @@ func parseRuntimeVersion(v string) (runtime string, version string, commit strin
return runtime, version, commit, err
}
func parseDefaultRuntimeVersion(cfg *config.Config) (runtime, version, commit string, err error) {
if rt, ok := cfg.Runtimes[cfg.DefaultRuntime]; ok {
rv, err := exec.Command(rt.Path, "--version").Output()
if err != nil {
return "", "", "", fmt.Errorf("failed to retrieve %s version: %w", rt.Path, err)
}
runtime, version, commit, err := parseRuntimeVersion(string(rv))
if err != nil {
return "", "", "", fmt.Errorf("failed to parse %s version: %w", rt.Path, err)
}
return runtime, version, commit, err
}
return "", "", "", nil
}
func cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo, cfg *config.Config) bool {
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(cfg.CgroupNamespaceMode).IsPrivate()
}


@ -41,7 +41,7 @@ func (daemon *Daemon) ContainerInspectCurrent(ctx context.Context, name string,
ctr.Lock()
base, err := daemon.getInspectData(daemon.config(), ctr)
base, err := daemon.getInspectData(&daemon.config().Config, ctr)
if err != nil {
ctr.Unlock()
return nil, err
@ -106,7 +106,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
ctr.Lock()
defer ctr.Unlock()
base, err := daemon.getInspectData(daemon.config(), ctr)
base, err := daemon.getInspectData(&daemon.config().Config, ctr)
if err != nil {
return nil, err
}


@ -29,7 +29,7 @@ func (daemon *Daemon) containerInspectPre120(ctx context.Context, name string) (
ctr.Lock()
defer ctr.Unlock()
base, err := daemon.getInspectData(daemon.config(), ctr)
base, err := daemon.getInspectData(&daemon.config().Config, ctr)
if err != nil {
return nil, err
}


@ -5,7 +5,6 @@ import (
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
@ -24,13 +23,13 @@ func TestGetInspectData(t *testing.T) {
if d.UsesSnapshotter() {
t.Skip("does not apply to containerd snapshotters, which don't have RWLayer set")
}
cfg := &config.Config{}
cfg := &configStore{}
d.configStore.Store(cfg)
_, err := d.getInspectData(cfg, c)
_, err := d.getInspectData(&cfg.Config, c)
assert.Check(t, is.ErrorContains(err, "RWLayer of container inspect-me is unexpectedly nil"))
c.Dead = true
_, err = d.getInspectData(cfg, c)
_, err = d.getInspectData(&cfg.Config, c)
assert.Check(t, err)
}


@ -102,7 +102,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
} else {
c.SetStopped(&exitStatus)
if !c.HasBeenManuallyRestarted {
defer daemon.autoRemove(cfg, c)
defer daemon.autoRemove(&cfg.Config, c)
}
}
defer c.Unlock() // needs to be called before autoRemove
@ -131,7 +131,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
daemon.setStateCounter(c)
c.CheckpointTo(daemon.containersReplica)
c.Unlock()
defer daemon.autoRemove(cfg, c)
defer daemon.autoRemove(&cfg.Config, c)
if err != restartmanager.ErrRestartCanceled {
logrus.Errorf("restartmanger wait error: %+v", err)
}


@ -161,7 +161,7 @@ func (daemon *Daemon) startIngressWorker() {
select {
case r := <-ingressJobsChannel:
if r.create != nil {
daemon.setupIngress(daemon.config(), r.create, r.ip, ingressID)
daemon.setupIngress(&daemon.config().Config, r.create, r.ip, ingressID)
ingressID = r.create.ID
} else {
daemon.releaseIngress(ingressID)
@ -278,13 +278,13 @@ func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networ
// CreateManagedNetwork creates an agent network.
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
_, err := daemon.createNetwork(daemon.config(), create.NetworkCreateRequest, create.ID, true)
_, err := daemon.createNetwork(&daemon.config().Config, create.NetworkCreateRequest, create.ID, true)
return err
}
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
return daemon.createNetwork(daemon.config(), create, "", false)
return daemon.createNetwork(&daemon.config().Config, create, "", false)
}
func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {


@ -489,7 +489,7 @@ func inSlice(slice []string, s string) bool {
}
// withMounts sets the container's mounts
func withMounts(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
func withMounts(daemon *Daemon, daemonCfg *configStore, c *container.Container) coci.SpecOpts {
return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) (err error) {
if err := daemon.setupContainerMountsRoot(c); err != nil {
return err
@ -1019,23 +1019,23 @@ func WithUser(c *container.Container) coci.SpecOpts {
}
}
func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *dconfig.Config, c *container.Container) (retSpec *specs.Spec, err error) {
func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *configStore, c *container.Container) (retSpec *specs.Spec, err error) {
var (
opts []coci.SpecOpts
s = oci.DefaultSpec()
)
opts = append(opts,
withCommonOptions(daemon, daemonCfg, c),
withCgroups(daemon, daemonCfg, c),
withCommonOptions(daemon, &daemonCfg.Config, c),
withCgroups(daemon, &daemonCfg.Config, c),
WithResources(c),
WithSysctls(c),
WithDevices(daemon, c),
withRlimits(daemon, daemonCfg, c),
withRlimits(daemon, &daemonCfg.Config, c),
WithNamespaces(daemon, c),
WithCapabilities(c),
WithSeccomp(daemon, c),
withMounts(daemon, daemonCfg, c),
withLibnetwork(daemon, daemonCfg, c),
withLibnetwork(daemon, &daemonCfg.Config, c),
WithApparmor(c),
WithSelinux(c),
WithOOMScore(&c.HostConfig.OomScoreAdj),
@ -1069,7 +1069,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *dconfig.Config,
opts = append(opts, coci.WithReadonlyPaths(c.HostConfig.ReadonlyPaths))
}
if daemonCfg.Rootless {
opts = append(opts, withRootless(daemon, daemonCfg))
opts = append(opts, withRootless(daemon, &daemonCfg.Config))
}
var snapshotter, snapshotKey string


@ -82,7 +82,7 @@ func TestTmpfsDevShmNoDupMount(t *testing.T) {
d := setupFakeDaemon(t, c)
defer cleanupFakeContainer(c)
_, err := d.createSpec(context.TODO(), &config.Config{}, c)
_, err := d.createSpec(context.TODO(), &configStore{}, c)
assert.Check(t, err)
}
@ -101,7 +101,7 @@ func TestIpcPrivateVsReadonly(t *testing.T) {
d := setupFakeDaemon(t, c)
defer cleanupFakeContainer(c)
s, err := d.createSpec(context.TODO(), &config.Config{}, c)
s, err := d.createSpec(context.TODO(), &configStore{}, c)
assert.Check(t, err)
// Find the /dev/shm mount in ms, check it does not have ro
@ -131,7 +131,7 @@ func TestSysctlOverride(t *testing.T) {
defer cleanupFakeContainer(c)
// Ensure that the implicit sysctl is set correctly.
s, err := d.createSpec(context.TODO(), &config.Config{}, c)
s, err := d.createSpec(context.TODO(), &configStore{}, c)
assert.NilError(t, err)
assert.Equal(t, s.Hostname, "foobar")
assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.Config.Domainname)
@ -147,14 +147,14 @@ func TestSysctlOverride(t *testing.T) {
assert.Assert(t, c.HostConfig.Sysctls["kernel.domainname"] != c.Config.Domainname)
c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024"
s, err = d.createSpec(context.TODO(), &config.Config{}, c)
s, err = d.createSpec(context.TODO(), &configStore{}, c)
assert.NilError(t, err)
assert.Equal(t, s.Hostname, "foobar")
assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.HostConfig.Sysctls["kernel.domainname"])
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"])
// Ensure the ping_group_range is not set on a daemon with user-namespaces enabled
s, err = d.createSpec(context.TODO(), &config.Config{RemappedRoot: "dummy:dummy"}, c)
s, err = d.createSpec(context.TODO(), &configStore{Config: config.Config{RemappedRoot: "dummy:dummy"}}, c)
assert.NilError(t, err)
_, ok := s.Linux.Sysctl["net.ipv4.ping_group_range"]
assert.Assert(t, !ok)
@ -162,7 +162,7 @@ func TestSysctlOverride(t *testing.T) {
// Ensure the ping_group_range is set on a container in "host" userns mode
// on a daemon with user-namespaces enabled
c.HostConfig.UsernsMode = "host"
s, err = d.createSpec(context.TODO(), &config.Config{RemappedRoot: "dummy:dummy"}, c)
s, err = d.createSpec(context.TODO(), &configStore{Config: config.Config{RemappedRoot: "dummy:dummy"}}, c)
assert.NilError(t, err)
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "0 2147483647")
}
@ -182,7 +182,7 @@ func TestSysctlOverrideHost(t *testing.T) {
defer cleanupFakeContainer(c)
// Ensure that the implicit sysctl is not set
s, err := d.createSpec(context.TODO(), &config.Config{}, c)
s, err := d.createSpec(context.TODO(), &configStore{}, c)
assert.NilError(t, err)
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "")
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "")
@ -190,7 +190,7 @@ func TestSysctlOverrideHost(t *testing.T) {
// Set an explicit sysctl.
c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024"
s, err = d.createSpec(context.TODO(), &config.Config{}, c)
s, err = d.createSpec(context.TODO(), &configStore{}, c)
assert.NilError(t, err)
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"])
}


@ -28,7 +28,7 @@ const (
credentialSpecFileLocation = "CredentialSpecs"
)
func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *config.Config, c *container.Container) (*specs.Spec, error) {
func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *configStore, c *container.Container) (*specs.Spec, error) {
img, err := daemon.imageService.GetImage(ctx, string(c.ImageID), imagetypes.GetImageOpts{})
if err != nil {
return nil, err
@ -143,7 +143,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *config.Config,
return nil, errors.Wrapf(err, "container %s", c.ID)
}
dnsSearch := daemon.getDNSSearchSettings(daemonCfg, c)
dnsSearch := daemon.getDNSSearchSettings(&daemonCfg.Config, c)
// Get endpoints for the libnetwork allocated networks to the container
var epList []string


@ -56,7 +56,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
return nil, err
}
cfg := daemon.config()
cfg := &daemon.config().Config
allContainers := daemon.List()
for _, c := range allContainers {
select {


@ -72,11 +72,13 @@ func (tx *reloadTxn) Rollback() error {
func (daemon *Daemon) Reload(conf *config.Config) error {
daemon.configReload.Lock()
defer daemon.configReload.Unlock()
copied, err := copystructure.Copy(daemon.config())
copied, err := copystructure.Copy(daemon.config().Config)
if err != nil {
return err
}
newCfg := copied.(*config.Config)
newCfg := &configStore{
Config: copied.(config.Config),
}
attributes := map[string]string{}
@ -91,7 +93,7 @@ func (daemon *Daemon) Reload(conf *config.Config) error {
// executing any registered rollback functions.
var txn reloadTxn
for _, reload := range []func(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error{
for _, reload := range []func(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error{
daemon.reloadPlatform,
daemon.reloadDebug,
daemon.reloadMaxConcurrentDownloadsAndUploads,
@ -115,7 +117,7 @@ func (daemon *Daemon) Reload(conf *config.Config) error {
*config.Config
config.Proxies `json:"proxies"`
}{
Config: newCfg,
Config: &newCfg.Config,
Proxies: config.Proxies{
HTTPProxy: config.MaskCredentials(newCfg.HTTPProxy),
HTTPSProxy: config.MaskCredentials(newCfg.HTTPSProxy),
@ -141,7 +143,7 @@ func marshalAttributeSlice(v []string) string {
// reloadDebug updates configuration with Debug option
// and updates the passed attributes
func (daemon *Daemon) reloadDebug(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadDebug(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// update corresponding configuration
if conf.IsValueSet("debug") {
newCfg.Debug = conf.Debug
@ -153,7 +155,7 @@ func (daemon *Daemon) reloadDebug(txn *reloadTxn, newCfg, conf *config.Config, a
// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
// download and upload options and updates the passed attributes
func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// We always "reset" as the cost is lightweight and easy to maintain.
newCfg.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
newCfg.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads
@ -184,7 +186,7 @@ func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, new
// reloadMaxDownloadAttempts updates configuration with max concurrent
// download attempts when a connection is lost and updates the passed attributes
func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// We always "reset" as the cost is lightweight and easy to maintain.
newCfg.MaxDownloadAttempts = config.DefaultDownloadAttempts
if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != 0 {
@ -199,7 +201,7 @@ func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg, conf *co
// reloadShutdownTimeout updates configuration with daemon shutdown timeout option
// and updates the passed attributes
func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// update corresponding configuration
if conf.IsValueSet("shutdown-timeout") {
newCfg.ShutdownTimeout = conf.ShutdownTimeout
@ -213,7 +215,7 @@ func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg, conf *config
// reloadLabels updates configuration with engine labels
// and updates the passed attributes
func (daemon *Daemon) reloadLabels(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadLabels(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// update corresponding configuration
if conf.IsValueSet("labels") {
newCfg.Labels = conf.Labels
@ -226,7 +228,7 @@ func (daemon *Daemon) reloadLabels(txn *reloadTxn, newCfg, conf *config.Config,
// reloadRegistryConfig updates the configuration with registry options
// and updates the passed attributes.
func (daemon *Daemon) reloadRegistryConfig(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadRegistryConfig(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// Update corresponding configuration.
if conf.IsValueSet("allow-nondistributable-artifacts") {
newCfg.ServiceOptions.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
@ -253,7 +255,7 @@ func (daemon *Daemon) reloadRegistryConfig(txn *reloadTxn, newCfg, conf *config.
// reloadLiveRestore updates configuration with live restore option
// and updates the passed attributes
func (daemon *Daemon) reloadLiveRestore(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadLiveRestore(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// update corresponding configuration
if conf.IsValueSet("live-restore") {
newCfg.LiveRestoreEnabled = conf.LiveRestoreEnabled
@ -265,7 +267,7 @@ func (daemon *Daemon) reloadLiveRestore(txn *reloadTxn, newCfg, conf *config.Con
}
// reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid
func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
txn.OnCommit(func() error {
if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") ||
conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 {
@ -284,7 +286,7 @@ func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg, conf *
}
// reloadFeatures updates configuration with enabled/disabled features
func (daemon *Daemon) reloadFeatures(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadFeatures(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
// update corresponding configuration
// note that we allow features option to be entirely unset
newCfg.Features = conf.Features


@ -27,7 +27,7 @@ func newDaemonForReloadT(t *testing.T, cfg *config.Config) *Daemon {
var err error
daemon.registryService, err = registry.NewService(registry.ServiceOptions{})
assert.Assert(t, err)
daemon.configStore.Store(cfg)
daemon.configStore.Store(&configStore{Config: *cfg})
return daemon
}


@ -11,15 +11,19 @@ import (
// reloadPlatform updates configuration with platform specific options
// and updates the passed attributes
func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
if conf.DefaultRuntime != "" {
newCfg.DefaultRuntime = conf.DefaultRuntime
}
if conf.IsValueSet("runtimes") {
newCfg.Runtimes = conf.Runtimes
txn.OnCommit(func() error { return daemon.initRuntimes(newCfg) })
newCfg.Config.Runtimes = conf.Runtimes
}
configureRuntimes(&newCfg.Config)
var err error
newCfg.Runtimes, err = setupRuntimes(&newCfg.Config)
if err != nil {
return err
}
configureRuntimes(newCfg)
if conf.IsValueSet("default-shm-size") {
newCfg.ShmSize = conf.ShmSize
@ -35,7 +39,7 @@ func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg, conf *config.Config
// Update attributes
var runtimeList bytes.Buffer
for name, rt := range newCfg.Runtimes {
for name, rt := range newCfg.Config.Runtimes {
if runtimeList.Len() > 0 {
runtimeList.WriteRune(' ')
}


@ -4,6 +4,6 @@ import "github.com/docker/docker/daemon/config"
// reloadPlatform updates configuration with platform specific options
// and updates the passed attributes
func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg *configStore, conf *config.Config, attributes map[string]string) error {
return nil
}


@ -6,7 +6,6 @@ import (
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
)
// ContainerRestart stops and starts a container. It attempts to
@ -31,7 +30,7 @@ func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, options
// container. When stopping, wait for the given duration in seconds to
// gracefully stop, before forcefully terminating the container. If
// given a negative duration, wait forever for a graceful stop.
func (daemon *Daemon) containerRestart(ctx context.Context, daemonCfg *config.Config, container *container.Container, options containertypes.StopOptions) error {
func (daemon *Daemon) containerRestart(ctx context.Context, daemonCfg *configStore, container *container.Container, options containertypes.StopOptions) error {
// Determine isolation. If not specified in the hostconfig, use daemon default.
actualIsolation := container.HostConfig.Isolation
if containertypes.Isolation.IsDefault(actualIsolation) {


@ -4,18 +4,24 @@ package daemon
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/containerd/containerd/plugin"
v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/docker/docker/api/types"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd/shimopts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/runtime-spec/specs-go/features"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -23,10 +29,21 @@ import (
const (
defaultRuntimeName = "runc"
linuxShimV2 = "io.containerd.runc.v2"
)
type shimConfig struct {
Shim string
Opts interface{}
Features *features.Features
// Check if the ShimConfig is valid given the current state of the system.
PreflightCheck func() error
}
type runtimes struct {
configured map[string]*shimConfig
}
func configureRuntimes(conf *config.Config) {
if conf.DefaultRuntime == "" {
conf.DefaultRuntime = config.StockRuntimeName
@@ -34,13 +51,13 @@ func configureRuntimes(conf *config.Config) {
if conf.Runtimes == nil {
conf.Runtimes = make(map[string]types.Runtime)
}
conf.Runtimes[config.LinuxV2RuntimeName] = types.Runtime{Path: defaultRuntimeName, ShimConfig: defaultV2ShimConfig(conf, defaultRuntimeName)}
conf.Runtimes[config.LinuxV2RuntimeName] = types.Runtime{Path: defaultRuntimeName}
conf.Runtimes[config.StockRuntimeName] = conf.Runtimes[config.LinuxV2RuntimeName]
}
func defaultV2ShimConfig(conf *config.Config, runtimePath string) *types.ShimConfig {
return &types.ShimConfig{
Binary: linuxShimV2,
func defaultV2ShimConfig(conf *config.Config, runtimePath string) *shimConfig {
shim := &shimConfig{
Shim: plugin.RuntimeRuncV2,
Opts: &v2runcoptions.Options{
BinaryName: runtimePath,
Root: filepath.Join(conf.ExecRoot, "runtime-"+defaultRuntimeName),
@@ -48,72 +65,9 @@ func defaultV2ShimConfig(conf *config.Config, runtimePath string) *types.ShimCon
NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
},
}
}
func (daemon *Daemon) loadRuntimes() error {
return daemon.initRuntimes(daemon.config())
}
func (daemon *Daemon) initRuntimes(cfg *config.Config) (err error) {
runtimeDir := filepath.Join(cfg.Root, "runtimes")
runtimeOldDir := runtimeDir + "-old"
// Remove old temp directory if any
os.RemoveAll(runtimeOldDir)
tmpDir, err := os.MkdirTemp(cfg.Root, "gen-runtimes")
if err != nil {
return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
}
defer func() {
if err != nil {
if err1 := os.RemoveAll(tmpDir); err1 != nil {
logrus.WithError(err1).WithField("dir", tmpDir).
Warn("failed to remove tmp dir")
}
return
}
if err = os.Rename(runtimeDir, runtimeOldDir); err != nil {
logrus.WithError(err).WithField("dir", runtimeDir).
Warn("failed to rename runtimes dir to old. Will try to removing it")
if err = os.RemoveAll(runtimeDir); err != nil {
logrus.WithError(err).WithField("dir", runtimeDir).
Warn("failed to remove old runtimes dir")
return
}
}
if err = os.Rename(tmpDir, runtimeDir); err != nil {
err = errors.Wrap(err, "failed to setup runtimes dir, new containers may not start")
return
}
if err = os.RemoveAll(runtimeOldDir); err != nil {
logrus.WithError(err).WithField("dir", runtimeOldDir).
Warn("failed to remove old runtimes dir")
}
}()
for name := range cfg.Runtimes {
rt := cfg.Runtimes[name]
if rt.Path == "" && rt.Type == "" {
return errors.Errorf("runtime %s: either a runtimeType or a path must be configured", name)
}
if rt.Path != "" {
if rt.Type != "" {
return errors.Errorf("runtime %s: cannot configure both path and runtimeType for the same runtime", name)
}
if len(rt.Options) > 0 {
return errors.Errorf("runtime %s: options cannot be used with a path runtime", name)
}
if len(rt.Args) > 0 {
script := filepath.Join(tmpDir, name)
content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " "))
if err := os.WriteFile(script, []byte(content), 0700); err != nil {
return err
}
}
rt.ShimConfig = defaultV2ShimConfig(cfg, daemon.rewriteRuntimePath(cfg, name, rt.Path, rt.Args))
var featuresStderr bytes.Buffer
featuresCmd := exec.Command(rt.Path, append(rt.Args, "features")...)
featuresCmd := exec.Command(runtimePath, "features")
featuresCmd.Stderr = &featuresStderr
if featuresB, err := featuresCmd.Output(); err != nil {
logrus.WithError(err).Warnf("Failed to run %v: %q", featuresCmd.Args, featuresStderr.String())
@@ -122,64 +76,131 @@ func (daemon *Daemon) initRuntimes(cfg *config.Config) (err error) {
if jsonErr := json.Unmarshal(featuresB, &features); jsonErr != nil {
logrus.WithError(err).Warnf("Failed to unmarshal the output of %v as a JSON", featuresCmd.Args)
} else {
rt.Features = &features
shim.Features = &features
}
}
return shim
}
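For orientation, the value produced by defaultV2ShimConfig above for the stock runtime would look roughly like the literal below. The paths are hypothetical, and the Features field is only populated when probing the binary with the `features` subcommand succeeds; this is an illustration, not part of the diff.

// Illustrative only: approximate result of defaultV2ShimConfig(conf, "/usr/bin/runc")
// for a hypothetical conf with ExecRoot "/var/run/docker" and DOCKER_RAMDISK unset.
var exampleRuncShim = &shimConfig{
	Shim: plugin.RuntimeRuncV2, // "io.containerd.runc.v2"
	Opts: &v2runcoptions.Options{
		BinaryName:  "/usr/bin/runc",
		Root:        "/var/run/docker/runtime-runc",
		NoPivotRoot: false,
	},
	// Features: parsed from the `features` probe output, when available.
}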
func runtimeScriptsDir(cfg *config.Config) string {
return filepath.Join(cfg.Root, "runtimes")
}
// initRuntimesDir creates a fresh directory where we'll store the runtime
// scripts (i.e. in order to support runtimeArgs).
func initRuntimesDir(cfg *config.Config) error {
runtimeDir := runtimeScriptsDir(cfg)
if err := os.RemoveAll(runtimeDir); err != nil {
return err
}
return system.MkdirAll(runtimeDir, 0700)
}
func setupRuntimes(cfg *config.Config) (runtimes, error) {
newrt := runtimes{
configured: make(map[string]*shimConfig),
}
dir := runtimeScriptsDir(cfg)
for name, rt := range cfg.Runtimes {
var c *shimConfig
if rt.Path == "" && rt.Type == "" {
return runtimes{}, errors.Errorf("runtime %s: either a runtimeType or a path must be configured", name)
}
if rt.Path != "" {
if rt.Type != "" {
return runtimes{}, errors.Errorf("runtime %s: cannot configure both path and runtimeType for the same runtime", name)
}
if len(rt.Options) > 0 {
return runtimes{}, errors.Errorf("runtime %s: options cannot be used with a path runtime", name)
}
binaryName := rt.Path
needsWrapper := len(rt.Args) > 0
if needsWrapper {
var err error
binaryName, err = wrapRuntime(dir, name, rt.Path, rt.Args)
if err != nil {
return runtimes{}, err
}
}
c = defaultV2ShimConfig(cfg, binaryName)
if needsWrapper {
path := rt.Path
c.PreflightCheck = func() error {
// Check that the runtime path actually exists so that we can return a well known error.
_, err := exec.LookPath(path)
return errors.Wrap(err, "error while looking up the specified runtime path")
}
}
} else {
if len(rt.Args) > 0 {
return errors.Errorf("runtime %s: args cannot be used with a runtimeType runtime", name)
return runtimes{}, errors.Errorf("runtime %s: args cannot be used with a runtimeType runtime", name)
}
// Unlike implicit runtimes, there is no restriction on configuring a shim by path.
rt.ShimConfig = &types.ShimConfig{Binary: rt.Type}
c = &shimConfig{Shim: rt.Type}
if len(rt.Options) > 0 {
// It has to be a pointer type or there'll be a panic in containerd/typeurl when we try to start the container.
rt.ShimConfig.Opts, err = shimopts.Generate(rt.Type, rt.Options)
var err error
c.Opts, err = shimopts.Generate(rt.Type, rt.Options)
if err != nil {
return errors.Wrapf(err, "runtime %v", name)
return runtimes{}, errors.Wrapf(err, "runtime %v", name)
}
}
}
cfg.Runtimes[name] = rt
newrt.configured[name] = c
}
return nil
return newrt, nil
}
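A minimal sketch of how the helpers above are meant to be sequenced, inferred from reloadPlatform earlier in this diff and the tests below; the function name and flow are illustrative, and error handling is collapsed for brevity.

// exampleRuntimesLifecycle is not part of this change; it only illustrates
// the intended startup/reload ordering of initRuntimesDir and setupRuntimes.
func exampleRuntimesLifecycle(startupCfg, reloadedCfg *config.Config) error {
	// Daemon startup: clear out wrapper scripts left over from a previous
	// daemon process, then derive the shim configuration.
	if err := initRuntimesDir(startupCfg); err != nil {
		return err
	}
	rts, err := setupRuntimes(startupCfg)
	if err != nil {
		return err
	}
	// Config reload: only setupRuntimes runs again. initRuntimesDir is not
	// called, so wrapper scripts referenced by running containers remain in
	// place; new or changed runtimes get new content-addressed scripts.
	rts, err = setupRuntimes(reloadedCfg)
	if err != nil {
		return err
	}
	// Container lifecycle operations resolve the shim and options on demand.
	_, _, err = rts.Get(reloadedCfg.DefaultRuntime)
	return err
}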
// rewriteRuntimePath is used for runtimes which have custom arguments supplied.
// This is needed because the containerd API only calls the OCI runtime binary, there is no options for extra arguments.
// To support this case, the daemon wraps the specified runtime in a script that passes through those arguments.
func (daemon *Daemon) rewriteRuntimePath(cfg *config.Config, name, p string, args []string) string {
if len(args) == 0 {
return p
}
// A non-standard Base32 encoding which lacks vowels to avoid accidentally
// spelling naughty words. Don't use this to encode any data which requires
// compatibility with anything outside of the currently-running process.
var base32Disemvoweled = base32.NewEncoding("0123456789BCDFGHJKLMNPQRSTVWXYZ-")
return filepath.Join(cfg.Root, "runtimes", name)
// wrapRuntime writes a shell script to dir which will execute binary with args
// concatenated to the script's argv. This is needed because the
// io.containerd.runc.v2 shim has no options for passing extra arguments to the
// runtime binary.
func wrapRuntime(dir, name, binary string, args []string) (string, error) {
var wrapper bytes.Buffer
sum := sha256.New()
_, _ = fmt.Fprintf(io.MultiWriter(&wrapper, sum), "#!/bin/sh\n%s %s $@\n", binary, strings.Join(args, " "))
// Generate a consistent name for the wrapper script derived from the
// contents so that multiple wrapper scripts can coexist with the same
// base name. The existing scripts might still be referenced by running
// containers.
suffix := base32Disemvoweled.EncodeToString(sum.Sum(nil))
scriptPath := filepath.Join(dir, name+"."+suffix)
if err := ioutils.AtomicWriteFile(scriptPath, wrapper.Bytes(), 0700); err != nil {
return "", err
}
return scriptPath, nil
}
func (daemon *Daemon) getRuntime(cfg *config.Config, name string) (shim string, opts interface{}, err error) {
rt := cfg.GetRuntime(name)
if rt == nil {
func (r *runtimes) Get(name string) (string, interface{}, error) {
rt := r.configured[name]
if rt != nil {
if rt.PreflightCheck != nil {
if err := rt.PreflightCheck(); err != nil {
return "", nil, err
}
}
return rt.Shim, rt.Opts, nil
}
if !config.IsPermissibleC8dRuntimeName(name) {
return "", nil, errdefs.InvalidParameter(errors.Errorf("unknown or invalid runtime name: %s", name))
}
return name, nil, nil
}
if len(rt.Args) > 0 {
// Check that the path of the runtime which the script wraps actually exists so
// that we can return a well known error which references the configured path
// instead of the wrapper script's.
if _, err := exec.LookPath(rt.Path); err != nil {
return "", nil, errors.Wrap(err, "error while looking up the specified runtime path")
}
}
if rt.ShimConfig == nil {
// Should never happen as daemon.initRuntimes always sets
// ShimConfig and config reloading is synchronized.
err := errdefs.System(errors.Errorf("BUG: runtime %s: rt.ShimConfig == nil", name))
logrus.Error(err)
return "", nil, err
}
return rt.ShimConfig.Binary, rt.ShimConfig.Opts, nil
}
func (r *runtimes) Features(name string) *features.Features {
rt := r.configured[name]
if rt != nil {
return rt.Features
}
return nil
}
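To make the content-addressed naming concrete, here is a small standalone sketch that mirrors wrapRuntime's scheme; the directory, runtime name, binary path and arguments are hypothetical, and this is not the daemon's own code path.

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"path/filepath"
	"strings"
)

// Same vowel-free alphabet as base32Disemvoweled above.
var b32 = base32.NewEncoding("0123456789BCDFGHJKLMNPQRSTVWXYZ-")

// wrapperPath mirrors wrapRuntime's naming: the wrapper script body is hashed
// with SHA-256 and the digest becomes a filename suffix, so the same
// (binary, args) pair always maps to the same immutable file name.
func wrapperPath(dir, name, binary string, args []string) string {
	content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", binary, strings.Join(args, " "))
	sum := sha256.Sum256([]byte(content))
	return filepath.Join(dir, name+"."+b32.EncodeToString(sum[:]))
}

func main() {
	// Hypothetical values; prints /var/lib/docker/runtimes/myrt.<digest suffix>
	fmt.Println(wrapperPath("/var/lib/docker/runtimes", "myrt", "/usr/local/bin/my-runc", []string{"--debug"}))
}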

View file

@@ -3,8 +3,9 @@
package daemon
import (
"io/fs"
"os"
"path/filepath"
"strings"
"testing"
"github.com/containerd/containerd/plugin"
@@ -88,11 +89,9 @@ func TestInitRuntimes_InvalidConfigs(t *testing.T) {
assert.NilError(t, err)
cfg.Root = t.TempDir()
cfg.Runtimes["myruntime"] = tt.runtime
d := &Daemon{}
d.configStore.Store(cfg)
assert.Assert(t, os.Mkdir(filepath.Join(d.config().Root, "runtimes"), 0700))
assert.Assert(t, initRuntimesDir(cfg))
err = d.initRuntimes(d.config())
_, err = setupRuntimes(cfg)
assert.Check(t, is.ErrorContains(err, tt.expectErr))
})
}
@@ -127,7 +126,6 @@ func TestGetRuntime(t *testing.T) {
assert.NilError(t, err)
cfg.Root = t.TempDir()
assert.Assert(t, os.Mkdir(filepath.Join(cfg.Root, "runtimes"), 0700))
cfg.Runtimes = map[string]types.Runtime{
configuredRtName: configuredRuntime,
rtWithArgsName: rtWithArgs,
@@ -136,32 +134,34 @@ func TestGetRuntime(t *testing.T) {
configuredShimByPathName: configuredShimByPath,
}
configureRuntimes(cfg)
assert.NilError(t, initRuntimesDir(cfg))
runtimes, err := setupRuntimes(cfg)
assert.NilError(t, err)
d := &Daemon{}
d.configStore.Store(cfg)
assert.Assert(t, d.loadRuntimes())
stockRuntime, ok := cfg.Runtimes[config.StockRuntimeName]
stockRuntime, ok := runtimes.configured[config.StockRuntimeName]
assert.Assert(t, ok, "stock runtime could not be found (test needs to be updated)")
stockRuntime.Features = nil
configdOpts := *stockRuntime.ShimConfig.Opts.(*v2runcoptions.Options)
configdOpts := *stockRuntime.Opts.(*v2runcoptions.Options)
configdOpts.BinaryName = configuredRuntime.Path
wantConfigdRuntime := &shimConfig{
Shim: stockRuntime.Shim,
Opts: &configdOpts,
}
for _, tt := range []struct {
name, runtime string
wantShim string
wantOpts interface{}
want *shimConfig
}{
{
name: "StockRuntime",
runtime: config.StockRuntimeName,
wantShim: stockRuntime.ShimConfig.Binary,
wantOpts: stockRuntime.ShimConfig.Opts,
want: stockRuntime,
},
{
name: "ShimName",
runtime: "io.containerd.my-shim.v42",
wantShim: "io.containerd.my-shim.v42",
want: &shimConfig{Shim: "io.containerd.my-shim.v42"},
},
{
// containerd is pretty loose about the format of runtime names. Perhaps too
@@ -170,7 +170,7 @@ func TestGetRuntime(t *testing.T) {
// particular format of the dot-delimited components of the name.
name: "VersionlessShimName",
runtime: "io.containerd.my-shim",
wantShim: "io.containerd.my-shim",
want: &shimConfig{Shim: "io.containerd.my-shim"},
},
{
name: "IllformedShimName",
@@ -195,48 +195,150 @@ func TestGetRuntime(t *testing.T) {
{
name: "ConfiguredRuntime",
runtime: configuredRtName,
wantShim: stockRuntime.ShimConfig.Binary,
wantOpts: &configdOpts,
},
{
name: "RuntimeWithArgs",
runtime: rtWithArgsName,
wantShim: stockRuntime.ShimConfig.Binary,
wantOpts: defaultV2ShimConfig(
d.config(),
d.rewriteRuntimePath(
d.config(),
rtWithArgsName,
rtWithArgs.Path,
rtWithArgs.Args)).Opts,
want: wantConfigdRuntime,
},
{
name: "ShimWithOpts",
runtime: shimWithOptsName,
wantShim: shimWithOpts.Type,
wantOpts: &v2runcoptions.Options{IoUid: 42},
want: &shimConfig{
Shim: shimWithOpts.Type,
Opts: &v2runcoptions.Options{IoUid: 42},
},
},
{
name: "ShimAlias",
runtime: shimAliasName,
wantShim: shimAlias.Type,
want: &shimConfig{Shim: shimAlias.Type},
},
{
name: "ConfiguredShimByPath",
runtime: configuredShimByPathName,
wantShim: configuredShimByPath.Type,
want: &shimConfig{Shim: configuredShimByPath.Type},
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
gotShim, gotOpts, err := d.getRuntime(cfg, tt.runtime)
assert.Check(t, is.Equal(gotShim, tt.wantShim))
assert.Check(t, is.DeepEqual(gotOpts, tt.wantOpts))
if tt.wantShim != "" {
shim, opts, err := runtimes.Get(tt.runtime)
if tt.want != nil {
assert.Check(t, err)
got := &shimConfig{Shim: shim, Opts: opts}
assert.Check(t, is.DeepEqual(got, tt.want))
} else {
assert.Check(t, errdefs.IsInvalidParameter(err))
assert.Check(t, is.Equal(shim, ""))
assert.Check(t, is.Nil(opts))
assert.Check(t, errdefs.IsInvalidParameter(err), "[%T] %[1]v", err)
}
})
}
t.Run("RuntimeWithArgs", func(t *testing.T) {
shim, opts, err := runtimes.Get(rtWithArgsName)
assert.Check(t, err)
assert.Check(t, is.Equal(shim, stockRuntime.Shim))
runcopts, ok := opts.(*v2runcoptions.Options)
if assert.Check(t, ok, "runtimes.Get() opts = type %T, want *v2runcoptions.Options", opts) {
wrapper, err := os.ReadFile(runcopts.BinaryName)
if assert.Check(t, err) {
assert.Check(t, is.Contains(string(wrapper),
strings.Join(append([]string{rtWithArgs.Path}, rtWithArgs.Args...), " ")))
}
}
})
}
func TestGetRuntime_PreflightCheck(t *testing.T) {
cfg, err := config.New()
assert.NilError(t, err)
cfg.Root = t.TempDir()
cfg.Runtimes = map[string]types.Runtime{
"path-only": {
Path: "/usr/local/bin/file-not-found",
},
"with-args": {
Path: "/usr/local/bin/file-not-found",
Args: []string{"--arg"},
},
}
assert.NilError(t, initRuntimesDir(cfg))
runtimes, err := setupRuntimes(cfg)
assert.NilError(t, err, "runtime paths should not be validated during setupRuntimes()")
t.Run("PathOnly", func(t *testing.T) {
_, _, err := runtimes.Get("path-only")
assert.NilError(t, err, "custom runtimes without wrapper scripts should not have pre-flight checks")
})
t.Run("WithArgs", func(t *testing.T) {
_, _, err := runtimes.Get("with-args")
assert.ErrorIs(t, err, fs.ErrNotExist)
})
}
// TestRuntimeWrapping checks that reloading runtime config does not delete or
// modify existing wrapper scripts, which could break lifecycle management of
// existing containers.
func TestRuntimeWrapping(t *testing.T) {
cfg, err := config.New()
assert.NilError(t, err)
cfg.Root = t.TempDir()
cfg.Runtimes = map[string]types.Runtime{
"change-args": {
Path: "/bin/true",
Args: []string{"foo", "bar"},
},
"dupe": {
Path: "/bin/true",
Args: []string{"foo", "bar"},
},
"change-path": {
Path: "/bin/true",
Args: []string{"baz"},
},
"drop-args": {
Path: "/bin/true",
Args: []string{"some", "arguments"},
},
"goes-away": {
Path: "/bin/true",
Args: []string{"bye"},
},
}
assert.NilError(t, initRuntimesDir(cfg))
rt, err := setupRuntimes(cfg)
assert.Check(t, err)
type WrapperInfo struct{ BinaryName, Content string }
wrappers := make(map[string]WrapperInfo)
for name := range cfg.Runtimes {
_, opts, err := rt.Get(name)
if assert.Check(t, err, "rt.Get(%q)", name) {
binary := opts.(*v2runcoptions.Options).BinaryName
content, err := os.ReadFile(binary)
assert.Check(t, err, "could not read wrapper script contents for runtime %q", binary)
wrappers[name] = WrapperInfo{BinaryName: binary, Content: string(content)}
}
}
cfg.Runtimes["change-args"] = types.Runtime{
Path: cfg.Runtimes["change-args"].Path,
Args: []string{"baz", "quux"},
}
cfg.Runtimes["change-path"] = types.Runtime{
Path: "/bin/false",
Args: cfg.Runtimes["change-path"].Args,
}
cfg.Runtimes["drop-args"] = types.Runtime{
Path: cfg.Runtimes["drop-args"].Path,
}
delete(cfg.Runtimes, "goes-away")
_, err = setupRuntimes(cfg)
assert.Check(t, err)
for name, info := range wrappers {
t.Run(name, func(t *testing.T) {
content, err := os.ReadFile(info.BinaryName)
assert.NilError(t, err)
assert.DeepEqual(t, info.Content, string(content))
})
}
}

View file

@@ -6,6 +6,16 @@ import (
"github.com/docker/docker/daemon/config"
)
func (daemon *Daemon) getRuntime(cfg *config.Config, name string) (shim string, opts interface{}, err error) {
type runtimes struct{}
func (r *runtimes) Get(name string) (string, interface{}, error) {
return "", nil, errors.New("not implemented")
}
func initRuntimesDir(*config.Config) error {
return nil
}
func setupRuntimes(*config.Config) (runtimes, error) {
return runtimes{}, nil
}

View file

@@ -9,7 +9,6 @@ import (
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd"
"github.com/pkg/errors"
@@ -57,7 +56,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
if hostConfig != nil {
logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
oldNetworkMode := ctr.HostConfig.NetworkMode
if err := daemon.setSecurityOptions(daemonCfg, ctr, hostConfig); err != nil {
if err := daemon.setSecurityOptions(&daemonCfg.Config, ctr, hostConfig); err != nil {
return errdefs.InvalidParameter(err)
}
if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
@@ -91,7 +90,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
// Adapt for old containers in case we have updates in this function and
// old containers never have chance to call the new function in create stage.
if hostConfig != nil {
if err := daemon.adaptContainerSettings(daemonCfg, ctr.HostConfig, false); err != nil {
if err := daemon.adaptContainerSettings(&daemonCfg.Config, ctr.HostConfig, false); err != nil {
return errdefs.InvalidParameter(err)
}
}
@@ -102,7 +101,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *config.Config, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
start := time.Now()
container.Lock()
defer container.Unlock()
@@ -138,7 +137,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *config.Conf
// if containers AutoRemove flag is set, remove it after clean up
if container.HostConfig.AutoRemove {
container.Unlock()
if err := daemon.containerRm(daemonCfg, container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
if err := daemon.containerRm(&daemonCfg.Config, container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("can't remove container %s: %v", container.ID, err)
}
container.Lock()
@@ -150,7 +149,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *config.Conf
return err
}
if err := daemon.initializeNetworking(daemonCfg, container); err != nil {
if err := daemon.initializeNetworking(&daemonCfg.Config, container); err != nil {
return err
}

View file

@@ -4,21 +4,20 @@ package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
)
// getLibcontainerdCreateOptions callers must hold a lock on the container
func (daemon *Daemon) getLibcontainerdCreateOptions(daemonCfg *config.Config, container *container.Container) (string, interface{}, error) {
func (daemon *Daemon) getLibcontainerdCreateOptions(daemonCfg *configStore, container *container.Container) (string, interface{}, error) {
// Ensure a runtime has been assigned to this container
if container.HostConfig.Runtime == "" {
container.HostConfig.Runtime = daemonCfg.DefaultRuntime
container.CheckpointTo(daemon.containersReplica)
}
binary, opts, err := daemon.getRuntime(daemonCfg, container.HostConfig.Runtime)
shim, opts, err := daemonCfg.Runtimes.Get(container.HostConfig.Runtime)
if err != nil {
return "", nil, setExitCodeFromError(container.SetExitCode, err)
}
return binary, opts, nil
return shim, opts, nil
}

View file

@@ -3,11 +3,10 @@ package daemon // import "github.com/docker/docker/daemon"
import (
"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/pkg/system"
)
func (daemon *Daemon) getLibcontainerdCreateOptions(*config.Config, *container.Container) (string, interface{}, error) {
func (daemon *Daemon) getLibcontainerdCreateOptions(*configStore, *container.Container) (string, interface{}, error) {
if system.ContainerdRuntimeSupported() {
opts := &options.Options{}
return "io.containerd.runhcs.v1", opts, nil