moby/daemon/start.go

package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"time"

	"github.com/containerd/log"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/container"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/internal/compatcontext"
	"github.com/docker/docker/libcontainerd"
	"github.com/pkg/errors"
)

// validateState verifies if the container is in a non-conflicting state.
func validateState(ctr *container.Container) error {
	ctr.Lock()
	defer ctr.Unlock()

	// Intentionally checking paused first, because a container can be
	// BOTH running AND paused. To start a paused (but running) container,
	// it must be thawed ("un-paused").
	if ctr.Paused {
		return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
	} else if ctr.Running {
		// This is not an actual error, but produces a 304 "not modified"
		// when returned through the API to indicate the container is
		// already in the desired state. It's implemented as an error
		// to make the code calling this function terminate early (as
		// no further processing is needed).
		return errdefs.NotModified(errors.New("container is already running"))
	}

	if ctr.RemovalInProgress || ctr.Dead {
		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
	}
	return nil
}

// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error {
	daemonCfg := daemon.config()
	if checkpoint != "" && !daemonCfg.Experimental {
		return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
	}

	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}
	if err := validateState(ctr); err != nil {
		return err
	}

	// check if hostConfig is in line with the current system settings.
	// It may happen that cgroups are unmounted or the like.
	if _, err = daemon.verifyContainerSettings(daemonCfg, ctr.HostConfig, nil, false); err != nil {
		return errdefs.InvalidParameter(err)
	}

	return daemon.containerStart(ctx, daemonCfg, ctr, checkpoint, checkpointDir, true)
}

// containerStart prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
	start := time.Now()
	container.Lock()
	defer container.Unlock()

	if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false
		return nil
	}

	if container.RemovalInProgress || container.Dead {
		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
	}

	if checkpointDir != "" {
		// TODO(mlaventure): how would we support that?
		return errdefs.Forbidden(errors.New("custom checkpointdir is not supported"))
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if retErr != nil {
			container.SetError(retErr)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode() == 0 {
				container.SetExitCode(exitUnknown)
			}
			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
				log.G(ctx).Errorf("%s: failed saving state on start failure: %v", container.ID, err)
			}
			container.Reset(false)
			daemon.Cleanup(compatcontext.WithoutCancel(ctx), container)
			// if the container's AutoRemove flag is set, remove it after clean up
			if container.HostConfig.AutoRemove {
				container.Unlock()
				if err := daemon.containerRm(&daemonCfg.Config, container.ID, &backend.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
					log.G(ctx).Errorf("can't remove container %s: %v", container.ID, err)
				}
				container.Lock()
			}
		}
	}()
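
	// Make sure the container's root filesystem is mounted before networking
	// and the remaining mounts are set up (whether a mount is actually needed
	// at this point is platform-dependent, hence "conditional").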
	if err := daemon.conditionalMountOnStart(container); err != nil {
		return err
	}

	if err := daemon.initializeNetworking(&daemonCfg.Config, container); err != nil {
		return err
	}
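
	// Prepare container-specific directories and collect the extra mounts they
	// contribute to the spec (exactly which directories are set up here is
	// platform-dependent).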
	mnts, err := daemon.setupContainerDirs(container)
	if err != nil {
		return err
	}

	m, cleanup, err := daemon.setupMounts(ctx, container)
	if err != nil {
		return err
	}
	mnts = append(mnts, m...)
	defer cleanup(compatcontext.WithoutCancel(ctx))
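
	// Build the OCI runtime spec for this container from its configuration,
	// the daemon configuration, and the collected mounts.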
	spec, err := daemon.createSpec(ctx, daemonCfg, container, mnts)
	if err != nil {
		// Any error that occurs while creating the spec, even if it's the
		// result of an invalid container config, must be considered a System
		// error (internal server error), as it's not an error with the request
		// to start the container.
		//
		// Invalid configuration in the config itself must be validated when
		// creating the container (creating its config), but some errors are
		// dependent on the current state, for example when starting a container
		// that shares a namespace with another container, and that container
		// is not running (or missing).
		return errdefs.System(err)
	}

	if resetRestartManager {
		container.ResetRestartManager(true)
		container.HasBeenManuallyStopped = false
	}

	if err := daemon.saveAppArmorConfig(container); err != nil {
		return err
	}
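
	// When starting from a checkpoint, resolve the directory containing the
	// checkpoint data before handing it to the runtime.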
if checkpoint != "" {
checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false)
if err != nil {
return err
}
}
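
	// Resolve which runtime shim to use and any runtime-specific create
	// options to pass along to containerd.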
	shim, createOptions, err := daemon.getLibcontainerdCreateOptions(daemonCfg, container)
	if err != nil {
		return err
	}
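
	// Create the container in containerd, replacing any stale containerd
	// container with the same ID left over from a previous run.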
	ctr, err := libcontainerd.ReplaceContainer(ctx, daemon.containerd, container.ID, spec, shim, createOptions)
	if err != nil {
		return setExitCodeFromError(container.SetExitCode, err)
	}
	defer func() {
		if retErr != nil {
			if err := ctr.Delete(compatcontext.WithoutCancel(ctx)); err != nil {
				log.G(ctx).WithError(err).WithField("container", container.ID).
					Error("failed to delete failed start container")
			}
		}
	}()

	// TODO(mlaventure): we need to specify checkpoint options here
	tsk, err := ctr.NewTask(context.TODO(), // Passing ctx caused integration tests to be stuck in the cleanup phase
		checkpointDir, container.StreamConfig.Stdin() != nil || container.Config.Tty,
		container.InitializeStdio)
	if err != nil {
		return setExitCodeFromError(container.SetExitCode, err)
	}
	defer func() {
		if retErr != nil {
			if err := tsk.ForceDelete(compatcontext.WithoutCancel(ctx)); err != nil {
				log.G(ctx).WithError(err).WithField("container", container.ID).
					Error("failed to delete task after fail start")
			}
		}
	}()
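
	// Perform any remaining setup on the newly created task before it is
	// started; at this point the task exists in containerd but is not yet
	// running.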
	if err := daemon.initializeCreatedTask(ctx, tsk, container, spec); err != nil {
		return err
	}

	if err := tsk.Start(context.TODO()); err != nil { // passing ctx caused integration tests to be stuck in the cleanup phase
		return setExitCodeFromError(container.SetExitCode, err)
	}
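
	// The task is now running: record the running state, update the state
	// metrics, start health-check monitoring, and persist the container.
	// A failure to persist is logged but does not fail the start.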
	container.HasBeenManuallyRestarted = false
	container.SetRunning(ctr, tsk, true)
	container.HasBeenStartedBefore = true
	daemon.setStateCounter(container)
	daemon.initHealthMonitor(container)

	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		log.G(ctx).WithError(err).WithField("container", container.ID).
			Errorf("failed to store container")
	}

	daemon.LogContainerEvent(container, events.ActionStart)
	containerActions.WithValues("start").UpdateSince(start)

	return nil
}

// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(ctx context.Context, container *container.Container) {
	// Microsoft HCS containers get in a bad state if host resources are
	// released while the container still exists.
	if ctr, ok := container.C8dContainer(); ok {
		if err := ctr.Delete(context.Background()); err != nil {
			log.G(ctx).Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
		}
	}
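
	// Release networking and unwind the container's mounts. Each failure below
	// is logged and cleanup continues, so as many resources as possible are
	// released even when individual steps fail.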
	daemon.releaseNetwork(container)

	if err := container.UnmountIpcMount(); err != nil {
		log.G(ctx).Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
	}

	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
		// FIXME: remove once reference counting for graphdrivers has been refactored
		// Ensure that all the mounts are gone
		if mountid, err := daemon.imageService.GetLayerMountID(container.ID); err == nil {
			daemon.cleanupMountsByID(mountid)
		}
	}

	if err := container.UnmountSecrets(); err != nil {
		log.G(ctx).Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
	}

	if err := recursiveUnmount(container.Root); err != nil {
		log.G(ctx).WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.")
	}

	for _, eConfig := range container.ExecCommands.Commands() {
		daemon.unregisterExecCommand(container, eConfig)
	}

	if container.BaseFS != "" {
		if err := container.UnmountVolumes(ctx, daemon.LogVolumeEvent); err != nil {
			log.G(ctx).Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
		}
	}
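
	// Finally, cancel the container's attach context so that any blocked
	// attach operations can return.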
	container.CancelAttachContext()
}