moby/daemon/delete.go


package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/log"
	"github.com/docker/docker/api/types/backend"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/containerfs"
"github.com/opencontainers/selinux/go-selinux"
"github.com/pkg/errors"
)
// ContainerRm removes the container identified by name from the daemon and
// its filesystem. An error is returned if the container is not found, or if
// the remove fails. If the remove succeeds, the container name is released,
// and network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *backend.ContainerRmConfig) error {
	return daemon.containerRm(&daemon.config().Config, name, config)
}
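
// containerRm removes the container using the given (derived) daemon
// configuration. The container's RemovalInProgress flag guards against
// concurrent removals of the same container.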
func (daemon *Daemon) containerRm(cfg *config.Config, name string, opts *backend.ContainerRmConfig) error {
	start := time.Now()

	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// Container state RemovalInProgress should be used to avoid races.
	if inProgress := ctr.SetRemovalInProgress(); inProgress {
		err := fmt.Errorf("removal of container %s is already in progress", name)
		return errdefs.Conflict(err)
	}
	defer ctr.ResetRemovalInProgress()

	// check if container wasn't deregistered by previous rm since Get
	if c := daemon.containers.Get(ctr.ID); c == nil {
		return nil
	}

	if opts.RemoveLink {
		return daemon.rmLink(cfg, ctr, name)
	}

	err = daemon.cleanupContainer(ctr, *opts)
	containerActions.WithValues("delete").UpdateSince(start)
	return err
}
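
// rmLink removes the named link between a parent container and the container
// being removed, and updates the parent's network configuration. The default
// "/name" link of a container cannot be removed this way.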
func (daemon *Daemon) rmLink(cfg *config.Config, container *container.Container, name string) error {
	if name[0] != '/' {
		name = "/" + name
	}
	parent, n := path.Split(name)
	if parent == "/" {
		return fmt.Errorf("Conflict, cannot remove the default link name of the container")
	}

	parent = strings.TrimSuffix(parent, "/")
	pe, err := daemon.containersReplica.Snapshot().GetID(parent)
	if err != nil {
		return fmt.Errorf("Cannot get parent %s for link name %s", parent, name)
	}

	daemon.releaseName(name)
	parentContainer, _ := daemon.GetContainer(pe)
	if parentContainer != nil {
		daemon.linkIndex.unlink(name, container, parentContainer)
		if err := daemon.updateNetwork(cfg, parentContainer); err != nil {
			log.G(context.TODO()).Debugf("Could not update network to remove link %s: %v", n, err)
		}
	}
	return nil
}

// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, config backend.ContainerRmConfig) error {
	if container.IsRunning() {
		if !config.ForceRemove {
			if state := container.StateString(); state == "paused" {
				return errdefs.Conflict(fmt.Errorf("cannot remove container %q: container is %s and must be unpaused first", container.Name, state))
			} else {
				return errdefs.Conflict(fmt.Errorf("cannot remove container %q: container is %s: stop the container before removing or force remove", container.Name, state))
			}
		}
		if err := daemon.Kill(container); err != nil && !isNotRunning(err) {
			return fmt.Errorf("cannot remove container %q: could not kill: %w", container.Name, err)
		}
	}

	// Stop collection of stats for the container, regardless of whether
	// stats are currently being collected.
	daemon.statsCollector.StopCollection(container)

	// stopTimeout is the number of seconds to wait for the container to stop
	// gracefully before forcibly killing it.
	//
	// Why 3 seconds? The timeout specified here was originally added in commit
	// 1615bb08c7c3fc6c4b22db0a633edda516f97cf0, which added a custom timeout to
	// some commands, but lacking an option for a timeout on "docker rm", was
	// hardcoded to 10 seconds. Commit 28fd289b448164b77affd8103c0d96fd8110daf9
	// later on updated this to 3 seconds (but no background on that change).
	//
	// If you arrived here and know the answer, you earned yourself a picture
	// of a cute animal of your own choosing.
	stopTimeout := 3
	if err := daemon.containerStop(context.TODO(), container, containertypes.StopOptions{Timeout: &stopTimeout}); err != nil {
		return err
	}

	// Mark the container dead. We don't want anybody to be restarting it.
	container.Lock()
	container.Dead = true

	// Save the container state to disk, so that if an error happens before
	// the container's metadata file is removed from disk, a restart of docker
	// does not bring a dead container back to life.
	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
		log.G(context.TODO()).Errorf("Error saving dying container to disk: %v", err)
	}
	container.Unlock()

	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseRWLayer`
	if container.RWLayer != nil {
		if err := daemon.imageService.ReleaseLayer(container.RWLayer); err != nil {
			err = errors.Wrapf(err, "container %s", container.ID)
			container.SetRemovalError(err)
			return err
		}
		container.RWLayer = nil
	} else {
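		// With the containerd snapshotter there is no RWLayer to release;
		// instead, delete the lease registered under the container's ID (if
		// any) so containerd can garbage-collect the resources it references.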
		if daemon.UsesSnapshotter() {
			ls := daemon.containerdClient.LeasesService()
			lease := leases.Lease{
				ID: container.ID,
			}
			if err := ls.Delete(context.Background(), lease, leases.SynchronousDelete); err != nil {
				if !cerrdefs.IsNotFound(err) {
					container.SetRemovalError(err)
					return err
				}
			}
		}
	}

	// Hold the container lock while deleting the container root directory
	// so that other goroutines don't attempt to concurrently open files
	// within it. Having any file open on Windows (without the
	// FILE_SHARE_DELETE flag) will block it from being deleted.
	container.Lock()
	err := containerfs.EnsureRemoveAll(container.Root)
	container.Unlock()
	if err != nil {
		err = errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
		container.SetRemovalError(err)
		return err
	}

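	// Release everything else still referencing the container: named links,
	// the SELinux process label, the entries in the daemon's container stores,
	// mount points (and volumes, when requested), and any reserved link names.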
	linkNames := daemon.linkIndex.delete(container)
	selinux.ReleaseLabel(container.ProcessLabel)
	daemon.containers.Delete(container.ID)
	daemon.containersReplica.Delete(container)
	if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil {
		log.G(context.TODO()).Error(err)
	}

	for _, name := range linkNames {
		daemon.releaseName(name)
	}
	container.SetRemoved()
	stateCtr.del(container.ID)

	daemon.LogContainerEvent(container, events.ActionDestroy)
	return nil
}