moby/daemon/delete.go
Fabio Kung edad52707c save deep copies of Container in the replica store
Reuse existing structures and rely on JSON serialization to deep copy
Container objects.

Also consolidate all "save" operations on container.CheckpointTo, which
now both saves serialized JSON to disk and replicates state to the ACID
in-memory store.

Signed-off-by: Fabio Kung <fabio.kung@gmail.com>
2017-06-23 07:52:33 -07:00
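As a rough illustration of the approach described in the commit message, below is a minimal, self-contained sketch of a JSON round-trip deep copy. The toyContainer type and deepCopy helper are hypothetical stand-ins for the real container.Container and replica-store plumbing, not moby code:

package main

import (
	"encoding/json"
	"fmt"
)

// toyContainer stands in for container.Container; the json tags drive the copy.
type toyContainer struct {
	ID     string            `json:"id"`
	Labels map[string]string `json:"labels"`
}

// deepCopy marshals the source and unmarshals into a fresh value, so the
// clone shares no maps, slices, or pointers with the original.
func deepCopy(src *toyContainer) (*toyContainer, error) {
	data, err := json.Marshal(src)
	if err != nil {
		return nil, err
	}
	clone := &toyContainer{}
	if err := json.Unmarshal(data, clone); err != nil {
		return nil, err
	}
	return clone, nil
}

func main() {
	orig := &toyContainer{ID: "abc123", Labels: map[string]string{"role": "web"}}
	clone, err := deepCopy(orig)
	if err != nil {
		panic(err)
	}
	clone.Labels["role"] = "db" // mutating the clone leaves the original untouched
	fmt.Println(orig.Labels["role"], clone.Labels["role"]) // prints: web db
}

Note that only exported, JSON-serializable fields survive such a round trip.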


package daemon

import (
	"fmt"
	"os"
	"path"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	apierrors "github.com/docker/docker/api/errors"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/container"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/system"
	volumestore "github.com/docker/docker/volume/store"
	"github.com/pkg/errors"
)

// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
	start := time.Now()
	container, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// Container state RemovalInProgress should be used to avoid races.
	if inProgress := container.SetRemovalInProgress(); inProgress {
		err := fmt.Errorf("removal of container %s is already in progress", name)
		return apierrors.NewBadRequestError(err)
	}
	defer container.ResetRemovalInProgress()

	// check if container wasn't deregistered by previous rm since Get
	if c := daemon.containers.Get(container.ID); c == nil {
		return nil
	}

	if config.RemoveLink {
		return daemon.rmLink(container, name)
	}

	err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume)
	containerActions.WithValues("delete").UpdateSince(start)

	return err
}

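// rmLink removes the named link between a container and its parent
// container and refreshes the parent's network configuration.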
func (daemon *Daemon) rmLink(container *container.Container, name string) error {
	if name[0] != '/' {
		name = "/" + name
	}
	parent, n := path.Split(name)
	if parent == "/" {
		return fmt.Errorf("Conflict, cannot remove the default name of the container")
	}

	parent = strings.TrimSuffix(parent, "/")
	pe, err := daemon.nameIndex.Get(parent)
	if err != nil {
		return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
	}

	daemon.releaseName(name)
	parentContainer, _ := daemon.GetContainer(pe)
	if parentContainer != nil {
		daemon.linkIndex.unlink(name, container, parentContainer)
		if err := daemon.updateNetwork(parentContainer); err != nil {
			logrus.Debugf("Could not update network to remove link %s: %v", n, err)
		}
	}
	return nil
}

// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			state := container.StateString()
			procedure := "Stop the container before attempting removal or force remove"
			if state == "paused" {
				procedure = "Unpause and then " + strings.ToLower(procedure)
			}
			err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure)
			return apierrors.NewRequestConflictError(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}

	// stop collection of stats for the container regardless of whether
	// stats are currently being collected.
	daemon.statsCollector.StopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.Lock()
	container.Dead = true

	// Save container state to disk, so that if an error happens before the
	// container meta file is removed from disk, a restart of docker does not
	// bring a dead container back to life.
	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}
	container.Unlock()

	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseRWLayer`
	if container.RWLayer != nil {
		metadata, err := daemon.stores[container.Platform].layerStore.ReleaseRWLayer(container.RWLayer)
		layer.LogReleaseMetadata(metadata)
		if err != nil && err != layer.ErrMountDoesNotExist {
			return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.Platform), container.ID)
		}
	}

	if err := system.EnsureRemoveAll(container.Root); err != nil {
		return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
	}

	daemon.nameIndex.Delete(container.ID)
	daemon.linkIndex.delete(container)
	selinuxFreeLxcContexts(container.ProcessLabel)
	daemon.idIndex.Delete(container.ID)
	daemon.containers.Delete(container.ID)
	daemon.containersReplica.Delete(container)
	if e := daemon.removeMountPoints(container, removeVolume); e != nil {
		logrus.Error(e)
	}
	container.SetRemoved()
	stateCtr.del(container.ID)

	daemon.LogContainerEvent(container, "destroy")
	return nil
}

// VolumeRm removes the volume with the given name.
// If the volume is referenced by a container, it is not removed.
// This is called directly from the Engine API.
func (daemon *Daemon) VolumeRm(name string, force bool) error {
	err := daemon.volumeRm(name)
	if err != nil && volumestore.IsInUse(err) {
		return apierrors.NewRequestConflictError(err)
	}
	if err == nil || force {
		daemon.volumes.Purge(name)
		return nil
	}
	return err
}

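// volumeRm looks up the named volume, removes it from the volume store,
// and emits a "destroy" volume event.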
func (daemon *Daemon) volumeRm(name string) error {
	v, err := daemon.volumes.Get(name)
	if err != nil {
		return err
	}

	if err := daemon.volumes.Remove(v); err != nil {
		return errors.Wrap(err, "unable to remove volume")
	}
	daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()})
	return nil
}