moby/daemon/update.go
Cory Snider 4bafaa00aa Refactor libcontainerd to minimize c8d RPCs
The containerd client is very chatty at the best of times. Because the
libcontainerd API is stateless and references containers and processes
by string ID for every method call, the implementation is essentially
forced to use the containerd client in a way which amplifies the number
of redundant RPCs invoked to perform any operation. The libcontainerd
remote implementation has to reload the containerd container, task
and/or process metadata for nearly every operation. This in turn
amplifies the number of context switches between dockerd and containerd
needed to perform any container operation or handle a containerd event,
increasing the load on the system which could otherwise be allocated to
workloads.

Overhaul the libcontainerd interface to reduce the impedance mismatch
with the containerd client so that the client can be used more
efficiently. Split the API out into container, task and process
interfaces which the consumer is expected to retain, so that
libcontainerd can keep state (especially the analogous containerd
client objects) without having to manage any state store inside the
libcontainerd client.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2022-08-24 14:59:08 -04:00
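
The commit message above describes splitting the libcontainerd API into
container, task and process interfaces that the consumer retains. A minimal
sketch of that shape is below; the package, interface and method names are
illustrative assumptions, not the exact types introduced by this commit.

package libcontainerdtypes // hypothetical package name, for the sketch only

import (
    "context"
    "syscall"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// Container is a long-lived handle to a created container. Because the
// consumer retains it, the implementation can cache the underlying
// containerd client object instead of re-resolving the container by
// string ID on every call.
type Container interface {
    // Task returns a handle to the container's running task, if any.
    Task(ctx context.Context) (Task, error)
    Delete(ctx context.Context) error
}

// Process is a handle to a process running inside a container.
type Process interface {
    Pid() uint32
    Kill(ctx context.Context, signal syscall.Signal) error
}

// Task is a container's init process: a Process plus task-level
// operations such as live resource updates.
type Task interface {
    Process
    Pause(ctx context.Context) error
    Resume(ctx context.Context) error
    UpdateResources(ctx context.Context, resources *specs.LinuxResources) error
}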


package daemon // import "github.com/docker/docker/daemon"

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/errdefs"
    "github.com/pkg/errors"
)
// ContainerUpdate updates the configuration of the container identified by
// name with the settings in hostConfig.
func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
    var warnings []string

    warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
    if err != nil {
        return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err)
    }

    if err := daemon.update(name, hostConfig); err != nil {
        return container.ContainerUpdateOKBody{Warnings: warnings}, err
    }

    return container.ContainerUpdateOKBody{Warnings: warnings}, nil
}
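
// Example (hypothetical): a caller driving this endpoint through the Docker
// API client. The client package is github.com/docker/docker/client; the
// container name and the resource values are assumptions for illustration.
//
//     cli, err := client.NewClientWithOpts(client.FromEnv)
//     if err != nil {
//         panic(err)
//     }
//     res, err := cli.ContainerUpdate(context.Background(), "my-container",
//         container.UpdateConfig{
//             Resources: container.Resources{
//                 Memory:     512 * 1024 * 1024, // new limit, in bytes
//                 MemorySwap: -1,                // unlimited swap
//             },
//             RestartPolicy: container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
//         })
//     // res.Warnings carries the warnings assembled by ContainerUpdate above.
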
func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
    if hostConfig == nil {
        return nil
    }

    ctr, err := daemon.GetContainer(name)
    if err != nil {
        return err
    }
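
    // Snapshot the current HostConfig so a failed update can be rolled back.
    // The deferred function below restores the snapshot whenever an error
    // path has set restoreConfig.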
    restoreConfig := false
    backupHostConfig := *ctr.HostConfig
    defer func() {
        if restoreConfig {
            ctr.Lock()
            if !ctr.RemovalInProgress && !ctr.Dead {
                ctr.HostConfig = &backupHostConfig
                ctr.CheckpointTo(daemon.containersReplica)
            }
            ctr.Unlock()
        }
    }()

    ctr.Lock()

    if ctr.RemovalInProgress || ctr.Dead {
        ctr.Unlock()
        return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be updated"))
    }

    if err := ctr.UpdateContainer(hostConfig); err != nil {
        restoreConfig = true
        ctr.Unlock()
        return errCannotUpdate(ctr.ID, err)
    }
    if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
        restoreConfig = true
        ctr.Unlock()
        return errCannotUpdate(ctr.ID, err)
    }

    ctr.Unlock()

    // If the restart policy changed, update the container monitor so the
    // new policy takes effect on the next restart decision.
    if hostConfig.RestartPolicy.Name != "" {
        ctr.UpdateMonitor(hostConfig.RestartPolicy)
    }

    defer daemon.LogContainerEvent(ctr, "update")

    // If the container is not running, updating the stored hostConfig is
    // enough; the new resource limits will be applied when the container is
    // next started. If the container is running (including paused), the
    // updated configuration must also be pushed down to the live task.
    ctr.Lock()
    isRestarting := ctr.Restarting
    tsk, err := ctr.GetRunningTask()
    ctr.Unlock()
    if errdefs.IsConflict(err) || isRestarting {
        return nil
    }
    if err != nil {
        return err
    }
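
    // toContainerdResources translates the engine's resource settings into
    // the containerd task-update payload; it is defined in the daemon's
    // platform-specific sources rather than in this file.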
    if err := tsk.UpdateResources(context.TODO(), toContainerdResources(hostConfig.Resources)); err != nil {
        restoreConfig = true
        // TODO: it would be nice if containerd responded with better errors here so we can classify this better.
        return errCannotUpdate(ctr.ID, errdefs.System(err))
    }

    return nil
}

func errCannotUpdate(containerID string, err error) error {
    return errors.Wrap(err, "Cannot update container "+containerID)
}
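
For context on the last step of update, the reduced sketch below shows what a
Linux translation from container.Resources to the runtime-spec resource types
could look like, assuming specs is github.com/opencontainers/runtime-spec/specs-go.
The function name and field coverage are illustrative assumptions, not the
daemon's actual mapping.

// toContainerdResourcesSketch is a hypothetical, reduced version of the
// Linux translation; the real daemon covers many more resource fields.
func toContainerdResourcesSketch(resources container.Resources) *specs.LinuxResources {
    var r specs.LinuxResources

    if resources.NanoCPUs != 0 {
        // Express the CPU limit as quota microseconds per 100ms period.
        period := uint64(100000)
        quota := resources.NanoCPUs * int64(period) / 1e9
        r.CPU = &specs.LinuxCPU{Period: &period, Quota: &quota}
    }
    if resources.Memory != 0 {
        r.Memory = &specs.LinuxMemory{Limit: &resources.Memory}
    }
    return &r
}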