daemon: rename variables that collide with imported package names
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent 797ec8e913
commit eb14d936bf
28 changed files with 306 additions and 305 deletions
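Background for the diff below: in Go, a local variable or parameter that reuses the name of an imported package shadows that package for the rest of its scope, so the package itself can no longer be referenced there. The renames in this commit (container to ctr, archive to archv, options to sbOptions, errors to errs, os to los, sysinfo to sysInfo, and so on) remove those collisions. A minimal, hypothetical sketch of the before/after pattern, using the standard strings package as a stand-in for the shadowed imports:

package main

import (
	"fmt"
	"strings"
)

// Before: the parameter shadows the imported "strings" package, so a package
// call such as strings.Join cannot be made anywhere in this function body.
func joinShadowed(strings []string) string {
	out := ""
	for _, s := range strings {
		out += s
	}
	return out
}

// After: renaming the local (as this commit does with ctr, archv, sbOptions, ...)
// keeps the package usable.
func joinRenamed(items []string) string {
	return strings.Join(items, "")
}

func main() {
	fmt.Println(joinShadowed([]string{"a", "b"}))
	fmt.Println(joinRenamed([]string{"a", "b"}))
}

The shadowed version compiles only because the package is never needed inside that function; as soon as a package call is required, the local name has to change, which is what this commit does throughout the daemon package.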
@@ -49,17 +49,17 @@ func archivePath(i interface{}, src string, opts *archive.TarOptions, root strin
 // ContainerCopy performs a deprecated operation of archiving the resource at
 // the specified path in the container identified by the given name.
 func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return nil, err
 }

 // Make sure an online file-system operation is permitted.
-if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 return nil, errdefs.System(err)
 }

-data, err := daemon.containerCopy(container, res)
+data, err := daemon.containerCopy(ctr, res)
 if err == nil {
 return data, nil
 }
@@ -73,17 +73,17 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
 // ContainerStatPath stats the filesystem resource at the specified path in the
 // container identified by the given name.
 func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return nil, err
 }

 // Make sure an online file-system operation is permitted.
-if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 return nil, errdefs.System(err)
 }

-stat, err = daemon.containerStatPath(container, path)
+stat, err = daemon.containerStatPath(ctr, path)
 if err == nil {
 return stat, nil
 }
@@ -98,17 +98,17 @@ func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.C
 // specified path in the container identified by the given name. Returns a
 // tar archive of the resource and whether it was a directory or a single file.
 func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return nil, nil, err
 }

 // Make sure an online file-system operation is permitted.
-if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 return nil, nil, errdefs.System(err)
 }

-content, stat, err = daemon.containerArchivePath(container, path)
+content, stat, err = daemon.containerArchivePath(ctr, path)
 if err == nil {
 return content, stat, nil
 }
@@ -126,17 +126,17 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io
 // be an error if unpacking the given content would cause an existing directory
 // to be replaced with a non-directory and vice versa.
 func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return err
 }

 // Make sure an online file-system operation is permitted.
-if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 return errdefs.System(err)
 }

-err = daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content)
+err = daemon.containerExtractToDir(ctr, path, copyUIDGID, noOverwriteDirNonDir, content)
 if err == nil {
 return nil
 }
@@ -433,7 +433,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 basePath = d
 filter = []string{f}
 }
-archive, err := archivePath(driver, basePath, &archive.TarOptions{
+archv, err := archivePath(driver, basePath, &archive.TarOptions{
 Compression: archive.Uncompressed,
 IncludeFiles: filter,
 }, container.BaseFS.Path())
@@ -441,8 +441,8 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 return nil, err
 }

-reader := ioutils.NewReadCloserWrapper(archive, func() error {
-err := archive.Close()
+reader := ioutils.NewReadCloserWrapper(archv, func() error {
+err := archv.Close()
 container.DetachAndUnmount(daemon.LogVolumeEvent)
 daemon.Unmount(container)
 container.Unlock()
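A note on the archive to archv rename in the two hunks above: the original line archive, err := archivePath(driver, basePath, &archive.TarOptions{...}) did compile, because the names on the left-hand side of := only come into scope after the whole statement, so &archive.TarOptions{...} on the right-hand side still refers to the package. From the next statement on, archive names the returned reader and the archive package is unreachable in that scope, so the rename removes the confusing shadowing rather than fixing a build error. A small illustrative sketch of the same scoping rule, again borrowing the standard strings package:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// On the right-hand side, "strings" still refers to the package: the new
	// variable only comes into scope once this statement has completed.
	strings := strings.Repeat("ab", 2)
	fmt.Println(strings) // prints "abab"
	// From here on, "strings" is a local string variable; a package call such
	// as strings.ToUpper("x") would no longer compile in this scope.
}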
@@ -27,15 +27,15 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 }
 }

-container, err := daemon.GetContainer(prefixOrName)
+ctr, err := daemon.GetContainer(prefixOrName)
 if err != nil {
 return err
 }
-if container.IsPaused() {
+if ctr.IsPaused() {
 err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName)
 return errdefs.Conflict(err)
 }
-if container.IsRestarting() {
+if ctr.IsRestarting() {
 err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName)
 return errdefs.Conflict(err)
 }
@@ -44,11 +44,11 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 UseStdin: c.UseStdin,
 UseStdout: c.UseStdout,
 UseStderr: c.UseStderr,
-TTY: container.Config.Tty,
-CloseStdin: container.Config.StdinOnce,
+TTY: ctr.Config.Tty,
+CloseStdin: ctr.Config.StdinOnce,
 DetachKeys: keys,
 }
-container.StreamConfig.AttachStreams(&cfg)
+ctr.StreamConfig.AttachStreams(&cfg)

 inStream, outStream, errStream, err := c.GetStreams()
 if err != nil {
@@ -56,7 +56,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 }
 defer inStream.Close()

-if !container.Config.Tty && c.MuxStreams {
+if !ctr.Config.Tty && c.MuxStreams {
 errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr)
 outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
 }
@@ -71,7 +71,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 cfg.Stderr = errStream
 }

-if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil {
+if err := daemon.containerAttach(ctr, &cfg, c.Logs, c.Stream); err != nil {
 fmt.Fprintf(outStream, "Error attaching: %s\n", err)
 }
 return nil
@@ -79,7 +79,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA

 // ContainerAttachRaw attaches the provided streams to the container's stdio
 func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error {
-container, err := daemon.GetContainer(prefixOrName)
+ctr, err := daemon.GetContainer(prefixOrName)
 if err != nil {
 return err
 }
@@ -87,10 +87,10 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose
 UseStdin: stdin != nil,
 UseStdout: stdout != nil,
 UseStderr: stderr != nil,
-TTY: container.Config.Tty,
-CloseStdin: container.Config.StdinOnce,
+TTY: ctr.Config.Tty,
+CloseStdin: ctr.Config.StdinOnce,
 }
-container.StreamConfig.AttachStreams(&cfg)
+ctr.StreamConfig.AttachStreams(&cfg)
 close(attached)
 if cfg.UseStdin {
 cfg.Stdin = stdin
@@ -102,7 +102,7 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose
 cfg.Stderr = stderr
 }

-return daemon.containerAttach(container, &cfg, false, doStream)
+return daemon.containerAttach(ctr, &cfg, false, doStream)
 }

 func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error {
@@ -92,20 +92,20 @@ func (daemon *Daemon) containerRoot(id string) string {
 // Load reads the contents of a container from disk
 // This is typically done at startup.
 func (daemon *Daemon) load(id string) (*container.Container, error) {
-container := daemon.newBaseContainer(id)
+ctr := daemon.newBaseContainer(id)

-if err := container.FromDisk(); err != nil {
+if err := ctr.FromDisk(); err != nil {
 return nil, err
 }
-if err := label.ReserveLabel(container.ProcessLabel); err != nil {
+if err := label.ReserveLabel(ctr.ProcessLabel); err != nil {
 return nil, err
 }

-if container.ID != id {
-return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
+if ctr.ID != id {
+return ctr, fmt.Errorf("Container %s is stored at %s", ctr.ID, id)
 }

-return container, nil
+return ctr, nil
 }

 // Register makes a container object usable by the daemon as <container.ID>
@@ -342,12 +342,12 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
 return nil
 }

-options, err := daemon.buildSandboxOptions(container)
+sbOptions, err := daemon.buildSandboxOptions(container)
 if err != nil {
 return fmt.Errorf("Update network failed: %v", err)
 }

-if err := sb.Refresh(options...); err != nil {
+if err := sb.Refresh(sbOptions...); err != nil {
 return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err)
 }

@@ -378,7 +378,7 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN
 if container.NetworkSettings.Networks != nil {
 networkName := n.Name()
 containerName := strings.TrimPrefix(container.Name, "/")
-if network, ok := container.NetworkSettings.Networks[networkName]; ok && network.EndpointID != "" {
+if nw, ok := container.NetworkSettings.Networks[networkName]; ok && nw.EndpointID != "" {
 err := fmt.Errorf("%s is already attached to network %s", containerName, networkName)
 return n, nil, errdefs.Conflict(err)
 }
@@ -584,11 +584,11 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error {
 // create its network sandbox now if not present
 if len(networks) == 0 {
 if nil == daemon.getNetworkSandbox(container) {
-options, err := daemon.buildSandboxOptions(container)
+sbOptions, err := daemon.buildSandboxOptions(container)
 if err != nil {
 return err
 }
-sb, err := daemon.netController.NewSandbox(container.ID, options...)
+sb, err := daemon.netController.NewSandbox(container.ID, sbOptions...)
 if err != nil {
 return err
 }
@@ -802,11 +802,11 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName
 }

 if sb == nil {
-options, err := daemon.buildSandboxOptions(container)
+sbOptions, err := daemon.buildSandboxOptions(container)
 if err != nil {
 return err
 }
-sb, err = controller.NewSandbox(container.ID, options...)
+sb, err = controller.NewSandbox(container.ID, sbOptions...)
 if err != nil {
 return err
 }
@@ -1135,11 +1135,11 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw

 // ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response
 func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error {
-container, err := daemon.GetContainer(containerName)
+ctr, err := daemon.GetContainer(containerName)
 if err != nil {
 return err
 }
-sb := daemon.getNetworkSandbox(container)
+sb := daemon.getNetworkSandbox(ctr)
 if sb == nil {
 return fmt.Errorf("network sandbox does not exist for container %s", containerName)
 }
@@ -1148,11 +1148,11 @@ func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) erro

 // DeactivateContainerServiceBinding removes this container from load balancer active rotation, and DNS response
 func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error {
-container, err := daemon.GetContainer(containerName)
+ctr, err := daemon.GetContainer(containerName)
 if err != nil {
 return err
 }
-sb := daemon.getNetworkSandbox(container)
+sb := daemon.getNetworkSandbox(ctr)
 if sb == nil {
 // If the network sandbox is not found, then there is nothing to deactivate
 logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName)
@@ -61,33 +61,33 @@ func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]s
 func (daemon *Daemon) getIpcContainer(id string) (*container.Container, error) {
 errMsg := "can't join IPC of container " + id
 // Check the container exists
-container, err := daemon.GetContainer(id)
+ctr, err := daemon.GetContainer(id)
 if err != nil {
 return nil, errors.Wrap(err, errMsg)
 }
 // Check the container is running and not restarting
-if err := daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting); err != nil {
+if err := daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting); err != nil {
 return nil, errors.Wrap(err, errMsg)
 }
 // Check the container ipc is shareable
-if st, err := os.Stat(container.ShmPath); err != nil || !st.IsDir() {
+if st, err := os.Stat(ctr.ShmPath); err != nil || !st.IsDir() {
 if err == nil || os.IsNotExist(err) {
 return nil, errors.New(errMsg + ": non-shareable IPC (hint: use IpcMode:shareable for the donor container)")
 }
 // stat() failed?
-return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+container.ShmPath)
+return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+ctr.ShmPath)
 }

-return container, nil
+return ctr, nil
 }

-func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) {
-containerID := container.HostConfig.PidMode.Container()
-container, err := daemon.GetContainer(containerID)
+func (daemon *Daemon) getPidContainer(ctr *container.Container) (*container.Container, error) {
+containerID := ctr.HostConfig.PidMode.Container()
+ctr, err := daemon.GetContainer(containerID)
 if err != nil {
 return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID)
 }
-return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting)
+return ctr, daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting)
 }

 func containerIsRunning(c *container.Container) error {
@@ -90,7 +90,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container
 return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err)
 }

-container, err := daemon.create(opts)
+ctr, err := daemon.create(opts)
 if err != nil {
 return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err
 }
@@ -100,16 +100,16 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container
 warnings = make([]string, 0) // Create an empty slice to avoid https://github.com/moby/moby/issues/38222
 }

-return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil
+return containertypes.ContainerCreateCreatedBody{ID: ctr.ID, Warnings: warnings}, nil
 }

 // Create creates a new container from the given configuration with a given name.
 func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr error) {
 var (
-container *container.Container
+ctr *container.Container
 img *image.Image
 imgID image.ID
 err error
 )

 os := runtime.GOOS
@@ -153,22 +153,22 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 return nil, errdefs.InvalidParameter(err)
 }

-if container, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil {
+if ctr, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil {
 return nil, err
 }
 defer func() {
 if retErr != nil {
-if err := daemon.cleanupContainer(container, true, true); err != nil {
+if err := daemon.cleanupContainer(ctr, true, true); err != nil {
 logrus.Errorf("failed to cleanup container on create error: %v", err)
 }
 }
 }()

-if err := daemon.setSecurityOptions(container, opts.params.HostConfig); err != nil {
+if err := daemon.setSecurityOptions(ctr, opts.params.HostConfig); err != nil {
 return nil, err
 }

-container.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt
+ctr.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt

 // Fixes: https://github.com/moby/moby/issues/34074 and
 // https://github.com/docker/for-win/issues/999.
@@ -176,38 +176,38 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 // do this on Windows as there's no effective sandbox size limit other than
 // physical on Linux.
 if isWindows {
-if container.HostConfig.StorageOpt == nil {
-container.HostConfig.StorageOpt = make(map[string]string)
+if ctr.HostConfig.StorageOpt == nil {
+ctr.HostConfig.StorageOpt = make(map[string]string)
 }
 for _, v := range daemon.configStore.GraphOptions {
 opt := strings.SplitN(v, "=", 2)
-if _, ok := container.HostConfig.StorageOpt[opt[0]]; !ok {
-container.HostConfig.StorageOpt[opt[0]] = opt[1]
+if _, ok := ctr.HostConfig.StorageOpt[opt[0]]; !ok {
+ctr.HostConfig.StorageOpt[opt[0]] = opt[1]
 }
 }
 }

 // Set RWLayer for container after mount labels have been set
-rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMapping))
+rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping))
 if err != nil {
 return nil, errdefs.System(err)
 }
-container.RWLayer = rwLayer
+ctr.RWLayer = rwLayer

 rootIDs := daemon.idMapping.RootPair()

-if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil {
+if err := idtools.MkdirAndChown(ctr.Root, 0700, rootIDs); err != nil {
 return nil, err
 }
-if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil {
+if err := idtools.MkdirAndChown(ctr.CheckpointDir(), 0700, rootIDs); err != nil {
 return nil, err
 }

-if err := daemon.setHostConfig(container, opts.params.HostConfig); err != nil {
+if err := daemon.setHostConfig(ctr, opts.params.HostConfig); err != nil {
 return nil, err
 }

-if err := daemon.createContainerOSSpecificSettings(container, opts.params.Config, opts.params.HostConfig); err != nil {
+if err := daemon.createContainerOSSpecificSettings(ctr, opts.params.Config, opts.params.HostConfig); err != nil {
 return nil, err
 }

@@ -217,15 +217,15 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 }
 // Make sure NetworkMode has an acceptable value. We do this to ensure
 // backwards API compatibility.
-runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
+runconfig.SetDefaultNetModeIfBlank(ctr.HostConfig)

-daemon.updateContainerNetworkSettings(container, endpointsConfigs)
-if err := daemon.Register(container); err != nil {
+daemon.updateContainerNetworkSettings(ctr, endpointsConfigs)
+if err := daemon.Register(ctr); err != nil {
 return nil, err
 }
-stateCtr.set(container.ID, "stopped")
-daemon.LogContainerEvent(container, "create")
-return container, nil
+stateCtr.set(ctr.ID, "stopped")
+daemon.LogContainerEvent(ctr, "create")
+return ctr, nil
 }

 func toHostConfigSelinuxLabels(labels []string) []string {
@@ -993,8 +993,8 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 }

 lgrMap := make(map[string]image.LayerGetReleaser)
-for os, ls := range layerStores {
-lgrMap[os] = ls
+for los, ls := range layerStores {
+lgrMap[los] = ls
 }
 imageStore, err := image.NewImageStore(ifs, lgrMap)
 if err != nil {
@@ -39,7 +39,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 if daemon.root == "" {
 return nil
 }
-var errors []string
+var errs []string

 regexps := getCleanPatterns(id)
 sc := bufio.NewScanner(reader)
@@ -50,7 +50,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 if p.MatchString(mnt) {
 if err := unmount(mnt); err != nil {
 logrus.Error(err)
-errors = append(errors, err.Error())
+errs = append(errs, err.Error())
 }
 }
 }
@@ -62,8 +62,8 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 return err
 }

-if len(errors) > 0 {
-return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
+if len(errs) > 0 {
+return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errs, "\n"))
 }

 logrus.Debugf("Cleaning up old mountid %v: done.", id)
@@ -82,15 +82,15 @@ func TestGetContainer(t *testing.T) {
 daemon.reserveName(c4.ID, c4.Name)
 daemon.reserveName(c5.ID, c5.Name)

-if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
+if ctr, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); ctr != c2 {
 t.Fatal("Should explicitly match full container IDs")
 }

-if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 {
+if ctr, _ := daemon.GetContainer("75fb0b8009"); ctr != c4 {
 t.Fatal("Should match a partial ID")
 }

-if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 {
+if ctr, _ := daemon.GetContainer("drunk_hawking"); ctr != c2 {
 t.Fatal("Should match a full name")
 }

@@ -99,7 +99,7 @@ func TestGetContainer(t *testing.T) {
 t.Fatal("Should match a full name even though it collides with another container's ID")
 }

-if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 {
+if ctr, _ := daemon.GetContainer("d22d69a2b896"); ctr != c5 {
 t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID")
 }

@@ -176,8 +176,8 @@ func TestContainerInitDNS(t *testing.T) {
 "UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`

 // Container struct only used to retrieve path to config file
-container := &container.Container{Root: containerPath}
-configPath, err := container.ConfigPath()
+ctr := &container.Container{Root: containerPath}
+configPath, err := ctr.ConfigPath()
 if err != nil {
 t.Fatal(err)
 }
@@ -190,7 +190,7 @@ func TestContainerInitDNS(t *testing.T) {
 "Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
 "SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`

-hostConfigPath, err := container.HostConfigPath()
+hostConfigPath, err := ctr.HostConfigPath()
 if err != nil {
 t.Fatal(err)
 }
@@ -384,11 +384,11 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
 adaptSharedNamespaceContainer(daemon, hostConfig)

 var err error
-opts, err := daemon.generateSecurityOpt(hostConfig)
+secOpts, err := daemon.generateSecurityOpt(hostConfig)
 if err != nil {
 return err
 }
-hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...)
+hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...)
 if hostConfig.OomKillDisable == nil {
 defaultOomKillDisable := false
 hostConfig.OomKillDisable = &defaultOomKillDisable
@@ -1310,7 +1310,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools
 }

 func setupDaemonRootPropagation(cfg *config.Config) error {
-rootParentMount, options, err := getSourceMount(cfg.Root)
+rootParentMount, mountOptions, err := getSourceMount(cfg.Root)
 if err != nil {
 return errors.Wrap(err, "error getting daemon root's parent mount")
 }
@@ -1326,7 +1326,7 @@ func setupDaemonRootPropagation(cfg *config.Config) error {
 }
 }()

-if hasMountInfoOption(options, sharedPropagationOption, slavePropagationOption) {
+if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
 cleanupOldFile = true
 return nil
 }
@@ -1745,11 +1745,11 @@ func (daemon *Daemon) initCgroupsPath(path string) error {
 }

 path = filepath.Join(mnt, root, path)
-sysinfo := sysinfo.New(true)
-if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
+sysInfo := sysinfo.New(true)
+if err := maybeCreateCPURealTimeFile(sysInfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
 return err
 }
-return maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
+return maybeCreateCPURealTimeFile(sysInfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
 }

 func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
@@ -24,11 +24,11 @@ type fakeContainerGetter struct {
 }

 func (f *fakeContainerGetter) GetContainer(cid string) (*container.Container, error) {
-container, ok := f.containers[cid]
+ctr, ok := f.containers[cid]
 if !ok {
 return nil, errors.New("container not found")
 }
-return container, nil
+return ctr, nil
 }

 // Unix test as uses settings which are not available on Windows
@@ -138,85 +138,85 @@ func TestAdjustCPUSharesNoAdjustment(t *testing.T) {

 // Unix test as uses settings which are not available on Windows
 func TestParseSecurityOptWithDeprecatedColon(t *testing.T) {
-container := &container.Container{}
-config := &containertypes.HostConfig{}
+ctr := &container.Container{}
+cfg := &containertypes.HostConfig{}

 // test apparmor
-config.SecurityOpt = []string{"apparmor=test_profile"}
-if err := parseSecurityOpt(container, config); err != nil {
+cfg.SecurityOpt = []string{"apparmor=test_profile"}
+if err := parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 }
-if container.AppArmorProfile != "test_profile" {
-t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+if ctr.AppArmorProfile != "test_profile" {
+t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", ctr.AppArmorProfile)
 }

 // test seccomp
 sp := "/path/to/seccomp_test.json"
-config.SecurityOpt = []string{"seccomp=" + sp}
-if err := parseSecurityOpt(container, config); err != nil {
+cfg.SecurityOpt = []string{"seccomp=" + sp}
+if err := parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 }
-if container.SeccompProfile != sp {
-t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile)
+if ctr.SeccompProfile != sp {
+t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, ctr.SeccompProfile)
 }

 // test valid label
-config.SecurityOpt = []string{"label=user:USER"}
-if err := parseSecurityOpt(container, config); err != nil {
+cfg.SecurityOpt = []string{"label=user:USER"}
+if err := parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 }

 // test invalid label
-config.SecurityOpt = []string{"label"}
-if err := parseSecurityOpt(container, config); err == nil {
+cfg.SecurityOpt = []string{"label"}
+if err := parseSecurityOpt(ctr, cfg); err == nil {
 t.Fatal("Expected parseSecurityOpt error, got nil")
 }

 // test invalid opt
-config.SecurityOpt = []string{"test"}
-if err := parseSecurityOpt(container, config); err == nil {
+cfg.SecurityOpt = []string{"test"}
+if err := parseSecurityOpt(ctr, cfg); err == nil {
 t.Fatal("Expected parseSecurityOpt error, got nil")
 }
 }

 func TestParseSecurityOpt(t *testing.T) {
-container := &container.Container{}
-config := &containertypes.HostConfig{}
+ctr := &container.Container{}
+cfg := &containertypes.HostConfig{}

 // test apparmor
-config.SecurityOpt = []string{"apparmor=test_profile"}
-if err := parseSecurityOpt(container, config); err != nil {
+cfg.SecurityOpt = []string{"apparmor=test_profile"}
+if err := parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 }
-if container.AppArmorProfile != "test_profile" {
-t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+if ctr.AppArmorProfile != "test_profile" {
+t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", ctr.AppArmorProfile)
 }

 // test seccomp
 sp := "/path/to/seccomp_test.json"
-config.SecurityOpt = []string{"seccomp=" + sp}
-if err := parseSecurityOpt(container, config); err != nil {
+cfg.SecurityOpt = []string{"seccomp=" + sp}
+if err := parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 }
-if container.SeccompProfile != sp {
-t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
+if ctr.SeccompProfile != sp {
+t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, ctr.SeccompProfile)
 }

 // test valid label
-config.SecurityOpt = []string{"label=user:USER"}
-if err := parseSecurityOpt(container, config); err != nil {
+cfg.SecurityOpt = []string{"label=user:USER"}
+if err := parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 }

 // test invalid label
-config.SecurityOpt = []string{"label"}
-if err := parseSecurityOpt(container, config); err == nil {
+cfg.SecurityOpt = []string{"label"}
+if err := parseSecurityOpt(ctr, cfg); err == nil {
 t.Fatal("Expected parseSecurityOpt error, got nil")
 }

 // test invalid opt
-config.SecurityOpt = []string{"test"}
-if err := parseSecurityOpt(container, config); err == nil {
+cfg.SecurityOpt = []string{"test"}
+if err := parseSecurityOpt(ctr, cfg); err == nil {
 t.Fatal("Expected parseSecurityOpt error, got nil")
 }
 }
@@ -225,28 +225,28 @@ func TestParseNNPSecurityOptions(t *testing.T) {
 daemon := &Daemon{
 configStore: &config.Config{NoNewPrivileges: true},
 }
-container := &container.Container{}
-config := &containertypes.HostConfig{}
+ctr := &container.Container{}
+cfg := &containertypes.HostConfig{}

 // test NNP when "daemon:true" and "no-new-privileges=false""
-config.SecurityOpt = []string{"no-new-privileges=false"}
+cfg.SecurityOpt = []string{"no-new-privileges=false"}

-if err := daemon.parseSecurityOpt(container, config); err != nil {
+if err := daemon.parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
 }
-if container.NoNewPrivileges {
-t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges)
+if ctr.NoNewPrivileges {
+t.Fatalf("container.NoNewPrivileges should be FALSE: %v", ctr.NoNewPrivileges)
 }

 // test NNP when "daemon:false" and "no-new-privileges=true""
 daemon.configStore.NoNewPrivileges = false
-config.SecurityOpt = []string{"no-new-privileges=true"}
+cfg.SecurityOpt = []string{"no-new-privileges=true"}

-if err := daemon.parseSecurityOpt(container, config); err != nil {
+if err := daemon.parseSecurityOpt(ctr, cfg); err != nil {
 t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
 }
-if !container.NoNewPrivileges {
-t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges)
+if !ctr.NoNewPrivileges {
+t.Fatalf("container.NoNewPrivileges should be TRUE: %v", ctr.NoNewPrivileges)
 }
 }

@@ -15,7 +15,7 @@ func TestLogContainerEventCopyLabels(t *testing.T) {
 _, l, _ := e.Subscribe()
 defer e.Evict(l)

-container := &container.Container{
+ctr := &container.Container{
 ID: "container_id",
 Name: "container_name",
 Config: &containertypes.Config{
@@ -29,10 +29,10 @@ func TestLogContainerEventCopyLabels(t *testing.T) {
 daemon := &Daemon{
 EventsService: e,
 }
-daemon.LogContainerEvent(container, "create")
+daemon.LogContainerEvent(ctr, "create")

-if _, mutated := container.Config.Labels["image"]; mutated {
-t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels)
+if _, mutated := ctr.Config.Labels["image"]; mutated {
+t.Fatalf("Expected to not mutate the container labels, got %q", ctr.Config.Labels)
 }

 validateTestAttributes(t, l, map[string]string{
@@ -46,7 +46,7 @@ func TestLogContainerEventWithAttributes(t *testing.T) {
 _, l, _ := e.Subscribe()
 defer e.Evict(l)

-container := &container.Container{
+ctr := &container.Container{
 ID: "container_id",
 Name: "container_name",
 Config: &containertypes.Config{
@@ -63,7 +63,7 @@ func TestLogContainerEventWithAttributes(t *testing.T) {
 "node": "2",
 "foo": "bar",
 }
-daemon.LogContainerEventWithAttributes(container, "create", attributes)
+daemon.LogContainerEventWithAttributes(ctr, "create", attributes)

 validateTestAttributes(t, l, map[string]string{
 "node": "1",
@@ -54,18 +54,18 @@ func (daemon *Daemon) getExecConfig(name string) (*exec.Config, error) {
 // saying the container isn't running, we should return a 404 so that
 // the user sees the same error now that they will after the
 // 5 minute clean-up loop is run which erases old/dead execs.
-container := daemon.containers.Get(ec.ContainerID)
-if container == nil {
+ctr := daemon.containers.Get(ec.ContainerID)
+if ctr == nil {
 return nil, containerNotFound(name)
 }
-if !container.IsRunning() {
-return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String())
+if !ctr.IsRunning() {
+return nil, fmt.Errorf("Container %s is not running: %s", ctr.ID, ctr.State.String())
 }
-if container.IsPaused() {
-return nil, errExecPaused(container.ID)
+if ctr.IsPaused() {
+return nil, errExecPaused(ctr.ID)
 }
-if container.IsRestarting() {
-return nil, errContainerIsRestarting(container.ID)
+if ctr.IsRestarting() {
+return nil, errContainerIsRestarting(ctr.ID)
 }
 return ec, nil
 }
@@ -76,21 +76,21 @@ func (daemon *Daemon) unregisterExecCommand(container *container.Container, exec
 }

 func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return nil, err
 }

-if !container.IsRunning() {
-return nil, errNotRunning(container.ID)
+if !ctr.IsRunning() {
+return nil, errNotRunning(ctr.ID)
 }
-if container.IsPaused() {
+if ctr.IsPaused() {
 return nil, errExecPaused(name)
 }
-if container.IsRestarting() {
-return nil, errContainerIsRestarting(container.ID)
+if ctr.IsRestarting() {
+return nil, errContainerIsRestarting(ctr.ID)
 }
-return container, nil
+return ctr, nil
 }

 // ContainerExecCreate sets up an exec in a running container.
@@ -220,11 +220,11 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, stdin

 p := &specs.Process{}
 if runtime.GOOS != "windows" {
-container, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID)
+ctr, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID)
 if err != nil {
 return err
 }
-spec, err := container.Spec(ctx)
+spec, err := ctr.Spec(ctx)
 if err != nil {
 return err
 }
@@ -14,26 +14,26 @@ import (
 // ContainerExport writes the contents of the container to the given
 // writer. An error is returned if the container cannot be found.
 func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return err
 }

-if isWindows && container.OS == "windows" {
+if isWindows && ctr.OS == "windows" {
 return fmt.Errorf("the daemon on this operating system does not support exporting Windows containers")
 }

-if container.IsDead() {
-err := fmt.Errorf("You cannot export container %s which is Dead", container.ID)
+if ctr.IsDead() {
+err := fmt.Errorf("You cannot export container %s which is Dead", ctr.ID)
 return errdefs.Conflict(err)
 }

-if container.IsRemovalInProgress() {
-err := fmt.Errorf("You cannot export container %s which is being removed", container.ID)
+if ctr.IsRemovalInProgress() {
+err := fmt.Errorf("You cannot export container %s which is being removed", ctr.ID)
 return errdefs.Conflict(err)
 }

-data, err := daemon.containerExport(container)
+data, err := daemon.containerExport(ctr)
 if err != nil {
 return fmt.Errorf("Error exporting container %s: %v", name, err)
 }
@@ -65,7 +65,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 return nil, err
 }

-archive, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{
+archv, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{
 Compression: archive.Uncompressed,
 UIDMaps: daemon.idMapping.UIDs(),
 GIDMaps: daemon.idMapping.GIDs(),
@@ -74,8 +74,8 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 rwlayer.Unmount()
 return nil, err
 }
-arch = ioutils.NewReadCloserWrapper(archive, func() error {
-err := archive.Close()
+arch = ioutils.NewReadCloserWrapper(archv, func() error {
+err := archv.Close()
 rwlayer.Unmount()
 daemon.imageService.ReleaseLayer(rwlayer, container.OS)
 return err
@@ -32,50 +32,50 @@ func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (
 // ContainerInspectCurrent returns low-level information about a
 // container in a most recent api version.
 func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
-container, err := daemon.GetContainer(name)
+ctr, err := daemon.GetContainer(name)
 if err != nil {
 return nil, err
 }

-container.Lock()
+ctr.Lock()

-base, err := daemon.getInspectData(container)
+base, err := daemon.getInspectData(ctr)
 if err != nil {
-container.Unlock()
+ctr.Unlock()
 return nil, err
 }

 apiNetworks := make(map[string]*networktypes.EndpointSettings)
-for name, epConf := range container.NetworkSettings.Networks {
+for name, epConf := range ctr.NetworkSettings.Networks {
 if epConf.EndpointSettings != nil {
 // We must make a copy of this pointer object otherwise it can race with other operations
 apiNetworks[name] = epConf.EndpointSettings.Copy()
 }
 }

-mountPoints := container.GetMountPoints()
+mountPoints := ctr.GetMountPoints()
 networkSettings := &types.NetworkSettings{
 NetworkSettingsBase: types.NetworkSettingsBase{
-Bridge: container.NetworkSettings.Bridge,
-SandboxID: container.NetworkSettings.SandboxID,
-HairpinMode: container.NetworkSettings.HairpinMode,
-LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address,
-LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen,
-SandboxKey: container.NetworkSettings.SandboxKey,
-SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses,
-SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses,
+Bridge: ctr.NetworkSettings.Bridge,
+SandboxID: ctr.NetworkSettings.SandboxID,
+HairpinMode: ctr.NetworkSettings.HairpinMode,
+LinkLocalIPv6Address: ctr.NetworkSettings.LinkLocalIPv6Address,
+LinkLocalIPv6PrefixLen: ctr.NetworkSettings.LinkLocalIPv6PrefixLen,
+SandboxKey: ctr.NetworkSettings.SandboxKey,
+SecondaryIPAddresses: ctr.NetworkSettings.SecondaryIPAddresses,
+SecondaryIPv6Addresses: ctr.NetworkSettings.SecondaryIPv6Addresses,
 },
-DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks),
+DefaultNetworkSettings: daemon.getDefaultNetworkSettings(ctr.NetworkSettings.Networks),
 Networks: apiNetworks,
 }

-ports := make(nat.PortMap, len(container.NetworkSettings.Ports))
-for k, pm := range container.NetworkSettings.Ports {
+ports := make(nat.PortMap, len(ctr.NetworkSettings.Ports))
+for k, pm := range ctr.NetworkSettings.Ports {
 ports[k] = pm
 }
 networkSettings.NetworkSettingsBase.Ports = ports

-container.Unlock()
+ctr.Unlock()

 if size {
 sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID)
@ -86,7 +86,7 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co
|
||||||
return &types.ContainerJSON{
|
return &types.ContainerJSON{
|
||||||
ContainerJSONBase: base,
|
ContainerJSONBase: base,
|
||||||
Mounts: mountPoints,
|
Mounts: mountPoints,
|
||||||
Config: container.Config,
|
Config: ctr.Config,
|
||||||
NetworkSettings: networkSettings,
|
NetworkSettings: networkSettings,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
@ -214,7 +214,7 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, err
|
||||||
return nil, errExecNotFound(id)
|
return nil, errExecNotFound(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
if container := daemon.containers.Get(e.ContainerID); container == nil {
|
if ctr := daemon.containers.Get(e.ContainerID); ctr == nil {
|
||||||
return nil, errExecNotFound(id)
|
return nil, errExecNotFound(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -20,38 +20,38 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON

 // containerInspectPre120 gets containers for pre 1.20 APIs.
 func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

-	container.Lock()
-	defer container.Unlock()
+	ctr.Lock()
+	defer ctr.Unlock()

-	base, err := daemon.getInspectData(container)
+	base, err := daemon.getInspectData(ctr)
 	if err != nil {
 		return nil, err
 	}

 	volumes := make(map[string]string)
 	volumesRW := make(map[string]bool)
-	for _, m := range container.MountPoints {
+	for _, m := range ctr.MountPoints {
 		volumes[m.Destination] = m.Path()
 		volumesRW[m.Destination] = m.RW
 	}

 	config := &v1p19.ContainerConfig{
-		Config: container.Config,
-		MacAddress: container.Config.MacAddress,
-		NetworkDisabled: container.Config.NetworkDisabled,
-		ExposedPorts: container.Config.ExposedPorts,
-		VolumeDriver: container.HostConfig.VolumeDriver,
-		Memory: container.HostConfig.Memory,
-		MemorySwap: container.HostConfig.MemorySwap,
-		CPUShares: container.HostConfig.CPUShares,
-		CPUSet: container.HostConfig.CpusetCpus,
+		Config: ctr.Config,
+		MacAddress: ctr.Config.MacAddress,
+		NetworkDisabled: ctr.Config.NetworkDisabled,
+		ExposedPorts: ctr.Config.ExposedPorts,
+		VolumeDriver: ctr.HostConfig.VolumeDriver,
+		Memory: ctr.HostConfig.Memory,
+		MemorySwap: ctr.HostConfig.MemorySwap,
+		CPUShares: ctr.HostConfig.CPUShares,
+		CPUSet: ctr.HostConfig.CpusetCpus,
 	}
-	networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings)
+	networkSettings := daemon.getBackwardsCompatibleNetworkSettings(ctr.NetworkSettings)

 	return &v1p19.ContainerJSON{
 		ContainerJSONBase: base,
@@ -69,8 +69,8 @@ func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *containe
 }

 func containerListContainsName(containers []*types.Container, name string) bool {
-	for _, container := range containers {
-		for _, containerName := range container.Names {
+	for _, ctr := range containers {
+		for _, containerName := range ctr.Names {
 			if containerName == name {
 				return true
 			}
@@ -33,20 +33,20 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 	if !(config.ShowStdout || config.ShowStderr) {
 		return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream"))
 	}
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return nil, false, err
 	}

-	if container.RemovalInProgress || container.Dead {
+	if ctr.RemovalInProgress || ctr.Dead {
 		return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal"))
 	}

-	if container.HostConfig.LogConfig.Type == "none" {
+	if ctr.HostConfig.LogConfig.Type == "none" {
 		return nil, false, logger.ErrReadLogsNotSupported{}
 	}

-	cLog, cLogCreated, err := daemon.getLogger(container)
+	cLog, cLogCreated, err := daemon.getLogger(ctr)
 	if err != nil {
 		return nil, false, err
 	}
@@ -157,7 +157,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 			}
 		}
 	}()
-	return messageChan, container.Config.Tty, nil
+	return messageChan, ctr.Config.Tty, nil
 }

 func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) {
@@ -430,12 +430,12 @@ func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnet

 // UpdateContainerServiceConfig updates a service configuration.
 func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
 	}

-	container.NetworkSettings.Service = serviceConfig
+	ctr.NetworkSettings.Service = serviceConfig
 	return nil
 }

@@ -443,24 +443,24 @@ func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, service
 // network. If either cannot be found, an err is returned. If the
 // network cannot be set up, an err is returned.
 func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
 	}
-	return daemon.ConnectToNetwork(container, networkName, endpointConfig)
+	return daemon.ConnectToNetwork(ctr, networkName, endpointConfig)
 }

 // DisconnectContainerFromNetwork disconnects the given container from
 // the given network. If either cannot be found, an err is returned.
 func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		if force {
 			return daemon.ForceEndpointDelete(containerName, networkName)
 		}
 		return err
 	}
-	return daemon.DisconnectFromNetwork(container, networkName, force)
+	return daemon.DisconnectFromNetwork(ctr, networkName, force)
 }

 // GetNetworkDriverList returns the list of plugins drivers
@@ -485,10 +485,10 @@ func (daemon *Daemon) GetNetworkDriverList() []string {

 	networks := daemon.netController.Networks()

-	for _, network := range networks {
-		if !pluginMap[network.Type()] {
-			pluginList = append(pluginList, network.Type())
-			pluginMap[network.Type()] = true
+	for _, nw := range networks {
+		if !pluginMap[nw.Type()] {
+			pluginList = append(pluginList, nw.Type())
+			pluginMap[nw.Type()] = true
 		}
 	}

@@ -10,11 +10,11 @@ import (

 // ContainerPause pauses a container
 func (daemon *Daemon) ContainerPause(name string) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}
-	return daemon.containerPause(container)
+	return daemon.containerPause(ctr)
 }

 // containerPause pauses the container execution without stopping the process.
@@ -15,15 +15,15 @@ import (
 // stop. Returns an error if the container cannot be found, or if
 // there is an underlying error at any stage of the restart.
 func (daemon *Daemon) ContainerRestart(name string, seconds *int) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}
 	if seconds == nil {
-		stopTimeout := container.StopTimeout()
+		stopTimeout := ctr.StopTimeout()
 		seconds = &stopTimeout
 	}
-	if err := daemon.containerRestart(container, *seconds); err != nil {
+	if err := daemon.containerRestart(ctr, *seconds); err != nil {
 		return fmt.Errorf("Cannot restart container %s: %v", name, err)
 	}
 	return nil
@@ -23,24 +23,24 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 		return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
 	}

-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}

 	validateState := func() error {
-		container.Lock()
-		defer container.Unlock()
+		ctr.Lock()
+		defer ctr.Unlock()

-		if container.Paused {
+		if ctr.Paused {
 			return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
 		}

-		if container.Running {
+		if ctr.Running {
 			return containerNotModifiedError{running: true}
 		}

-		if container.RemovalInProgress || container.Dead {
+		if ctr.RemovalInProgress || ctr.Dead {
 			return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
 		}
 		return nil
@@ -56,26 +56,26 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 		// creating a container, not during start.
 		if hostConfig != nil {
 			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
-			oldNetworkMode := container.HostConfig.NetworkMode
-			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
+			oldNetworkMode := ctr.HostConfig.NetworkMode
+			if err := daemon.setSecurityOptions(ctr, hostConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
 			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
-			if err := daemon.setHostConfig(container, hostConfig); err != nil {
+			if err := daemon.setHostConfig(ctr, hostConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
-			newNetworkMode := container.HostConfig.NetworkMode
+			newNetworkMode := ctr.HostConfig.NetworkMode
 			if string(oldNetworkMode) != string(newNetworkMode) {
 				// if user has change the network mode on starting, clean up the
 				// old networks. It is a deprecated feature and has been removed in Docker 1.12
-				container.NetworkSettings.Networks = nil
-				if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+				ctr.NetworkSettings.Networks = nil
+				if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
 					return errdefs.System(err)
 				}
 			}
-			container.InitDNSHostConfig()
+			ctr.InitDNSHostConfig()
 		}
 	} else {
 		if hostConfig != nil {
@@ -85,17 +85,17 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos

 	// check if hostConfig is in line with the current system settings.
 	// It may happen cgroups are umounted or the like.
-	if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil {
+	if _, err = daemon.verifyContainerSettings(ctr.OS, ctr.HostConfig, nil, false); err != nil {
 		return errdefs.InvalidParameter(err)
 	}
 	// Adapt for old containers in case we have updates in this function and
 	// old containers never have chance to call the new function in create stage.
 	if hostConfig != nil {
-		if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
+		if err := daemon.adaptContainerSettings(ctr.HostConfig, false); err != nil {
 			return errdefs.InvalidParameter(err)
 		}
 	}
-	return daemon.containerStart(container, checkpoint, checkpointDir, true)
+	return daemon.containerStart(ctr, checkpoint, checkpointDir, true)
 }

 // containerStart prepares the container to run by setting up everything the
@@ -25,16 +25,17 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
 		return errors.New("API versions pre v1.21 do not support stats on Windows")
 	}

-	container, err := daemon.GetContainer(prefixOrName)
+	ctr, err := daemon.GetContainer(prefixOrName)
 	if err != nil {
 		return err
 	}

 	// If the container is either not running or restarting and requires no stream, return an empty stats.
-	if (!container.IsRunning() || container.IsRestarting()) && !config.Stream {
+	if (!ctr.IsRunning() || ctr.IsRestarting()) && !config.Stream {
 		return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{
-			Name: container.Name,
-			ID: container.ID})
+			Name: ctr.Name,
+			ID: ctr.ID,
+		})
 	}

 	outStream := config.OutStream
@@ -49,8 +50,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
 	var preRead time.Time
 	getStatJSON := func(v interface{}) *types.StatsJSON {
 		ss := v.(types.StatsJSON)
-		ss.Name = container.Name
-		ss.ID = container.ID
+		ss.Name = ctr.Name
+		ss.ID = ctr.ID
 		ss.PreCPUStats = preCPUStats
 		ss.PreRead = preRead
 		preCPUStats = ss.CPUStats
@@ -60,8 +61,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c

 	enc := json.NewEncoder(outStream)

-	updates := daemon.subscribeToContainerStats(container)
-	defer daemon.unsubscribeToContainerStats(container, updates)
+	updates := daemon.subscribeToContainerStats(ctr)
+	defer daemon.unsubscribeToContainerStats(ctr, updates)

 	noStreamFirstFrame := true
 	for {
@@ -144,20 +144,20 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.Conta
 		return nil, err
 	}

-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

-	if !container.IsRunning() {
-		return nil, errNotRunning(container.ID)
+	if !ctr.IsRunning() {
+		return nil, errNotRunning(ctr.ID)
 	}

-	if container.IsRestarting() {
-		return nil, errContainerIsRestarting(container.ID)
+	if ctr.IsRestarting() {
+		return nil, errContainerIsRestarting(ctr.ID)
 	}

-	procs, err := daemon.containerd.ListPids(context.Background(), container.ID)
+	procs, err := daemon.containerd.ListPids(context.Background(), ctr.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -184,6 +184,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.Conta
 	if err != nil {
 		return nil, err
 	}
-	daemon.LogContainerEvent(container, "top")
+	daemon.LogContainerEvent(ctr, "top")
 	return procList, nil
 }
@@ -10,33 +10,33 @@ import (

 // ContainerUnpause unpauses a container
 func (daemon *Daemon) ContainerUnpause(name string) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}
-	return daemon.containerUnpause(container)
+	return daemon.containerUnpause(ctr)
 }

 // containerUnpause resumes the container execution after the container is paused.
-func (daemon *Daemon) containerUnpause(container *container.Container) error {
-	container.Lock()
-	defer container.Unlock()
+func (daemon *Daemon) containerUnpause(ctr *container.Container) error {
+	ctr.Lock()
+	defer ctr.Unlock()

 	// We cannot unpause the container which is not paused
-	if !container.Paused {
-		return fmt.Errorf("Container %s is not paused", container.ID)
+	if !ctr.Paused {
+		return fmt.Errorf("Container %s is not paused", ctr.ID)
 	}

-	if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil {
-		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
+	if err := daemon.containerd.Resume(context.Background(), ctr.ID); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", ctr.ID, err)
 	}

-	container.Paused = false
-	daemon.setStateCounter(container)
-	daemon.updateHealthMonitor(container)
-	daemon.LogContainerEvent(container, "unpause")
+	ctr.Paused = false
+	daemon.setStateCounter(ctr)
+	daemon.updateHealthMonitor(ctr)
+	daemon.LogContainerEvent(ctr, "unpause")

-	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+	if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
 		logrus.WithError(err).Warn("could not save container to disk")
 	}

@@ -35,57 +35,57 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		return nil
 	}

-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}

 	restoreConfig := false
-	backupHostConfig := *container.HostConfig
+	backupHostConfig := *ctr.HostConfig
 	defer func() {
 		if restoreConfig {
-			container.Lock()
-			container.HostConfig = &backupHostConfig
-			container.CheckpointTo(daemon.containersReplica)
-			container.Unlock()
+			ctr.Lock()
+			ctr.HostConfig = &backupHostConfig
+			ctr.CheckpointTo(daemon.containersReplica)
+			ctr.Unlock()
 		}
 	}()

-	if container.RemovalInProgress || container.Dead {
-		return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
+	if ctr.RemovalInProgress || ctr.Dead {
+		return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
 	}

-	container.Lock()
-	if err := container.UpdateContainer(hostConfig); err != nil {
+	ctr.Lock()
+	if err := ctr.UpdateContainer(hostConfig); err != nil {
 		restoreConfig = true
-		container.Unlock()
-		return errCannotUpdate(container.ID, err)
+		ctr.Unlock()
+		return errCannotUpdate(ctr.ID, err)
 	}
-	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+	if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
 		restoreConfig = true
-		container.Unlock()
-		return errCannotUpdate(container.ID, err)
+		ctr.Unlock()
+		return errCannotUpdate(ctr.ID, err)
 	}
-	container.Unlock()
+	ctr.Unlock()

 	// if Restart Policy changed, we need to update container monitor
 	if hostConfig.RestartPolicy.Name != "" {
-		container.UpdateMonitor(hostConfig.RestartPolicy)
+		ctr.UpdateMonitor(hostConfig.RestartPolicy)
 	}

 	// If container is not running, update hostConfig struct is enough,
 	// resources will be updated when the container is started again.
 	// If container is running (including paused), we need to update configs
 	// to the real world.
-	if container.IsRunning() && !container.IsRestarting() {
-		if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
+	if ctr.IsRunning() && !ctr.IsRestarting() {
+		if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil {
 			restoreConfig = true
 			// TODO: it would be nice if containerd responded with better errors here so we can classify this better.
-			return errCannotUpdate(container.ID, errdefs.System(err))
+			return errCannotUpdate(ctr.ID, errdefs.System(err))
 		}
 	}

-	daemon.LogContainerEvent(container, "update")
+	daemon.LogContainerEvent(ctr, "update")

 	return nil
 }
@@ -34,12 +34,12 @@ func TestBindDaemonRoot(t *testing.T) {
 		"source is /": "/",
 	} {
 		t.Run(desc, func(t *testing.T) {
-			mount := mount.Mount{
+			mnt := mount.Mount{
 				Type: mount.TypeBind,
 				Source: source,
 				BindOptions: test.opts,
 			}
-			needsProp, err := d.validateBindDaemonRoot(mount)
+			needsProp, err := d.validateBindDaemonRoot(mnt)
 			if (err != nil) != test.err {
 				t.Fatalf("expected err=%v, got: %v", test.err, err)
 			}
@@ -82,12 +82,12 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er
 	// metadata, the ownership must be set properly for potential container
 	// remapped root (user namespaces)
 	rootIDs := daemon.idMapping.RootPair()
-	for _, mount := range netMounts {
+	for _, mnt := range netMounts {
 		// we should only modify ownership of network files within our own container
 		// metadata repository. If the user specifies a mount path external, it is
 		// up to the user to make sure the file has proper ownership for userns
-		if strings.Index(mount.Source, daemon.repository) == 0 {
-			if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil {
+		if strings.Index(mnt.Source, daemon.repository) == 0 {
+			if err := os.Chown(mnt.Source, rootIDs.UID, rootIDs.GID); err != nil {
 				return nil, err
 			}
 		}
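For context, the collision these renames avoid is ordinary Go shadowing: once a local variable reuses an imported package's name, the package becomes unreachable for the rest of that scope. A minimal, self-contained sketch (not part of this commit; it uses the standard strings package as a stand-in for the daemon's container, archive, mount, and network packages):

	package main

	import "strings"

	func main() {
		// From this line on, the identifier "strings" refers to the slice,
		// shadowing the imported "strings" package in this scope.
		strings := []string{"a", "b"}

		// Referring to the package now resolves to the slice and fails to compile:
		// _ = strings.Join([]string{"x", "y"}, ",") // error: strings.Join undefined (type []string has no field or method Join)
		_ = strings
	}

Renaming the local variable (here, the commit's choice of ctr, archv, mnt, or nw) keeps both the variable and the package usable in the same scope.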