daemon: rename variables that collide with imported package names

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2019-08-09 14:10:07 +02:00
parent 797ec8e913
commit eb14d936bf
GPG key ID: 76698F39D527CE8C
28 changed files with 306 additions and 305 deletions
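Context for the renames below: in Go, declaring a local identifier with the same name as an imported package shadows that package for the rest of the scope, so any later pkg.Symbol reference in that scope stops compiling (or, worse, silently resolves to a field or method of the variable). A minimal, self-contained sketch of the pitfall, using the standard library's fmt package rather than this repository's code:

package main

import "fmt"

func main() {
	fmt.Println("the package is still reachable here")

	fmt := "a local variable now shadows the imported fmt package"
	_ = fmt
	// fmt.Println("unreachable") // compile error: fmt.Println undefined
	//                            // (fmt is a string in this scope)
}

Renaming such locals (container to ctr, options to sbOptions, archive to archv, and so on) keeps every imported package usable throughout the affected functions.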

View file

@@ -49,17 +49,17 @@ func archivePath(i interface{}, src string, opts *archive.TarOptions, root strin
 // ContainerCopy performs a deprecated operation of archiving the resource at
 // the specified path in the container identified by the given name.
 func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

 	// Make sure an online file-system operation is permitted.
-	if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+	if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 		return nil, errdefs.System(err)
 	}

-	data, err := daemon.containerCopy(container, res)
+	data, err := daemon.containerCopy(ctr, res)
 	if err == nil {
 		return data, nil
 	}
@@ -73,17 +73,17 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
 // ContainerStatPath stats the filesystem resource at the specified path in the
 // container identified by the given name.
 func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

 	// Make sure an online file-system operation is permitted.
-	if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+	if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 		return nil, errdefs.System(err)
 	}

-	stat, err = daemon.containerStatPath(container, path)
+	stat, err = daemon.containerStatPath(ctr, path)
 	if err == nil {
 		return stat, nil
 	}
@@ -98,17 +98,17 @@ func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.C
 // specified path in the container identified by the given name. Returns a
 // tar archive of the resource and whether it was a directory or a single file.
 func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, nil, err
 	}

 	// Make sure an online file-system operation is permitted.
-	if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+	if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 		return nil, nil, errdefs.System(err)
 	}

-	content, stat, err = daemon.containerArchivePath(container, path)
+	content, stat, err = daemon.containerArchivePath(ctr, path)
 	if err == nil {
 		return content, stat, nil
 	}
@@ -126,17 +126,17 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOve
 // be an error if unpacking the given content would cause an existing directory
 // to be replaced with a non-directory and vice versa.
 func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}

 	// Make sure an online file-system operation is permitted.
-	if err := daemon.isOnlineFSOperationPermitted(container); err != nil {
+	if err := daemon.isOnlineFSOperationPermitted(ctr); err != nil {
 		return errdefs.System(err)
 	}

-	err = daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content)
+	err = daemon.containerExtractToDir(ctr, path, copyUIDGID, noOverwriteDirNonDir, content)
 	if err == nil {
 		return nil
 	}
@@ -433,7 +433,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 		basePath = d
 		filter = []string{f}
 	}
-	archive, err := archivePath(driver, basePath, &archive.TarOptions{
+	archv, err := archivePath(driver, basePath, &archive.TarOptions{
 		Compression: archive.Uncompressed,
 		IncludeFiles: filter,
 	}, container.BaseFS.Path())
@@ -441,8 +441,8 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 		return nil, err
 	}

-	reader := ioutils.NewReadCloserWrapper(archive, func() error {
-		err := archive.Close()
+	reader := ioutils.NewReadCloserWrapper(archv, func() error {
+		err := archv.Close()
 		container.DetachAndUnmount(daemon.LogVolumeEvent)
 		daemon.Unmount(container)
 		container.Unlock()
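The last hunk above also shows why the old code compiled at all: in a short variable declaration the new name only comes into scope after the statement, so archive.TarOptions and archive.Uncompressed on the right-hand side of archive, err := archivePath(...) still resolved to the package, and the later archive.Close() happened to be a valid method call on the returned io.ReadCloser. A standard-library sketch of that scoping rule (illustrative, not from this commit):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// On the right-hand side of :=, "strings" still means the package;
	// the new variable is not in scope until the statement completes.
	strings := strings.ToUpper("shadowed")
	fmt.Println(strings)         // prints SHADOWED
	// strings.ToLower(strings)  // would not compile: strings is a string here
}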

View file

@@ -27,15 +27,15 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 		}
 	}

-	container, err := daemon.GetContainer(prefixOrName)
+	ctr, err := daemon.GetContainer(prefixOrName)
 	if err != nil {
 		return err
 	}
-	if container.IsPaused() {
+	if ctr.IsPaused() {
 		err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName)
 		return errdefs.Conflict(err)
 	}
-	if container.IsRestarting() {
+	if ctr.IsRestarting() {
 		err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName)
 		return errdefs.Conflict(err)
 	}
@@ -44,11 +44,11 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 		UseStdin: c.UseStdin,
 		UseStdout: c.UseStdout,
 		UseStderr: c.UseStderr,
-		TTY: container.Config.Tty,
-		CloseStdin: container.Config.StdinOnce,
+		TTY: ctr.Config.Tty,
+		CloseStdin: ctr.Config.StdinOnce,
 		DetachKeys: keys,
 	}
-	container.StreamConfig.AttachStreams(&cfg)
+	ctr.StreamConfig.AttachStreams(&cfg)

 	inStream, outStream, errStream, err := c.GetStreams()
 	if err != nil {
@@ -56,7 +56,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 	}
 	defer inStream.Close()

-	if !container.Config.Tty && c.MuxStreams {
+	if !ctr.Config.Tty && c.MuxStreams {
 		errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr)
 		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
 	}
@@ -71,7 +71,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA
 		cfg.Stderr = errStream
 	}

-	if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil {
+	if err := daemon.containerAttach(ctr, &cfg, c.Logs, c.Stream); err != nil {
 		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
 	}
 	return nil
@@ -79,7 +79,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA

 // ContainerAttachRaw attaches the provided streams to the container's stdio
 func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error {
-	container, err := daemon.GetContainer(prefixOrName)
+	ctr, err := daemon.GetContainer(prefixOrName)
 	if err != nil {
 		return err
 	}
@@ -87,10 +87,10 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose
 		UseStdin: stdin != nil,
 		UseStdout: stdout != nil,
 		UseStderr: stderr != nil,
-		TTY: container.Config.Tty,
-		CloseStdin: container.Config.StdinOnce,
+		TTY: ctr.Config.Tty,
+		CloseStdin: ctr.Config.StdinOnce,
 	}
-	container.StreamConfig.AttachStreams(&cfg)
+	ctr.StreamConfig.AttachStreams(&cfg)
 	close(attached)
 	if cfg.UseStdin {
 		cfg.Stdin = stdin
@@ -102,7 +102,7 @@ func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadClose
 		cfg.Stderr = stderr
 	}

-	return daemon.containerAttach(container, &cfg, false, doStream)
+	return daemon.containerAttach(ctr, &cfg, false, doStream)
 }

 func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error {

View file

@@ -92,20 +92,20 @@ func (daemon *Daemon) containerRoot(id string) string {
 // Load reads the contents of a container from disk
 // This is typically done at startup.
 func (daemon *Daemon) load(id string) (*container.Container, error) {
-	container := daemon.newBaseContainer(id)
+	ctr := daemon.newBaseContainer(id)

-	if err := container.FromDisk(); err != nil {
+	if err := ctr.FromDisk(); err != nil {
 		return nil, err
 	}

-	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
+	if err := label.ReserveLabel(ctr.ProcessLabel); err != nil {
 		return nil, err
 	}

-	if container.ID != id {
-		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
+	if ctr.ID != id {
+		return ctr, fmt.Errorf("Container %s is stored at %s", ctr.ID, id)
 	}

-	return container, nil
+	return ctr, nil
 }

 // Register makes a container object usable by the daemon as <container.ID>

View file

@@ -342,12 +342,12 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
 		return nil
 	}

-	options, err := daemon.buildSandboxOptions(container)
+	sbOptions, err := daemon.buildSandboxOptions(container)
 	if err != nil {
 		return fmt.Errorf("Update network failed: %v", err)
 	}

-	if err := sb.Refresh(options...); err != nil {
+	if err := sb.Refresh(sbOptions...); err != nil {
 		return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err)
 	}
@@ -378,7 +378,7 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN
 	if container.NetworkSettings.Networks != nil {
 		networkName := n.Name()
 		containerName := strings.TrimPrefix(container.Name, "/")
-		if network, ok := container.NetworkSettings.Networks[networkName]; ok && network.EndpointID != "" {
+		if nw, ok := container.NetworkSettings.Networks[networkName]; ok && nw.EndpointID != "" {
 			err := fmt.Errorf("%s is already attached to network %s", containerName, networkName)
 			return n, nil, errdefs.Conflict(err)
 		}
@@ -584,11 +584,11 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error {
 	// create its network sandbox now if not present
 	if len(networks) == 0 {
 		if nil == daemon.getNetworkSandbox(container) {
-			options, err := daemon.buildSandboxOptions(container)
+			sbOptions, err := daemon.buildSandboxOptions(container)
 			if err != nil {
 				return err
 			}
-			sb, err := daemon.netController.NewSandbox(container.ID, options...)
+			sb, err := daemon.netController.NewSandbox(container.ID, sbOptions...)
 			if err != nil {
 				return err
 			}
@@ -802,11 +802,11 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName
 	}

 	if sb == nil {
-		options, err := daemon.buildSandboxOptions(container)
+		sbOptions, err := daemon.buildSandboxOptions(container)
 		if err != nil {
 			return err
 		}
-		sb, err = controller.NewSandbox(container.ID, options...)
+		sb, err = controller.NewSandbox(container.ID, sbOptions...)
 		if err != nil {
 			return err
 		}
@@ -1135,11 +1135,11 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw

 // ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response
 func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
 	}
-	sb := daemon.getNetworkSandbox(container)
+	sb := daemon.getNetworkSandbox(ctr)
 	if sb == nil {
 		return fmt.Errorf("network sandbox does not exist for container %s", containerName)
 	}
@@ -1148,11 +1148,11 @@ func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) erro

 // DeactivateContainerServiceBinding removes this container from load balancer active rotation, and DNS response
 func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
 	}
-	sb := daemon.getNetworkSandbox(container)
+	sb := daemon.getNetworkSandbox(ctr)
 	if sb == nil {
 		// If the network sandbox is not found, then there is nothing to deactivate
 		logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName)

View file

@@ -61,33 +61,33 @@ func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]s
 func (daemon *Daemon) getIpcContainer(id string) (*container.Container, error) {
 	errMsg := "can't join IPC of container " + id
 	// Check the container exists
-	container, err := daemon.GetContainer(id)
+	ctr, err := daemon.GetContainer(id)
 	if err != nil {
 		return nil, errors.Wrap(err, errMsg)
 	}
 	// Check the container is running and not restarting
-	if err := daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting); err != nil {
+	if err := daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting); err != nil {
 		return nil, errors.Wrap(err, errMsg)
 	}
 	// Check the container ipc is shareable
-	if st, err := os.Stat(container.ShmPath); err != nil || !st.IsDir() {
+	if st, err := os.Stat(ctr.ShmPath); err != nil || !st.IsDir() {
 		if err == nil || os.IsNotExist(err) {
 			return nil, errors.New(errMsg + ": non-shareable IPC (hint: use IpcMode:shareable for the donor container)")
 		}
 		// stat() failed?
-		return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+container.ShmPath)
+		return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+ctr.ShmPath)
 	}

-	return container, nil
+	return ctr, nil
 }

-func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) {
-	containerID := container.HostConfig.PidMode.Container()
-	container, err := daemon.GetContainer(containerID)
+func (daemon *Daemon) getPidContainer(ctr *container.Container) (*container.Container, error) {
+	containerID := ctr.HostConfig.PidMode.Container()
+	ctr, err := daemon.GetContainer(containerID)
 	if err != nil {
 		return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID)
 	}
-	return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting)
+	return ctr, daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting)
 }

 func containerIsRunning(c *container.Container) error {
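In getPidContainer the collision sat in the signature: a parameter named after a package hides that package across the entire function body, so no container.* reference could ever be added inside. A hypothetical sketch of the same situation with a standard-library package:

package main

import (
	"fmt"
	"strconv"
)

// A parameter that reuses an imported package's name ("strconv" here)
// shadows that package everywhere inside the function body.
func describe(strconv int) string {
	// return strconv.Itoa(strconv) // compile error: strconv is an int here
	return fmt.Sprintf("value=%d", strconv)
}

func main() {
	fmt.Println(describe(42))
}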

View file

@@ -90,7 +90,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container
 		return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err)
 	}

-	container, err := daemon.create(opts)
+	ctr, err := daemon.create(opts)
 	if err != nil {
 		return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err
 	}
@@ -100,16 +100,16 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.Container
 		warnings = make([]string, 0) // Create an empty slice to avoid https://github.com/moby/moby/issues/38222
 	}

-	return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil
+	return containertypes.ContainerCreateCreatedBody{ID: ctr.ID, Warnings: warnings}, nil
 }

 // Create creates a new container from the given configuration with a given name.
 func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr error) {
 	var (
-		container *container.Container
-		img       *image.Image
-		imgID     image.ID
-		err       error
+		ctr   *container.Container
+		img   *image.Image
+		imgID image.ID
+		err   error
 	)

 	os := runtime.GOOS
@@ -153,22 +153,22 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 		return nil, errdefs.InvalidParameter(err)
 	}

-	if container, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil {
+	if ctr, err = daemon.newContainer(opts.params.Name, os, opts.params.Config, opts.params.HostConfig, imgID, opts.managed); err != nil {
 		return nil, err
 	}
 	defer func() {
 		if retErr != nil {
-			if err := daemon.cleanupContainer(container, true, true); err != nil {
+			if err := daemon.cleanupContainer(ctr, true, true); err != nil {
 				logrus.Errorf("failed to cleanup container on create error: %v", err)
 			}
 		}
 	}()

-	if err := daemon.setSecurityOptions(container, opts.params.HostConfig); err != nil {
+	if err := daemon.setSecurityOptions(ctr, opts.params.HostConfig); err != nil {
 		return nil, err
 	}

-	container.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt
+	ctr.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt

 	// Fixes: https://github.com/moby/moby/issues/34074 and
 	// https://github.com/docker/for-win/issues/999.
@@ -176,38 +176,38 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 	// do this on Windows as there's no effective sandbox size limit other than
 	// physical on Linux.
 	if isWindows {
-		if container.HostConfig.StorageOpt == nil {
-			container.HostConfig.StorageOpt = make(map[string]string)
+		if ctr.HostConfig.StorageOpt == nil {
+			ctr.HostConfig.StorageOpt = make(map[string]string)
 		}
 		for _, v := range daemon.configStore.GraphOptions {
 			opt := strings.SplitN(v, "=", 2)
-			if _, ok := container.HostConfig.StorageOpt[opt[0]]; !ok {
-				container.HostConfig.StorageOpt[opt[0]] = opt[1]
+			if _, ok := ctr.HostConfig.StorageOpt[opt[0]]; !ok {
+				ctr.HostConfig.StorageOpt[opt[0]] = opt[1]
 			}
 		}
 	}

 	// Set RWLayer for container after mount labels have been set
-	rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMapping))
+	rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping))
 	if err != nil {
 		return nil, errdefs.System(err)
 	}
-	container.RWLayer = rwLayer
+	ctr.RWLayer = rwLayer

 	rootIDs := daemon.idMapping.RootPair()

-	if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil {
+	if err := idtools.MkdirAndChown(ctr.Root, 0700, rootIDs); err != nil {
 		return nil, err
 	}
-	if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil {
+	if err := idtools.MkdirAndChown(ctr.CheckpointDir(), 0700, rootIDs); err != nil {
 		return nil, err
 	}

-	if err := daemon.setHostConfig(container, opts.params.HostConfig); err != nil {
+	if err := daemon.setHostConfig(ctr, opts.params.HostConfig); err != nil {
 		return nil, err
 	}

-	if err := daemon.createContainerOSSpecificSettings(container, opts.params.Config, opts.params.HostConfig); err != nil {
+	if err := daemon.createContainerOSSpecificSettings(ctr, opts.params.Config, opts.params.HostConfig); err != nil {
 		return nil, err
 	}
@@ -217,15 +217,15 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 	}

 	// Make sure NetworkMode has an acceptable value. We do this to ensure
 	// backwards API compatibility.
-	runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
+	runconfig.SetDefaultNetModeIfBlank(ctr.HostConfig)

-	daemon.updateContainerNetworkSettings(container, endpointsConfigs)
-	if err := daemon.Register(container); err != nil {
+	daemon.updateContainerNetworkSettings(ctr, endpointsConfigs)
+	if err := daemon.Register(ctr); err != nil {
 		return nil, err
 	}
-	stateCtr.set(container.ID, "stopped")
-	daemon.LogContainerEvent(container, "create")
-	return container, nil
+	stateCtr.set(ctr.ID, "stopped")
+	daemon.LogContainerEvent(ctr, "create")
+	return ctr, nil
 }

 func toHostConfigSelinuxLabels(labels []string) []string {

View file

@@ -993,8 +993,8 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	}

 	lgrMap := make(map[string]image.LayerGetReleaser)
-	for os, ls := range layerStores {
-		lgrMap[os] = ls
+	for los, ls := range layerStores {
+		lgrMap[los] = ls
 	}
 	imageStore, err := image.NewImageStore(ifs, lgrMap)
 	if err != nil {
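Here the range variable os hid the standard library's os package inside the loop body. A self-contained sketch of the same trap (the map contents are made up for illustration):

package main

import (
	"fmt"
	"os"
)

func main() {
	layerStores := map[string]string{"linux": "overlay2", "windows": "windowsfilter"}
	for os, driver := range layerStores { // "os" shadows the os package in the loop body
		fmt.Println(os, driver)
		// os.Getenv("PATH") // would not compile here: os is a string
	}
	fmt.Println(os.Getenv("PATH")) // outside the loop the package is visible again
}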

View file

@@ -39,7 +39,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 	if daemon.root == "" {
 		return nil
 	}
-	var errors []string
+	var errs []string

 	regexps := getCleanPatterns(id)
 	sc := bufio.NewScanner(reader)
@@ -50,7 +50,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 			if p.MatchString(mnt) {
 				if err := unmount(mnt); err != nil {
 					logrus.Error(err)
-					errors = append(errors, err.Error())
+					errs = append(errs, err.Error())
 				}
 			}
 		}
@@ -62,8 +62,8 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 		return err
 	}

-	if len(errors) > 0 {
-		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
+	if len(errs) > 0 {
+		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errs, "\n"))
 	}

 	logrus.Debugf("Cleaning up old mountid %v: done.", id)
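The errors slice in this function is the classic variant: once a local named errors exists, errors.New (and errors.Is/errors.As in newer code) can no longer be called in that scope. A runnable sketch mirroring the errs-collecting shape of cleanupMountsFromReaderByID, with the shadow-free name:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// cleanup runs each step and joins the failures into one error.
func cleanup(steps []func() error) error {
	var errs []string // "errs", not "errors", so the errors package stays usable
	for _, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, err.Error())
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errs, "\n"))
	}
	return nil
}

func main() {
	err := cleanup([]func() error{
		func() error { return errors.New("unmount /mnt/a: device busy") },
		func() error { return nil },
	})
	fmt.Println(err)
}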

View file

@@ -82,15 +82,15 @@ func TestGetContainer(t *testing.T) {
 	daemon.reserveName(c4.ID, c4.Name)
 	daemon.reserveName(c5.ID, c5.Name)

-	if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
+	if ctr, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); ctr != c2 {
 		t.Fatal("Should explicitly match full container IDs")
 	}

-	if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 {
+	if ctr, _ := daemon.GetContainer("75fb0b8009"); ctr != c4 {
 		t.Fatal("Should match a partial ID")
 	}

-	if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 {
+	if ctr, _ := daemon.GetContainer("drunk_hawking"); ctr != c2 {
 		t.Fatal("Should match a full name")
 	}
@@ -99,7 +99,7 @@ func TestGetContainer(t *testing.T) {
 		t.Fatal("Should match a full name even though it collides with another container's ID")
 	}

-	if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 {
+	if ctr, _ := daemon.GetContainer("d22d69a2b896"); ctr != c5 {
 		t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID")
 	}
@@ -176,8 +176,8 @@ func TestContainerInitDNS(t *testing.T) {
 		"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`

 	// Container struct only used to retrieve path to config file
-	container := &container.Container{Root: containerPath}
-	configPath, err := container.ConfigPath()
+	ctr := &container.Container{Root: containerPath}
+	configPath, err := ctr.ConfigPath()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -190,7 +190,7 @@ func TestContainerInitDNS(t *testing.T) {
 		"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
 		"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`

-	hostConfigPath, err := container.HostConfigPath()
+	hostConfigPath, err := ctr.HostConfigPath()
 	if err != nil {
 		t.Fatal(err)
 	}

View file

@@ -384,11 +384,11 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
 	adaptSharedNamespaceContainer(daemon, hostConfig)

 	var err error
-	opts, err := daemon.generateSecurityOpt(hostConfig)
+	secOpts, err := daemon.generateSecurityOpt(hostConfig)
 	if err != nil {
 		return err
 	}
-	hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...)
+	hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...)
 	if hostConfig.OomKillDisable == nil {
 		defaultOomKillDisable := false
 		hostConfig.OomKillDisable = &defaultOomKillDisable
@@ -1310,7 +1310,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools
 }

 func setupDaemonRootPropagation(cfg *config.Config) error {
-	rootParentMount, options, err := getSourceMount(cfg.Root)
+	rootParentMount, mountOptions, err := getSourceMount(cfg.Root)
 	if err != nil {
 		return errors.Wrap(err, "error getting daemon root's parent mount")
 	}
@@ -1326,7 +1326,7 @@ func setupDaemonRootPropagation(cfg *config.Config) error {
 		}
 	}()

-	if hasMountInfoOption(options, sharedPropagationOption, slavePropagationOption) {
+	if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
 		cleanupOldFile = true
 		return nil
 	}
@@ -1745,11 +1745,11 @@ func (daemon *Daemon) initCgroupsPath(path string) error {
 	}
 	path = filepath.Join(mnt, root, path)

-	sysinfo := sysinfo.New(true)
-	if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
+	sysInfo := sysinfo.New(true)
+	if err := maybeCreateCPURealTimeFile(sysInfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
 		return err
 	}
-	return maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
+	return maybeCreateCPURealTimeFile(sysInfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
 }

 func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
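Note that sysinfo to sysInfo is a one-character fix: Go identifiers are case-sensitive, so changing the case alone is enough to stop hiding the package. A tiny illustration with the runtime package:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	runTime := "just a local" // does not shadow the runtime package
	fmt.Println(runTime, runtime.NumCPU())
}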

View file

@@ -24,11 +24,11 @@ type fakeContainerGetter struct {
 }

 func (f *fakeContainerGetter) GetContainer(cid string) (*container.Container, error) {
-	container, ok := f.containers[cid]
+	ctr, ok := f.containers[cid]
 	if !ok {
 		return nil, errors.New("container not found")
 	}
-	return container, nil
+	return ctr, nil
 }

 // Unix test as uses settings which are not available on Windows
@@ -138,85 +138,85 @@ func TestAdjustCPUSharesNoAdjustment(t *testing.T) {

 // Unix test as uses settings which are not available on Windows
 func TestParseSecurityOptWithDeprecatedColon(t *testing.T) {
-	container := &container.Container{}
-	config := &containertypes.HostConfig{}
+	ctr := &container.Container{}
+	cfg := &containertypes.HostConfig{}

 	// test apparmor
-	config.SecurityOpt = []string{"apparmor=test_profile"}
-	if err := parseSecurityOpt(container, config); err != nil {
+	cfg.SecurityOpt = []string{"apparmor=test_profile"}
+	if err := parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 	}
-	if container.AppArmorProfile != "test_profile" {
-		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+	if ctr.AppArmorProfile != "test_profile" {
+		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", ctr.AppArmorProfile)
 	}

 	// test seccomp
 	sp := "/path/to/seccomp_test.json"
-	config.SecurityOpt = []string{"seccomp=" + sp}
-	if err := parseSecurityOpt(container, config); err != nil {
+	cfg.SecurityOpt = []string{"seccomp=" + sp}
+	if err := parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 	}
-	if container.SeccompProfile != sp {
-		t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile)
+	if ctr.SeccompProfile != sp {
+		t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, ctr.SeccompProfile)
 	}

 	// test valid label
-	config.SecurityOpt = []string{"label=user:USER"}
-	if err := parseSecurityOpt(container, config); err != nil {
+	cfg.SecurityOpt = []string{"label=user:USER"}
+	if err := parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 	}

 	// test invalid label
-	config.SecurityOpt = []string{"label"}
-	if err := parseSecurityOpt(container, config); err == nil {
+	cfg.SecurityOpt = []string{"label"}
+	if err := parseSecurityOpt(ctr, cfg); err == nil {
 		t.Fatal("Expected parseSecurityOpt error, got nil")
 	}

 	// test invalid opt
-	config.SecurityOpt = []string{"test"}
-	if err := parseSecurityOpt(container, config); err == nil {
+	cfg.SecurityOpt = []string{"test"}
+	if err := parseSecurityOpt(ctr, cfg); err == nil {
 		t.Fatal("Expected parseSecurityOpt error, got nil")
 	}
 }

 func TestParseSecurityOpt(t *testing.T) {
-	container := &container.Container{}
-	config := &containertypes.HostConfig{}
+	ctr := &container.Container{}
+	cfg := &containertypes.HostConfig{}

 	// test apparmor
-	config.SecurityOpt = []string{"apparmor=test_profile"}
-	if err := parseSecurityOpt(container, config); err != nil {
+	cfg.SecurityOpt = []string{"apparmor=test_profile"}
+	if err := parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 	}
-	if container.AppArmorProfile != "test_profile" {
-		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile)
+	if ctr.AppArmorProfile != "test_profile" {
		t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", ctr.AppArmorProfile)
 	}

 	// test seccomp
 	sp := "/path/to/seccomp_test.json"
-	config.SecurityOpt = []string{"seccomp=" + sp}
-	if err := parseSecurityOpt(container, config); err != nil {
+	cfg.SecurityOpt = []string{"seccomp=" + sp}
+	if err := parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 	}
-	if container.SeccompProfile != sp {
-		t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile)
+	if ctr.SeccompProfile != sp {
+		t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, ctr.SeccompProfile)
 	}

 	// test valid label
-	config.SecurityOpt = []string{"label=user:USER"}
-	if err := parseSecurityOpt(container, config); err != nil {
+	cfg.SecurityOpt = []string{"label=user:USER"}
+	if err := parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected parseSecurityOpt error: %v", err)
 	}

 	// test invalid label
-	config.SecurityOpt = []string{"label"}
-	if err := parseSecurityOpt(container, config); err == nil {
+	cfg.SecurityOpt = []string{"label"}
+	if err := parseSecurityOpt(ctr, cfg); err == nil {
 		t.Fatal("Expected parseSecurityOpt error, got nil")
 	}

 	// test invalid opt
-	config.SecurityOpt = []string{"test"}
-	if err := parseSecurityOpt(container, config); err == nil {
+	cfg.SecurityOpt = []string{"test"}
+	if err := parseSecurityOpt(ctr, cfg); err == nil {
 		t.Fatal("Expected parseSecurityOpt error, got nil")
 	}
 }
@@ -225,28 +225,28 @@ func TestParseNNPSecurityOptions(t *testing.T) {
 	daemon := &Daemon{
 		configStore: &config.Config{NoNewPrivileges: true},
 	}
-	container := &container.Container{}
-	config := &containertypes.HostConfig{}
+	ctr := &container.Container{}
+	cfg := &containertypes.HostConfig{}

 	// test NNP when "daemon:true" and "no-new-privileges=false""
-	config.SecurityOpt = []string{"no-new-privileges=false"}
+	cfg.SecurityOpt = []string{"no-new-privileges=false"}

-	if err := daemon.parseSecurityOpt(container, config); err != nil {
+	if err := daemon.parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
 	}
-	if container.NoNewPrivileges {
-		t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges)
+	if ctr.NoNewPrivileges {
+		t.Fatalf("container.NoNewPrivileges should be FALSE: %v", ctr.NoNewPrivileges)
 	}

 	// test NNP when "daemon:false" and "no-new-privileges=true""
 	daemon.configStore.NoNewPrivileges = false
-	config.SecurityOpt = []string{"no-new-privileges=true"}
+	cfg.SecurityOpt = []string{"no-new-privileges=true"}

-	if err := daemon.parseSecurityOpt(container, config); err != nil {
+	if err := daemon.parseSecurityOpt(ctr, cfg); err != nil {
 		t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
 	}
-	if !container.NoNewPrivileges {
-		t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges)
+	if !ctr.NoNewPrivileges {
+		t.Fatalf("container.NoNewPrivileges should be TRUE: %v", ctr.NoNewPrivileges)
 	}
 }

View file

@@ -15,7 +15,7 @@ func TestLogContainerEventCopyLabels(t *testing.T) {
 	_, l, _ := e.Subscribe()
 	defer e.Evict(l)

-	container := &container.Container{
+	ctr := &container.Container{
 		ID: "container_id",
 		Name: "container_name",
 		Config: &containertypes.Config{
@@ -29,10 +29,10 @@ func TestLogContainerEventCopyLabels(t *testing.T) {
 	daemon := &Daemon{
 		EventsService: e,
 	}
-	daemon.LogContainerEvent(container, "create")
+	daemon.LogContainerEvent(ctr, "create")

-	if _, mutated := container.Config.Labels["image"]; mutated {
-		t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels)
+	if _, mutated := ctr.Config.Labels["image"]; mutated {
+		t.Fatalf("Expected to not mutate the container labels, got %q", ctr.Config.Labels)
 	}

 	validateTestAttributes(t, l, map[string]string{
@@ -46,7 +46,7 @@ func TestLogContainerEventWithAttributes(t *testing.T) {
 	_, l, _ := e.Subscribe()
 	defer e.Evict(l)

-	container := &container.Container{
+	ctr := &container.Container{
 		ID: "container_id",
 		Name: "container_name",
 		Config: &containertypes.Config{
@@ -63,7 +63,7 @@ func TestLogContainerEventWithAttributes(t *testing.T) {
 		"node": "2",
 		"foo": "bar",
 	}
-	daemon.LogContainerEventWithAttributes(container, "create", attributes)
+	daemon.LogContainerEventWithAttributes(ctr, "create", attributes)

 	validateTestAttributes(t, l, map[string]string{
 		"node": "1",

View file

@@ -54,18 +54,18 @@ func (daemon *Daemon) getExecConfig(name string) (*exec.Config, error) {
 	// saying the container isn't running, we should return a 404 so that
 	// the user sees the same error now that they will after the
 	// 5 minute clean-up loop is run which erases old/dead execs.
-	container := daemon.containers.Get(ec.ContainerID)
-	if container == nil {
+	ctr := daemon.containers.Get(ec.ContainerID)
+	if ctr == nil {
 		return nil, containerNotFound(name)
 	}
-	if !container.IsRunning() {
-		return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String())
+	if !ctr.IsRunning() {
+		return nil, fmt.Errorf("Container %s is not running: %s", ctr.ID, ctr.State.String())
 	}
-	if container.IsPaused() {
-		return nil, errExecPaused(container.ID)
+	if ctr.IsPaused() {
+		return nil, errExecPaused(ctr.ID)
 	}
-	if container.IsRestarting() {
-		return nil, errContainerIsRestarting(container.ID)
+	if ctr.IsRestarting() {
+		return nil, errContainerIsRestarting(ctr.ID)
 	}
 	return ec, nil
 }
@@ -76,21 +76,21 @@ func (daemon *Daemon) unregisterExecCommand(container *container.Container, exec
 }

 func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

-	if !container.IsRunning() {
-		return nil, errNotRunning(container.ID)
+	if !ctr.IsRunning() {
+		return nil, errNotRunning(ctr.ID)
 	}
-	if container.IsPaused() {
+	if ctr.IsPaused() {
 		return nil, errExecPaused(name)
 	}
-	if container.IsRestarting() {
-		return nil, errContainerIsRestarting(container.ID)
+	if ctr.IsRestarting() {
+		return nil, errContainerIsRestarting(ctr.ID)
 	}
-	return container, nil
+	return ctr, nil
 }

 // ContainerExecCreate sets up an exec in a running container.
@@ -220,11 +220,11 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, stdin
 	p := &specs.Process{}
 	if runtime.GOOS != "windows" {
-		container, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID)
+		ctr, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID)
 		if err != nil {
 			return err
 		}
-		spec, err := container.Spec(ctx)
+		spec, err := ctr.Spec(ctx)
 		if err != nil {
 			return err
 		}

View file

@@ -14,26 +14,26 @@ import (
 // ContainerExport writes the contents of the container to the given
 // writer. An error is returned if the container cannot be found.
 func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}

-	if isWindows && container.OS == "windows" {
+	if isWindows && ctr.OS == "windows" {
 		return fmt.Errorf("the daemon on this operating system does not support exporting Windows containers")
 	}

-	if container.IsDead() {
-		err := fmt.Errorf("You cannot export container %s which is Dead", container.ID)
+	if ctr.IsDead() {
+		err := fmt.Errorf("You cannot export container %s which is Dead", ctr.ID)
 		return errdefs.Conflict(err)
 	}

-	if container.IsRemovalInProgress() {
-		err := fmt.Errorf("You cannot export container %s which is being removed", container.ID)
+	if ctr.IsRemovalInProgress() {
+		err := fmt.Errorf("You cannot export container %s which is being removed", ctr.ID)
 		return errdefs.Conflict(err)
 	}

-	data, err := daemon.containerExport(container)
+	data, err := daemon.containerExport(ctr)
 	if err != nil {
 		return fmt.Errorf("Error exporting container %s: %v", name, err)
 	}
@@ -65,7 +65,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 		return nil, err
 	}

-	archive, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{
+	archv, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{
 		Compression: archive.Uncompressed,
 		UIDMaps: daemon.idMapping.UIDs(),
 		GIDMaps: daemon.idMapping.GIDs(),
@@ -74,8 +74,8 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
 		rwlayer.Unmount()
 		return nil, err
 	}
-	arch = ioutils.NewReadCloserWrapper(archive, func() error {
-		err := archive.Close()
+	arch = ioutils.NewReadCloserWrapper(archv, func() error {
+		err := archv.Close()
 		rwlayer.Unmount()
 		daemon.imageService.ReleaseLayer(rwlayer, container.OS)
 		return err

View file

@@ -32,50 +32,50 @@ func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (
 // ContainerInspectCurrent returns low-level information about a
 // container in a most recent api version.
 func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

-	container.Lock()
+	ctr.Lock()

-	base, err := daemon.getInspectData(container)
+	base, err := daemon.getInspectData(ctr)
 	if err != nil {
-		container.Unlock()
+		ctr.Unlock()
 		return nil, err
 	}

 	apiNetworks := make(map[string]*networktypes.EndpointSettings)
-	for name, epConf := range container.NetworkSettings.Networks {
+	for name, epConf := range ctr.NetworkSettings.Networks {
 		if epConf.EndpointSettings != nil {
 			// We must make a copy of this pointer object otherwise it can race with other operations
 			apiNetworks[name] = epConf.EndpointSettings.Copy()
 		}
 	}

-	mountPoints := container.GetMountPoints()
+	mountPoints := ctr.GetMountPoints()
 	networkSettings := &types.NetworkSettings{
 		NetworkSettingsBase: types.NetworkSettingsBase{
-			Bridge: container.NetworkSettings.Bridge,
-			SandboxID: container.NetworkSettings.SandboxID,
-			HairpinMode: container.NetworkSettings.HairpinMode,
-			LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address,
-			LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen,
-			SandboxKey: container.NetworkSettings.SandboxKey,
-			SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses,
-			SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses,
+			Bridge: ctr.NetworkSettings.Bridge,
+			SandboxID: ctr.NetworkSettings.SandboxID,
+			HairpinMode: ctr.NetworkSettings.HairpinMode,
+			LinkLocalIPv6Address: ctr.NetworkSettings.LinkLocalIPv6Address,
+			LinkLocalIPv6PrefixLen: ctr.NetworkSettings.LinkLocalIPv6PrefixLen,
+			SandboxKey: ctr.NetworkSettings.SandboxKey,
+			SecondaryIPAddresses: ctr.NetworkSettings.SecondaryIPAddresses,
+			SecondaryIPv6Addresses: ctr.NetworkSettings.SecondaryIPv6Addresses,
 		},
-		DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks),
+		DefaultNetworkSettings: daemon.getDefaultNetworkSettings(ctr.NetworkSettings.Networks),
 		Networks: apiNetworks,
 	}

-	ports := make(nat.PortMap, len(container.NetworkSettings.Ports))
-	for k, pm := range container.NetworkSettings.Ports {
+	ports := make(nat.PortMap, len(ctr.NetworkSettings.Ports))
+	for k, pm := range ctr.NetworkSettings.Ports {
 		ports[k] = pm
 	}
 	networkSettings.NetworkSettingsBase.Ports = ports

-	container.Unlock()
+	ctr.Unlock()

 	if size {
 		sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID)
@@ -86,7 +86,7 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co
 	return &types.ContainerJSON{
 		ContainerJSONBase: base,
 		Mounts: mountPoints,
-		Config: container.Config,
+		Config: ctr.Config,
 		NetworkSettings: networkSettings,
 	}, nil
 }
@@ -214,7 +214,7 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, err
 		return nil, errExecNotFound(id)
 	}

-	if container := daemon.containers.Get(e.ContainerID); container == nil {
+	if ctr := daemon.containers.Get(e.ContainerID); ctr == nil {
 		return nil, errExecNotFound(id)
 	}

View file

@@ -20,38 +20,38 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON
 // containerInspectPre120 gets containers for pre 1.20 APIs.
 func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

-	container.Lock()
-	defer container.Unlock()
+	ctr.Lock()
+	defer ctr.Unlock()

-	base, err := daemon.getInspectData(container)
+	base, err := daemon.getInspectData(ctr)
 	if err != nil {
 		return nil, err
 	}

 	volumes := make(map[string]string)
 	volumesRW := make(map[string]bool)
-	for _, m := range container.MountPoints {
+	for _, m := range ctr.MountPoints {
 		volumes[m.Destination] = m.Path()
 		volumesRW[m.Destination] = m.RW
 	}

 	config := &v1p19.ContainerConfig{
-		Config: container.Config,
-		MacAddress: container.Config.MacAddress,
-		NetworkDisabled: container.Config.NetworkDisabled,
-		ExposedPorts: container.Config.ExposedPorts,
-		VolumeDriver: container.HostConfig.VolumeDriver,
-		Memory: container.HostConfig.Memory,
-		MemorySwap: container.HostConfig.MemorySwap,
-		CPUShares: container.HostConfig.CPUShares,
-		CPUSet: container.HostConfig.CpusetCpus,
+		Config: ctr.Config,
+		MacAddress: ctr.Config.MacAddress,
+		NetworkDisabled: ctr.Config.NetworkDisabled,
+		ExposedPorts: ctr.Config.ExposedPorts,
+		VolumeDriver: ctr.HostConfig.VolumeDriver,
+		Memory: ctr.HostConfig.Memory,
+		MemorySwap: ctr.HostConfig.MemorySwap,
+		CPUShares: ctr.HostConfig.CPUShares,
+		CPUSet: ctr.HostConfig.CpusetCpus,
 	}

-	networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings)
+	networkSettings := daemon.getBackwardsCompatibleNetworkSettings(ctr.NetworkSettings)

 	return &v1p19.ContainerJSON{
 		ContainerJSONBase: base,

View file

@@ -69,8 +69,8 @@ func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *containe
 }

 func containerListContainsName(containers []*types.Container, name string) bool {
-	for _, container := range containers {
-		for _, containerName := range container.Names {
+	for _, ctr := range containers {
+		for _, containerName := range ctr.Names {
 			if containerName == name {
 				return true
 			}

View file

@@ -33,20 +33,20 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 	if !(config.ShowStdout || config.ShowStderr) {
 		return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream"))
 	}
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return nil, false, err
 	}

-	if container.RemovalInProgress || container.Dead {
+	if ctr.RemovalInProgress || ctr.Dead {
 		return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal"))
 	}

-	if container.HostConfig.LogConfig.Type == "none" {
+	if ctr.HostConfig.LogConfig.Type == "none" {
 		return nil, false, logger.ErrReadLogsNotSupported{}
 	}

-	cLog, cLogCreated, err := daemon.getLogger(container)
+	cLog, cLogCreated, err := daemon.getLogger(ctr)
 	if err != nil {
 		return nil, false, err
 	}
@@ -157,7 +157,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
 			}
 		}
 	}()
-	return messageChan, container.Config.Tty, nil
+	return messageChan, ctr.Config.Tty, nil
 }

 func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) {

View file

@@ -430,12 +430,12 @@ func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnet

 // UpdateContainerServiceConfig updates a service configuration.
 func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
 	}

-	container.NetworkSettings.Service = serviceConfig
+	ctr.NetworkSettings.Service = serviceConfig
 	return nil
 }
@@ -443,24 +443,24 @@ func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, service
 // network. If either cannot be found, an err is returned. If the
 // network cannot be set up, an err is returned.
 func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
 	}
-	return daemon.ConnectToNetwork(container, networkName, endpointConfig)
+	return daemon.ConnectToNetwork(ctr, networkName, endpointConfig)
 }

 // DisconnectContainerFromNetwork disconnects the given container from
 // the given network. If either cannot be found, an err is returned.
 func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
-	container, err := daemon.GetContainer(containerName)
+	ctr, err := daemon.GetContainer(containerName)
 	if err != nil {
 		if force {
 			return daemon.ForceEndpointDelete(containerName, networkName)
 		}
 		return err
 	}
-	return daemon.DisconnectFromNetwork(container, networkName, force)
+	return daemon.DisconnectFromNetwork(ctr, networkName, force)
 }

 // GetNetworkDriverList returns the list of plugins drivers
@@ -485,10 +485,10 @@ func (daemon *Daemon) GetNetworkDriverList() []string {
 	networks := daemon.netController.Networks()
-	for _, network := range networks {
-		if !pluginMap[network.Type()] {
-			pluginList = append(pluginList, network.Type())
-			pluginMap[network.Type()] = true
+	for _, nw := range networks {
+		if !pluginMap[nw.Type()] {
+			pluginList = append(pluginList, nw.Type())
+			pluginMap[nw.Type()] = true
 		}
 	}

View file

@@ -10,11 +10,11 @@ import (

 // ContainerPause pauses a container
 func (daemon *Daemon) ContainerPause(name string) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}
-	return daemon.containerPause(container)
+	return daemon.containerPause(ctr)
 }

 // containerPause pauses the container execution without stopping the process.

View file

@@ -15,15 +15,15 @@ import (
 // stop. Returns an error if the container cannot be found, or if
 // there is an underlying error at any stage of the restart.
 func (daemon *Daemon) ContainerRestart(name string, seconds *int) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}
 	if seconds == nil {
-		stopTimeout := container.StopTimeout()
+		stopTimeout := ctr.StopTimeout()
 		seconds = &stopTimeout
 	}
-	if err := daemon.containerRestart(container, *seconds); err != nil {
+	if err := daemon.containerRestart(ctr, *seconds); err != nil {
 		return fmt.Errorf("Cannot restart container %s: %v", name, err)
 	}
 	return nil

View file

@@ -23,24 +23,24 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 		return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
 	}

-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}

 	validateState := func() error {
-		container.Lock()
-		defer container.Unlock()
+		ctr.Lock()
+		defer ctr.Unlock()

-		if container.Paused {
+		if ctr.Paused {
 			return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
 		}

-		if container.Running {
+		if ctr.Running {
 			return containerNotModifiedError{running: true}
 		}

-		if container.RemovalInProgress || container.Dead {
+		if ctr.RemovalInProgress || ctr.Dead {
 			return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
 		}
 		return nil
@@ -56,26 +56,26 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 		// creating a container, not during start.
 		if hostConfig != nil {
 			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
-			oldNetworkMode := container.HostConfig.NetworkMode
-			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
+			oldNetworkMode := ctr.HostConfig.NetworkMode
+			if err := daemon.setSecurityOptions(ctr, hostConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
 			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
-			if err := daemon.setHostConfig(container, hostConfig); err != nil {
+			if err := daemon.setHostConfig(ctr, hostConfig); err != nil {
 				return errdefs.InvalidParameter(err)
 			}
-			newNetworkMode := container.HostConfig.NetworkMode
+			newNetworkMode := ctr.HostConfig.NetworkMode
 			if string(oldNetworkMode) != string(newNetworkMode) {
 				// if user has change the network mode on starting, clean up the
 				// old networks. It is a deprecated feature and has been removed in Docker 1.12
-				container.NetworkSettings.Networks = nil
-				if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+				ctr.NetworkSettings.Networks = nil
+				if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
					return errdefs.System(err)
 				}
 			}
-			container.InitDNSHostConfig()
+			ctr.InitDNSHostConfig()
 		}
 	} else {
 		if hostConfig != nil {
@@ -85,17 +85,17 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 	// check if hostConfig is in line with the current system settings.
 	// It may happen cgroups are umounted or the like.
-	if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil {
+	if _, err = daemon.verifyContainerSettings(ctr.OS, ctr.HostConfig, nil, false); err != nil {
 		return errdefs.InvalidParameter(err)
 	}
 	// Adapt for old containers in case we have updates in this function and
 	// old containers never have chance to call the new function in create stage.
 	if hostConfig != nil {
-		if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
+		if err := daemon.adaptContainerSettings(ctr.HostConfig, false); err != nil {
 			return errdefs.InvalidParameter(err)
 		}
 	}

-	return daemon.containerStart(container, checkpoint, checkpointDir, true)
+	return daemon.containerStart(ctr, checkpoint, checkpointDir, true)
 }

 // containerStart prepares the container to run by setting up everything the

View file

@@ -25,16 +25,17 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
 		return errors.New("API versions pre v1.21 do not support stats on Windows")
 	}

-	container, err := daemon.GetContainer(prefixOrName)
+	ctr, err := daemon.GetContainer(prefixOrName)
 	if err != nil {
 		return err
 	}

 	// If the container is either not running or restarting and requires no stream, return an empty stats.
-	if (!container.IsRunning() || container.IsRestarting()) && !config.Stream {
+	if (!ctr.IsRunning() || ctr.IsRestarting()) && !config.Stream {
 		return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{
-			Name: container.Name,
-			ID: container.ID})
+			Name: ctr.Name,
+			ID: ctr.ID,
+		})
 	}

 	outStream := config.OutStream
@@ -49,8 +50,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
 	var preRead time.Time
 	getStatJSON := func(v interface{}) *types.StatsJSON {
 		ss := v.(types.StatsJSON)
-		ss.Name = container.Name
-		ss.ID = container.ID
+		ss.Name = ctr.Name
+		ss.ID = ctr.ID
 		ss.PreCPUStats = preCPUStats
 		ss.PreRead = preRead
 		preCPUStats = ss.CPUStats
@@ -60,8 +61,8 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c
 	enc := json.NewEncoder(outStream)

-	updates := daemon.subscribeToContainerStats(container)
-	defer daemon.unsubscribeToContainerStats(container, updates)
+	updates := daemon.subscribeToContainerStats(ctr)
+	defer daemon.unsubscribeToContainerStats(ctr, updates)

 	noStreamFirstFrame := true
 	for {

View file

@@ -144,20 +144,20 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.Conta
 		return nil, err
 	}

-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}

-	if !container.IsRunning() {
-		return nil, errNotRunning(container.ID)
+	if !ctr.IsRunning() {
+		return nil, errNotRunning(ctr.ID)
 	}

-	if container.IsRestarting() {
-		return nil, errContainerIsRestarting(container.ID)
+	if ctr.IsRestarting() {
+		return nil, errContainerIsRestarting(ctr.ID)
 	}

-	procs, err := daemon.containerd.ListPids(context.Background(), container.ID)
+	procs, err := daemon.containerd.ListPids(context.Background(), ctr.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -184,6 +184,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.Conta
 	if err != nil {
 		return nil, err
 	}
-	daemon.LogContainerEvent(container, "top")
+	daemon.LogContainerEvent(ctr, "top")
 	return procList, nil
 }

View file

@@ -10,33 +10,33 @@ import (

 // ContainerUnpause unpauses a container
 func (daemon *Daemon) ContainerUnpause(name string) error {
-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}
-	return daemon.containerUnpause(container)
+	return daemon.containerUnpause(ctr)
 }

 // containerUnpause resumes the container execution after the container is paused.
-func (daemon *Daemon) containerUnpause(container *container.Container) error {
-	container.Lock()
-	defer container.Unlock()
+func (daemon *Daemon) containerUnpause(ctr *container.Container) error {
+	ctr.Lock()
+	defer ctr.Unlock()

 	// We cannot unpause the container which is not paused
-	if !container.Paused {
-		return fmt.Errorf("Container %s is not paused", container.ID)
+	if !ctr.Paused {
+		return fmt.Errorf("Container %s is not paused", ctr.ID)
 	}

-	if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil {
-		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
+	if err := daemon.containerd.Resume(context.Background(), ctr.ID); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", ctr.ID, err)
 	}

-	container.Paused = false
-	daemon.setStateCounter(container)
-	daemon.updateHealthMonitor(container)
-	daemon.LogContainerEvent(container, "unpause")
+	ctr.Paused = false
+	daemon.setStateCounter(ctr)
+	daemon.updateHealthMonitor(ctr)
+	daemon.LogContainerEvent(ctr, "unpause")

-	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+	if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
 		logrus.WithError(err).Warn("could not save container to disk")
 	}

View file

@@ -35,57 +35,57 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		return nil
 	}

-	container, err := daemon.GetContainer(name)
+	ctr, err := daemon.GetContainer(name)
 	if err != nil {
 		return err
 	}

 	restoreConfig := false
-	backupHostConfig := *container.HostConfig
+	backupHostConfig := *ctr.HostConfig

 	defer func() {
 		if restoreConfig {
-			container.Lock()
-			container.HostConfig = &backupHostConfig
-			container.CheckpointTo(daemon.containersReplica)
-			container.Unlock()
+			ctr.Lock()
+			ctr.HostConfig = &backupHostConfig
+			ctr.CheckpointTo(daemon.containersReplica)
+			ctr.Unlock()
 		}
 	}()

-	if container.RemovalInProgress || container.Dead {
-		return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
+	if ctr.RemovalInProgress || ctr.Dead {
+		return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
 	}

-	container.Lock()
-	if err := container.UpdateContainer(hostConfig); err != nil {
+	ctr.Lock()
+	if err := ctr.UpdateContainer(hostConfig); err != nil {
 		restoreConfig = true
-		container.Unlock()
-		return errCannotUpdate(container.ID, err)
+		ctr.Unlock()
+		return errCannotUpdate(ctr.ID, err)
 	}
-	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+	if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
 		restoreConfig = true
-		container.Unlock()
-		return errCannotUpdate(container.ID, err)
+		ctr.Unlock()
+		return errCannotUpdate(ctr.ID, err)
 	}
-	container.Unlock()
+	ctr.Unlock()

 	// if Restart Policy changed, we need to update container monitor
 	if hostConfig.RestartPolicy.Name != "" {
-		container.UpdateMonitor(hostConfig.RestartPolicy)
+		ctr.UpdateMonitor(hostConfig.RestartPolicy)
 	}

 	// If container is not running, update hostConfig struct is enough,
 	// resources will be updated when the container is started again.
 	// If container is running (including paused), we need to update configs
 	// to the real world.
-	if container.IsRunning() && !container.IsRestarting() {
-		if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
+	if ctr.IsRunning() && !ctr.IsRestarting() {
+		if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil {
 			restoreConfig = true
 			// TODO: it would be nice if containerd responded with better errors here so we can classify this better.
-			return errCannotUpdate(container.ID, errdefs.System(err))
+			return errCannotUpdate(ctr.ID, errdefs.System(err))
 		}
 	}

-	daemon.LogContainerEvent(container, "update")
+	daemon.LogContainerEvent(ctr, "update")

 	return nil
 }

View file

@@ -34,12 +34,12 @@ func TestBindDaemonRoot(t *testing.T) {
 		"source is /": "/",
 	} {
 		t.Run(desc, func(t *testing.T) {
-			mount := mount.Mount{
+			mnt := mount.Mount{
 				Type: mount.TypeBind,
 				Source: source,
 				BindOptions: test.opts,
 			}
-			needsProp, err := d.validateBindDaemonRoot(mount)
+			needsProp, err := d.validateBindDaemonRoot(mnt)
 			if (err != nil) != test.err {
 				t.Fatalf("expected err=%v, got: %v", test.err, err)
 			}
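TestBindDaemonRoot shows how a shadow can hide in plain sight: mount := mount.Mount{...} compiled because the package is still visible on the right-hand side of :=, and the following lines only touched the variable; the breakage would only surface once the mount package was needed again later in the scope. An illustrative stdlib sketch where the variable's own methods keep the code looking package-like:

package main

import (
	"fmt"
	"time"
)

func main() {
	time := time.Now()       // RHS still sees the package; "time" is now a time.Time value
	fmt.Println(time.Unix()) // a method on the variable, not a package function
	// time.Sleep(time.Second) // compile error: Sleep and Second are not defined on a time.Time
}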

View file

@@ -82,12 +82,12 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er
 	// metadata, the ownership must be set properly for potential container
 	// remapped root (user namespaces)
 	rootIDs := daemon.idMapping.RootPair()
-	for _, mount := range netMounts {
+	for _, mnt := range netMounts {
 		// we should only modify ownership of network files within our own container
 		// metadata repository. If the user specifies a mount path external, it is
 		// up to the user to make sure the file has proper ownership for userns
-		if strings.Index(mount.Source, daemon.repository) == 0 {
-			if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil {
+		if strings.Index(mnt.Source, daemon.repository) == 0 {
+			if err := os.Chown(mnt.Source, rootIDs.UID, rootIDs.GID); err != nil {
 				return nil, err
 			}
 		}