volumes/subpath: Plumb context

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
Paweł Gronowski 2023-07-27 14:56:28 +02:00
parent cb1af229f2
commit 5bbcc41c20
19 changed files with 103 additions and 90 deletions

View file

@ -514,14 +514,14 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
}
// UnmountVolumes unmounts all volumes
func (container *Container) UnmountVolumes(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
func (container *Container) UnmountVolumes(ctx context.Context, volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
var errs []string
for _, volumeMount := range container.MountPoints {
if volumeMount.Volume == nil {
continue
}
if err := volumeMount.Cleanup(); err != nil {
if err := volumeMount.Cleanup(ctx); err != nil {
errs = append(errs, err.Error())
continue
}

View file

@ -371,7 +371,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name string, ac
Warn("Unable to unmount")
}
}
return container.UnmountVolumes(volumeEventLog)
return container.UnmountVolumes(ctx, volumeEventLog)
}
// ignoreUnsupportedXAttrs ignores errors when extended attributes

View file

@ -1,6 +1,7 @@
package container // import "github.com/docker/docker/container"
import (
"context"
"fmt"
"os"
"path/filepath"
@ -128,7 +129,7 @@ func (container *Container) ConfigMounts() []Mount {
// On Windows it only delegates to `UnmountVolumes` since there is nothing to
// force unmount.
func (container *Container) DetachAndUnmount(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
return container.UnmountVolumes(volumeEventLog)
return container.UnmountVolumes(context.TODO(), volumeEventLog)
}
// TmpfsMounts returns the list of tmpfs mounts

View file

@ -17,6 +17,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/container"
"github.com/docker/docker/internal/compatcontext"
"github.com/docker/docker/internal/mounttree"
"github.com/docker/docker/internal/unshare"
"github.com/docker/docker/pkg/fileutils"
@ -54,6 +55,8 @@ type containerFSView struct {
// openContainerFS opens a new view of the container's filesystem.
func (daemon *Daemon) openContainerFS(container *container.Container) (_ *containerFSView, err error) {
ctx := context.TODO()
if err := daemon.Mount(container); err != nil {
return nil, err
}
@ -63,14 +66,15 @@ func (daemon *Daemon) openContainerFS(container *container.Container) (_ *contai
}
}()
mounts, cleanup, err := daemon.setupMounts(container)
mounts, cleanup, err := daemon.setupMounts(ctx, container)
if err != nil {
return nil, err
}
defer func() {
cleanup()
ctx := compatcontext.WithoutCancel(ctx)
cleanup(ctx)
if err != nil {
_ = container.UnmountVolumes(daemon.LogVolumeEvent)
_ = container.UnmountVolumes(ctx, daemon.LogVolumeEvent)
}
}()
@ -208,7 +212,7 @@ func (vw *containerFSView) Close() error {
runtime.SetFinalizer(vw, nil)
close(vw.todo)
err := multierror.Append(nil, <-vw.done)
err = multierror.Append(err, vw.ctr.UnmountVolumes(vw.d.LogVolumeEvent))
err = multierror.Append(err, vw.ctr.UnmountVolumes(context.TODO(), vw.d.LogVolumeEvent))
err = multierror.Append(err, vw.d.Unmount(vw.ctr))
return err.ErrorOrNil()
}
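
The deferred cleanups in openContainerFS now run under compatcontext.WithoutCancel(ctx), so they still execute (and keep the request's log fields) even if the caller's context was cancelled while the container filesystem view was open. Assuming compatcontext.WithoutCancel matches the semantics of context.WithoutCancel from Go 1.21 — a derived context that keeps the parent's values but never reports cancellation — a minimal standalone sketch of the behaviour the daemon relies on:

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the request has already been aborted

	// A cleanup invoked with the cancelled context could be cut short by
	// anything that honours ctx.Done() or ctx.Err().
	fmt.Println(ctx.Err()) // context.Canceled

	// Detaching cancellation keeps the parent's values (loggers, trace IDs)
	// but never reports cancellation, so deferred cleanups run to completion.
	detached := context.WithoutCancel(ctx)
	fmt.Println(detached.Err()) // <nil>
}
```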

View file

@ -222,7 +222,7 @@ func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts
return nil, err
}
if err := daemon.createContainerOSSpecificSettings(ctr, opts.params.Config, opts.params.HostConfig); err != nil {
if err := daemon.createContainerOSSpecificSettings(ctx, ctr, opts.params.Config, opts.params.HostConfig); err != nil {
return nil, err
}

View file

@ -13,6 +13,7 @@ import (
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/internal/compatcontext"
"github.com/docker/docker/oci"
volumemounts "github.com/docker/docker/volume/mounts"
volumeopts "github.com/docker/docker/volume/service/opts"
@ -21,7 +22,7 @@ import (
)
// createContainerOSSpecificSettings performs host-OS specific container create functionality
func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
if err := daemon.Mount(container); err != nil {
return err
}
@ -48,7 +49,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con
// Skip volumes for which we already have something mounted on that
// destination because of a --volume-from.
if container.HasMountFor(destination) {
log.G(context.TODO()).WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume")
log.G(ctx).WithField("container", container.ID).WithField("destination", spec).Debug("mountpoint already exists, skipping anonymous volume")
// Not an error, this could easily have come from the image config.
continue
}
@ -73,12 +74,12 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con
container.AddMountPointWithVolume(destination, &volumeWrapper{v: v, s: daemon.volumes}, true)
}
return daemon.populateVolumes(container)
return daemon.populateVolumes(ctx, container)
}
// populateVolumes copies data from the container's rootfs into the volume for non-binds.
// this is only called when the container is created.
func (daemon *Daemon) populateVolumes(c *container.Container) error {
func (daemon *Daemon) populateVolumes(ctx context.Context, c *container.Container) error {
for _, mnt := range c.MountPoints {
if mnt.Volume == nil {
continue
@ -88,14 +89,14 @@ func (daemon *Daemon) populateVolumes(c *container.Container) error {
continue
}
if err := daemon.populateVolume(c, mnt); err != nil {
if err := daemon.populateVolume(ctx, c, mnt); err != nil {
return err
}
}
return nil
}
func (daemon *Daemon) populateVolume(c *container.Container, mnt *volumemounts.MountPoint) error {
func (daemon *Daemon) populateVolume(ctx context.Context, c *container.Container, mnt *volumemounts.MountPoint) error {
ctrDestPath, err := c.GetResourcePath(mnt.Destination)
if err != nil {
return err
@ -108,18 +109,18 @@ func (daemon *Daemon) populateVolume(c *container.Container, mnt *volumemounts.M
return err
}
volumePath, cleanup, err := mnt.Setup(c.MountLabel, daemon.idMapping.RootPair(), nil)
volumePath, cleanup, err := mnt.Setup(ctx, c.MountLabel, daemon.idMapping.RootPair(), nil)
if err != nil {
if errdefs.IsNotFound(err) {
return nil
}
log.G(context.TODO()).WithError(err).Debugf("can't copy data from %s:%s, to %s", c.ID, mnt.Destination, volumePath)
log.G(ctx).WithError(err).Debugf("can't copy data from %s:%s, to %s", c.ID, mnt.Destination, volumePath)
return errors.Wrapf(err, "failed to populate volume")
}
defer mnt.Cleanup()
defer cleanup()
defer mnt.Cleanup(compatcontext.WithoutCancel(ctx))
defer cleanup(compatcontext.WithoutCancel(ctx))
log.G(context.TODO()).Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, volumePath)
log.G(ctx).Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, volumePath)
if err := c.CopyImagePathContent(volumePath, ctrDestPath); err != nil {
return err
}

View file

@ -11,7 +11,7 @@ import (
)
// createContainerOSSpecificSettings performs host-OS specific container create functionality
func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
func (daemon *Daemon) createContainerOSSpecificSettings(ctx context.Context, container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
if containertypes.Isolation.IsDefault(hostConfig.Isolation) {
// Make sure the host config has the default daemon isolation if not specified by caller.
hostConfig.Isolation = daemon.defaultIsolation
@ -34,7 +34,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con
// Create the volume in the volume driver. If it doesn't exist,
// a new one will be created.
v, err := daemon.volumes.Create(context.TODO(), "", volumeDriver, volumeopts.WithCreateReference(container.ID))
v, err := daemon.volumes.Create(ctx, "", volumeDriver, volumeopts.WithCreateReference(container.ID))
if err != nil {
return err
}

View file

@ -466,7 +466,7 @@ func (daemon *Daemon) restore(cfg *configStore) error {
ces.ExitCode = 255
}
c.SetStopped(&ces)
daemon.Cleanup(c)
daemon.Cleanup(context.TODO(), c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
baseLogger.WithError(err).Error("failed to update stopped container state")
}

View file

@ -89,7 +89,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
"exitCode": strconv.Itoa(exitStatus.ExitCode),
"execDuration": strconv.Itoa(int(execDuration.Seconds())),
}
daemon.Cleanup(c)
daemon.Cleanup(context.TODO(), c)
if restart {
c.RestartCount++

View file

@ -139,7 +139,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
}
container.Reset(false)
daemon.Cleanup(container)
daemon.Cleanup(compatcontext.WithoutCancel(ctx), container)
// if containers AutoRemove flag is set, remove it after clean up
if container.HostConfig.AutoRemove {
container.Unlock()
@ -164,12 +164,12 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
return err
}
m, cleanup, err := daemon.setupMounts(container)
m, cleanup, err := daemon.setupMounts(ctx, container)
if err != nil {
return err
}
mnts = append(mnts, m...)
defer cleanup()
defer cleanup(compatcontext.WithoutCancel(ctx))
spec, err := daemon.createSpec(ctx, daemonCfg, container, mnts)
if err != nil {
@ -260,19 +260,19 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(container *container.Container) {
func (daemon *Daemon) Cleanup(ctx context.Context, container *container.Container) {
// Microsoft HCS containers get in a bad state if host resources are
// released while the container still exists.
if ctr, ok := container.C8dContainer(); ok {
if err := ctr.Delete(context.Background()); err != nil {
log.G(context.TODO()).Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
log.G(ctx).Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
}
}
daemon.releaseNetwork(container)
if err := container.UnmountIpcMount(); err != nil {
log.G(context.TODO()).Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
log.G(ctx).Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
}
if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
@ -284,11 +284,11 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
}
if err := container.UnmountSecrets(); err != nil {
log.G(context.TODO()).Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
log.G(ctx).Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
}
if err := recursiveUnmount(container.Root); err != nil {
log.G(context.TODO()).WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.")
log.G(ctx).WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.")
}
for _, eConfig := range container.ExecCommands.Commands() {
@ -296,8 +296,8 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
}
if container.BaseFS != "" {
if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
log.G(context.TODO()).Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
if err := container.UnmountVolumes(ctx, daemon.LogVolumeEvent); err != nil {
log.G(ctx).Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
}
}

View file

@ -15,6 +15,7 @@ import (
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/container"
"github.com/docker/docker/internal/cleanups"
"github.com/docker/docker/internal/compatcontext"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/pkg/errors"
)
@ -25,7 +26,7 @@ import (
//
// The cleanup function should be called as soon as the container has been
// started.
func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, func() error, error) {
func (daemon *Daemon) setupMounts(ctx context.Context, c *container.Container) ([]container.Mount, func(context.Context) error, error) {
var mounts []container.Mount
// TODO: tmpfs mounts should be part of Mountpoints
tmpfsMounts := make(map[string]bool)
@ -39,8 +40,8 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, fu
cleanups := cleanups.Composite{}
defer func() {
if err := cleanups.Call(); err != nil {
log.G(context.TODO()).WithError(err).Warn("failed to cleanup temporary mounts created by MountPoint.Setup")
if err := cleanups.Call(compatcontext.WithoutCancel(ctx)); err != nil {
log.G(ctx).WithError(err).Warn("failed to cleanup temporary mounts created by MountPoint.Setup")
}
}()
@ -62,7 +63,7 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, fu
return nil
}
path, clean, err := m.Setup(c.MountLabel, daemon.idMapping.RootPair(), checkfunc)
path, clean, err := m.Setup(ctx, c.MountLabel, daemon.idMapping.RootPair(), checkfunc)
if err != nil {
return nil, nil, err
}

View file

@ -8,6 +8,7 @@ import (
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/container"
"github.com/docker/docker/internal/cleanups"
"github.com/docker/docker/internal/compatcontext"
"github.com/docker/docker/pkg/idtools"
volumemounts "github.com/docker/docker/volume/mounts"
)
@ -23,11 +24,11 @@ import (
// BUGBUG TODO Windows containerd. This would be much better if it returned
// an array of runtime spec mounts, not container mounts. Then no need to
// do multiple transitions.
func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, func() error, error) {
func (daemon *Daemon) setupMounts(ctx context.Context, c *container.Container) ([]container.Mount, func(context.Context) error, error) {
cleanups := cleanups.Composite{}
defer func() {
if err := cleanups.Call(); err != nil {
log.G(context.TODO()).WithError(err).Warn("failed to cleanup temporary mounts created by MountPoint.Setup")
if err := cleanups.Call(compatcontext.WithoutCancel(ctx)); err != nil {
log.G(ctx).WithError(err).Warn("failed to cleanup temporary mounts created by MountPoint.Setup")
}
}()
@ -36,7 +37,7 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, fu
if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil {
return nil, nil, err
}
s, c, err := mount.Setup(c.MountLabel, idtools.Identity{}, nil)
s, c, err := mount.Setup(ctx, c.MountLabel, idtools.Identity{}, nil)
if err != nil {
return nil, nil, err
}

View file

@ -1,22 +1,24 @@
package cleanups
import (
"context"
"github.com/docker/docker/internal/multierror"
)
type Composite struct {
cleanups []func() error
cleanups []func(context.Context) error
}
// Add adds a cleanup to be called.
func (c *Composite) Add(f func() error) {
func (c *Composite) Add(f func(context.Context) error) {
c.cleanups = append(c.cleanups, f)
}
// Call calls all cleanups in reverse order and returns an error combining all
// non-nil errors.
func (c *Composite) Call() error {
err := call(c.cleanups)
func (c *Composite) Call(ctx context.Context) error {
err := call(ctx, c.cleanups)
c.cleanups = nil
return err
}
@ -24,19 +26,19 @@ func (c *Composite) Call() error {
// Release removes all cleanups, turning Call into a no-op.
// Caller still can call the cleanups by calling the returned function
// which is equivalent to calling the Call before Release was called.
func (c *Composite) Release() func() error {
func (c *Composite) Release() func(context.Context) error {
cleanups := c.cleanups
c.cleanups = nil
return func() error {
return call(cleanups)
return func(ctx context.Context) error {
return call(ctx, cleanups)
}
}
func call(cleanups []func() error) error {
func call(ctx context.Context, cleanups []func(context.Context) error) error {
var errs []error
for idx := len(cleanups) - 1; idx >= 0; idx-- {
c := cleanups[idx]
errs = append(errs, c())
errs = append(errs, c(ctx))
}
return multierror.Join(errs...)
}
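
With this change, cleanups.Composite collects context-aware callbacks: Add registers a func(context.Context) error, and Call (or the function returned by Release) invokes them in reverse order with whatever context the caller supplies, joining any errors. A hypothetical caller inside the daemon codebase, using only the signatures shown in this hunk (the temp-directory cleanup itself is made up for illustration; the internal packages are only importable from within the moby module):

```go
package example

import (
	"context"
	"os"

	"github.com/containerd/log"
	"github.com/docker/docker/internal/cleanups"
	"github.com/docker/docker/internal/compatcontext"
)

// setupScratch registers cleanups as it goes. On an early error return the
// deferred Call runs them; on success, Release hands them to the caller and
// turns the deferred Call into a no-op.
func setupScratch(ctx context.Context) (string, func(context.Context) error, error) {
	c := cleanups.Composite{}
	defer func() {
		if err := c.Call(compatcontext.WithoutCancel(ctx)); err != nil {
			log.G(ctx).WithError(err).Warn("failed to clean up after error")
		}
	}()

	dir, err := os.MkdirTemp("", "scratch")
	if err != nil {
		return "", nil, err
	}
	c.Add(func(ctx context.Context) error {
		log.G(ctx).WithField("dir", dir).Debug("removing scratch dir")
		return os.RemoveAll(dir)
	})

	// Further steps that can fail would register their cleanups here...

	return dir, c.Release(), nil
}
```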

View file

@ -1,6 +1,7 @@
package cleanups
import (
"context"
"errors"
"fmt"
"testing"
@ -18,20 +19,20 @@ func TestCall(t *testing.T) {
var errZ = errors.New("errorZ")
var errYZ = errors.Join(errY, errZ)
c.Add(func() error {
c.Add(func(ctx context.Context) error {
return err1
})
c.Add(func() error {
c.Add(func(ctx context.Context) error {
return nil
})
c.Add(func() error {
c.Add(func(ctx context.Context) error {
return fmt.Errorf("something happened: %w", err2)
})
c.Add(func() error {
c.Add(func(ctx context.Context) error {
return errors.Join(errX, fmt.Errorf("joined: %w", errYZ))
})
err := c.Call()
err := c.Call(context.Background())
errs := err.(interface{ Unwrap() []error }).Unwrap()

View file

@ -20,7 +20,7 @@ import (
// After use, it is the caller's responsibility to call Close on the returned
// SafePath object, which will unmount the temporary file/directory
// and remove it.
func Join(path, subpath string) (*SafePath, error) {
func Join(_ context.Context, path, subpath string) (*SafePath, error) {
base, subpart, err := evaluatePath(path, subpath)
if err != nil {
return nil, err
@ -126,20 +126,20 @@ func tempMountPoint(sourceFd int) (string, error) {
// cleanupSafePaths returns a function that unmounts the path and removes the
// mountpoint.
func cleanupSafePath(path string) func() error {
return func() error {
log.G(context.TODO()).WithField("path", path).Debug("removing safe temp mount")
func cleanupSafePath(path string) func(context.Context) error {
return func(ctx context.Context) error {
log.G(ctx).WithField("path", path).Debug("removing safe temp mount")
if err := unix_noeintr.Unmount(path, unix.MNT_DETACH); err != nil {
if errors.Is(err, unix.EINVAL) {
log.G(context.TODO()).WithField("path", path).Warn("safe temp mount no longer exists?")
log.G(ctx).WithField("path", path).Warn("safe temp mount no longer exists?")
return nil
}
return errors.Wrapf(err, "error unmounting safe mount %s", path)
}
if err := os.Remove(path); err != nil {
if errors.Is(err, os.ErrNotExist) {
log.G(context.TODO()).WithField("path", path).Warn("safe temp mount no longer exists?")
log.G(ctx).WithField("path", path).Warn("safe temp mount no longer exists?")
return nil
}
return errors.Wrapf(err, "failed to delete temporary safe mount")

View file

@ -1,6 +1,7 @@
package safepath
import (
"context"
"os"
"path/filepath"
"runtime"
@ -40,9 +41,9 @@ func TestJoinEscapingSymlink(t *testing.T) {
err = os.Symlink(tc.target, filepath.Join(dir, "link"))
assert.NilError(t, err, "failed to create symlink to %s", tc.target)
safe, err := Join(dir, "link")
safe, err := Join(context.Background(), dir, "link")
if err == nil {
safe.Close()
safe.Close(context.Background())
}
assert.ErrorType(t, err, &ErrEscapesBase{})
})
@ -70,10 +71,10 @@ func TestJoinGoodSymlink(t *testing.T) {
"subdir_link_relative", "foo_link_relative",
} {
t.Run(target, func(t *testing.T) {
safe, err := Join(dir, target)
safe, err := Join(context.Background(), dir, target)
assert.NilError(t, err)
defer safe.Close()
defer safe.Close(context.Background())
if strings.HasPrefix(target, "subdir") {
data, err := os.ReadFile(filepath.Join(safe.Path(), "hello.txt"))
assert.NilError(t, err)
@ -97,10 +98,10 @@ func TestJoinWithSymlinkReplace(t *testing.T) {
err = os.Symlink(target, link)
assert.Check(t, err, "failed to create symlink to foo")
safe, err := Join(dir, "link")
safe, err := Join(context.Background(), dir, "link")
assert.NilError(t, err)
defer safe.Close()
defer safe.Close(context.Background())
// Delete the link target.
err = os.Remove(target)
@ -133,12 +134,12 @@ func TestJoinCloseInvalidates(t *testing.T) {
err = os.WriteFile(foo, []byte("bar"), 0o744)
assert.NilError(t, err, "failed to create test file")
safe, err := Join(dir, "foo")
safe, err := Join(context.Background(), dir, "foo")
assert.NilError(t, err)
assert.Check(t, safe.IsValid())
assert.NilError(t, safe.Close())
assert.NilError(t, safe.Close(context.Background()))
assert.Check(t, !safe.IsValid())
}

View file

@ -8,6 +8,7 @@ import (
"github.com/containerd/log"
"github.com/docker/docker/internal/cleanups"
"github.com/docker/docker/internal/compatcontext"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@ -19,7 +20,7 @@ import (
// The path is safe (the path target won't change) until the returned SafePath
// is Closed.
// Caller is responsible for calling the Close function which unlocks the path.
func Join(path, subpath string) (*SafePath, error) {
func Join(ctx context.Context, path, subpath string) (*SafePath, error) {
base, subpart, err := evaluatePath(path, subpath)
if err != nil {
return nil, err
@ -28,8 +29,8 @@ func Join(path, subpath string) (*SafePath, error) {
cleanups := cleanups.Composite{}
defer func() {
if cErr := cleanups.Call(); cErr != nil {
log.G(context.TODO()).WithError(cErr).Warn("failed to close handles after error")
if cErr := cleanups.Call(compatcontext.WithoutCancel(ctx)); cErr != nil {
log.G(ctx).WithError(cErr).Warn("failed to close handles after error")
}
}()
@ -44,7 +45,7 @@ func Join(path, subpath string) (*SafePath, error) {
}
return nil, errors.Wrapf(err, "failed to lock file %s", fullPath)
}
cleanups.Add(func() error {
cleanups.Add(func(context.Context) error {
if err := windows.CloseHandle(handle); err != nil {
return &os.PathError{Op: "CloseHandle", Path: fullPath, Err: err}
}

View file

@ -10,7 +10,7 @@ import (
type SafePath struct {
path string
cleanup func() error
cleanup func(ctx context.Context) error
mutex sync.Mutex
// Immutable fields
@ -18,13 +18,13 @@ type SafePath struct {
}
// Close releases the resources used by the path.
func (s *SafePath) Close() error {
func (s *SafePath) Close(ctx context.Context) error {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.path == "" {
base, sub := s.SourcePath()
log.G(context.TODO()).WithFields(log.Fields{
log.G(ctx).WithFields(log.Fields{
"path": s.Path(),
"sourceBase": base,
"sourceSubpath": sub,
@ -34,7 +34,7 @@ func (s *SafePath) Close() error {
s.path = ""
if s.cleanup != nil {
return s.cleanup()
return s.cleanup(ctx)
}
return nil
}
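
safepath.Join and SafePath.Close now both take a context, so the debug/warn logging in cleanupSafePath is attached to the request that created the temporary mount. Mirroring the updated tests above, a hypothetical caller could look like the sketch below (statSubpath and its parameters are illustrative; only the Join, Path, and Close signatures come from this commit):

```go
package example

import (
	"context"
	"os"

	"github.com/docker/docker/internal/compatcontext"
	"github.com/docker/docker/internal/safepath"
)

// statSubpath resolves base/sub without letting the final component escape
// base, works under the pinned path, then releases it. Close is called with
// a non-cancellable context so the temporary mount is always torn down.
func statSubpath(ctx context.Context, base, sub string) (os.FileInfo, error) {
	safe, err := safepath.Join(ctx, base, sub)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = safe.Close(compatcontext.WithoutCancel(ctx))
	}()
	return os.Stat(safe.Path())
}
```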

View file

@ -83,7 +83,7 @@ type MountPoint struct {
// Cleanup frees resources used by the mountpoint and cleans up all the paths
// returned by Setup that hasn't been cleaned up by the caller.
func (m *MountPoint) Cleanup() error {
func (m *MountPoint) Cleanup(ctx context.Context) error {
if m.Volume == nil || m.ID == "" {
return nil
}
@ -93,9 +93,9 @@ func (m *MountPoint) Cleanup() error {
continue
}
err := p.Close()
err := p.Close(ctx)
base, sub := p.SourcePath()
log.G(context.TODO()).WithFields(log.Fields{
log.G(ctx).WithFields(log.Fields{
"error": err,
"path": p.Path(),
"sourceBase": base,
@ -126,7 +126,7 @@ func (m *MountPoint) Cleanup() error {
// still points to the same target (to avoid TOCTOU attack).
//
// Cleanup function doesn't need to be called when error is returned.
func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun func(m *MountPoint) error) (path string, cleanup func() error, retErr error) {
func (m *MountPoint) Setup(ctx context.Context, mountLabel string, rootIDs idtools.Identity, checkFun func(m *MountPoint) error) (path string, cleanup func(context.Context) error, retErr error) {
if m.SkipMountpointCreation {
return m.Source, noCleanup, nil
}
@ -140,8 +140,8 @@ func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun
if err != nil {
path = ""
retErr = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source)
if cleanupErr := cleanup(); cleanupErr != nil {
log.G(context.TODO()).WithError(cleanupErr).Warn("failed to cleanup after error")
if cleanupErr := cleanup(ctx); cleanupErr != nil {
log.G(ctx).WithError(cleanupErr).Warn("failed to cleanup after error")
}
cleanup = noCleanup
return
@ -150,8 +150,8 @@ func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun
if err != nil && !errors.Is(err, syscall.ENOTSUP) {
path = ""
retErr = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath)
if cleanupErr := cleanup(); cleanupErr != nil {
log.G(context.TODO()).WithError(cleanupErr).Warn("failed to cleanup after error")
if cleanupErr := cleanup(ctx); cleanupErr != nil {
log.G(ctx).WithError(cleanupErr).Warn("failed to cleanup after error")
}
cleanup = noCleanup
}
@ -172,15 +172,15 @@ func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun
if m.Spec.VolumeOptions != nil && m.Spec.VolumeOptions.Subpath != "" {
subpath := m.Spec.VolumeOptions.Subpath
safePath, err := safepath.Join(volumePath, subpath)
safePath, err := safepath.Join(ctx, volumePath, subpath)
if err != nil {
if err := m.Volume.Unmount(id); err != nil {
log.G(context.TODO()).WithError(err).Error("failed to unmount after safepath.Join failed")
log.G(ctx).WithError(err).Error("failed to unmount after safepath.Join failed")
}
return "", noCleanup, err
}
m.safePaths = append(m.safePaths, safePath)
log.G(context.TODO()).Debugf("mounting (%s|%s) via %s", volumePath, subpath, safePath.Path())
log.G(ctx).Debugf("mounting (%s|%s) via %s", volumePath, subpath, safePath.Path())
clean = safePath.Close
volumePath = safePath.Path()
@ -260,6 +260,6 @@ func errInvalidSpec(spec string) error {
}
// noCleanup is a no-op cleanup function.
func noCleanup() error {
func noCleanup(_ context.Context) error {
return nil
}
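
Finally, MountPoint.Setup and MountPoint.Cleanup carry the context down into safepath and into the logging around subpath mounts. The sketch below strings the pieces together roughly the way the daemon does after this commit (simplified from the daemon/start.go and container.UnmountVolumes hunks; the wrapper function itself is hypothetical):

```go
package example

import (
	"context"

	"github.com/containerd/log"
	"github.com/docker/docker/internal/compatcontext"
	"github.com/docker/docker/pkg/idtools"
	volumemounts "github.com/docker/docker/volume/mounts"
)

// mountLifecycle shows the caller contract of the updated API end to end.
func mountLifecycle(ctx context.Context, m *volumemounts.MountPoint, label string) error {
	// 1. Resolve the mount source; a configured subpath is pinned via
	//    safepath.Join using the request context.
	path, clean, err := m.Setup(ctx, label, idtools.Identity{}, nil)
	if err != nil {
		return err // per the Setup docs, no cleanup is needed on error
	}
	log.G(ctx).WithField("source", path).Debug("mount source resolved")

	// 2. Once the container has started, drop the temporary mounts that
	//    Setup created; detach cancellation so this always completes.
	if err := clean(compatcontext.WithoutCancel(ctx)); err != nil {
		log.G(ctx).WithError(err).Warn("failed to clean up temporary mounts")
	}

	// 3. When the container is unmounted, Cleanup closes any safe paths
	//    still held by the mountpoint and releases it.
	return m.Cleanup(compatcontext.WithoutCancel(ctx))
}
```

In the daemon these three steps correspond to setupMounts, the deferred cleanup in containerStart, and UnmountVolumes, all of which now receive a context in this commit.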