Commit b3e527dfd2: Tar up contents of child directory onto tmpfs if mounted over

This patch will use the new PreMount and PostMount hooks to "tar" up the contents of the base image on top of tmpfs mount points.

Signed-off-by: Dan Walsh <dwalsh@redhat.com>
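A minimal, self-contained sketch of that idea follows. It is not part of the file below, and the hook names, helper functions, and paths are made up for illustration; the real PreMount/PostMount hook signatures live in the graphdriver code, which is not shown here. The point is simply: before tmpfs is mounted over a directory that already holds base-image content, tar up that content; after the mount, unpack the tarball onto the now-empty tmpfs so the files stay visible.

package main

import (
	"log"
	"os/exec"
)

// preMount archives the existing contents of dir before tmpfs is mounted
// over it (hypothetical helper, not the real graphdriver hook).
func preMount(dir, archive string) error {
	return exec.Command("tar", "-C", dir, "-cf", archive, ".").Run()
}

// postMount unpacks the archive onto dir, which now has a fresh tmpfs
// mounted on top of it (hypothetical helper, not the real graphdriver hook).
func postMount(dir, archive string) error {
	return exec.Command("tar", "-C", dir, "-xf", archive).Run()
}

func main() {
	dir, archive := "/run/example", "/tmp/example.tar" // made-up paths
	if err := preMount(dir, archive); err != nil {
		log.Fatal(err)
	}
	// ... the tmpfs mount over dir would happen here ...
	if err := postMount(dir, archive); err != nil {
		log.Fatal(err)
	}
}
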
package daemon

import (
	"errors"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/daemon/execdriver"
	derr "github.com/docker/docker/errors"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/volume"
	"github.com/opencontainers/runc/libcontainer/label"
)

var (
	// ErrVolumeReadonly is used to signal an error when trying to copy data into
	// a volume mount that is not writable.
	ErrVolumeReadonly = errors.New("mounted volume is marked read-only")
)

type mounts []execdriver.Mount

// volumeToAPIType converts a volume.Volume to the type used by the remote API
func volumeToAPIType(v volume.Volume) *types.Volume {
	return &types.Volume{
		Name:       v.Name(),
		Driver:     v.DriverName(),
		Mountpoint: v.Path(),
	}
}

// createVolume creates a volume.
func (daemon *Daemon) createVolume(name, driverName string, opts map[string]string) (volume.Volume, error) {
	v, err := daemon.volumes.Create(name, driverName, opts)
	if err != nil {
		return nil, err
	}
	daemon.volumes.Increment(v)
	return v, nil
}

// Len returns the number of mounts. Used in sorting.
func (m mounts) Len() int {
	return len(m)
}

// Less returns true if the number of parts (a/b/c would be 3 parts) in the
// mount indexed by parameter 1 is less than that of the mount indexed by
// parameter 2. Used in sorting.
func (m mounts) Less(i, j int) bool {
	return m.parts(i) < m.parts(j)
}

// Swap swaps two items in an array of mounts. Used in sorting
func (m mounts) Swap(i, j int) {
	m[i], m[j] = m[j], m[i]
}

// parts returns the number of parts in the destination of a mount. Used in sorting.
func (m mounts) parts(i int) int {
	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
}
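
// Note (illustrative comment, not from the original file): Len, Less, Swap and
// parts make mounts satisfy sort.Interface, ordering mounts by the depth of
// their destination path. Sorting with sort.Sort(mounts(mnts)) therefore puts
// a shallow destination such as /var ahead of a nested one such as
// /var/lib/app, presumably so that parent paths are processed before anything
// mounted beneath them. For example (made-up destinations):
//
//	mnts := []execdriver.Mount{
//		{Destination: "/var/lib/app"},
//		{Destination: "/var"},
//	}
//	sort.Sort(mounts(mnts))
//	// mnts is now ordered: /var, /var/lib/app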

// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows this sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
// 4. Cleanup old volumes that are about to be reassigned.
func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
	binds := map[string]bool{}
	mountPoints := map[string]*volume.MountPoint{}

	// 1. Read already configured mount points.
	for name, point := range container.MountPoints {
		mountPoints[name] = point
	}

	// 2. Read volumes from other containers.
	for _, v := range hostConfig.VolumesFrom {
		containerID, mode, err := volume.ParseVolumesFrom(v)
		if err != nil {
			return err
		}

		c, err := daemon.Get(containerID)
		if err != nil {
			return err
		}

		for _, m := range c.MountPoints {
			cp := &volume.MountPoint{
				Name:        m.Name,
				Source:      m.Source,
				RW:          m.RW && volume.ReadWrite(mode),
				Driver:      m.Driver,
				Destination: m.Destination,
			}

			if len(cp.Source) == 0 {
				v, err := daemon.createVolume(cp.Name, cp.Driver, nil)
				if err != nil {
					return err
				}
				cp.Volume = v
			}

			mountPoints[cp.Destination] = cp
		}
	}

	// 3. Read bind mounts
	for _, b := range hostConfig.Binds {
		// #10618
		bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver)
		if err != nil {
			return err
		}

		if binds[bind.Destination] {
			return derr.ErrorCodeMountDup.WithArgs(bind.Destination)
		}

		if len(bind.Name) > 0 && len(bind.Driver) > 0 {
			// create the volume
			v, err := daemon.createVolume(bind.Name, bind.Driver, nil)
			if err != nil {
				return err
			}
			bind.Volume = v
			bind.Source = v.Path()
			// bind.Name is an already existing volume, we need to use that here
			bind.Driver = v.DriverName()
			bind = setBindModeIfNull(bind)
		}
		if label.RelabelNeeded(bind.Mode) {
			if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil {
				return err
			}
		}
		binds[bind.Destination] = true
		mountPoints[bind.Destination] = bind
	}

	container.Lock()

	// 4. Cleanup old volumes that are about to be reassigned.
	for _, m := range mountPoints {
		if m.BackwardsCompatible() {
			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
				daemon.volumes.Decrement(mp.Volume)
			}
		}
	}
	container.MountPoints = mountPoints

	container.Unlock()

	return nil
}
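
A hypothetical call site for registerMountPoints, for orientation only. The daemon and container values are assumed to come from the surrounding daemon code, and the bind and volumes-from strings are made up:

	hostConfig := &runconfig.HostConfig{
		Binds:       []string{"/src/on/host:/dst/in/container:rw"},
		VolumesFrom: []string{"some-other-container:ro"},
	}
	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
		// Duplicate bind destinations, unknown containers in --volumes-from,
		// and failed volume creation all surface here.
		return err
	}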