|
@@ -11,80 +11,28 @@ import (
|
|
|
|
|
|
"github.com/docker/docker/archive"
|
|
|
"github.com/docker/docker/daemon/execdriver"
|
|
|
+ "github.com/docker/docker/pkg/log"
|
|
|
"github.com/docker/docker/pkg/symlink"
|
|
|
+ "github.com/docker/docker/volumes"
|
|
|
)
|
|
|
|
|
|
-type Volume struct {
|
|
|
- HostPath string
|
|
|
- VolPath string
|
|
|
- isReadWrite bool
|
|
|
- isBindMount bool
|
|
|
+type Mount struct {
|
|
|
+ MountToPath string
|
|
|
+ container *Container
|
|
|
+ volume *volumes.Volume
|
|
|
+ Writable bool
|
|
|
}
|
|
|
|
|
|
-func (v *Volume) isDir() (bool, error) {
|
|
|
- stat, err := os.Stat(v.HostPath)
|
|
|
- if err != nil {
|
|
|
- return false, err
|
|
|
- }
|
|
|
-
|
|
|
- return stat.IsDir(), nil
|
|
|
-}
|
|
|
-
|
|
|
-func prepareVolumesForContainer(container *Container) error {
|
|
|
+func (container *Container) prepareVolumes() error {
|
|
|
if container.Volumes == nil || len(container.Volumes) == 0 {
|
|
|
container.Volumes = make(map[string]string)
|
|
|
container.VolumesRW = make(map[string]bool)
|
|
|
- if err := applyVolumesFrom(container); err != nil {
|
|
|
+ if err := container.applyVolumesFrom(); err != nil {
|
|
|
return err
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- return createVolumes(container)
|
|
|
-}
|
|
|
-
|
|
|
-func setupMountsForContainer(container *Container) error {
|
|
|
- mounts := []execdriver.Mount{
|
|
|
- {
|
|
|
- Source: container.ResolvConfPath,
|
|
|
- Destination: "/etc/resolv.conf",
|
|
|
- Writable: true,
|
|
|
- Slave: true,
|
|
|
- },
|
|
|
- }
|
|
|
-
|
|
|
- if container.HostnamePath != "" {
|
|
|
- mounts = append(mounts, execdriver.Mount{
|
|
|
- Source: container.HostnamePath,
|
|
|
- Destination: "/etc/hostname",
|
|
|
- Writable: true,
|
|
|
- Private: true,
|
|
|
- })
|
|
|
- }
|
|
|
-
|
|
|
- if container.HostsPath != "" {
|
|
|
- mounts = append(mounts, execdriver.Mount{
|
|
|
- Source: container.HostsPath,
|
|
|
- Destination: "/etc/hosts",
|
|
|
- Writable: true,
|
|
|
- Slave: true,
|
|
|
- })
|
|
|
- }
|
|
|
-
|
|
|
- // Mount user specified volumes
|
|
|
- // Note, these are not private because you may want propagation of (un)mounts from host
|
|
|
- // volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
|
|
|
- // want this new mount in the container
|
|
|
- // These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
|
|
|
- for _, path := range container.sortedVolumeMounts() {
|
|
|
- mounts = append(mounts, execdriver.Mount{
|
|
|
- Source: container.Volumes[path],
|
|
|
- Destination: path,
|
|
|
- Writable: container.VolumesRW[path],
|
|
|
- })
|
|
|
- }
|
|
|
-
|
|
|
- container.command.Mounts = mounts
|
|
|
- return nil
|
|
|
+ return container.createVolumes()
|
|
|
}
|
|
|
|
|
|
// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
|
|
@@ -98,208 +46,223 @@ func (container *Container) sortedVolumeMounts() []string {
|
|
|
return mountPaths
|
|
|
}
|
|
|
|
|
|
-func parseVolumesFromSpec(container *Container, spec string) (map[string]*Volume, error) {
|
|
|
- specParts := strings.SplitN(spec, ":", 2)
|
|
|
- if len(specParts) == 0 {
|
|
|
- return nil, fmt.Errorf("Malformed volumes-from specification: %s", spec)
|
|
|
+func (container *Container) createVolumes() error {
|
|
|
+ mounts, err := container.parseVolumeMountConfig()
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
}
|
|
|
|
|
|
- c := container.daemon.Get(specParts[0])
|
|
|
- if c == nil {
|
|
|
- return nil, fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
|
|
|
+ for _, mnt := range mounts {
|
|
|
+ if err := mnt.initialize(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- volumes, err := c.GetVolumes()
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+func (m *Mount) initialize() error {
|
|
|
+ // No need to initialize anything since it's already been initialized
|
|
|
+ if _, exists := m.container.Volumes[m.MountToPath]; exists {
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+
|
|
|
+ // This is the full path to container fs + mntToPath
|
|
|
+ containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs)
|
|
|
if err != nil {
|
|
|
- return nil, err
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ m.container.VolumesRW[m.MountToPath] = m.Writable
|
|
|
+ m.container.Volumes[m.MountToPath] = m.volume.Path
|
|
|
+ m.volume.AddContainer(m.container.ID)
|
|
|
+ if m.Writable && !m.volume.IsBindMount {
|
|
|
+ // Copy whatever is in the container at the mntToPath to the volume
|
|
|
+		if err := copyExistingContents(containerMntPath, m.volume.Path); err != nil { return err }
|
|
|
}
|
|
|
|
|
|
- if len(specParts) == 2 {
|
|
|
- mode := specParts[1]
|
|
|
- if validVolumeMode(mode) {
|
|
|
- return nil, fmt.Errorf("Invalid mode for volumes-from: %s", mode)
|
|
|
- }
|
|
|
+ return nil
|
|
|
+}
|
|
|
|
|
|
- // Set the mode for the inheritted volume
|
|
|
- for _, v := range volumes {
|
|
|
- v.isReadWrite = mode != "ro"
|
|
|
- }
|
|
|
+func (container *Container) VolumePaths() map[string]struct{} {
|
|
|
+ var paths = make(map[string]struct{})
|
|
|
+ for _, path := range container.Volumes {
|
|
|
+ paths[path] = struct{}{}
|
|
|
}
|
|
|
-
|
|
|
- return volumes, nil
|
|
|
+ return paths
|
|
|
}
|
|
|
|
|
|
-func applyVolumesFrom(container *Container) error {
|
|
|
- volumesFrom := container.hostConfig.VolumesFrom
|
|
|
+func (container *Container) derefVolumes() {
|
|
|
+ for path := range container.VolumePaths() {
|
|
|
+ vol := container.daemon.volumes.Get(path)
|
|
|
+ if vol == nil {
|
|
|
+ log.Debugf("Volume %s was not found and could not be dereferenced", path)
|
|
|
+ continue
|
|
|
+ }
|
|
|
+ vol.RemoveContainer(container.ID)
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- for _, spec := range volumesFrom {
|
|
|
- volumes, err := parseVolumesFromSpec(container, spec)
|
|
|
+func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) {
|
|
|
+ var mounts = make(map[string]*Mount)
|
|
|
+ // Get all the bind mounts
|
|
|
+ for _, spec := range container.hostConfig.Binds {
|
|
|
+ path, mountToPath, writable, err := parseBindMountSpec(spec)
|
|
|
if err != nil {
|
|
|
- return err
|
|
|
+ return nil, err
|
|
|
}
|
|
|
-
|
|
|
- for _, v := range volumes {
|
|
|
- if err = v.initialize(container); err != nil {
|
|
|
- return err
|
|
|
+ // Check if a volume already exists for this and use it
|
|
|
+ vol := container.daemon.volumes.Get(path)
|
|
|
+ if vol == nil {
|
|
|
+ vol, err = container.daemon.volumes.NewVolume(path, writable)
|
|
|
+ if err != nil {
|
|
|
+ return nil, err
|
|
|
}
|
|
|
}
|
|
|
+ mounts[mountToPath] = &Mount{container: container, volume: vol, MountToPath: mountToPath, Writable: writable}
|
|
|
}
|
|
|
- return nil
|
|
|
-}
|
|
|
|
|
|
-func validVolumeMode(mode string) bool {
|
|
|
- validModes := map[string]bool{
|
|
|
- "rw": true,
|
|
|
- "ro": true,
|
|
|
+ // Get the rest of the volumes
|
|
|
+ for path := range container.Config.Volumes {
|
|
|
+ // Check if this is already added as a bind-mount
|
|
|
+ if _, exists := mounts[path]; exists {
|
|
|
+ continue
|
|
|
+ }
|
|
|
+
|
|
|
+ vol, err := container.daemon.volumes.NewVolume("", true)
|
|
|
+ if err != nil {
|
|
|
+ return nil, err
|
|
|
+ }
|
|
|
+ mounts[path] = &Mount{container: container, MountToPath: path, volume: vol, Writable: true}
|
|
|
}
|
|
|
|
|
|
- return validModes[mode]
|
|
|
+ return mounts, nil
|
|
|
}
|
|
|
|
|
|
-func parseBindVolumeSpec(spec string) (Volume, error) {
|
|
|
+func parseBindMountSpec(spec string) (string, string, bool, error) {
|
|
|
var (
|
|
|
- arr = strings.Split(spec, ":")
|
|
|
- vol Volume
|
|
|
+ path, mountToPath string
|
|
|
+ writable bool
|
|
|
+ arr = strings.Split(spec, ":")
|
|
|
)
|
|
|
|
|
|
switch len(arr) {
|
|
|
- case 1:
|
|
|
- vol.VolPath = spec
|
|
|
- vol.isReadWrite = true
|
|
|
case 2:
|
|
|
- vol.HostPath = arr[0]
|
|
|
- vol.VolPath = arr[1]
|
|
|
- vol.isReadWrite = true
|
|
|
+ path = arr[0]
|
|
|
+ mountToPath = arr[1]
|
|
|
+ writable = true
|
|
|
case 3:
|
|
|
- vol.HostPath = arr[0]
|
|
|
- vol.VolPath = arr[1]
|
|
|
- vol.isReadWrite = validVolumeMode(arr[2]) && arr[2] == "rw"
|
|
|
+ path = arr[0]
|
|
|
+ mountToPath = arr[1]
|
|
|
+ writable = validMountMode(arr[2]) && arr[2] == "rw"
|
|
|
default:
|
|
|
- return vol, fmt.Errorf("Invalid volume specification: %s", spec)
|
|
|
+ return "", "", false, fmt.Errorf("Invalid volume specification: %s", spec)
|
|
|
}
|
|
|
|
|
|
- if !filepath.IsAbs(vol.HostPath) {
|
|
|
- return vol, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", vol.HostPath)
|
|
|
+ if !filepath.IsAbs(path) {
|
|
|
+ return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path)
|
|
|
}
|
|
|
|
|
|
- return vol, nil
|
|
|
+ return path, mountToPath, writable, nil
|
|
|
}
|
|
|
|
|
|
-func createVolumes(container *Container) error {
|
|
|
- // Get all the bindmounts
|
|
|
- volumes, err := container.GetVolumes()
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
+func (container *Container) applyVolumesFrom() error {
|
|
|
+ volumesFrom := container.hostConfig.VolumesFrom
|
|
|
|
|
|
- // Get all the rest of the volumes
|
|
|
- for volPath := range container.Config.Volumes {
|
|
|
- // Make sure the the volume isn't already specified as a bindmount
|
|
|
- if _, exists := volumes[volPath]; !exists {
|
|
|
- volumes[volPath] = &Volume{
|
|
|
- VolPath: volPath,
|
|
|
- isReadWrite: true,
|
|
|
- isBindMount: false,
|
|
|
- }
|
|
|
+ for _, spec := range volumesFrom {
|
|
|
+ mounts, err := parseVolumesFromSpec(container.daemon, spec)
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
}
|
|
|
- }
|
|
|
|
|
|
- for _, vol := range volumes {
|
|
|
- if err = vol.initialize(container); err != nil {
|
|
|
- return err
|
|
|
+ for _, mnt := range mounts {
|
|
|
+ mnt.container = container
|
|
|
+ if err = mnt.initialize(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
return nil
|
|
|
}
|
|
|
|
|
|
-func createVolumeHostPath(container *Container) (string, error) {
|
|
|
- volumesDriver := container.daemon.volumes.Driver()
|
|
|
-
|
|
|
- // Do not pass a container as the parameter for the volume creation.
|
|
|
- // The graph driver using the container's information ( Image ) to
|
|
|
- // create the parent.
|
|
|
- c, err := container.daemon.volumes.Create(nil, "", "", "", "", nil, nil)
|
|
|
- if err != nil {
|
|
|
- return "", err
|
|
|
- }
|
|
|
- hostPath, err := volumesDriver.Get(c.ID, "")
|
|
|
- if err != nil {
|
|
|
- return hostPath, fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
|
|
|
+func validMountMode(mode string) bool {
|
|
|
+ validModes := map[string]bool{
|
|
|
+ "rw": true,
|
|
|
+ "ro": true,
|
|
|
}
|
|
|
|
|
|
- return hostPath, nil
|
|
|
+ return validModes[mode]
|
|
|
}
|
|
|
|
|
|
-func (v *Volume) initialize(container *Container) error {
|
|
|
- var err error
|
|
|
- v.VolPath = filepath.Clean(v.VolPath)
|
|
|
-
|
|
|
- // Do not initialize an existing volume
|
|
|
- if _, exists := container.Volumes[v.VolPath]; exists {
|
|
|
- return nil
|
|
|
- }
|
|
|
-
|
|
|
- // If it's not a bindmount we need to create the dir on the host
|
|
|
- if !v.isBindMount && v.HostPath == "" {
|
|
|
- v.HostPath, err = createVolumeHostPath(container)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
+func (container *Container) setupMounts() error {
|
|
|
+ mounts := []execdriver.Mount{
|
|
|
+ {Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true},
|
|
|
}
|
|
|
|
|
|
- hostPath, err := filepath.EvalSymlinks(v.HostPath)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
+ if container.HostnamePath != "" {
|
|
|
+ mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true})
|
|
|
}
|
|
|
|
|
|
- // Create the mountpoint
|
|
|
- // This is the path to the volume within the container FS
|
|
|
- // This differs from `hostPath` in that `hostPath` refers to the place where
|
|
|
- // the volume data is actually stored on the host
|
|
|
- fullVolPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, v.VolPath), container.basefs)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
+ if container.HostsPath != "" {
|
|
|
+ mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true})
|
|
|
}
|
|
|
|
|
|
- container.Volumes[v.VolPath] = hostPath
|
|
|
- container.VolumesRW[v.VolPath] = v.isReadWrite
|
|
|
-
|
|
|
- volIsDir, err := v.isDir()
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
- }
|
|
|
- if err := createIfNotExists(fullVolPath, volIsDir); err != nil {
|
|
|
- return err
|
|
|
+ // Mount user specified volumes
|
|
|
+ // Note, these are not private because you may want propagation of (un)mounts from host
|
|
|
+ // volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
|
|
|
+ // want this new mount in the container
|
|
|
+ // These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
|
|
|
+ for _, path := range container.sortedVolumeMounts() {
|
|
|
+ mounts = append(mounts, execdriver.Mount{
|
|
|
+ Source: container.Volumes[path],
|
|
|
+ Destination: path,
|
|
|
+ Writable: container.VolumesRW[path],
|
|
|
+ })
|
|
|
}
|
|
|
|
|
|
- // Do not copy or change permissions if we are mounting from the host
|
|
|
- if v.isReadWrite && !v.isBindMount {
|
|
|
- return copyExistingContents(fullVolPath, hostPath)
|
|
|
- }
|
|
|
+ container.command.Mounts = mounts
|
|
|
return nil
|
|
|
}
|
|
|
|
|
|
-func createIfNotExists(destination string, isDir bool) error {
|
|
|
- if _, err := os.Stat(destination); err == nil || !os.IsNotExist(err) {
|
|
|
- return nil
|
|
|
+func parseVolumesFromSpec(daemon *Daemon, spec string) (map[string]*Mount, error) {
|
|
|
+ specParts := strings.SplitN(spec, ":", 2)
|
|
|
+ if len(specParts) == 0 {
|
|
|
+ return nil, fmt.Errorf("Malformed volumes-from specification: %s", spec)
|
|
|
}
|
|
|
|
|
|
- if isDir {
|
|
|
- return os.MkdirAll(destination, 0755)
|
|
|
+ c := daemon.Get(specParts[0])
|
|
|
+ if c == nil {
|
|
|
+ return nil, fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
|
|
|
}
|
|
|
|
|
|
- if err := os.MkdirAll(filepath.Dir(destination), 0755); err != nil {
|
|
|
- return err
|
|
|
+ mounts := c.VolumeMounts()
|
|
|
+
|
|
|
+ if len(specParts) == 2 {
|
|
|
+ mode := specParts[1]
|
|
|
+		if !validMountMode(mode) {
|
|
|
+ return nil, fmt.Errorf("Invalid mode for volumes-from: %s", mode)
|
|
|
+ }
|
|
|
+
|
|
|
+ // Set the mode for the inheritted volume
|
|
|
+ for _, mnt := range mounts {
|
|
|
+ // Ensure that if the inherited volume is not writable, that we don't make
|
|
|
+ // it writable here
|
|
|
+ mnt.Writable = mnt.Writable && (mode == "rw")
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- f, err := os.OpenFile(destination, os.O_CREATE, 0755)
|
|
|
- if err != nil {
|
|
|
- return err
|
|
|
+ return mounts, nil
|
|
|
+}
|
|
|
+
|
|
|
+func (container *Container) VolumeMounts() map[string]*Mount {
|
|
|
+ mounts := make(map[string]*Mount)
|
|
|
+
|
|
|
+ for mountToPath, path := range container.Volumes {
|
|
|
+ if v := container.daemon.volumes.Get(path); v != nil {
|
|
|
+ mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]}
|
|
|
+ }
|
|
|
}
|
|
|
- f.Close()
|
|
|
|
|
|
- return nil
|
|
|
+ return mounts
|
|
|
}
|
|
|
|
|
|
func copyExistingContents(source, destination string) error {
|