Merge pull request #13161 from calavera/plugin_discovery

Proposal: Volume refactor and external volume plugins
This commit is contained in:
Arnaud Porterie 2015-05-23 18:44:18 -07:00
commit bce3e761c0
43 changed files with 1804 additions and 1192 deletions

View file

@ -6,19 +6,18 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"reflect"
"strings"
"text/template"
"time"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
)
// DockerCli represents the docker command line client.
@ -178,19 +177,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
tr := &http.Transport{
TLSClientConfig: tlsConfig,
}
// Why 32? See https://github.com/docker/docker/pull/8035.
timeout := 32 * time.Second
if proto == "unix" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, timeout)
}
} else {
tr.Proxy = http.ProxyFromEnvironment
tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
}
utils.ConfigureTCPTransport(tr, proto, addr)
configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
if e != nil {

View file

@ -773,7 +773,7 @@ func (b *Builder) clearTmp() {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
return
}
b.Daemon.DeleteVolumes(tmp.VolumePaths())
b.Daemon.DeleteVolumes(tmp)
delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
}

View file

@ -26,9 +26,11 @@ import (
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
)
var (
@ -48,46 +50,37 @@ type StreamConfig struct {
// CommonContainer holds the settings for a container which are applicable
// across all platforms supported by the daemon.
type CommonContainer struct {
StreamConfig
*State `json:"State"` // Needed for remote api version <= 1.11
root string // Path to the "home" of the container, including metadata.
basefs string // Path to the graphdriver mountpoint
ID string
Created time.Time
Path string
Args []string
Config *runconfig.Config
ImageID string `json:"Image"`
NetworkSettings *network.Settings
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Name string
Driver string
ExecDriver string
command *execdriver.Command
StreamConfig
daemon *Daemon
ID string
Created time.Time
Path string
Args []string
Config *runconfig.Config
ImageID string `json:"Image"`
NetworkSettings *network.Settings
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Name string
Driver string
ExecDriver string
MountLabel, ProcessLabel string
RestartCount int
UpdateDns bool
MountPoints map[string]*mountPoint
// Maps container paths to volume paths. The key in this is the path to which
// the volume is being mounted inside the container. Value is the path of the
// volume on disk
Volumes map[string]string
hostConfig *runconfig.HostConfig
command *execdriver.Command
monitor *containerMonitor
execCommands *execStore
daemon *Daemon
// logDriver for closing
logDriver logger.Logger
logCopier *logger.Copier
@ -259,9 +252,6 @@ func (container *Container) Start() (err error) {
return err
}
container.verifyDaemonSettings()
if err := container.prepareVolumes(); err != nil {
return err
}
linkedEnv, err := container.setupLinkedContainers()
if err != nil {
return err
@ -273,10 +263,13 @@ func (container *Container) Start() (err error) {
if err := populateCommand(container, env); err != nil {
return err
}
if err := container.setupMounts(); err != nil {
mounts, err := container.setupMounts()
if err != nil {
return err
}
container.command.Mounts = mounts
return container.waitForStart()
}
@ -353,6 +346,8 @@ func (container *Container) cleanup() {
for _, eConfig := range container.execCommands.s {
container.daemon.unregisterExecCommand(eConfig)
}
container.UnmountVolumes(true)
}
func (container *Container) KillSig(sig int) error {
@ -476,6 +471,7 @@ func (container *Container) Stop(seconds int) error {
return err
}
}
return nil
}
@ -573,25 +569,29 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
}
defer func() {
if err != nil {
// unmount any volumes
container.UnmountVolumes(true)
// unmount the container's rootfs
container.Unmount()
}
}()
if err = container.mountVolumes(); err != nil {
container.unmountVolumes()
mounts, err := container.setupMounts()
if err != nil {
return nil, err
}
defer func() {
for _, m := range mounts {
dest, err := container.GetResourcePath(m.Destination)
if err != nil {
container.unmountVolumes()
return nil, err
}
}()
if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
return nil, err
}
}
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
stat, err := os.Stat(basePath)
if err != nil {
return nil, err
@ -605,7 +605,6 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
filter = []string{filepath.Base(basePath)}
basePath = filepath.Dir(basePath)
}
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
@ -613,10 +612,9 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.unmountVolumes()
container.UnmountVolumes(true)
container.Unmount()
return err
}),
@ -1007,3 +1005,129 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
}
return written, err
}
// networkMounts returns the bind mounts for the container's network files
// (resolv.conf, hostname, hosts). A file is included only when the daemon has
// generated a path for it; each mount is private and is read-only exactly when
// the container runs with a read-only rootfs.
func (container *Container) networkMounts() []execdriver.Mount {
	writable := !container.hostConfig.ReadonlyRootfs

	candidates := []struct {
		source      string
		destination string
	}{
		{container.ResolvConfPath, "/etc/resolv.conf"},
		{container.HostnamePath, "/etc/hostname"},
		{container.HostsPath, "/etc/hosts"},
	}

	var mounts []execdriver.Mount
	for _, c := range candidates {
		if c.source == "" {
			continue
		}
		mounts = append(mounts, execdriver.Mount{
			Source:      c.source,
			Destination: c.destination,
			Writable:    writable,
			Private:     true,
		})
	}
	return mounts
}
// addLocalMountPoint registers a mount point backed by the default (local)
// volume driver, keyed by its destination path inside the container.
func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
	point := &mountPoint{
		Name:        name,
		Driver:      volume.DefaultDriverName,
		Destination: destination,
		RW:          rw,
	}
	container.MountPoints[destination] = point
}
// addMountPointWithVolume registers a mount point backed by an
// already-created volume, keyed by its destination path inside the container.
func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
	point := &mountPoint{
		Name:        vol.Name(),
		Driver:      vol.DriverName(),
		Destination: destination,
		RW:          rw,
		Volume:      vol,
	}
	container.MountPoints[destination] = point
}
// isDestinationMounted reports whether a (non-nil) mount point is already
// registered for the given destination path inside the container.
func (container *Container) isDestinationMounted(destination string) bool {
	point, exists := container.MountPoints[destination]
	return exists && point != nil
}
// prepareMountPoints attaches a live volume to every registered mount point
// that names a volume driver, creating the volumes as needed. Entries with an
// empty driver (plain bind mounts) are left untouched.
func (container *Container) prepareMountPoints() error {
	for _, point := range container.MountPoints {
		if len(point.Driver) == 0 {
			continue
		}
		vol, err := createVolume(point.Name, point.Driver)
		if err != nil {
			return err
		}
		point.Volume = vol
	}
	return nil
}
// removeMountPoints deletes the volumes backing this container's mount
// points, stopping at the first failure. Entries without an attached volume
// are skipped.
func (container *Container) removeMountPoints() error {
	for _, point := range container.MountPoints {
		if point.Volume == nil {
			continue
		}
		if err := removeVolume(point.Volume); err != nil {
			return err
		}
	}
	return nil
}
// shouldRestart reports whether the container's restart policy asks for it to
// be brought back up: "always" unconditionally, or "on-failure" when the last
// exit code was non-zero.
func (container *Container) shouldRestart() bool {
	switch container.hostConfig.RestartPolicy.Name {
	case "always":
		return true
	case "on-failure":
		return container.ExitCode != 0
	default:
		return false
	}
}
// UnmountVolumes detaches every mount point of the container. When
// forceSyscall is true a raw unmount syscall is first issued on each
// destination (its error is deliberately ignored — the path may not actually
// be mounted); any volume attached to a mount point is then unmounted through
// its driver. The first resolution or driver-unmount error aborts the loop.
func (container *Container) UnmountVolumes(forceSyscall bool) error {
	for _, point := range container.MountPoints {
		mountPath, err := container.GetResourcePath(point.Destination)
		if err != nil {
			return err
		}
		if forceSyscall {
			// Best effort; errors intentionally not checked.
			syscall.Unmount(mountPath, 0)
		}
		if point.Volume == nil {
			continue
		}
		if err := point.Volume.Unmount(); err != nil {
			return err
		}
	}
	return nil
}
// copyImagePathContent seeds a freshly created volume with whatever the image
// already ships at destination. The destination is resolved inside the
// container's root filesystem (following symlinks without escaping basefs);
// when the path does not exist in the image the function is a no-op.
// Otherwise the volume is mounted, the existing content copied in, and the
// volume unmounted again.
func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
	source, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
	if err != nil {
		return err
	}

	if _, err = ioutil.ReadDir(source); err != nil {
		if os.IsNotExist(err) {
			// Nothing at this path in the image: leave the volume as-is.
			return nil
		}
		return err
	}

	volumePath, err := v.Mount()
	if err != nil {
		return err
	}
	if err := copyExistingContents(source, volumePath); err != nil {
		return err
	}
	return v.Unmount()
}

View file

@ -42,14 +42,7 @@ type Container struct {
// Fields below here are platform specific.
AppArmorProfile string
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
// Easier than migrating older container configs :)
VolumesRW map[string]bool
AppliedVolumesFrom map[string]struct{}
activeLinks map[string]*links.Link
activeLinks map[string]*links.Link
}
func killProcessDirectly(container *Container) error {

View file

@ -27,12 +27,6 @@ type Container struct {
// removed in subsequent PRs.
AppArmorProfile string
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
// Easier than migrating older container configs :)
VolumesRW map[string]bool
AppliedVolumesFrom map[string]struct{}
// ---- END OF TEMPORARY DECLARATION ----
}

View file

@ -2,11 +2,14 @@ package daemon
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig"
"github.com/docker/libcontainer/label"
)
@ -87,17 +90,51 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
if err := daemon.createRootfs(container); err != nil {
return nil, nil, err
}
if hostConfig != nil {
if err := daemon.setHostConfig(container, hostConfig); err != nil {
return nil, nil, err
}
if err := daemon.setHostConfig(container, hostConfig); err != nil {
return nil, nil, err
}
if err := container.Mount(); err != nil {
return nil, nil, err
}
defer container.Unmount()
if err := container.prepareVolumes(); err != nil {
return nil, nil, err
for spec := range config.Volumes {
var (
name, destination string
parts = strings.Split(spec, ":")
)
switch len(parts) {
case 2:
name, destination = parts[0], filepath.Clean(parts[1])
default:
name = stringid.GenerateRandomID()
destination = filepath.Clean(parts[0])
}
// Skip volumes for which we already have something mounted on that
// destination because of a --volume-from.
if container.isDestinationMounted(destination) {
continue
}
path, err := container.GetResourcePath(destination)
if err != nil {
return nil, nil, err
}
stat, err := os.Stat(path)
if err == nil && !stat.IsDir() {
return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
}
v, err := createVolume(name, config.VolumeDriver)
if err != nil {
return nil, nil, err
}
if err := container.copyImagePathContent(v, destination); err != nil {
return nil, nil, err
}
container.addMountPointWithVolume(destination, v, true)
}
if err := container.ToDisk(); err != nil {
return nil, nil, err

View file

@ -46,9 +46,12 @@ import (
"github.com/docker/docker/runconfig"
"github.com/docker/docker/trust"
"github.com/docker/docker/utils"
"github.com/docker/docker/volumes"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
)
const defaultVolumesPathName = "volumes"
var (
validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
@ -99,7 +102,6 @@ type Daemon struct {
repositories *graph.TagStore
idIndex *truncindex.TruncIndex
sysInfo *sysinfo.SysInfo
volumes *volumes.Repository
config *Config
containerGraph *graphdb.Database
driver graphdriver.Driver
@ -109,6 +111,7 @@ type Daemon struct {
RegistryService *registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
root string
}
// Get looks for a container using the provided information, which could be
@ -209,7 +212,13 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
// we'll waste time if we update it for every container
daemon.idIndex.Add(container.ID)
container.registerVolumes()
if err := daemon.verifyOldVolumesInfo(container); err != nil {
return err
}
if err := container.prepareMountPoints(); err != nil {
return err
}
if container.IsRunning() {
logrus.Debugf("killing old running container %s", container.ID)
@ -249,10 +258,15 @@ func (daemon *Daemon) ensureName(container *Container) error {
}
func (daemon *Daemon) restore() error {
type cr struct {
container *Container
registered bool
}
var (
debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
containers = make(map[string]*Container)
currentDriver = daemon.driver.String()
containers = make(map[string]*cr)
)
if !debug {
@ -278,14 +292,12 @@ func (daemon *Daemon) restore() error {
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
containers[container.ID] = &cr{container: container}
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
registeredContainers := []*Container{}
if entities := daemon.containerGraph.List("/", -1); entities != nil {
for _, p := range entities.Paths() {
if !debug && logrus.GetLevel() == logrus.InfoLevel {
@ -294,50 +306,43 @@ func (daemon *Daemon) restore() error {
e := entities[p]
if container, ok := containers[e.ID()]; ok {
if err := daemon.register(container, false); err != nil {
logrus.Debugf("Failed to register container %s: %s", container.ID, err)
}
registeredContainers = append(registeredContainers, container)
// delete from the map so that a new name is not automatically generated
delete(containers, e.ID())
if c, ok := containers[e.ID()]; ok {
c.registered = true
}
}
}
// Any containers that are left over do not exist in the graph
for _, container := range containers {
// Try to set the default name for a container if it exists prior to links
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
logrus.Debugf("Setting default id - %s", err)
}
group := sync.WaitGroup{}
for _, c := range containers {
group.Add(1)
if err := daemon.register(container, false); err != nil {
logrus.Debugf("Failed to register container %s: %s", container.ID, err)
}
go func(container *Container, registered bool) {
defer group.Done()
registeredContainers = append(registeredContainers, container)
}
if !registered {
// Try to set the default name for a container if it exists prior to links
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
logrus.Debugf("Setting default id - %s", err)
}
}
// check the restart policy on the containers and restart any container with
// the restart policy of "always"
if daemon.config.AutoRestart {
logrus.Debug("Restarting containers...")
if err := daemon.register(container, false); err != nil {
logrus.Debugf("Failed to register container %s: %s", container.ID, err)
}
for _, container := range registeredContainers {
if container.hostConfig.RestartPolicy.IsAlways() ||
(container.hostConfig.RestartPolicy.IsOnFailure() && container.ExitCode != 0) {
// check the restart policy on the containers and restart any container with
// the restart policy of "always"
if daemon.config.AutoRestart && container.shouldRestart() {
logrus.Debugf("Starting container %s", container.ID)
if err := container.Start(); err != nil {
logrus.Debugf("Failed to start container %s: %s", container.ID, err)
}
}
}
}(c.container, c.registered)
}
group.Wait()
if !debug {
if logrus.GetLevel() == logrus.InfoLevel {
@ -535,6 +540,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
ExecDriver: daemon.execDriver.Name(),
State: NewState(),
execCommands: newExecStore(),
MountPoints: map[string]*mountPoint{},
},
}
container.root = daemon.containerRoot(container.ID)
@ -785,15 +791,11 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
return nil, err
}
volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
if err != nil {
return nil, err
}
volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
if err != nil {
return nil, err
}
volumedrivers.Register(volumesDriver, volumesDriver.Name())
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
@ -872,7 +874,6 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
d.repositories = repositories
d.idIndex = truncindex.NewTruncIndex([]string{})
d.sysInfo = sysInfo
d.volumes = volumes
d.config = config
d.sysInitPath = sysInitPath
d.execDriver = ed
@ -880,6 +881,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
d.defaultLogConfig = config.LogConfig
d.RegistryService = registryService
d.EventsService = eventsService
d.root = config.Root
if err := d.restore(); err != nil {
return nil, err
@ -1218,6 +1220,10 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
}
func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
if err := daemon.registerMountPoints(container, hostConfig); err != nil {
return err
}
container.Lock()
defer container.Unlock()
if err := parseSecurityOpt(container, hostConfig); err != nil {
@ -1231,6 +1237,5 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
container.hostConfig = hostConfig
container.toDisk()
return nil
}

View file

@ -70,22 +70,14 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
}
}
container.LogEvent("destroy")
if config.RemoveVolume {
daemon.DeleteVolumes(container.VolumePaths())
container.removeMountPoints()
}
}
return nil
}
func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
for id := range volumeIDs {
if err := daemon.volumes.Delete(id); err != nil {
logrus.Infof("%s", err)
continue
}
}
}
func (daemon *Daemon) Rm(container *Container) (err error) {
return daemon.commonRm(container, false)
}
@ -134,7 +126,6 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
}
}()
container.derefVolumes()
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
logrus.Debugf("Unable to remove container from link graph: %s", err)
}
@ -162,3 +153,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
return nil
}
// DeleteVolumes removes the volumes backing the container's mount points.
// It is a thin wrapper over Container.removeMountPoints.
func (daemon *Daemon) DeleteVolumes(c *Container) error {
return c.removeMountPoints()
}

View file

@ -10,6 +10,10 @@ import (
type ContainerJSONRaw struct {
*Container
HostConfig *runconfig.HostConfig
// Unused fields for backward compatibility with API versions < 1.12.
Volumes map[string]string
VolumesRW map[string]bool
}
func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
@ -48,6 +52,14 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
FinishedAt: container.State.FinishedAt,
}
volumes := make(map[string]string)
volumesRW := make(map[string]bool)
for _, m := range container.MountPoints {
volumes[m.Destination] = m.Path()
volumesRW[m.Destination] = m.RW
}
contJSON := &types.ContainerJSON{
Id: container.ID,
Created: container.Created,
@ -67,8 +79,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
ExecDriver: container.ExecDriver,
MountLabel: container.MountLabel,
ProcessLabel: container.ProcessLabel,
Volumes: container.Volumes,
VolumesRW: container.VolumesRW,
Volumes: volumes,
VolumesRW: volumesRW,
AppArmorProfile: container.AppArmorProfile,
ExecIDs: container.GetExecIDs(),
HostConfig: &hostConfig,

View file

@ -1,213 +1,103 @@
package daemon
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
)
type volumeMount struct {
containerPath string
hostPath string
writable bool
copyData bool
from string
// mountPoint describes a single mount inside a container: either a named
// volume managed by a driver, or a host-path bind mount (Source set).
type mountPoint struct {
Name string // volume name; empty for plain host binds
Destination string // path inside the container where it is mounted
Driver string // volume driver name; empty for plain bind mounts
RW bool // whether the mount is writable
Volume volume.Volume `json:"-"` // live volume handle; deliberately not serialized
Source string // host path backing the mount, if any
}
func (container *Container) createVolumes() error {
mounts := make(map[string]*volumeMount)
func (m *mountPoint) Setup() (string, error) {
if m.Volume != nil {
return m.Volume.Mount()
}
// get the normal volumes
for path := range container.Config.Volumes {
path = filepath.Clean(path)
// skip if there is already a volume for this container path
if _, exists := container.Volumes[path]; exists {
continue
}
realPath, err := container.GetResourcePath(path)
if err != nil {
return err
}
if stat, err := os.Stat(realPath); err == nil {
if !stat.IsDir() {
return fmt.Errorf("can't mount to container path, file exists - %s", path)
if len(m.Source) > 0 {
if _, err := os.Stat(m.Source); err != nil {
if !os.IsNotExist(err) {
return "", err
}
if err := os.MkdirAll(m.Source, 0755); err != nil {
return "", err
}
}
mnt := &volumeMount{
containerPath: path,
writable: true,
copyData: true,
}
mounts[mnt.containerPath] = mnt
return m.Source, nil
}
// Get all the bind mounts
// track bind paths separately due to #10618
bindPaths := make(map[string]struct{})
for _, spec := range container.hostConfig.Binds {
mnt, err := parseBindMountSpec(spec)
if err != nil {
return err
}
// #10618
if _, exists := bindPaths[mnt.containerPath]; exists {
return fmt.Errorf("Duplicate volume mount %s", mnt.containerPath)
}
bindPaths[mnt.containerPath] = struct{}{}
mounts[mnt.containerPath] = mnt
}
// Get volumes from
for _, from := range container.hostConfig.VolumesFrom {
cID, mode, err := parseVolumesFromSpec(from)
if err != nil {
return err
}
if _, exists := container.AppliedVolumesFrom[cID]; exists {
// skip since it's already been applied
continue
}
c, err := container.daemon.Get(cID)
if err != nil {
return fmt.Errorf("container %s not found, impossible to mount its volumes", cID)
}
for _, mnt := range c.volumeMounts() {
mnt.writable = mnt.writable && (mode == "rw")
mnt.from = cID
mounts[mnt.containerPath] = mnt
}
}
for _, mnt := range mounts {
containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, mnt.containerPath), container.basefs)
if err != nil {
return err
}
// Create the actual volume
v, err := container.daemon.volumes.FindOrCreateVolume(mnt.hostPath, mnt.writable)
if err != nil {
return err
}
container.VolumesRW[mnt.containerPath] = mnt.writable
container.Volumes[mnt.containerPath] = v.Path
v.AddContainer(container.ID)
if mnt.from != "" {
container.AppliedVolumesFrom[mnt.from] = struct{}{}
}
if mnt.writable && mnt.copyData {
// Copy whatever is in the container at the containerPath to the volume
copyExistingContents(containerMntPath, v.Path)
}
}
return nil
return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
}
// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
func (container *Container) sortedVolumeMounts() []string {
var mountPaths []string
for path := range container.Volumes {
mountPaths = append(mountPaths, path)
func (m *mountPoint) Path() string {
if m.Volume != nil {
return m.Volume.Path()
}
sort.Strings(mountPaths)
return mountPaths
return m.Source
}
func (container *Container) VolumePaths() map[string]struct{} {
var paths = make(map[string]struct{})
for _, path := range container.Volumes {
paths[path] = struct{}{}
func parseBindMount(spec string, config *runconfig.Config) (*mountPoint, error) {
bind := &mountPoint{
RW: true,
}
return paths
}
func (container *Container) registerVolumes() {
for path := range container.VolumePaths() {
if v := container.daemon.volumes.Get(path); v != nil {
v.AddContainer(container.ID)
continue
}
// if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered
writable := true
if rw, exists := container.VolumesRW[path]; exists {
writable = rw
}
v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
if err != nil {
logrus.Debugf("error registering volume %s: %v", path, err)
continue
}
v.AddContainer(container.ID)
}
}
func (container *Container) derefVolumes() {
for path := range container.VolumePaths() {
vol := container.daemon.volumes.Get(path)
if vol == nil {
logrus.Debugf("Volume %s was not found and could not be dereferenced", path)
continue
}
vol.RemoveContainer(container.ID)
}
}
func parseBindMountSpec(spec string) (*volumeMount, error) {
arr := strings.Split(spec, ":")
mnt := &volumeMount{}
switch len(arr) {
case 2:
mnt.hostPath = arr[0]
mnt.containerPath = arr[1]
mnt.writable = true
bind.Destination = arr[1]
case 3:
mnt.hostPath = arr[0]
mnt.containerPath = arr[1]
mnt.writable = validMountMode(arr[2]) && arr[2] == "rw"
bind.Destination = arr[1]
if !validMountMode(arr[2]) {
return nil, fmt.Errorf("invalid mode for volumes-from: %s", arr[2])
}
bind.RW = arr[2] == "rw"
default:
return nil, fmt.Errorf("Invalid volume specification: %s", spec)
}
if !filepath.IsAbs(mnt.hostPath) {
return nil, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", mnt.hostPath)
name, source, err := parseVolumeSource(arr[0], config)
if err != nil {
return nil, err
}
mnt.hostPath = filepath.Clean(mnt.hostPath)
mnt.containerPath = filepath.Clean(mnt.containerPath)
return mnt, nil
if len(source) == 0 {
bind.Driver = config.VolumeDriver
if len(bind.Driver) == 0 {
bind.Driver = volume.DefaultDriverName
}
} else {
bind.Source = filepath.Clean(source)
}
bind.Name = name
bind.Destination = filepath.Clean(bind.Destination)
return bind, nil
}
func parseVolumesFromSpec(spec string) (string, string, error) {
specParts := strings.SplitN(spec, ":", 2)
if len(specParts) == 0 {
func parseVolumesFrom(spec string) (string, string, error) {
if len(spec) == 0 {
return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
}
var (
id = specParts[0]
mode = "rw"
)
specParts := strings.SplitN(spec, ":", 2)
id := specParts[0]
mode := "rw"
if len(specParts) == 2 {
mode = specParts[1]
if !validMountMode(mode) {
@ -222,7 +112,6 @@ func validMountMode(mode string) bool {
"rw": true,
"ro": true,
}
return validModes[mode]
}
@ -240,34 +129,16 @@ func (container *Container) specialMounts() []execdriver.Mount {
return mounts
}
func (container *Container) volumeMounts() map[string]*volumeMount {
mounts := make(map[string]*volumeMount)
for containerPath, path := range container.Volumes {
v := container.daemon.volumes.Get(path)
if v == nil {
// This should never happen
logrus.Debugf("reference by container %s to non-existent volume path %s", container.ID, path)
continue
}
mounts[containerPath] = &volumeMount{hostPath: path, containerPath: containerPath, writable: container.VolumesRW[containerPath]}
}
return mounts
}
func copyExistingContents(source, destination string) error {
volList, err := ioutil.ReadDir(source)
if err != nil {
return err
}
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(destination)
if err != nil {
return err
}
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := chrootarchive.CopyWithTar(source, destination); err != nil {
@ -275,60 +146,136 @@ func copyExistingContents(source, destination string) error {
}
}
}
return copyOwnership(source, destination)
}
func (container *Container) mountVolumes() error {
for dest, source := range container.Volumes {
v := container.daemon.volumes.Get(source)
if v == nil {
return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest)
}
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows the next sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
binds := map[string]bool{}
mountPoints := map[string]*mountPoint{}
destPath, err := container.GetResourcePath(dest)
// 1. Read already configured mount points.
for name, point := range container.MountPoints {
mountPoints[name] = point
}
// 2. Read volumes from other containers.
for _, v := range hostConfig.VolumesFrom {
containerID, mode, err := parseVolumesFrom(v)
if err != nil {
return err
}
if err := mount.Mount(source, destPath, "bind", "rbind,rw"); err != nil {
return fmt.Errorf("error while mounting volume %s: %v", source, err)
}
}
for _, mnt := range container.specialMounts() {
destPath, err := container.GetResourcePath(mnt.Destination)
c, err := daemon.Get(containerID)
if err != nil {
return err
}
if err := mount.Mount(mnt.Source, destPath, "bind", "bind,rw"); err != nil {
return fmt.Errorf("error while mounting volume %s: %v", mnt.Source, err)
for _, m := range c.MountPoints {
cp := m
cp.RW = m.RW && mode != "ro"
if len(m.Source) == 0 {
v, err := createVolume(m.Name, m.Driver)
if err != nil {
return err
}
cp.Volume = v
}
mountPoints[cp.Destination] = cp
}
}
// 3. Read bind mounts
for _, b := range hostConfig.Binds {
// #10618
bind, err := parseBindMount(b, container.Config)
if err != nil {
return err
}
if binds[bind.Destination] {
return fmt.Errorf("Duplicate bind mount %s", bind.Destination)
}
if len(bind.Name) > 0 && len(bind.Driver) > 0 {
v, err := createVolume(bind.Name, bind.Driver)
if err != nil {
return err
}
bind.Volume = v
}
binds[bind.Destination] = true
mountPoints[bind.Destination] = bind
}
container.MountPoints = mountPoints
return nil
}
func (container *Container) unmountVolumes() {
for dest := range container.Volumes {
destPath, err := container.GetResourcePath(dest)
if err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
// verifyOldVolumesInfo ports volumes configured for the containers pre docker 1.7.
// It reads the container configuration and creates valid mount points for the old volumes.
func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
jsonPath, err := container.jsonPath()
if err != nil {
return err
}
f, err := os.Open(jsonPath)
if err != nil {
if os.IsNotExist(err) {
return nil
}
if err := mount.ForceUnmount(destPath); err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
return err
}
type oldContVolCfg struct {
Volumes map[string]string
VolumesRW map[string]bool
}
vols := oldContVolCfg{
Volumes: make(map[string]string),
VolumesRW: make(map[string]bool),
}
if err := json.NewDecoder(f).Decode(&vols); err != nil {
return err
}
for destination, hostPath := range vols.Volumes {
vfsPath := filepath.Join(daemon.root, "vfs", "dir")
if strings.HasPrefix(hostPath, vfsPath) {
id := filepath.Base(hostPath)
rw := vols.VolumesRW != nil && vols.VolumesRW[destination]
container.addLocalMountPoint(id, destination, rw)
}
}
for _, mnt := range container.specialMounts() {
destPath, err := container.GetResourcePath(mnt.Destination)
if err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
}
if err := mount.ForceUnmount(destPath); err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
}
}
return container.ToDisk()
}
// createVolume builds a new volume with the given name using the driver
// registered under driverName (the default driver when driverName is empty).
func createVolume(name, driverName string) (volume.Volume, error) {
	driver, err := getVolumeDriver(driverName)
	if err != nil {
		return nil, err
	}
	return driver.Create(name)
}
// removeVolume deletes v through the driver that created it.
//
// The driver lookup error is now propagated instead of being silently
// swallowed (`return nil`): dropping it masked a missing or
// misconfigured driver and was inconsistent with the error already
// returned from vd.Remove below.
func removeVolume(v volume.Volume) error {
	vd, err := getVolumeDriver(v.DriverName())
	if err != nil {
		return err
	}
	return vd.Remove(v)
}

View file

@ -0,0 +1,26 @@
// +build experimental
package daemon
import (
"path/filepath"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
)
// getVolumeDriver resolves a registered volume driver by name,
// substituting the default driver when name is empty.
func getVolumeDriver(name string) (volume.Driver, error) {
	driverName := name
	if driverName == "" {
		driverName = volume.DefaultDriverName
	}
	return volumedrivers.Lookup(driverName)
}
// parseVolumeSource splits a volume spec into (name, hostPath): an
// absolute spec is a host path for a bind mount; anything else is
// treated as a volume name.
func parseVolumeSource(spec string, config *runconfig.Config) (string, string, error) {
	if filepath.IsAbs(spec) {
		return "", spec, nil
	}
	return spec, "", nil
}

View file

@ -0,0 +1,86 @@
// +build experimental
package daemon
import (
"testing"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
)
// fakeDriver is a stub volume driver used only to exercise driver
// registration and lookup in TestGetVolumeDriver; all operations are no-ops.
type fakeDriver struct{}

// Name returns the name this fake registers under.
func (fakeDriver) Name() string { return "fake" }

// Create satisfies the driver interface; it returns a nil volume and no error.
func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil }

// Remove satisfies the driver interface; it always succeeds.
func (fakeDriver) Remove(v volume.Volume) error { return nil }
// TestGetVolumeDriver checks that looking up an unregistered driver
// fails and that a registered driver is returned by name.
func TestGetVolumeDriver(t *testing.T) {
	if _, err := getVolumeDriver("missing"); err == nil {
		t.Fatal("Expected error, was nil")
	}

	volumedrivers.Register(fakeDriver{}, "fake")
	driver, err := getVolumeDriver("fake")
	if err != nil {
		t.Fatal(err)
	}
	if driver.Name() != "fake" {
		t.Fatalf("Expected fake driver, got %s\n", driver.Name())
	}
}
// TestParseBindMount exercises parseBindMount over a table of volume
// specs, covering host-path binds, named volumes, access modes, and
// the interaction with a configured volume driver.
func TestParseBindMount(t *testing.T) {
	testCases := []struct {
		bind      string
		driver    string
		expDest   string
		expSource string
		expName   string
		expDriver string
		expRW     bool
		fail      bool
	}{
		{bind: "/tmp:/tmp", expDest: "/tmp", expSource: "/tmp", expRW: true},
		{bind: "/tmp:/tmp:ro", expDest: "/tmp", expSource: "/tmp"},
		{bind: "/tmp:/tmp:rw", expDest: "/tmp", expSource: "/tmp", expRW: true},
		{bind: "/tmp:/tmp:foo", expDest: "/tmp", expSource: "/tmp", fail: true},
		{bind: "name:/tmp", expDest: "/tmp", expName: "name", expDriver: "local", expRW: true},
		{bind: "name:/tmp", driver: "external", expDest: "/tmp", expName: "name", expDriver: "external", expRW: true},
		{bind: "name:/tmp:ro", driver: "local", expDest: "/tmp", expName: "name", expDriver: "local"},
		{bind: "local/name:/tmp:rw", expDest: "/tmp", expName: "local/name", expDriver: "local", expRW: true},
	}

	for _, tc := range testCases {
		conf := &runconfig.Config{VolumeDriver: tc.driver}
		m, err := parseBindMount(tc.bind, conf)
		if tc.fail {
			if err == nil {
				t.Fatalf("Expected error, was nil, for spec %s\n", tc.bind)
			}
			continue
		}

		if m.Destination != tc.expDest {
			t.Fatalf("Expected destination %s, was %s, for spec %s\n", tc.expDest, m.Destination, tc.bind)
		}
		if m.Source != tc.expSource {
			t.Fatalf("Expected source %s, was %s, for spec %s\n", tc.expSource, m.Source, tc.bind)
		}
		if m.Name != tc.expName {
			t.Fatalf("Expected name %s, was %s for spec %s\n", tc.expName, m.Name, tc.bind)
		}
		if m.Driver != tc.expDriver {
			t.Fatalf("Expected driver %s, was %s, for spec %s\n", tc.expDriver, m.Driver, tc.bind)
		}
		if m.RW != tc.expRW {
			t.Fatalf("Expected RW %v, was %v for spec %s\n", tc.expRW, m.RW, tc.bind)
		}
	}
}

View file

@ -4,6 +4,9 @@ package daemon
import (
"os"
"path/filepath"
"sort"
"strings"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/system"
@ -24,36 +27,44 @@ func copyOwnership(source, destination string) error {
return os.Chmod(destination, os.FileMode(stat.Mode()))
}
func (container *Container) prepareVolumes() error {
if container.Volumes == nil || len(container.Volumes) == 0 {
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
}
func (container *Container) setupMounts() ([]execdriver.Mount, error) {
var mounts []execdriver.Mount
for _, m := range container.MountPoints {
path, err := m.Setup()
if err != nil {
return nil, err
}
if len(container.hostConfig.VolumesFrom) > 0 && container.AppliedVolumesFrom == nil {
container.AppliedVolumesFrom = make(map[string]struct{})
}
return container.createVolumes()
}
func (container *Container) setupMounts() error {
mounts := []execdriver.Mount{}
// Mount user specified volumes
// Note, these are not private because you may want propagation of (un)mounts from host
// volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
// want this new mount in the container
// These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
for _, path := range container.sortedVolumeMounts() {
mounts = append(mounts, execdriver.Mount{
Source: container.Volumes[path],
Destination: path,
Writable: container.VolumesRW[path],
Source: path,
Destination: m.Destination,
Writable: m.RW,
})
}
mounts = append(mounts, container.specialMounts()...)
container.command.Mounts = mounts
return nil
mounts = sortMounts(mounts)
return append(mounts, container.networkMounts()...), nil
}
// sortMounts orders mounts in place by destination depth (shallowest
// first) and returns the same slice for convenience.
func sortMounts(m []execdriver.Mount) []execdriver.Mount {
	sort.Sort(mounts(m))
	return m
}

// mounts adapts []execdriver.Mount to sort.Interface, ordering by the
// number of path components in each mount's destination.
type mounts []execdriver.Mount

// Len reports the number of mounts.
func (m mounts) Len() int { return len(m) }

// Less orders mount i before mount j when its destination has fewer
// path components.
func (m mounts) Less(i, j int) bool { return m.parts(i) < m.parts(j) }

// Swap exchanges mounts i and j.
func (m mounts) Swap(i, j int) { m[i], m[j] = m[j], m[i] }

// parts counts the path components of mount i's cleaned destination.
func (m mounts) parts(i int) int {
	cleaned := filepath.Clean(m[i].Destination)
	return len(strings.Split(cleaned, string(os.PathSeparator)))
}

24
daemon/volumes_stubs.go Normal file
View file

@ -0,0 +1,24 @@
// +build !experimental
package daemon
import (
"fmt"
"path/filepath"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
)
// getVolumeDriver ignores the requested driver name: builds without the
// experimental tag only support the default (local) volume driver.
func getVolumeDriver(_ string) (volume.Driver, error) {
	return volumedrivers.Lookup(volume.DefaultDriverName)
}
// parseVolumeSource validates a volume spec for non-experimental
// builds, where only absolute host paths (bind mounts) are accepted.
// On success it returns ("", hostPath, nil); named volumes are rejected.
func parseVolumeSource(spec string, _ *runconfig.Config) (string, string, error) {
	if filepath.IsAbs(spec) {
		return "", spec, nil
	}
	return "", "", fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", spec)
}

View file

@ -0,0 +1,81 @@
// +build !experimental
package daemon
import (
"io/ioutil"
"os"
"testing"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
)
// TestGetVolumeDefaultDriver verifies that, without the experimental
// build tag, any requested driver name resolves to the default driver.
func TestGetVolumeDefaultDriver(t *testing.T) {
	tmp, err := ioutil.TempDir("", "volume-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	l, err := local.New(tmp)
	if err != nil {
		t.Fatal(err)
	}
	volumedrivers.Register(l, volume.DefaultDriverName)
	d, err := getVolumeDriver("missing")
	if err != nil {
		t.Fatal(err)
	}
	if d.Name() != volume.DefaultDriverName {
		// Fixed: the original passed the method value d.Name to %s
		// instead of calling it, so the failure message printed a
		// function value rather than the driver name.
		t.Fatalf("Expected local driver, was %s\n", d.Name())
	}
}
// TestParseBindMount exercises parseBindMount in non-experimental
// builds: host-path binds work, named-volume specs must fail.
func TestParseBindMount(t *testing.T) {
	testCases := []struct {
		bind      string
		expDest   string
		expSource string
		expName   string
		expRW     bool
		fail      bool
	}{
		{bind: "/tmp:/tmp", expDest: "/tmp", expSource: "/tmp", expRW: true},
		{bind: "/tmp:/tmp:ro", expDest: "/tmp", expSource: "/tmp"},
		{bind: "/tmp:/tmp:rw", expDest: "/tmp", expSource: "/tmp", expRW: true},
		{bind: "/tmp:/tmp:foo", expDest: "/tmp", expSource: "/tmp", fail: true},
		{bind: "name:/tmp", fail: true},
		{bind: "local/name:/tmp:rw", expRW: true, fail: true},
	}

	for _, tc := range testCases {
		conf := &runconfig.Config{}
		m, err := parseBindMount(tc.bind, conf)
		if tc.fail {
			if err == nil {
				t.Fatalf("Expected error, was nil, for spec %s\n", tc.bind)
			}
			continue
		}

		if m.Destination != tc.expDest {
			t.Fatalf("Expected destination %s, was %s, for spec %s\n", tc.expDest, m.Destination, tc.bind)
		}
		if m.Source != tc.expSource {
			t.Fatalf("Expected source %s, was %s, for spec %s\n", tc.expSource, m.Source, tc.bind)
		}
		if m.Name != tc.expName {
			t.Fatalf("Expected name %s, was %s for spec %s\n", tc.expName, m.Name, tc.bind)
		}
		if m.RW != tc.expRW {
			t.Fatalf("Expected RW %v, was %v for spec %s\n", tc.expRW, m.RW, tc.bind)
		}
	}
}

View file

@ -0,0 +1,35 @@
package daemon
import "testing"
// TestParseVolumeFrom exercises parseVolumesFrom over valid and
// invalid --volumes-from specs, checking the parsed id and mode.
func TestParseVolumeFrom(t *testing.T) {
	testCases := []struct {
		spec    string
		expId   string
		expMode string
		fail    bool
	}{
		{spec: "", fail: true},
		{spec: "foobar", expId: "foobar", expMode: "rw"},
		{spec: "foobar:rw", expId: "foobar", expMode: "rw"},
		{spec: "foobar:ro", expId: "foobar", expMode: "ro"},
		{spec: "foobar:baz", fail: true},
	}

	for _, tc := range testCases {
		id, mode, err := parseVolumesFrom(tc.spec)
		if tc.fail {
			if err == nil {
				t.Fatalf("Expected error, was nil, for spec %s\n", tc.spec)
			}
			continue
		}

		if id != tc.expId {
			t.Fatalf("Expected id %s, was %s, for spec %s\n", tc.expId, id, tc.spec)
		}
		if mode != tc.expMode {
			t.Fatalf("Expected mode %s, was %s for spec %s\n", tc.expMode, mode, tc.spec)
		}
	}
}

View file

@ -2,15 +2,13 @@
package daemon
import "github.com/docker/docker/daemon/execdriver"
// Not supported on Windows
func copyOwnership(source, destination string) error {
return nil
return nil, nil
}
func (container *Container) prepareVolumes() error {
return nil
}
func (container *Container) setupMounts() error {
func (container *Container) setupMounts() ([]execdriver.Mount, error) {
return nil
}

View file

@ -0,0 +1,223 @@
page_title: Plugin API documentation
page_description: Documentation for writing a Docker plugin.
page_keywords: docker, plugins, api, extensions
# Docker Plugin API
Docker plugins are out-of-process extensions which add capabilities to the
Docker Engine.
This page is intended for people who want to develop their own Docker plugin.
If you just want to learn about or use Docker plugins, look
[here](/userguide/plugins).
## What plugins are
A plugin is a process running on the same docker host as the docker daemon,
which registers itself by placing a file in `/usr/share/docker/plugins` (the
"plugin directory").
Plugins have human-readable names, which are short, lowercase strings. For
example, `flocker` or `weave`.
Plugins can run inside or outside containers. Currently running them outside
containers is recommended.
## Plugin discovery
Docker discovers plugins by looking for them in the plugin directory whenever a
user or container tries to use one by name.
There are two types of files which can be put in the plugin directory.
* `.sock` files are UNIX domain sockets.
* `.spec` files are text files containing a URL, such as `unix:///other.sock`.
The name of the file (excluding the extension) determines the plugin name.
For example, the `flocker` plugin might create a UNIX socket at
`/usr/share/docker/plugins/flocker.sock`.
Plugins must be run locally on the same machine as the Docker daemon. UNIX
domain sockets are strongly encouraged for security reasons.
## Plugin lifecycle
Plugins should be started before Docker, and stopped after Docker. For
example, when packaging a plugin for a platform which supports `systemd`, you
might use [`systemd` dependencies](
http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
manage startup and shutdown order.
When upgrading a plugin, you should first stop the Docker daemon, upgrade the
plugin, then start Docker again.
If a plugin is packaged as a container, this may cause issues. Plugins as
containers are currently considered experimental due to these shutdown/startup
ordering issues. These issues are mitigated by plugin retries (see below).
## Plugin activation
When a plugin is first referred to -- either by a user referring to it by name
(e.g. `docker run --volume-driver=foo`) or a container already configured to
use a plugin being started -- Docker looks for the named plugin in the plugin
directory and activates it with a handshake. See Handshake API below.
Plugins are *not* activated automatically at Docker daemon startup. Rather,
they are activated only lazily, or on-demand, when they are needed.
## API design
The Plugin API is RPC-style JSON over HTTP, much like webhooks.
Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to
implement an HTTP server and bind this to the UNIX socket mentioned in the
"plugin discovery" section.
All requests are HTTP `POST` requests.
The API is versioned via an Accept header, which currently is always set to
`application/vnd.docker.plugins.v1+json`.
## Handshake API
Plugins are activated via the following "handshake" API call.
### /Plugin.Activate
**Request:** empty body
**Response:**
```
{
"Implements": ["VolumeDriver"]
}
```
Responds with a list of Docker subsystems which this plugin implements.
After activation, the plugin will then be sent events from this subsystem.
## Volume API
If a plugin registers itself as a `VolumeDriver` (see above) then it is
expected to provide writeable paths on the host filesystem for the Docker
daemon to provide to containers to consume.
The Docker daemon handles bind-mounting the provided paths into user
containers.
### /VolumeDriver.Create
**Request**:
```
{
"Name": "volume_name"
}
```
Instruct the plugin that the user wants to create a volume, given a user
specified volume name. The plugin does not need to actually manifest the
volume on the filesystem yet (until Mount is called).
**Response**:
```
{
"Err": null
}
```
Respond with a string error if an error occurred.
### /VolumeDriver.Remove
**Request**:
```
{
"Name": "volume_name"
}
```
Delete a volume, given a user specified volume name.
**Response**:
```
{
"Err": null
}
```
Respond with a string error if an error occurred.
### /VolumeDriver.Mount
**Request**:
```
{
"Name": "volume_name"
}
```
Docker requires the plugin to provide a volume, given a user specified volume
name. This is called once per container start.
**Response**:
```
{
"Mountpoint": "/path/to/directory/on/host",
"Err": null
}
```
Respond with the path on the host filesystem where the volume has been made
available, and/or a string error if an error occurred.
### /VolumeDriver.Path
**Request**:
```
{
"Name": "volume_name"
}
```
Docker needs reminding of the path to the volume on the host.
**Response**:
```
{
"Mountpoint": "/path/to/directory/on/host",
"Err": null
}
```
Respond with the path on the host filesystem where the volume has been made
available, and/or a string error if an error occurred.
### /VolumeDriver.Unmount
**Request**:
```
{
"Name": "volume_name"
}
```
Indication that Docker no longer is using the named volume. This is called once
per container stop. Plugin may deduce that it is safe to deprovision it at
this point.
**Response**:
```
{
"Err": null
}
```
Respond with a string error if an error occurred.
## Plugin retries
Attempts to call a method on a plugin are retried with an exponential backoff
for up to 30 seconds. This may help when packaging plugins as containers, since
it gives plugin containers a chance to start up before failing any user
containers which depend on them.

View file

@ -0,0 +1,46 @@
page_title: Experimental feature - Plugins
page_keywords: experimental, Docker, plugins
# Overview
You can extend the capabilities of the Docker Engine by loading third-party
plugins.
## Types of plugins
Plugins extend Docker's functionality. They come in specific types. For
example, a [volume plugin](/experimental/plugins_volume) might enable Docker
volumes to persist across multiple Docker hosts.
Currently Docker supports volume plugins. In the future it will support
additional plugin types.
## Installing a plugin
Follow the instructions in the plugin's documentation.
## Finding a plugin
The following plugins exist:
* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
which provides multi-host portable volumes for Docker, enabling you to run
databases and other stateful containers and move them around across a cluster
of machines.
## Troubleshooting a plugin
If you are having problems with Docker after loading a plugin, ask the authors
of the plugin for help. The Docker team may not be able to assist you.
## Writing a plugin
If you are interested in writing a plugin for Docker, or seeing how they work
under the hood, see the [docker plugins reference](/experimental/plugin_api).
# Related GitHub PRs and issues
- [#13222](https://github.com/docker/docker/pull/13222) Plugins plumbing
Send us feedback and comments on [#13419](https://github.com/docker/docker/issues/13419),
or on the usual Google Groups (docker-user, docker-dev) and IRC channels.

View file

@ -0,0 +1,43 @@
page_title: Experimental feature - Volume plugins
page_keywords: experimental, Docker, plugins, volume
# Overview
Docker volume plugins enable Docker deployments to be integrated with external
storage systems, such as Amazon EBS, and enable data volumes to persist beyond
the lifetime of a single Docker host. See the [plugin documentation](/experimental/plugins)
for more information.
# Command-line changes
This experimental features introduces two changes to the `docker run` command:
- The `--volume-driver` flag is introduced.
- The `-v` syntax is changed to accept a volume name as the first component.
Example:
$ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
By specifying a volume name in conjunction with a volume driver, volume plugins
such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be
used to manage volumes external to a single host, such as those on EBS. In this
example, "volumename" is passed through to the volume plugin as a user-given
name for the volume which allows the plugin to associate it with an external
volume beyond the lifetime of a single container or container host. This can be
used, for example, to move a stateful container from one server to another.
The `volumename` must not begin with a `/`.
# API changes
The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
field of type `string` allowing you to specify the name of the driver. Its
default value is `"local"` (the default driver for local volumes).
# Related GitHub PRs and issues
- [#13161](https://github.com/docker/docker/pull/13161) Volume refactor and external volume plugins
Send us feedback and comments on [#13420](https://github.com/docker/docker/issues/13420),
or on the usual Google Groups (docker-user, docker-dev) and IRC channels.

View file

@ -166,7 +166,7 @@ func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) {
c.Assert(status, check.Equals, http.StatusInternalServerError)
c.Assert(err, check.IsNil)
if !strings.Contains(string(body), "Duplicate volume") {
if !strings.Contains(string(body), "Duplicate bind") {
c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
}
}
@ -210,49 +210,6 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
}
}
// Ensure that volumes-from has priority over binds/anything else
// This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start
func (s *DockerSuite) TestVolumesFromHasPriority(c *check.C) {
volName := "voltst2"
volPath := "/tmp"
if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
c.Fatal(out, err)
}
name := "testing"
config := map[string]interface{}{
"Image": "busybox",
"Volumes": map[string]struct{}{volPath: {}},
}
status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
c.Assert(status, check.Equals, http.StatusCreated)
c.Assert(err, check.IsNil)
bindPath := randomUnixTmpDirPath("test")
config = map[string]interface{}{
"VolumesFrom": []string{volName},
"Binds": []string{bindPath + ":/tmp"},
}
status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
c.Assert(status, check.Equals, http.StatusNoContent)
c.Assert(err, check.IsNil)
pth, err := inspectFieldMap(name, "Volumes", volPath)
if err != nil {
c.Fatal(err)
}
pth2, err := inspectFieldMap(volName, "Volumes", volPath)
if err != nil {
c.Fatal(err)
}
if pth != pth2 {
c.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
}
}
func (s *DockerSuite) TestGetContainerStats(c *check.C) {
var (
name = "statscontainer"

View file

@ -284,35 +284,6 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
}
}
// #9629
func (s *DockerDaemonSuite) TestDaemonVolumesBindsRefs(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
tmp, err := ioutil.TempDir(os.TempDir(), "")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmp)
if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil {
c.Fatal(err, out)
}
}
func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
// TODO: skip or update for Windows daemon
os.Remove("/etc/docker/key.json")
@ -360,76 +331,6 @@ func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
}
}
// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers
// without corresponding volume json
func (s *DockerDaemonSuite) TestDaemonUpgradeWithVolumes(c *check.C) {
graphDir := filepath.Join(os.TempDir(), "docker-test")
defer os.RemoveAll(graphDir)
if err := s.d.StartWithBusybox("-g", graphDir); err != nil {
c.Fatal(err)
}
tmpDir := filepath.Join(os.TempDir(), "test")
defer os.RemoveAll(tmpDir)
if out, err := s.d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Stop(); err != nil {
c.Fatal(err)
}
// Remove this since we're expecting the daemon to re-create it too
if err := os.RemoveAll(tmpDir); err != nil {
c.Fatal(err)
}
configDir := filepath.Join(graphDir, "volumes")
if err := os.RemoveAll(configDir); err != nil {
c.Fatal(err)
}
if err := s.d.Start("-g", graphDir); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
c.Fatalf("expected volume path %s to exist but it does not", tmpDir)
}
dir, err := ioutil.ReadDir(configDir)
if err != nil {
c.Fatal(err)
}
if len(dir) == 0 {
c.Fatalf("expected volumes config dir to contain data for new volume")
}
// Now with just removing the volume config and not the volume data
if err := s.d.Stop(); err != nil {
c.Fatal(err)
}
if err := os.RemoveAll(configDir); err != nil {
c.Fatal(err)
}
if err := s.d.Start("-g", graphDir); err != nil {
c.Fatal(err)
}
dir, err = ioutil.ReadDir(configDir)
if err != nil {
c.Fatal(err)
}
if len(dir) == 0 {
c.Fatalf("expected volumes config dir to contain data for new volume")
}
}
// GH#11320 - verify that the daemon exits on failure properly
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required

View file

@ -395,21 +395,6 @@ func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
}
}
// Regression test for #4741
func (s *DockerSuite) TestRunWithVolumesAsFiles(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true")
out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
if err != nil && exitCode != 0 {
c.Fatal("1", out, stderr, err)
}
runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file")
out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
if err != nil && exitCode != 0 {
c.Fatal("2", out, stderr, err)
}
}
// Regression test for #4979
func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
@ -455,14 +440,6 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) {
}
}
// Regression test for #4830
func (s *DockerSuite) TestRunWithRelativePath(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true")
if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil {
c.Fatalf("relative path should result in an error")
}
}
func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile")
if code, err := runCommand(cmd); err == nil || code == 0 {
@ -536,7 +513,7 @@ func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
if out, _, err := runCommandWithOutput(cmd); err == nil {
c.Fatal("Expected error about duplicate volume definitions")
} else {
if !strings.Contains(out, "Duplicate volume") {
if !strings.Contains(out, "Duplicate bind mount") {
c.Fatalf("Expected 'duplicate volume' error, got %v", err)
}
}
@ -2333,7 +2310,13 @@ func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
c.Fatal(err)
}
cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
cmd := exec.Command(dockerBinary, "run",
"-v", fmt.Sprintf("%s:/tmp", tmpDir),
"-v", fmt.Sprintf("%s:/tmp/foo", fooDir),
"-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2),
"-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir),
"busybox:latest", "sh", "-c",
"ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(out, err)
@ -2427,41 +2410,6 @@ func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
}
}
func (s *DockerSuite) TestRunVolumesNotRecreatedOnStart(c *check.C) {
testRequires(c, SameHostDaemon)
// Clear out any remnants from other tests
info, err := ioutil.ReadDir(volumesConfigPath)
if err != nil {
c.Fatal(err)
}
if len(info) > 0 {
for _, f := range info {
if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil {
c.Fatal(err)
}
}
}
cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox")
if _, err := runCommand(cmd); err != nil {
c.Fatal(err)
}
cmd = exec.Command(dockerBinary, "start", "lone_starr")
if _, err := runCommand(cmd); err != nil {
c.Fatal(err)
}
info, err = ioutil.ReadDir(volumesConfigPath)
if err != nil {
c.Fatal(err)
}
if len(info) != 1 {
c.Fatalf("Expected only 1 volume have %v", len(info))
}
}
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
// just run with unknown image
cmd := exec.Command(dockerBinary, "run", "asdfsg")
@ -2496,7 +2444,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
c.Assert(err, check.IsNil)
if !strings.Contains(out, volumesStoragePath) {
if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /foo\n%q", out)
}
@ -2507,7 +2455,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
c.Assert(err, check.IsNil)
if !strings.Contains(out, volumesStoragePath) {
if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /bar\n%q", out)
}
}

View file

@ -126,32 +126,6 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
}
// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s
func (s *DockerSuite) TestStartVolumesFromFailsCleanly(c *check.C) {
// Create the first data volume
dockerCmd(c, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
// Expect this to fail because the data test after contaienr doesn't exist yet
if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
c.Fatal("Expected error but got none")
}
// Create the second data volume
dockerCmd(c, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
// Now, all the volumes should be there
dockerCmd(c, "start", "consumer")
// Check that we have the volumes we want
out, _ := dockerCmd(c, "inspect", "--format='{{ len .Volumes }}'", "consumer")
nVolumes := strings.Trim(out, " \r\n'")
if nVolumes != "2" {
c.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
}
}
func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
defer unpauseAllContainers()

View file

@ -0,0 +1,249 @@
// +build experimental
// +build !windows
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"github.com/go-check/check"
)
// init registers the external-volume suite with gocheck so its tests
// run alongside the standard DockerSuite.
func init() {
	check.Suite(&DockerExternalVolumeSuite{
		ds: &DockerSuite{},
	})
}
// eventCounter tallies how many times the fake plugin server received
// each plugin API call, so tests can assert on the exact traffic.
type eventCounter struct {
	activations int // /Plugin.Activate calls
	creations   int // /VolumeDriver.Create calls
	removals    int // /VolumeDriver.Remove calls
	mounts      int // /VolumeDriver.Mount calls
	unmounts    int // /VolumeDriver.Unmount calls
	paths       int // /VolumeDriver.Path calls
}
// DockerExternalVolumeSuite runs the external volume-driver tests
// against a private daemon backed by a fake HTTP plugin server.
type DockerExternalVolumeSuite struct {
	server *httptest.Server // fake volume plugin endpoint, started in SetUpSuite
	ds     *DockerSuite     // base suite whose setup/teardown hooks are delegated to
	d      *Daemon          // per-test docker daemon under test
	ec     *eventCounter    // plugin call counters, replaced in SetUpTest
}
// SetUpTest creates a fresh daemon handle, delegates to the base suite
// setup, and resets the plugin call counters for the coming test.
func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {
	s.d = NewDaemon(c)
	s.ds.SetUpTest(c)
	s.ec = &eventCounter{}
}
// TearDownTest stops the per-test daemon before running the base
// suite teardown.
func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) {
	s.d.Stop()
	s.ds.TearDownTest(c)
}
// SetUpSuite starts a fake volume plugin — an HTTP server implementing
// the VolumeDriver API — and advertises it to the daemon through a
// .spec file in the plugin discovery directory.
func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
	mux := http.NewServeMux()
	s.server = httptest.NewServer(mux)

	// Fixed: the field must be exported (matching the "Name" key the
	// daemon sends) or encoding/json silently leaves it empty and every
	// handler would operate on hostVolumePath("").
	type pluginRequest struct {
		Name string
	}

	// Fixed below: Content-Type had a typo ("appplication/...").
	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		s.ec.activations++
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
	})

	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
		s.ec.creations++
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
		s.ec.removals++
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
		s.ec.paths++
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, err.Error(), 500)
		}
		p := hostVolumePath(pr.Name)
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
	})

	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
		s.ec.mounts++
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, err.Error(), 500)
		}
		p := hostVolumePath(pr.Name)
		if err := os.MkdirAll(p, 0755); err != nil {
			http.Error(w, err.Error(), 500)
		}
		// Leave a marker file so containers can prove the mount worked.
		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil {
			http.Error(w, err.Error(), 500)
		}
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
	})

	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
		s.ec.unmounts++
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, err.Error(), 500)
		}
		p := hostVolumePath(pr.Name)
		if err := os.RemoveAll(p); err != nil {
			http.Error(w, err.Error(), 500)
		}
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	// Advertise the plugin: a .spec file whose contents are the URL of
	// the plugin server (plugin discovery reads this directory).
	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
		c.Fatal(err)
	}
	if err := ioutil.WriteFile("/usr/share/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil {
		c.Fatal(err)
	}
}
// TearDownSuite shuts down the fake plugin server and removes the
// plugin discovery directory created by SetUpSuite.
func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) {
	s.server.Close()

	err := os.RemoveAll("/usr/share/docker/plugins")
	if err != nil {
		c.Fatal(err)
	}
}
// TestStartExternalNamedVolumeDriver runs a container with a named
// volume served by the fake external driver, checks the data written
// by the plugin is visible in the container, verifies the host-side
// path is gone afterwards, and asserts each plugin endpoint was hit
// exactly once.
func (s *DockerExternalVolumeSuite) TestStartExternalNamedVolumeDriver(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
	if err != nil {
		c.Fatal(err)
	}
	if !strings.Contains(out, s.server.URL) {
		c.Fatalf("External volume mount failed. Output: %s\n", out)
	}

	// The unmount handler removes the host directory, so stat must fail
	// with "not exist".
	hostPath := hostVolumePath("external-volume-test")
	_, err = os.Lstat(hostPath)
	if err == nil {
		c.Fatalf("Expected error checking volume path in host: %s\n", hostPath)
	}
	if !os.IsNotExist(err) {
		c.Fatalf("Expected volume path in host to not exist: %s, %v\n", hostPath, err)
	}

	c.Assert(s.ec.activations, check.Equals, 1)
	c.Assert(s.ec.creations, check.Equals, 1)
	c.Assert(s.ec.removals, check.Equals, 1)
	c.Assert(s.ec.mounts, check.Equals, 1)
	c.Assert(s.ec.unmounts, check.Equals, 1)
}
// TestStartExternalVolumeUnnamedDriver is the anonymous-volume variant of
// the named-volume test above: `-v /path` (no name) with --volume-driver
// must still route the volume through the external plugin, and the plugin
// must see exactly one call of each lifecycle operation.
func (s *DockerExternalVolumeSuite) TestStartExternalVolumeUnnamedDriver(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	// The fake driver writes s.server.URL into <mountpoint>/test on Mount.
	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
	if err != nil {
		c.Fatal(err)
	}

	if !strings.Contains(out, s.server.URL) {
		c.Fatalf("External volume mount failed. Output: %s\n", out)
	}

	c.Assert(s.ec.activations, check.Equals, 1)
	c.Assert(s.ec.creations, check.Equals, 1)
	c.Assert(s.ec.removals, check.Equals, 1)
	c.Assert(s.ec.mounts, check.Equals, 1)
	c.Assert(s.ec.unmounts, check.Equals, 1)
}
// TestStartExternalVolumeDriverVolumesFrom checks that a volume provided
// by the external driver is shared via --volumes-from: the plugin must
// see two creates (one per container resolving the volume), a
// mount/unmount pair per consumer, and a single removal when the owning
// container is deleted.
//
// Fix: the receiver is now a pointer, consistent with every other method
// of DockerExternalVolumeSuite (the value receiver was an oversight).
func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverVolumesFrom(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
		c.Fatal(err)
	}

	if _, err := s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp"); err != nil {
		c.Fatal(err)
	}

	if _, err := s.d.Cmd("rm", "-f", "vol-test1"); err != nil {
		c.Fatal(err)
	}

	c.Assert(s.ec.activations, check.Equals, 1)
	c.Assert(s.ec.creations, check.Equals, 2)
	c.Assert(s.ec.removals, check.Equals, 1)
	c.Assert(s.ec.mounts, check.Equals, 2)
	c.Assert(s.ec.unmounts, check.Equals, 2)
}
// TestStartExternalVolumeDriverDeleteContainer checks that `docker rm -fv`
// on a container using an external-driver volume propagates the removal
// to the plugin (exactly one create/mount/unmount/remove observed).
//
// Fix: the receiver is now a pointer, consistent with every other method
// of DockerExternalVolumeSuite (the value receiver was an oversight).
func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverDeleteContainer(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
		c.Fatal(err)
	}

	if _, err := s.d.Cmd("rm", "-fv", "vol-test1"); err != nil {
		c.Fatal(err)
	}

	c.Assert(s.ec.activations, check.Equals, 1)
	c.Assert(s.ec.creations, check.Equals, 1)
	c.Assert(s.ec.removals, check.Equals, 1)
	c.Assert(s.ec.mounts, check.Equals, 1)
	c.Assert(s.ec.unmounts, check.Equals, 1)
}
// hostVolumePath returns the host-side directory the fake external
// driver uses as the mountpoint for the named volume.
func hostVolumePath(name string) string {
	const volumesRoot = "/var/lib/docker/volumes"
	return fmt.Sprintf("%s/%s", volumesRoot, name)
}

View file

@ -18,7 +18,6 @@ var (
dockerBasePath = "/var/lib/docker"
volumesConfigPath = dockerBasePath + "/volumes"
volumesStoragePath = dockerBasePath + "/vfs/dir"
containerStoragePath = dockerBasePath + "/containers"
runtimePath = "/var/run/docker"

View file

@ -31,6 +31,10 @@ type Client struct {
}
func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
return c.callWithRetry(serviceMethod, args, ret, true)
}
func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(args); err != nil {
return err
@ -50,12 +54,16 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
for {
resp, err := c.http.Do(req)
if err != nil {
if !retry {
return err
}
timeOff := backoff(retries)
if timeOff+time.Since(start) > defaultTimeOut {
if abort(start, timeOff) {
return err
}
retries++
logrus.Warn("Unable to connect to plugin: %s, retrying in %ds\n", c.addr, timeOff)
logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
time.Sleep(timeOff)
continue
}
@ -73,7 +81,7 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
}
func backoff(retries int) time.Duration {
b, max := float64(1), float64(defaultTimeOut)
b, max := 1, defaultTimeOut
for b < max && retries > 0 {
b *= 2
retries--
@ -81,7 +89,11 @@ func backoff(retries int) time.Duration {
if b > max {
b = max
}
return time.Duration(b)
return time.Duration(b) * time.Second
}
// abort reports whether the retry loop should give up: true once the
// time already elapsed since start, plus the pending backoff delay,
// would exceed the default timeout.
func abort(start time.Time, timeOff time.Duration) bool {
	deadline := time.Duration(defaultTimeOut) * time.Second
	return time.Since(start)+timeOff > deadline
}
func configureTCPTransport(tr *http.Transport, proto, addr string) {

View file

@ -6,6 +6,7 @@ import (
"net/http/httptest"
"reflect"
"testing"
"time"
)
var (
@ -27,7 +28,7 @@ func teardownRemotePluginServer() {
func TestFailedConnection(t *testing.T) {
c := NewClient("tcp://127.0.0.1:1")
err := c.Call("Service.Method", nil, nil)
err := c.callWithRetry("Service.Method", nil, nil, false)
if err == nil {
t.Fatal("Unexpected successful connection")
}
@ -61,3 +62,44 @@ func TestEchoInputOutput(t *testing.T) {
t.Fatalf("Expected %v, was %v\n", m, output)
}
}
// TestBackoff verifies the retry schedule: the delay doubles per retry
// starting at 1s and is capped at 30s.
func TestBackoff(t *testing.T) {
	expected := map[int]time.Duration{
		0:  time.Duration(1),
		1:  time.Duration(2),
		2:  time.Duration(4),
		4:  time.Duration(16),
		6:  time.Duration(30),
		10: time.Duration(30),
	}

	for retries, exp := range expected {
		want := exp * time.Second
		if got := backoff(retries); got != want {
			t.Fatalf("Retry %v, expected %v, was %v\n", retries, want, got)
		}
	}
}
// TestAbortRetry verifies that retrying is abandoned exactly when the
// pending backoff would push total elapsed time past the 30s default
// timeout (start is "now", so timeOff alone decides).
func TestAbortRetry(t *testing.T) {
	expected := map[time.Duration]bool{
		time.Duration(1):  false,
		time.Duration(2):  false,
		time.Duration(10): false,
		time.Duration(30): true,
		time.Duration(40): true,
	}

	for timeOff, expAbort := range expected {
		s := timeOff * time.Second
		if a := abort(time.Now(), s); a != expAbort {
			t.Fatalf("Duration %v, expected %v, was %v\n", timeOff, s, a)
		}
	}
}

View file

@ -122,6 +122,7 @@ type Config struct {
Cmd *Command
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumeDriver string
WorkingDir string
Entrypoint *Entrypoint
NetworkDisabled bool

View file

@ -100,6 +100,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options")
expFlags := attachExperimentalFlags(cmd)
cmd.Require(flag.Min, 1)
if err := cmd.ParseFlags(args, true); err != nil {
@ -355,6 +357,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
CgroupParent: *flCgroupParent,
}
applyExperimentalFlags(expFlags, config, hostConfig)
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true

View file

@ -0,0 +1,19 @@
// +build experimental
package runconfig
import flag "github.com/docker/docker/pkg/mflag"
// experimentalFlags holds pointers to the experimental-build-only command
// line flag values, keyed by flag name, so they can be applied to the
// container config after parsing.
type experimentalFlags struct {
	flags map[string]interface{}
}
// attachExperimentalFlags registers the experimental-only flags on cmd
// and returns a handle used later by applyExperimentalFlags.
func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
	exp := &experimentalFlags{flags: map[string]interface{}{}}
	exp.flags["volume-driver"] = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
	return exp
}
// applyExperimentalFlags copies the parsed experimental flag values into
// the container configuration. hostConfig is accepted for signature
// parity with the non-experimental stub.
func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) {
	vd := exp.flags["volume-driver"].(*string)
	config.VolumeDriver = *vd
}

14
runconfig/parse_stub.go Normal file
View file

@ -0,0 +1,14 @@
// +build !experimental
package runconfig
import flag "github.com/docker/docker/pkg/mflag"
// experimentalFlags is an empty placeholder in non-experimental builds;
// see the `experimental` build-tagged file for the real definition.
type experimentalFlags struct{}

// attachExperimentalFlags is a no-op stub for non-experimental builds.
func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
	return nil
}

// applyExperimentalFlags is a no-op stub for non-experimental builds.
func applyExperimentalFlags(flags *experimentalFlags, config *Config, hostConfig *HostConfig) {
}

22
utils/tcp.go Normal file
View file

@ -0,0 +1,22 @@
package utils
import (
"net"
"net/http"
"time"
)
func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
// Why 32? See https://github.com/docker/docker/pull/8035.
timeout := 32 * time.Second
if proto == "unix" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, timeout)
}
} else {
tr.Proxy = http.ProxyFromEnvironment
tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
}
}

60
volume/drivers/adapter.go Normal file
View file

@ -0,0 +1,60 @@
package volumedrivers
import "github.com/docker/docker/volume"
// volumeDriverAdapter adapts a remote plugin (via volumeDriverProxy) to
// the in-process volume.Driver interface.
type volumeDriverAdapter struct {
	name  string
	proxy *volumeDriverProxy
}

// Name returns the driver name this adapter was registered under.
func (a *volumeDriverAdapter) Name() string {
	return a.name
}
// Create provisions a volume through the remote driver and wraps it in
// a volumeAdapter implementing volume.Volume.
func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) {
	if err := a.proxy.Create(name); err != nil {
		return nil, err
	}
	v := &volumeAdapter{
		proxy:      a.proxy,
		name:       name,
		driverName: a.name,
	}
	return v, nil
}
// Remove deletes the volume on the remote driver.
func (a *volumeDriverAdapter) Remove(v volume.Volume) error {
	return a.proxy.Remove(v.Name())
}

// volumeAdapter is the volume.Volume view of a plugin-backed volume; all
// operations are forwarded to the remote driver through proxy.
type volumeAdapter struct {
	proxy      *volumeDriverProxy
	name       string
	driverName string
	eMount     string // ephemeral host volume path
}

// Name returns the volume's name.
func (a *volumeAdapter) Name() string {
	return a.name
}

// DriverName returns the name of the driver that owns this volume.
func (a *volumeAdapter) DriverName() string {
	return a.driverName
}
// Path returns the host path of the volume, preferring the mountpoint
// cached by the most recent Mount call over a remote Path lookup.
func (a *volumeAdapter) Path() string {
	if a.eMount != "" {
		return a.eMount
	}
	// NOTE(review): a remote Path error is silently dropped here and an
	// empty string is returned — the Volume interface has no error slot.
	p, _ := a.proxy.Path(a.name)
	return p
}
// Mount asks the remote driver to mount the volume and caches the
// returned host mountpoint in eMount for later Path calls.
func (a *volumeAdapter) Mount() (string, error) {
	m, err := a.proxy.Mount(a.name)
	a.eMount = m
	return m, err
}
// Unmount tells the remote driver the volume is no longer in use.
func (a *volumeAdapter) Unmount() error {
	return a.proxy.Unmount(a.name)
}

20
volume/drivers/api.go Normal file
View file

@ -0,0 +1,20 @@
package volumedrivers
import "github.com/docker/docker/volume"
// client is the minimal RPC surface needed to talk to a plugin; it is
// satisfied by the plugins package client.
type client interface {
	Call(string, interface{}, interface{}) error
}

// NewVolumeDriver returns a volume.Driver that forwards every operation
// to the named external plugin over c.
func NewVolumeDriver(name string, c client) volume.Driver {
	proxy := &volumeDriverProxy{c}
	return &volumeDriverAdapter{name, proxy}
}

// VolumeDriver is the wire-level contract an external volume plugin must
// implement (one RPC per method).
type VolumeDriver interface {
	// Create makes a volume with the given name available on the driver.
	Create(name string) (err error)
	// Remove deletes the named volume from the driver.
	Remove(name string) (err error)
	// Path returns the host mountpoint of the named volume.
	Path(name string) (mountpoint string, err error)
	// Mount makes the named volume available and returns its mountpoint.
	Mount(name string) (mountpoint string, err error)
	// Unmount signals that the named volume is no longer in use.
	Unmount(name string) (err error)
}

View file

@ -0,0 +1,61 @@
package volumedrivers
import (
"fmt"
"sync"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/volume"
)
// currently created by hand. generation tool would generate this like:
// $ extpoint-gen Driver > volume/extpoint.go

// drivers is the process-wide registry of volume drivers, keyed by name.
var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}

// driverExtpoint is a mutex-guarded name → driver map.
type driverExtpoint struct {
	extensions map[string]volume.Driver
	sync.Mutex
}
// Register adds the driver to the extension point under name. It returns
// false when the name is empty or already taken.
func Register(extension volume.Driver, name string) bool {
	if name == "" {
		return false
	}

	drivers.Lock()
	defer drivers.Unlock()

	if _, exists := drivers.extensions[name]; exists {
		return false
	}
	drivers.extensions[name] = extension
	return true
}
// Unregister removes the named driver from the extension point and
// reports whether it was present.
func Unregister(name string) bool {
	drivers.Lock()
	defer drivers.Unlock()

	if _, exists := drivers.extensions[name]; !exists {
		return false
	}
	delete(drivers.extensions, name)
	return true
}
// Lookup returns the driver registered under name. When no in-process
// driver matches, it falls back to plugin discovery and caches the
// resulting remote driver in the registry.
func Lookup(name string) (volume.Driver, error) {
	drivers.Lock()
	defer drivers.Unlock()

	if d, ok := drivers.extensions[name]; ok {
		return d, nil
	}

	pl, err := plugins.Get(name, "VolumeDriver")
	if err != nil {
		return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
	}

	d := NewVolumeDriver(name, pl.Client)
	drivers.extensions[name] = d
	return d, nil
}

74
volume/drivers/proxy.go Normal file
View file

@ -0,0 +1,74 @@
package volumedrivers
import "fmt"
// currently created by hand. generation tool would generate this like:
// $ rpc-gen volume/drivers/api.go VolumeDriver > volume/drivers/proxy.go
type volumeDriverRequest struct {
Name string
}
type volumeDriverResponse struct {
Mountpoint string `json:",ommitempty"`
Err error `json:",ommitempty"`
}
// volumeDriverProxy implements the VolumeDriver wire contract by issuing
// one RPC per method over the plugin client c.
type volumeDriverProxy struct {
	c client
}
// Create asks the external driver to create the named volume.
func (pp *volumeDriverProxy) Create(name string) error {
	var resp volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Create", volumeDriverRequest{name}, &resp); err != nil {
		return pp.fmtError(name, err)
	}
	return pp.fmtError(name, resp.Err)
}
// Remove asks the external driver to delete the named volume.
func (pp *volumeDriverProxy) Remove(name string) error {
	var resp volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Remove", volumeDriverRequest{name}, &resp); err != nil {
		return pp.fmtError(name, err)
	}
	return pp.fmtError(name, resp.Err)
}
// Path asks the external driver for the host mountpoint of the named
// volume.
func (pp *volumeDriverProxy) Path(name string) (string, error) {
	var resp volumeDriverResponse
	err := pp.c.Call("VolumeDriver.Path", volumeDriverRequest{name}, &resp)
	if err != nil {
		return "", pp.fmtError(name, err)
	}
	return resp.Mountpoint, pp.fmtError(name, resp.Err)
}
// Mount asks the external driver to mount the named volume and returns
// the host mountpoint it reports.
func (pp *volumeDriverProxy) Mount(name string) (string, error) {
	var resp volumeDriverResponse
	err := pp.c.Call("VolumeDriver.Mount", volumeDriverRequest{name}, &resp)
	if err != nil {
		return "", pp.fmtError(name, err)
	}
	return resp.Mountpoint, pp.fmtError(name, resp.Err)
}
// Unmount tells the external driver the named volume is no longer in use.
func (pp *volumeDriverProxy) Unmount(name string) error {
	var resp volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Unmount", volumeDriverRequest{name}, &resp); err != nil {
		return pp.fmtError(name, err)
	}
	return pp.fmtError(name, resp.Err)
}
// fmtError decorates driver/transport errors with the volume name; a nil
// error passes through unchanged.
func (pp *volumeDriverProxy) fmtError(name string, err error) error {
	if err != nil {
		return fmt.Errorf("External volume driver request failed for %s: %v", name, err)
	}
	return nil
}

126
volume/local/local.go Normal file
View file

@ -0,0 +1,126 @@
package local
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/docker/docker/volume"
)
// New instantiates the "local" volume driver rooted at rootDirectory,
// creating the directory if needed and re-registering any volumes left
// on disk by a previous daemon run.
//
// Fix: only directories are restored as volumes; previously a stray
// regular file inside the root would have been registered as a volume
// whose path cannot be mounted.
func New(rootDirectory string) (*Root, error) {
	if err := os.MkdirAll(rootDirectory, 0700); err != nil {
		return nil, err
	}

	r := &Root{
		path:    rootDirectory,
		volumes: make(map[string]*Volume),
	}

	dirs, err := ioutil.ReadDir(rootDirectory)
	if err != nil {
		return nil, err
	}

	for _, d := range dirs {
		// Only a directory can back a volume mountpoint.
		if !d.IsDir() {
			continue
		}
		name := filepath.Base(d.Name())
		r.volumes[name] = &Volume{
			driverName: r.Name(),
			name:       name,
			path:       filepath.Join(rootDirectory, name),
		}
	}
	return r, nil
}
// Root is the "local" volume driver: it manages volumes as plain
// directories under path, tracked in the volumes map keyed by name.
type Root struct {
	m       sync.Mutex
	path    string
	volumes map[string]*Volume
}

// Name returns the driver name, "local".
func (r *Root) Name() string {
	return "local"
}
// Create returns the named volume, creating its backing directory under
// the driver root on first use, and increments its reference count.
func (r *Root) Create(name string) (volume.Volume, error) {
	r.m.Lock()
	defer r.m.Unlock()

	v, ok := r.volumes[name]
	if !ok {
		path := filepath.Join(r.path, name)
		err := os.Mkdir(path, 0755)
		if err != nil {
			// A pre-existing directory means the volume was created
			// outside this registry.
			if os.IsExist(err) {
				return nil, fmt.Errorf("volume already exists under %s", path)
			}
			return nil, err
		}
		v = &Volume{
			driverName: r.Name(),
			name:       name,
			path:       path,
		}
		r.volumes[name] = v
	}
	v.use()
	return v, nil
}
// Remove drops one reference to v and, when no references remain,
// unregisters the volume and deletes its data directory from disk.
func (r *Root) Remove(v volume.Volume) error {
	r.m.Lock()
	defer r.m.Unlock()
	lv, ok := v.(*Volume)
	if !ok {
		// Only volumes created by this driver can be removed here.
		return errors.New("unknown volume type")
	}
	// NOTE(review): the count is decremented before any existence check,
	// and there is no guard against it going negative if Remove is called
	// more times than Create — confirm callers balance use/release.
	lv.release()
	if lv.usedCount == 0 {
		delete(r.volumes, lv.name)
		return os.RemoveAll(lv.path)
	}
	return nil
}
// Volume is a local-driver volume: a host directory with a reference
// count guarded by m.
type Volume struct {
	m sync.Mutex
	// usedCount tracks how many containers currently reference the volume.
	usedCount int
	// unique name of the volume
	name string
	// path is the path on the host where the data lives
	path string
	// driverName is the name of the driver that created the volume.
	driverName string
}
// Name returns the volume's unique name.
func (v *Volume) Name() string {
	return v.name
}

// DriverName returns the name of the driver that created the volume.
func (v *Volume) DriverName() string {
	return v.driverName
}

// Path returns the host directory backing the volume.
func (v *Volume) Path() string {
	return v.path
}

// Mount is a no-op for local volumes: the data directory is already on
// the host, so its path is returned directly.
func (v *Volume) Mount() (string, error) {
	return v.path, nil
}

// Unmount is a no-op for local volumes.
func (v *Volume) Unmount() error {
	return nil
}
// use increments the volume's reference count.
func (v *Volume) use() {
	v.m.Lock()
	v.usedCount++
	v.m.Unlock()
}

// release decrements the volume's reference count.
// NOTE(review): no underflow guard — an unbalanced release drives the
// count negative; confirm callers pair every release with a use.
func (v *Volume) release() {
	v.m.Lock()
	v.usedCount--
	v.m.Unlock()
}

26
volume/volume.go Normal file
View file

@ -0,0 +1,26 @@
package volume
// DefaultDriverName is the driver used when none is specified: the
// built-in "local" directory-backed driver.
const DefaultDriverName = "local"

// Driver is the extension point a volume driver implements to create
// and remove volumes.
type Driver interface {
	// Name returns the name of the volume driver.
	Name() string
	// Create makes a new volume with the given id.
	Create(string) (Volume, error)
	// Remove deletes the volume.
	Remove(Volume) error
}

// Volume is a named unit of storage with a host-visible path and a
// mount/unmount lifecycle.
type Volume interface {
	// Name returns the name of the volume
	Name() string
	// DriverName returns the name of the driver which owns this volume.
	DriverName() string
	// Path returns the absolute path to the volume.
	Path() string
	// Mount mounts the volume and returns the absolute path to
	// where it can be consumed.
	Mount() (string, error)
	// Unmount unmounts the volume when it is no longer in use.
	Unmount() error
}

View file

@ -1,193 +0,0 @@
package volumes
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/stringid"
)
type Repository struct {
configPath string
driver graphdriver.Driver
volumes map[string]*Volume
lock sync.Mutex
}
func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
abspath, err := filepath.Abs(configPath)
if err != nil {
return nil, err
}
// Create the config path
if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
repo := &Repository{
driver: driver,
configPath: abspath,
volumes: make(map[string]*Volume),
}
return repo, repo.restore()
}
func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
var (
isBindMount bool
err error
id = stringid.GenerateRandomID()
)
if path != "" {
isBindMount = true
}
if path == "" {
path, err = r.createNewVolumePath(id)
if err != nil {
return nil, err
}
}
path = filepath.Clean(path)
// Ignore the error here since the path may not exist
// Really just want to make sure the path we are using is real(or nonexistent)
if cleanPath, err := filepath.EvalSymlinks(path); err == nil {
path = cleanPath
}
v := &Volume{
ID: id,
Path: path,
repository: r,
Writable: writable,
containers: make(map[string]struct{}),
configPath: r.configPath + "/" + id,
IsBindMount: isBindMount,
}
if err := v.initialize(); err != nil {
return nil, err
}
r.add(v)
return v, nil
}
func (r *Repository) restore() error {
dir, err := ioutil.ReadDir(r.configPath)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
vol := &Volume{
ID: id,
configPath: r.configPath + "/" + id,
containers: make(map[string]struct{}),
}
if err := vol.FromDisk(); err != nil {
if !os.IsNotExist(err) {
logrus.Debugf("Error restoring volume: %v", err)
continue
}
if err := vol.initialize(); err != nil {
logrus.Debugf("%s", err)
continue
}
}
r.add(vol)
}
return nil
}
func (r *Repository) Get(path string) *Volume {
r.lock.Lock()
vol := r.get(path)
r.lock.Unlock()
return vol
}
func (r *Repository) get(path string) *Volume {
path, err := filepath.EvalSymlinks(path)
if err != nil {
return nil
}
return r.volumes[filepath.Clean(path)]
}
func (r *Repository) add(volume *Volume) {
if vol := r.get(volume.Path); vol != nil {
return
}
r.volumes[volume.Path] = volume
}
func (r *Repository) Delete(path string) error {
r.lock.Lock()
defer r.lock.Unlock()
path, err := filepath.EvalSymlinks(path)
if err != nil {
return err
}
volume := r.get(filepath.Clean(path))
if volume == nil {
return fmt.Errorf("Volume %s does not exist", path)
}
containers := volume.Containers()
if len(containers) > 0 {
return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
}
if err := os.RemoveAll(volume.configPath); err != nil {
return err
}
if !volume.IsBindMount {
if err := r.driver.Remove(volume.ID); err != nil {
if !os.IsNotExist(err) {
return err
}
}
}
delete(r.volumes, volume.Path)
return nil
}
func (r *Repository) createNewVolumePath(id string) (string, error) {
if err := r.driver.Create(id, ""); err != nil {
return "", err
}
path, err := r.driver.Get(id, "")
if err != nil {
return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
}
return path, nil
}
func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
r.lock.Lock()
defer r.lock.Unlock()
if path == "" {
return r.newVolume(path, writable)
}
if v := r.get(path); v != nil {
return v, nil
}
return r.newVolume(path, writable)
}

View file

@ -1,164 +0,0 @@
package volumes
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/daemon/graphdriver"
_ "github.com/docker/docker/daemon/graphdriver/vfs"
)
func TestRepositoryFindOrCreate(t *testing.T) {
root, err := ioutil.TempDir(os.TempDir(), "volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
repo, err := newRepo(root)
if err != nil {
t.Fatal(err)
}
// no path
v, err := repo.FindOrCreateVolume("", true)
if err != nil {
t.Fatal(err)
}
// FIXME: volumes are heavily dependent on the vfs driver, but this should not be so!
expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID)
if v.Path != expected {
t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path)
}
// with a non-existant path
dir := filepath.Join(root, "doesntexist")
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
if v.Path != dir {
t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
}
if _, err := os.Stat(v.Path); err != nil {
t.Fatal(err)
}
// with a pre-existing path
// can just use the same path from above since it now exists
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
if v.Path != dir {
t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
}
}
func TestRepositoryGet(t *testing.T) {
root, err := ioutil.TempDir(os.TempDir(), "volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
repo, err := newRepo(root)
if err != nil {
t.Fatal(err)
}
v, err := repo.FindOrCreateVolume("", true)
if err != nil {
t.Fatal(err)
}
v2 := repo.Get(v.Path)
if v2 == nil {
t.Fatalf("expected to find volume but didn't")
}
if v2 != v {
t.Fatalf("expected get to return same volume")
}
}
func TestRepositoryDelete(t *testing.T) {
root, err := ioutil.TempDir(os.TempDir(), "volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
repo, err := newRepo(root)
if err != nil {
t.Fatal(err)
}
// with a normal volume
v, err := repo.FindOrCreateVolume("", true)
if err != nil {
t.Fatal(err)
}
if err := repo.Delete(v.Path); err != nil {
t.Fatal(err)
}
if v := repo.Get(v.Path); v != nil {
t.Fatalf("expected volume to not exist")
}
if _, err := os.Stat(v.Path); err == nil {
t.Fatalf("expected volume files to be removed")
}
// with a bind mount
dir := filepath.Join(root, "test")
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
if err := repo.Delete(v.Path); err != nil {
t.Fatal(err)
}
if v := repo.Get(v.Path); v != nil {
t.Fatalf("expected volume to not exist")
}
if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
t.Fatalf("expected bind volume data to persist after destroying volume")
}
// with container refs
dir = filepath.Join(root, "test")
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
v.AddContainer("1234")
if err := repo.Delete(v.Path); err == nil {
t.Fatalf("expected volume delete to fail due to container refs")
}
v.RemoveContainer("1234")
if err := repo.Delete(v.Path); err != nil {
t.Fatal(err)
}
}
func newRepo(root string) (*Repository, error) {
configPath := filepath.Join(root, "repo-config")
graphDir := filepath.Join(root, "repo-graph")
driver, err := graphdriver.GetDriver("vfs", graphDir, []string{})
if err != nil {
return nil, err
}
return NewRepository(configPath, driver)
}

View file

@ -1,152 +0,0 @@
package volumes
import (
"encoding/json"
"os"
"path/filepath"
"sync"
"github.com/docker/docker/pkg/symlink"
)
type Volume struct {
ID string
Path string
IsBindMount bool
Writable bool
containers map[string]struct{}
configPath string
repository *Repository
lock sync.Mutex
}
func (v *Volume) IsDir() (bool, error) {
stat, err := os.Stat(v.Path)
if err != nil {
return false, err
}
return stat.IsDir(), nil
}
func (v *Volume) Containers() []string {
v.lock.Lock()
var containers []string
for c := range v.containers {
containers = append(containers, c)
}
v.lock.Unlock()
return containers
}
func (v *Volume) RemoveContainer(containerId string) {
v.lock.Lock()
delete(v.containers, containerId)
v.lock.Unlock()
}
func (v *Volume) AddContainer(containerId string) {
v.lock.Lock()
v.containers[containerId] = struct{}{}
v.lock.Unlock()
}
func (v *Volume) initialize() error {
v.lock.Lock()
defer v.lock.Unlock()
if _, err := os.Stat(v.Path); err != nil {
if !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(v.Path, 0755); err != nil {
return err
}
}
if err := os.MkdirAll(v.configPath, 0755); err != nil {
return err
}
return v.toDisk()
}
func (v *Volume) ToDisk() error {
v.lock.Lock()
defer v.lock.Unlock()
return v.toDisk()
}
func (v *Volume) toDisk() error {
jsonPath, err := v.jsonPath()
if err != nil {
return err
}
f, err := os.OpenFile(jsonPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
if err := json.NewEncoder(f).Encode(v); err != nil {
f.Close()
return err
}
return f.Close()
}
func (v *Volume) FromDisk() error {
v.lock.Lock()
defer v.lock.Unlock()
pth, err := v.jsonPath()
if err != nil {
return err
}
jsonSource, err := os.Open(pth)
if err != nil {
return err
}
defer jsonSource.Close()
dec := json.NewDecoder(jsonSource)
return dec.Decode(v)
}
func (v *Volume) jsonPath() (string, error) {
return v.GetRootResourcePath("config.json")
}
// Evalutes `path` in the scope of the volume's root path, with proper path
// sanitisation. Symlinks are all scoped to the root of the volume, as
// though the volume's root was `/`.
//
// The volume's root path is the host-facing path of the root of the volume's
// mountpoint inside a container.
//
// NOTE: The returned path is *only* safely scoped inside the volume's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (v *Volume) GetResourcePath(path string) (string, error) {
cleanPath := filepath.Join("/", path)
return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path)
}
// Evalutes `path` in the scope of the volume's config path, with proper path
// sanitisation. Symlinks are all scoped to the root of the config path, as
// though the config path was `/`.
//
// The config path of a volume is not exposed to the container and is just used
// to store volume configuration options and other internal information. If in
// doubt, you probably want to just use v.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the volume's config
// path if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (v *Volume) GetRootResourcePath(path string) (string, error) {
cleanPath := filepath.Join("/", path)
return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
}

View file

@ -1,55 +0,0 @@
package volumes
import (
"os"
"testing"
"github.com/docker/docker/pkg/stringutils"
)
func TestContainers(t *testing.T) {
v := &Volume{containers: make(map[string]struct{})}
id := "1234"
v.AddContainer(id)
if v.Containers()[0] != id {
t.Fatalf("adding a container ref failed")
}
v.RemoveContainer(id)
if len(v.Containers()) != 0 {
t.Fatalf("removing container failed")
}
}
// os.Stat(v.Path) is returning ErrNotExist, initialize catch it and try to
// mkdir v.Path but it dies and correctly returns the error
func TestInitializeCannotMkdirOnNonExistentPath(t *testing.T) {
v := &Volume{Path: "nonexistentpath"}
err := v.initialize()
if err == nil {
t.Fatal("Expected not to initialize volume with a non existent path")
}
if !os.IsNotExist(err) {
t.Fatalf("Expected to get ErrNotExist error, got %s", err)
}
}
// os.Stat(v.Path) is NOT returning ErrNotExist so skip and return error from
// initialize
func TestInitializeCannotStatPathFileNameTooLong(t *testing.T) {
// ENAMETOOLONG
v := &Volume{Path: stringutils.GenerateRandomAlphaOnlyString(300)}
err := v.initialize()
if err == nil {
t.Fatal("Expected not to initialize volume with a non existent path")
}
if os.IsNotExist(err) {
t.Fatal("Expected to not get ErrNotExist")
}
}