Volumes refactor and external plugin implementation.

Signed by all authors:

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Signed-off-by: Arnaud Porterie <arnaud.porterie@docker.com>
Signed-off-by: David Calavera <david.calavera@gmail.com>
Signed-off-by: Jeff Lindsay <progrium@gmail.com>
Signed-off-by: Alexander Morozov <lk4d4@docker.com>
Signed-off-by: Luke Marsden <luke@clusterhq.com>
Signed-off-by: David Calavera <david.calavera@gmail.com>
This commit is contained in:
David Calavera 2015-05-19 13:05:25 -07:00
parent 23e8dff9e7
commit 81fa9feb0c
43 changed files with 1538 additions and 1191 deletions

View file

@ -6,19 +6,18 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"reflect"
"strings"
"text/template"
"time"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/utils"
)
// DockerCli represents the docker command line client.
@ -178,19 +177,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
tr := &http.Transport{
TLSClientConfig: tlsConfig,
}
// Why 32? See https://github.com/docker/docker/pull/8035.
timeout := 32 * time.Second
if proto == "unix" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, timeout)
}
} else {
tr.Proxy = http.ProxyFromEnvironment
tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
}
utils.ConfigureTCPTransport(tr, proto, addr)
configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
if e != nil {

View file

@ -773,7 +773,7 @@ func (b *Builder) clearTmp() {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
return
}
b.Daemon.DeleteVolumes(tmp.VolumePaths())
b.Daemon.DeleteVolumes(tmp)
delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
}

View file

@ -26,9 +26,11 @@ import (
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
)
var (
@ -48,46 +50,37 @@ type StreamConfig struct {
// CommonContainer holds the settings for a container which are applicable
// across all platforms supported by the daemon.
type CommonContainer struct {
StreamConfig
*State `json:"State"` // Needed for remote api version <= 1.11
root string // Path to the "home" of the container, including metadata.
basefs string // Path to the graphdriver mountpoint
ID string
Created time.Time
Path string
Args []string
Config *runconfig.Config
ImageID string `json:"Image"`
NetworkSettings *network.Settings
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Name string
Driver string
ExecDriver string
command *execdriver.Command
StreamConfig
daemon *Daemon
ID string
Created time.Time
Path string
Args []string
Config *runconfig.Config
ImageID string `json:"Image"`
NetworkSettings *network.Settings
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Name string
Driver string
ExecDriver string
MountLabel, ProcessLabel string
RestartCount int
UpdateDns bool
MountPoints map[string]*mountPoint
// Maps container paths to volume paths. The key in this is the path to which
// the volume is being mounted inside the container. Value is the path of the
// volume on disk
Volumes map[string]string
hostConfig *runconfig.HostConfig
command *execdriver.Command
monitor *containerMonitor
execCommands *execStore
daemon *Daemon
// logDriver for closing
logDriver logger.Logger
logCopier *logger.Copier
@ -259,9 +252,6 @@ func (container *Container) Start() (err error) {
return err
}
container.verifyDaemonSettings()
if err := container.prepareVolumes(); err != nil {
return err
}
linkedEnv, err := container.setupLinkedContainers()
if err != nil {
return err
@ -273,10 +263,13 @@ func (container *Container) Start() (err error) {
if err := populateCommand(container, env); err != nil {
return err
}
if err := container.setupMounts(); err != nil {
mounts, err := container.setupMounts()
if err != nil {
return err
}
container.command.Mounts = mounts
return container.waitForStart()
}
@ -571,27 +564,38 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
if err := container.Mount(); err != nil {
return nil, err
}
defer func() {
if err != nil {
container.Unmount()
var paths []string
unmount := func() {
for _, p := range paths {
syscall.Unmount(p, 0)
}
}()
if err = container.mountVolumes(); err != nil {
container.unmountVolumes()
return nil, err
}
defer func() {
if err != nil {
container.unmountVolumes()
// unmount any volumes
unmount()
// unmount the container's rootfs
container.Unmount()
}
}()
mounts, err := container.setupMounts()
if err != nil {
return nil, err
}
for _, m := range mounts {
dest, err := container.GetResourcePath(m.Destination)
if err != nil {
return nil, err
}
paths = append(paths, dest)
if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
return nil, err
}
}
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
stat, err := os.Stat(basePath)
if err != nil {
return nil, err
@ -605,7 +609,6 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
filter = []string{filepath.Base(basePath)}
basePath = filepath.Dir(basePath)
}
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
@ -613,10 +616,9 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.unmountVolumes()
unmount()
container.Unmount()
return err
}),
@ -1007,3 +1009,84 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
}
return written, err
}
// networkMounts returns the bind mounts that expose the container's network
// configuration files (resolv.conf, hostname, hosts) inside its filesystem.
// Each mount is private and writable unless the container was started with a
// read-only root filesystem. Paths that were never set are skipped.
func (container *Container) networkMounts() []execdriver.Mount {
	writable := !container.hostConfig.ReadonlyRootfs
	candidates := []struct {
		source      string
		destination string
	}{
		{container.ResolvConfPath, "/etc/resolv.conf"},
		{container.HostnamePath, "/etc/hostname"},
		{container.HostsPath, "/etc/hosts"},
	}

	var mounts []execdriver.Mount
	for _, c := range candidates {
		if c.source == "" {
			continue
		}
		mounts = append(mounts, execdriver.Mount{
			Source:      c.source,
			Destination: c.destination,
			Writable:    writable,
			Private:     true,
		})
	}
	return mounts
}
// AddLocalMountPoint registers a mount point at destination backed by the
// default (local) volume driver, identified by the given volume name.
func (container *Container) AddLocalMountPoint(name, destination string, rw bool) {
	mp := &mountPoint{
		Name:        name,
		Driver:      volume.DefaultDriverName,
		Destination: destination,
		RW:          rw,
	}
	container.MountPoints[destination] = mp
}
// AddMountPointWithVolume registers a mount point at destination backed by an
// already-created volume, recording the volume's name and driver so the mount
// point can be re-attached after a daemon restart.
func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
	mp := &mountPoint{
		Name:        vol.Name(),
		Driver:      vol.DriverName(),
		Destination: destination,
		RW:          rw,
		Volume:      vol,
	}
	container.MountPoints[destination] = mp
}
// IsDestinationMounted reports whether a mount point is already configured
// for the given path inside the container.
func (container *Container) IsDestinationMounted(destination string) bool {
	mp, ok := container.MountPoints[destination]
	return ok && mp != nil
}
// PrepareMountPoints attaches a live volume to every driver-backed mount
// point, creating (or looking up) the volume through its driver. Mount
// points with no driver recorded (plain host bind mounts) are left as-is.
// It stops and returns the first error encountered.
func (container *Container) PrepareMountPoints() error {
	for _, config := range container.MountPoints {
		if len(config.Driver) == 0 {
			continue
		}
		v, err := createVolume(config.Name, config.Driver)
		if err != nil {
			return err
		}
		config.Volume = v
	}
	return nil
}
// RemoveMountPoints removes the volumes backing the container's mount
// points via their drivers. Mount points with no attached volume are
// skipped. It stops and returns the first removal error.
func (container *Container) RemoveMountPoints() error {
	for _, m := range container.MountPoints {
		if m.Volume == nil {
			continue
		}
		if err := removeVolume(m.Volume); err != nil {
			return err
		}
	}
	return nil
}
// ShouldRestart reports whether the container's restart policy calls for a
// restart: always, or on-failure when the container exited non-zero.
func (container *Container) ShouldRestart() bool {
	switch container.hostConfig.RestartPolicy.Name {
	case "always":
		return true
	case "on-failure":
		return container.ExitCode != 0
	default:
		return false
	}
}

View file

@ -42,14 +42,7 @@ type Container struct {
// Fields below here are platform specific.
AppArmorProfile string
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
// Easier than migrating older container configs :)
VolumesRW map[string]bool
AppliedVolumesFrom map[string]struct{}
activeLinks map[string]*links.Link
activeLinks map[string]*links.Link
}
func killProcessDirectly(container *Container) error {

View file

@ -27,12 +27,6 @@ type Container struct {
// removed in subsequent PRs.
AppArmorProfile string
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
// Easier than migrating older container configs :)
VolumesRW map[string]bool
AppliedVolumesFrom map[string]struct{}
// ---- END OF TEMPORARY DECLARATION ----
}

View file

@ -2,11 +2,15 @@ package daemon
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/libcontainer/label"
)
@ -87,17 +91,52 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
if err := daemon.createRootfs(container); err != nil {
return nil, nil, err
}
if hostConfig != nil {
if err := daemon.setHostConfig(container, hostConfig); err != nil {
return nil, nil, err
}
if err := daemon.setHostConfig(container, hostConfig); err != nil {
return nil, nil, err
}
if err := container.Mount(); err != nil {
return nil, nil, err
}
defer container.Unmount()
if err := container.prepareVolumes(); err != nil {
return nil, nil, err
for spec := range config.Volumes {
var (
name, destination string
parts = strings.Split(spec, ":")
)
switch len(parts) {
case 2:
name, destination = parts[0], filepath.Clean(parts[1])
default:
name = stringid.GenerateRandomID()
destination = filepath.Clean(parts[0])
}
// Skip volumes for which we already have something mounted on that
// destination because of a --volume-from.
if container.IsDestinationMounted(destination) {
continue
}
path, err := container.GetResourcePath(destination)
if err != nil {
return nil, nil, err
}
if stat, err := os.Stat(path); err == nil && !stat.IsDir() {
return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
}
v, err := createVolume(name, config.VolumeDriver)
if err != nil {
return nil, nil, err
}
rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
if err != nil {
return nil, nil, err
}
if path, err = v.Mount(); err != nil {
return nil, nil, err
}
copyExistingContents(rootfs, path)
container.AddMountPointWithVolume(destination, v, true)
}
if err := container.ToDisk(); err != nil {
return nil, nil, err

View file

@ -46,9 +46,12 @@ import (
"github.com/docker/docker/runconfig"
"github.com/docker/docker/trust"
"github.com/docker/docker/utils"
"github.com/docker/docker/volumes"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
)
const defaultVolumesPathName = "volumes"
var (
validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
@ -99,7 +102,6 @@ type Daemon struct {
repositories *graph.TagStore
idIndex *truncindex.TruncIndex
sysInfo *sysinfo.SysInfo
volumes *volumes.Repository
config *Config
containerGraph *graphdb.Database
driver graphdriver.Driver
@ -109,6 +111,7 @@ type Daemon struct {
RegistryService *registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
root string
}
// Get looks for a container using the provided information, which could be
@ -209,7 +212,13 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
// we'll waste time if we update it for every container
daemon.idIndex.Add(container.ID)
container.registerVolumes()
if err := daemon.verifyOldVolumesInfo(container); err != nil {
return err
}
if err := container.PrepareMountPoints(); err != nil {
return err
}
if container.IsRunning() {
logrus.Debugf("killing old running container %s", container.ID)
@ -249,10 +258,15 @@ func (daemon *Daemon) ensureName(container *Container) error {
}
func (daemon *Daemon) restore() error {
type cr struct {
container *Container
registered bool
}
var (
debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
containers = make(map[string]*Container)
currentDriver = daemon.driver.String()
containers = make(map[string]*cr)
)
if !debug {
@ -278,14 +292,12 @@ func (daemon *Daemon) restore() error {
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
containers[container.ID] = &cr{container: container}
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
registeredContainers := []*Container{}
if entities := daemon.containerGraph.List("/", -1); entities != nil {
for _, p := range entities.Paths() {
if !debug && logrus.GetLevel() == logrus.InfoLevel {
@ -294,50 +306,43 @@ func (daemon *Daemon) restore() error {
e := entities[p]
if container, ok := containers[e.ID()]; ok {
if err := daemon.register(container, false); err != nil {
logrus.Debugf("Failed to register container %s: %s", container.ID, err)
}
registeredContainers = append(registeredContainers, container)
// delete from the map so that a new name is not automatically generated
delete(containers, e.ID())
if c, ok := containers[e.ID()]; ok {
c.registered = true
}
}
}
// Any containers that are left over do not exist in the graph
for _, container := range containers {
// Try to set the default name for a container if it exists prior to links
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
logrus.Debugf("Setting default id - %s", err)
}
group := sync.WaitGroup{}
for _, c := range containers {
group.Add(1)
if err := daemon.register(container, false); err != nil {
logrus.Debugf("Failed to register container %s: %s", container.ID, err)
}
go func(container *Container, registered bool) {
defer group.Done()
registeredContainers = append(registeredContainers, container)
}
if !registered {
// Try to set the default name for a container if it exists prior to links
container.Name, err = daemon.generateNewName(container.ID)
if err != nil {
logrus.Debugf("Setting default id - %s", err)
}
}
// check the restart policy on the containers and restart any container with
// the restart policy of "always"
if daemon.config.AutoRestart {
logrus.Debug("Restarting containers...")
if err := daemon.register(container, false); err != nil {
logrus.Debugf("Failed to register container %s: %s", container.ID, err)
}
for _, container := range registeredContainers {
if container.hostConfig.RestartPolicy.IsAlways() ||
(container.hostConfig.RestartPolicy.IsOnFailure() && container.ExitCode != 0) {
// check the restart policy on the containers and restart any container with
// the restart policy of "always"
if daemon.config.AutoRestart && container.ShouldRestart() {
logrus.Debugf("Starting container %s", container.ID)
if err := container.Start(); err != nil {
logrus.Debugf("Failed to start container %s: %s", container.ID, err)
}
}
}
}(c.container, c.registered)
}
group.Wait()
if !debug {
if logrus.GetLevel() == logrus.InfoLevel {
@ -535,6 +540,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
ExecDriver: daemon.execDriver.Name(),
State: NewState(),
execCommands: newExecStore(),
MountPoints: map[string]*mountPoint{},
},
}
container.root = daemon.containerRoot(container.ID)
@ -785,15 +791,11 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
return nil, err
}
volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
if err != nil {
return nil, err
}
volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
if err != nil {
return nil, err
}
volumedrivers.Register(volumesDriver, volumesDriver.Name())
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
@ -872,7 +874,6 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
d.repositories = repositories
d.idIndex = truncindex.NewTruncIndex([]string{})
d.sysInfo = sysInfo
d.volumes = volumes
d.config = config
d.sysInitPath = sysInitPath
d.execDriver = ed
@ -880,6 +881,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
d.defaultLogConfig = config.LogConfig
d.RegistryService = registryService
d.EventsService = eventsService
d.root = config.Root
if err := d.restore(); err != nil {
return nil, err
@ -1218,6 +1220,10 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
}
func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
if err := daemon.registerMountPoints(container, hostConfig); err != nil {
return err
}
container.Lock()
defer container.Unlock()
if err := parseSecurityOpt(container, hostConfig); err != nil {
@ -1231,6 +1237,5 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
container.hostConfig = hostConfig
container.toDisk()
return nil
}

View file

@ -71,21 +71,12 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
}
container.LogEvent("destroy")
if config.RemoveVolume {
daemon.DeleteVolumes(container.VolumePaths())
container.RemoveMountPoints()
}
}
return nil
}
// DeleteVolumes removes each volume path in volumeIDs from the daemon's
// volume repository. Deletion is best-effort: a failure is logged at info
// level and the remaining volumes are still attempted, so the method never
// returns an error.
func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
	for id := range volumeIDs {
		if err := daemon.volumes.Delete(id); err != nil {
			// Log and keep going; one undeletable volume should not
			// block removal of the others.
			logrus.Infof("%s", err)
			continue
		}
	}
}
func (daemon *Daemon) Rm(container *Container) (err error) {
return daemon.commonRm(container, false)
}
@ -134,7 +125,6 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
}
}()
container.derefVolumes()
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
logrus.Debugf("Unable to remove container from link graph: %s", err)
}
@ -162,3 +152,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
return nil
}
// DeleteVolumes removes the volumes backing the container's mount points.
// It delegates to Container.RemoveMountPoints and returns its first error.
func (daemon *Daemon) DeleteVolumes(c *Container) error {
	return c.RemoveMountPoints()
}

View file

@ -10,6 +10,10 @@ import (
type ContainerJSONRaw struct {
*Container
HostConfig *runconfig.HostConfig
// Unused fields for backward compatibility with API versions < 1.12.
Volumes map[string]string
VolumesRW map[string]bool
}
func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
@ -48,6 +52,14 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
FinishedAt: container.State.FinishedAt,
}
volumes := make(map[string]string)
volumesRW := make(map[string]bool)
for _, m := range container.MountPoints {
volumes[m.Destination] = m.Path()
volumesRW[m.Destination] = m.RW
}
contJSON := &types.ContainerJSON{
Id: container.ID,
Created: container.Created,
@ -67,8 +79,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
ExecDriver: container.ExecDriver,
MountLabel: container.MountLabel,
ProcessLabel: container.ProcessLabel,
Volumes: container.Volumes,
VolumesRW: container.VolumesRW,
Volumes: volumes,
VolumesRW: volumesRW,
AppArmorProfile: container.AppArmorProfile,
ExecIDs: container.GetExecIDs(),
HostConfig: &hostConfig,

View file

@ -1,213 +1,116 @@
package daemon
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
volumedrivers "github.com/docker/docker/volume/drivers"
)
type volumeMount struct {
containerPath string
hostPath string
writable bool
copyData bool
from string
var localMountErr = fmt.Errorf("Invalid driver: %s driver doesn't support named volumes", volume.DefaultDriverName)
type mountPoint struct {
Name string
Destination string
Driver string
RW bool
Volume volume.Volume `json:"-"`
Source string
}
func (container *Container) createVolumes() error {
mounts := make(map[string]*volumeMount)
func (m *mountPoint) Setup() (string, error) {
if m.Volume != nil {
return m.Volume.Mount()
}
// get the normal volumes
for path := range container.Config.Volumes {
path = filepath.Clean(path)
// skip if there is already a volume for this container path
if _, exists := container.Volumes[path]; exists {
continue
}
realPath, err := container.GetResourcePath(path)
if err != nil {
return err
}
if stat, err := os.Stat(realPath); err == nil {
if !stat.IsDir() {
return fmt.Errorf("can't mount to container path, file exists - %s", path)
if len(m.Source) > 0 {
if _, err := os.Stat(m.Source); err != nil {
if !os.IsNotExist(err) {
return "", err
}
if err := os.MkdirAll(m.Source, 0755); err != nil {
return "", err
}
}
mnt := &volumeMount{
containerPath: path,
writable: true,
copyData: true,
}
mounts[mnt.containerPath] = mnt
return m.Source, nil
}
// Get all the bind mounts
// track bind paths separately due to #10618
bindPaths := make(map[string]struct{})
for _, spec := range container.hostConfig.Binds {
mnt, err := parseBindMountSpec(spec)
if err != nil {
return err
}
// #10618
if _, exists := bindPaths[mnt.containerPath]; exists {
return fmt.Errorf("Duplicate volume mount %s", mnt.containerPath)
}
bindPaths[mnt.containerPath] = struct{}{}
mounts[mnt.containerPath] = mnt
}
// Get volumes from
for _, from := range container.hostConfig.VolumesFrom {
cID, mode, err := parseVolumesFromSpec(from)
if err != nil {
return err
}
if _, exists := container.AppliedVolumesFrom[cID]; exists {
// skip since it's already been applied
continue
}
c, err := container.daemon.Get(cID)
if err != nil {
return fmt.Errorf("container %s not found, impossible to mount its volumes", cID)
}
for _, mnt := range c.volumeMounts() {
mnt.writable = mnt.writable && (mode == "rw")
mnt.from = cID
mounts[mnt.containerPath] = mnt
}
}
for _, mnt := range mounts {
containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, mnt.containerPath), container.basefs)
if err != nil {
return err
}
// Create the actual volume
v, err := container.daemon.volumes.FindOrCreateVolume(mnt.hostPath, mnt.writable)
if err != nil {
return err
}
container.VolumesRW[mnt.containerPath] = mnt.writable
container.Volumes[mnt.containerPath] = v.Path
v.AddContainer(container.ID)
if mnt.from != "" {
container.AppliedVolumesFrom[mnt.from] = struct{}{}
}
if mnt.writable && mnt.copyData {
// Copy whatever is in the container at the containerPath to the volume
copyExistingContents(containerMntPath, v.Path)
}
}
return nil
return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
}
// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
func (container *Container) sortedVolumeMounts() []string {
var mountPaths []string
for path := range container.Volumes {
mountPaths = append(mountPaths, path)
func (m *mountPoint) Path() string {
if m.Volume != nil {
return m.Volume.Path()
}
sort.Strings(mountPaths)
return mountPaths
return m.Source
}
func (container *Container) VolumePaths() map[string]struct{} {
var paths = make(map[string]struct{})
for _, path := range container.Volumes {
paths[path] = struct{}{}
func parseBindMount(spec string, config *runconfig.Config) (*mountPoint, error) {
bind := &mountPoint{
RW: true,
}
return paths
}
func (container *Container) registerVolumes() {
for path := range container.VolumePaths() {
if v := container.daemon.volumes.Get(path); v != nil {
v.AddContainer(container.ID)
continue
}
// if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered
writable := true
if rw, exists := container.VolumesRW[path]; exists {
writable = rw
}
v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
if err != nil {
logrus.Debugf("error registering volume %s: %v", path, err)
continue
}
v.AddContainer(container.ID)
}
}
func (container *Container) derefVolumes() {
for path := range container.VolumePaths() {
vol := container.daemon.volumes.Get(path)
if vol == nil {
logrus.Debugf("Volume %s was not found and could not be dereferenced", path)
continue
}
vol.RemoveContainer(container.ID)
}
}
func parseBindMountSpec(spec string) (*volumeMount, error) {
arr := strings.Split(spec, ":")
mnt := &volumeMount{}
switch len(arr) {
case 2:
mnt.hostPath = arr[0]
mnt.containerPath = arr[1]
mnt.writable = true
bind.Destination = arr[1]
case 3:
mnt.hostPath = arr[0]
mnt.containerPath = arr[1]
mnt.writable = validMountMode(arr[2]) && arr[2] == "rw"
bind.Destination = arr[1]
if !validMountMode(arr[2]) {
return nil, fmt.Errorf("invalid mode for volumes-from: %s", arr[2])
}
bind.RW = arr[2] == "rw"
default:
return nil, fmt.Errorf("Invalid volume specification: %s", spec)
}
if !filepath.IsAbs(mnt.hostPath) {
return nil, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", mnt.hostPath)
if !filepath.IsAbs(arr[0]) {
bind.Driver, bind.Name = parseNamedVolumeInfo(arr[0], config)
if bind.Driver == volume.DefaultDriverName {
return nil, localMountErr
}
} else {
bind.Source = filepath.Clean(arr[0])
}
mnt.hostPath = filepath.Clean(mnt.hostPath)
mnt.containerPath = filepath.Clean(mnt.containerPath)
return mnt, nil
bind.Destination = filepath.Clean(bind.Destination)
return bind, nil
}
func parseVolumesFromSpec(spec string) (string, string, error) {
specParts := strings.SplitN(spec, ":", 2)
if len(specParts) == 0 {
// parseNamedVolumeInfo splits a named-volume reference of the form
// [driver/]name into its driver and volume name. When no driver prefix is
// present, the container config's VolumeDriver is used, falling back to the
// default driver when that is empty as well.
func parseNamedVolumeInfo(info string, config *runconfig.Config) (driver string, name string) {
	if idx := strings.Index(info, "/"); idx >= 0 {
		// Explicit "driver/name" form.
		return info[:idx], info[idx+1:]
	}
	driver = config.VolumeDriver
	if len(driver) == 0 {
		driver = volume.DefaultDriverName
	}
	name = info
	return
}
func parseVolumesFrom(spec string) (string, string, error) {
if len(spec) == 0 {
return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
}
var (
id = specParts[0]
mode = "rw"
)
specParts := strings.SplitN(spec, ":", 2)
id := specParts[0]
mode := "rw"
if len(specParts) == 2 {
mode = specParts[1]
if !validMountMode(mode) {
@ -222,7 +125,6 @@ func validMountMode(mode string) bool {
"rw": true,
"ro": true,
}
return validModes[mode]
}
@ -240,34 +142,16 @@ func (container *Container) specialMounts() []execdriver.Mount {
return mounts
}
func (container *Container) volumeMounts() map[string]*volumeMount {
mounts := make(map[string]*volumeMount)
for containerPath, path := range container.Volumes {
v := container.daemon.volumes.Get(path)
if v == nil {
// This should never happen
logrus.Debugf("reference by container %s to non-existent volume path %s", container.ID, path)
continue
}
mounts[containerPath] = &volumeMount{hostPath: path, containerPath: containerPath, writable: container.VolumesRW[containerPath]}
}
return mounts
}
func copyExistingContents(source, destination string) error {
volList, err := ioutil.ReadDir(source)
if err != nil {
return err
}
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(destination)
if err != nil {
return err
}
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := chrootarchive.CopyWithTar(source, destination); err != nil {
@ -275,60 +159,145 @@ func copyExistingContents(source, destination string) error {
}
}
}
return copyOwnership(source, destination)
}
func (container *Container) mountVolumes() error {
for dest, source := range container.Volumes {
v := container.daemon.volumes.Get(source)
if v == nil {
return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest)
}
// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
// It follows the next sequence to decide what to mount in each final destination:
//
// 1. Select the previously configured mount points for the containers, if any.
// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
binds := map[string]bool{}
mountPoints := map[string]*mountPoint{}
destPath, err := container.GetResourcePath(dest)
// 1. Read already configured mount points.
for name, point := range container.MountPoints {
mountPoints[name] = point
}
// 2. Read volumes from other containers.
for _, v := range hostConfig.VolumesFrom {
containerID, mode, err := parseVolumesFrom(v)
if err != nil {
return err
}
if err := mount.Mount(source, destPath, "bind", "rbind,rw"); err != nil {
return fmt.Errorf("error while mounting volume %s: %v", source, err)
}
}
for _, mnt := range container.specialMounts() {
destPath, err := container.GetResourcePath(mnt.Destination)
c, err := daemon.Get(containerID)
if err != nil {
return err
}
if err := mount.Mount(mnt.Source, destPath, "bind", "bind,rw"); err != nil {
return fmt.Errorf("error while mounting volume %s: %v", mnt.Source, err)
for _, m := range c.MountPoints {
cp := m
cp.RW = m.RW && mode != "ro"
if len(m.Source) == 0 {
v, err := createVolume(m.Name, m.Driver)
if err != nil {
return err
}
cp.Volume = v
}
mountPoints[cp.Destination] = cp
}
}
// 3. Read bind mounts
for _, b := range hostConfig.Binds {
// #10618
bind, err := parseBindMount(b, container.Config)
if err != nil {
return err
}
if binds[bind.Destination] {
return fmt.Errorf("Duplicate bind mount %s", bind.Destination)
}
if len(bind.Name) > 0 && len(bind.Driver) > 0 {
v, err := createVolume(bind.Name, bind.Driver)
if err != nil {
return err
}
bind.Volume = v
}
binds[bind.Destination] = true
mountPoints[bind.Destination] = bind
}
container.MountPoints = mountPoints
return nil
}
func (container *Container) unmountVolumes() {
for dest := range container.Volumes {
destPath, err := container.GetResourcePath(dest)
if err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
// verifyOldVolumesInfo ports volumes configured for the containers pre docker 1.7.
// It reads the container configuration and creates valid mount points for the old volumes.
func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
jsonPath, err := container.jsonPath()
if err != nil {
return err
}
f, err := os.Open(jsonPath)
if err != nil {
if os.IsNotExist(err) {
return nil
}
if err := mount.ForceUnmount(destPath); err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
return err
}
type oldContVolCfg struct {
Volumes map[string]string
VolumesRW map[string]bool
}
vols := oldContVolCfg{
Volumes: make(map[string]string),
VolumesRW: make(map[string]bool),
}
if err := json.NewDecoder(f).Decode(&vols); err != nil {
return err
}
for destination, hostPath := range vols.Volumes {
vfsPath := filepath.Join(daemon.root, "vfs", "dir")
if strings.HasPrefix(hostPath, vfsPath) {
id := filepath.Base(hostPath)
container.AddLocalMountPoint(id, destination, vols.VolumesRW[destination])
}
}
for _, mnt := range container.specialMounts() {
destPath, err := container.GetResourcePath(mnt.Destination)
if err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
continue
}
if err := mount.ForceUnmount(destPath); err != nil {
logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
}
}
return container.ToDisk()
}
// createVolume resolves the driver registered under driverName (an empty
// name falls back to the default driver, see getVolumeDriver) and asks it
// to create a volume called name.
func createVolume(name, driverName string) (volume.Volume, error) {
	driver, lookupErr := getVolumeDriver(driverName)
	if lookupErr != nil {
		return nil, lookupErr
	}
	return driver.Create(name)
}
// removeVolume asks the driver that owns v to remove it.
//
// A failure to resolve the driver is propagated to the caller instead of
// being silently discarded: if the driver is not registered the volume
// cannot have been removed, and returning nil here would make the caller
// believe the removal succeeded and leak the volume.
func removeVolume(v volume.Volume) error {
	vd, err := getVolumeDriver(v.DriverName())
	if err != nil {
		return err
	}
	return vd.Remove(v)
}
// getVolumeDriver returns the registered driver for name, substituting
// the default driver when name is empty. It fails when no driver with
// that name has been registered.
func getVolumeDriver(name string) (volume.Driver, error) {
	driverName := name
	if driverName == "" {
		driverName = volume.DefaultDriverName
	}
	if vd := volumedrivers.Lookup(driverName); vd != nil {
		return vd, nil
	}
	return nil, fmt.Errorf("Volumes Driver %s isn't registered", driverName)
}

View file

@ -4,6 +4,9 @@ package daemon
import (
"os"
"path/filepath"
"sort"
"strings"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/system"
@ -24,36 +27,44 @@ func copyOwnership(source, destination string) error {
return os.Chmod(destination, os.FileMode(stat.Mode()))
}
func (container *Container) prepareVolumes() error {
if container.Volumes == nil || len(container.Volumes) == 0 {
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
}
func (container *Container) setupMounts() ([]execdriver.Mount, error) {
var mounts []execdriver.Mount
for _, m := range container.MountPoints {
path, err := m.Setup()
if err != nil {
return nil, err
}
if len(container.hostConfig.VolumesFrom) > 0 && container.AppliedVolumesFrom == nil {
container.AppliedVolumesFrom = make(map[string]struct{})
}
return container.createVolumes()
}
func (container *Container) setupMounts() error {
mounts := []execdriver.Mount{}
// Mount user specified volumes
// Note, these are not private because you may want propagation of (un)mounts from host
// volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
// want this new mount in the container
// These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
for _, path := range container.sortedVolumeMounts() {
mounts = append(mounts, execdriver.Mount{
Source: container.Volumes[path],
Destination: path,
Writable: container.VolumesRW[path],
Source: path,
Destination: m.Destination,
Writable: m.RW,
})
}
mounts = append(mounts, container.specialMounts()...)
container.command.Mounts = mounts
return nil
mounts = sortMounts(mounts)
return append(mounts, container.networkMounts()...), nil
}
// sortMounts orders m in place from the shallowest to the deepest mount
// destination (fewest path components first) and returns the same slice.
func sortMounts(m []execdriver.Mount) []execdriver.Mount {
	sort.Sort(mounts(m))
	return m
}

// mounts implements sort.Interface over a slice of execdriver.Mount,
// comparing entries by the depth of their destination paths.
type mounts []execdriver.Mount

func (m mounts) Len() int      { return len(m) }
func (m mounts) Swap(i, j int) { m[j], m[i] = m[i], m[j] }

func (m mounts) Less(i, j int) bool {
	return m.parts(i) < m.parts(j)
}

// parts reports how many path components make up the cleaned destination
// of the i-th mount (separator count plus one).
func (m mounts) parts(i int) int {
	cleaned := filepath.Clean(m[i].Destination)
	return strings.Count(cleaned, string(os.PathSeparator)) + 1
}

146
daemon/volumes_unit_test.go Normal file
View file

@ -0,0 +1,146 @@
package daemon
import (
"testing"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
volumedrivers "github.com/docker/docker/volume/drivers"
)
// TestParseNamedVolumeInfo verifies how a volume spec is split into a
// (driver, name) pair: an explicit "driver/name" prefix in the spec wins
// over the container's configured VolumeDriver, and an empty driver falls
// back to "local".
func TestParseNamedVolumeInfo(t *testing.T) {
	cases := []struct {
		driver    string // config.VolumeDriver set on the container
		name      string // raw volume spec being parsed
		expDriver string
		expName   string
	}{
		{"", "name", "local", "name"},
		{"external", "name", "external", "name"},
		{"", "external/name", "external", "name"},
		{"ignored", "external/name", "external", "name"},
	}

	for _, c := range cases {
		conf := &runconfig.Config{VolumeDriver: c.driver}
		driver, name := parseNamedVolumeInfo(c.name, conf)
		if driver != c.expDriver {
			t.Fatalf("Expected %s, was %s\n", c.expDriver, driver)
		}
		if name != c.expName {
			t.Fatalf("Expected %s, was %s\n", c.expName, name)
		}
	}
}
// TestParseBindMount covers the -v/--volume bind spec parser: plain
// host:container binds, ro/rw mode suffixes, named volumes routed to an
// external driver (either via config.VolumeDriver or an explicit
// "driver/name" prefix), and the rejection of invalid modes, of named
// volumes without a driver, and of "local/..." prefixed specs.
func TestParseBindMount(t *testing.T) {
	cases := []struct {
		bind      string // raw bind spec
		driver    string // config.VolumeDriver set on the container
		expDest   string
		expSource string
		expName   string
		expDriver string
		expRW     bool
		fail      bool // true when parsing is expected to error
	}{
		{"/tmp:/tmp", "", "/tmp", "/tmp", "", "", true, false},
		{"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", false, false},
		{"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", true, false},
		{"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", false, true},
		{"name:/tmp", "", "", "", "", "", false, true},
		{"name:/tmp", "external", "/tmp", "", "name", "external", true, false},
		{"external/name:/tmp:rw", "", "/tmp", "", "name", "external", true, false},
		{"external/name:/tmp:ro", "", "/tmp", "", "name", "external", false, false},
		{"external/name:/tmp:foo", "", "/tmp", "", "name", "external", false, true},
		{"name:/tmp", "local", "", "", "", "", false, true},
		{"local/name:/tmp:rw", "", "", "", "", "", true, true},
	}

	for _, c := range cases {
		conf := &runconfig.Config{VolumeDriver: c.driver}
		m, err := parseBindMount(c.bind, conf)
		if c.fail {
			if err == nil {
				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
			}
			continue
		}

		if m.Destination != c.expDest {
			t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
		}

		if m.Source != c.expSource {
			t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
		}

		if m.Name != c.expName {
			t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
		}

		if m.Driver != c.expDriver {
			t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind)
		}

		if m.RW != c.expRW {
			t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
		}
	}
}
// TestParseVolumeFrom checks parsing of --volumes-from specifications: a
// bare container id defaults to "rw" mode, explicit ":rw"/":ro" suffixes
// are honored, and empty specs or unknown modes are rejected.
func TestParseVolumeFrom(t *testing.T) {
	cases := []struct {
		spec    string // raw volumes-from spec
		expId   string
		expMode string
		fail    bool // true when parsing is expected to error
	}{
		{"", "", "", true},
		{"foobar", "foobar", "rw", false},
		{"foobar:rw", "foobar", "rw", false},
		{"foobar:ro", "foobar", "ro", false},
		{"foobar:baz", "", "", true},
	}

	for _, c := range cases {
		id, mode, err := parseVolumesFrom(c.spec)
		if c.fail {
			if err == nil {
				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
			}
			continue
		}

		if id != c.expId {
			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
		}
		if mode != c.expMode {
			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
		}
	}
}
// fakeDriver is a stub volume driver used to exercise driver registration
// and lookup; its Create and Remove do nothing.
type fakeDriver struct{}

func (fakeDriver) Name() string                              { return "fake" }
func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil }
func (fakeDriver) Remove(v volume.Volume) error              { return nil }
// TestGetVolumeDriver checks driver lookup: asking for an unregistered
// name fails, and a registered driver is returned under the name it was
// registered with.
func TestGetVolumeDriver(t *testing.T) {
	_, err := getVolumeDriver("missing")
	if err == nil {
		t.Fatal("Expected error, was nil")
	}

	volumedrivers.Register(fakeDriver{}, "fake")
	d, err := getVolumeDriver("fake")
	if err != nil {
		t.Fatal(err)
	}
	if d.Name() != "fake" {
		t.Fatalf("Expected fake driver, got %s\n", d.Name())
	}
}

View file

@ -2,15 +2,13 @@
package daemon
import "github.com/docker/docker/daemon/execdriver"
// Not supported on Windows
func copyOwnership(source, destination string) error {
return nil
return nil, nil
}
func (container *Container) prepareVolumes() error {
return nil
}
func (container *Container) setupMounts() error {
func (container *Container) setupMounts() ([]execdriver.Mount, error) {
return nil
}

View file

@ -73,6 +73,7 @@ pages:
- ['machine/index.md', 'User Guide', 'Docker Machine' ]
- ['swarm/index.md', 'User Guide', 'Docker Swarm' ]
- ['kitematic/userguide.md', 'User Guide', 'Kitematic']
- ['userguide/plugins.md', 'User Guide', 'Docker Plugins']
# Docker Hub docs:
- ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ]
@ -185,6 +186,7 @@ pages:
- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API client libraries']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub accounts API']
- ['reference/api/plugin_api.md', 'Reference', 'Docker Plugin API']
- ['kitematic/faq.md', 'Reference', 'Kitematic: FAQ']
- ['kitematic/known-issues.md', 'Reference', 'Kitematic: Known issues']

View file

@ -7,3 +7,4 @@ This directory holds the authoritative specifications of APIs defined and implem
index for images to download
* The docker.io OAuth and accounts API which 3rd party services can
use to access account information
* The plugin API for Docker Plugins

View file

@ -73,6 +73,13 @@ are now returned as boolean instead of as an int.
In addition, the end point now returns the new boolean fields
`CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`.
**New!**
You can now specify a volume plugin in `/v1.19/containers/create`, for example
`"HostConfig": {"Binds": ["flocker/name:/data"]}` where `flocker` is the name
of the plugin, `name` is the user-facing name of the volume (passed to the
volume plugin) and `/data` is the mountpoint inside the container.
## v1.18
### Full documentation

View file

@ -226,8 +226,11 @@ Json Parameters:
- **Binds** A list of volume bindings for this container. Each volume
binding is a string of the form `container_path` (to create a new
volume for the container), `host_path:container_path` (to bind-mount
a host path into the container), or `host_path:container_path:ro`
(to make the bind-mount read-only inside the container).
a host path into the container), `host_path:container_path:ro`
(to make the bind-mount read-only inside the container), or
`volume_plugin/volume_name:container_path` (to provision a
volume named `volume_name` from a [volume plugin](/userguide/plugins)
named `volume_plugin`).
- **Links** - A list of links for the container. Each link entry should be
in the form of `container_name:alias`.
- **LxcConf** - LXC specific configurations. These configurations will only

View file

@ -0,0 +1,223 @@
page_title: Plugin API documentation
page_description: Documentation for writing a Docker plugin.
page_keywords: docker, plugins, api, extensions
# Docker Plugin API
Docker plugins are out-of-process extensions which add capabilities to the
Docker Engine.
This page is intended for people who want to develop their own Docker plugin.
If you just want to learn about or use Docker plugins, look
[here](/userguide/plugins).
## What plugins are
A plugin is a process running on the same docker host as the docker daemon,
which registers itself by placing a file in `/usr/share/docker/plugins` (the
"plugin directory").
Plugins have human-readable names, which are short, lowercase strings. For
example, `flocker` or `weave`.
Plugins can run inside or outside containers. Currently running them outside
containers is recommended.
## Plugin discovery
Docker discovers plugins by looking for them in the plugin directory whenever a
user or container tries to use one by name.
There are two types of files which can be put in the plugin directory.
* `.sock` files are UNIX domain sockets.
* `.spec` files are text files containing a URL, such as `unix:///other.sock`.
The name of the file (excluding the extension) determines the plugin name.
For example, the `flocker` plugin might create a UNIX socket at
`/usr/share/docker/plugins/flocker.sock`.
Plugins must be run locally on the same machine as the Docker daemon. UNIX
domain sockets are strongly encouraged for security reasons.
## Plugin lifecycle
Plugins should be started before Docker, and stopped after Docker. For
example, when packaging a plugin for a platform which supports `systemd`, you
might use [`systemd` dependencies](
http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
manage startup and shutdown order.
When upgrading a plugin, you should first stop the Docker daemon, upgrade the
plugin, then start Docker again.
If a plugin is packaged as a container, this may cause issues. Plugins as
containers are currently considered experimental due to these shutdown/startup
ordering issues. These issues are mitigated by plugin retries (see below).
## Plugin activation
When a plugin is first referred to -- either by a user referring to it by name
(e.g. `docker run --volume-driver=foo`) or a container already configured to
use a plugin being started -- Docker looks for the named plugin in the plugin
directory and activates it with a handshake. See Handshake API below.
Plugins are *not* activated automatically at Docker daemon startup. Rather,
they are activated only lazily, or on-demand, when they are needed.
## API design
The Plugin API is RPC-style JSON over HTTP, much like webhooks.
Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to
implement an HTTP server and bind this to the UNIX socket mentioned in the
"plugin discovery" section.
All requests are HTTP `POST` requests.
The API is versioned via an Accept header, which currently is always set to
`application/vnd.docker.plugins.v1+json`.
## Handshake API
Plugins are activated via the following "handshake" API call.
### /Plugin.Activate
**Request:** empty body
**Response:**
```
{
"Implements": ["VolumeDriver"]
}
```
Responds with a list of Docker subsystems which this plugin implements.
After activation, the plugin will then be sent events from this subsystem.
## Volume API
If a plugin registers itself as a `VolumeDriver` (see above) then it is
expected to provide writeable paths on the host filesystem for the Docker
daemon to provide to containers to consume.
The Docker daemon handles bind-mounting the provided paths into user
containers.
### /VolumeDriver.Create
**Request**:
```
{
"Name": "volume_name"
}
```
Instruct the plugin that the user wants to create a volume, given a user
specified volume name. The plugin does not need to actually manifest the
volume on the filesystem yet (until Mount is called).
**Response**:
```
{
"Err": null
}
```
Respond with a string error if an error occurred.
### /VolumeDriver.Remove
**Request**:
```
{
"Name": "volume_name"
}
```
Instruct the plugin to delete a volume, given a user specified volume name.
**Response**:
```
{
"Err": null
}
```
Respond with a string error if an error occurred.
### /VolumeDriver.Mount
**Request**:
```
{
"Name": "volume_name"
}
```
Docker requires the plugin to provide a volume, given a user specified volume
name. This is called once per container start.
**Response**:
```
{
"Mountpoint": "/path/to/directory/on/host",
"Err": null
}
```
Respond with the path on the host filesystem where the volume has been made
available, and/or a string error if an error occurred.
### /VolumeDriver.Path
**Request**:
```
{
"Name": "volume_name"
}
```
Docker needs reminding of the path to the volume on the host.
**Response**:
```
{
"Mountpoint": "/path/to/directory/on/host",
"Err": null
}
```
Respond with the path on the host filesystem where the volume has been made
available, and/or a string error if an error occurred.
### /VolumeDriver.Unmount
**Request**:
```
{
"Name": "volume_name"
}
```
Indication that Docker is no longer using the named volume. This is called once
per container stop. The plugin may deduce that it is safe to deprovision the
volume at this point.
**Response**:
```
{
"Err": null
}
```
Respond with a string error if an error occurred.
## Plugin retries
Attempts to call a method on a plugin are retried with an exponential backoff
for up to 30 seconds. This may help when packaging plugins as containers, since
it gives plugin containers a chance to start up before failing any user
containers which depend on them.

View file

@ -1000,7 +1000,8 @@ Creates a new container.
--security-opt=[] Security options
-t, --tty=false Allocate a pseudo-TTY
-u, --user="" Username or UID
-v, --volume=[] Bind mount a volume
-v, --volume=[] Bind mount a volume, or specify name for volume plugin
--volume-driver= Optional volume driver (plugin name) for the container
--volumes-from=[] Mount volumes from the specified container(s)
-w, --workdir="" Working directory inside the container
@ -1970,7 +1971,8 @@ To remove an image using its digest:
--sig-proxy=true Proxy received signals to the process
-t, --tty=false Allocate a pseudo-TTY
-u, --user="" Username or UID (format: <name|uid>[:<group|gid>])
-v, --volume=[] Bind mount a volume
-v, --volume=[] Bind mount a volume, or specify name for volume plugin
--volume-driver= Optional volume driver (plugin name) for the container
--volumes-from=[] Mount volumes from the specified container(s)
-w, --workdir="" Working directory inside the container
@ -2066,6 +2068,18 @@ binary (such as that provided by [https://get.docker.com](
https://get.docker.com)), you give the container the full access to create and
manipulate the host's Docker daemon.
$ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
By specifying a volume name in conjunction with a volume driver, volume plugins
such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be
used to manage volumes external to a single host, such as those on EBS. In this
example, "volumename" is passed through to the volume plugin as a user-given
name for the volume which allows the plugin to associate it with an external
volume beyond the lifetime of a single container or container host. This can be
used, for example, to move a stateful container from one server to another.
The `volumename` must not begin with a `/`.
$ docker run -p 127.0.0.1:80:8080 ubuntu bash
This binds port `8080` of the container to port `80` on `127.0.0.1` of

View file

@ -210,6 +210,14 @@ Then un-tar the backup file in the new container's data volume.
You can use the techniques above to automate backup, migration and
restore testing using your preferred tools.
## Integrating Docker with external storage systems
Docker volume plugins such as [Flocker](https://clusterhq.com/docker-plugin/)
enable Docker deployments to be integrated with external storage systems, such
as Amazon EBS, and enable data volumes to persist beyond the lifetime of a
single Docker host. See the [plugin section of the user
guide](/userguide/plugins) for more information.
# Next steps
Now we've learned a bit more about how to use Docker we're going to see how to

View file

@ -105,6 +105,12 @@ works with Docker can now transparently scale up to multiple hosts.
Go to [Docker Swarm user guide](/swarm/).
## Docker Plugins
Docker plugins allow you to extend the capabilities of the Docker Engine.
Go to [Docker Plugins](/userguide/plugins).
## Getting help
* [Docker homepage](http://www.docker.com/)

View file

@ -0,0 +1,51 @@
page_title: Docker Plugins
page_description: Learn what Docker Plugins are and how to use them.
page_keywords: plugins, extensions, extensibility
# Understanding Docker Plugins
You can extend the capabilities of the Docker Engine by loading third-party
plugins.
## Types of plugins
Plugins extend Docker's functionality. They come in specific types. For
example, a **volume plugin** might enable Docker volumes to persist across
multiple Docker hosts.
Currently Docker supports **volume plugins**. In the future it will support
additional plugin types.
## Installing a plugin
Follow the instructions in the plugin's documentation.
## Finding a plugin
The following plugins exist:
* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
which provides multi-host portable volumes for Docker, enabling you to run
databases and other stateful containers and move them around across a cluster
of machines.
## Using a plugin
Depending on the plugin type, there are additional arguments to `docker` CLI
commands.
* For example `docker run` has a [`--volume-driver` argument](
/reference/commandline/cli/#run).
You can also use plugins via the [Docker Remote API](
/reference/api/docker_remote_api/).
## Troubleshooting a plugin
If you are having problems with Docker after loading a plugin, ask the authors
of the plugin for help. The Docker team may not be able to assist you.
## Writing a plugin
If you are interested in writing a plugin for Docker, or seeing how they work
under the hood, see the [docker plugins reference](/reference/api/plugin_api).

View file

@ -166,7 +166,7 @@ func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) {
c.Assert(status, check.Equals, http.StatusInternalServerError)
c.Assert(err, check.IsNil)
if !strings.Contains(string(body), "Duplicate volume") {
if !strings.Contains(string(body), "Duplicate bind") {
c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
}
}
@ -210,49 +210,6 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
}
}
// Ensure that volumes-from has priority over binds/anything else
// This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start
func (s *DockerSuite) TestVolumesFromHasPriority(c *check.C) {
volName := "voltst2"
volPath := "/tmp"
if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
c.Fatal(out, err)
}
name := "testing"
config := map[string]interface{}{
"Image": "busybox",
"Volumes": map[string]struct{}{volPath: {}},
}
status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
c.Assert(status, check.Equals, http.StatusCreated)
c.Assert(err, check.IsNil)
bindPath := randomUnixTmpDirPath("test")
config = map[string]interface{}{
"VolumesFrom": []string{volName},
"Binds": []string{bindPath + ":/tmp"},
}
status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
c.Assert(status, check.Equals, http.StatusNoContent)
c.Assert(err, check.IsNil)
pth, err := inspectFieldMap(name, "Volumes", volPath)
if err != nil {
c.Fatal(err)
}
pth2, err := inspectFieldMap(volName, "Volumes", volPath)
if err != nil {
c.Fatal(err)
}
if pth != pth2 {
c.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
}
}
func (s *DockerSuite) TestGetContainerStats(c *check.C) {
var (
name = "statscontainer"

View file

@ -284,35 +284,6 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
}
}
// #9629
func (s *DockerDaemonSuite) TestDaemonVolumesBindsRefs(c *check.C) {
if err := s.d.StartWithBusybox(); err != nil {
c.Fatal(err)
}
tmp, err := ioutil.TempDir(os.TempDir(), "")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmp)
if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Restart(); err != nil {
c.Fatal(err)
}
if out, err := s.d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil {
c.Fatal(err, out)
}
}
func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
// TODO: skip or update for Windows daemon
os.Remove("/etc/docker/key.json")
@ -360,76 +331,6 @@ func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
}
}
// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers
// without corresponding volume json
func (s *DockerDaemonSuite) TestDaemonUpgradeWithVolumes(c *check.C) {
graphDir := filepath.Join(os.TempDir(), "docker-test")
defer os.RemoveAll(graphDir)
if err := s.d.StartWithBusybox("-g", graphDir); err != nil {
c.Fatal(err)
}
tmpDir := filepath.Join(os.TempDir(), "test")
defer os.RemoveAll(tmpDir)
if out, err := s.d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil {
c.Fatal(err, out)
}
if err := s.d.Stop(); err != nil {
c.Fatal(err)
}
// Remove this since we're expecting the daemon to re-create it too
if err := os.RemoveAll(tmpDir); err != nil {
c.Fatal(err)
}
configDir := filepath.Join(graphDir, "volumes")
if err := os.RemoveAll(configDir); err != nil {
c.Fatal(err)
}
if err := s.d.Start("-g", graphDir); err != nil {
c.Fatal(err)
}
if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
c.Fatalf("expected volume path %s to exist but it does not", tmpDir)
}
dir, err := ioutil.ReadDir(configDir)
if err != nil {
c.Fatal(err)
}
if len(dir) == 0 {
c.Fatalf("expected volumes config dir to contain data for new volume")
}
// Now with just removing the volume config and not the volume data
if err := s.d.Stop(); err != nil {
c.Fatal(err)
}
if err := os.RemoveAll(configDir); err != nil {
c.Fatal(err)
}
if err := s.d.Start("-g", graphDir); err != nil {
c.Fatal(err)
}
dir, err = ioutil.ReadDir(configDir)
if err != nil {
c.Fatal(err)
}
if len(dir) == 0 {
c.Fatalf("expected volumes config dir to contain data for new volume")
}
}
// GH#11320 - verify that the daemon exits on failure properly
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required

View file

@ -395,21 +395,6 @@ func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
}
}
// Regression test for #4741
func (s *DockerSuite) TestRunWithVolumesAsFiles(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true")
out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
if err != nil && exitCode != 0 {
c.Fatal("1", out, stderr, err)
}
runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file")
out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
if err != nil && exitCode != 0 {
c.Fatal("2", out, stderr, err)
}
}
// Regression test for #4979
func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
@ -536,7 +521,7 @@ func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
if out, _, err := runCommandWithOutput(cmd); err == nil {
c.Fatal("Expected error about duplicate volume definitions")
} else {
if !strings.Contains(out, "Duplicate volume") {
if !strings.Contains(out, "Duplicate bind mount") {
c.Fatalf("Expected 'duplicate volume' error, got %v", err)
}
}
@ -2333,7 +2318,13 @@ func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
c.Fatal(err)
}
cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
cmd := exec.Command(dockerBinary, "run",
"-v", fmt.Sprintf("%s:/tmp", tmpDir),
"-v", fmt.Sprintf("%s:/tmp/foo", fooDir),
"-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2),
"-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir),
"busybox:latest", "sh", "-c",
"ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(out, err)
@ -2427,41 +2418,6 @@ func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
}
}
func (s *DockerSuite) TestRunVolumesNotRecreatedOnStart(c *check.C) {
testRequires(c, SameHostDaemon)
// Clear out any remnants from other tests
info, err := ioutil.ReadDir(volumesConfigPath)
if err != nil {
c.Fatal(err)
}
if len(info) > 0 {
for _, f := range info {
if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil {
c.Fatal(err)
}
}
}
cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox")
if _, err := runCommand(cmd); err != nil {
c.Fatal(err)
}
cmd = exec.Command(dockerBinary, "start", "lone_starr")
if _, err := runCommand(cmd); err != nil {
c.Fatal(err)
}
info, err = ioutil.ReadDir(volumesConfigPath)
if err != nil {
c.Fatal(err)
}
if len(info) != 1 {
c.Fatalf("Expected only 1 volume have %v", len(info))
}
}
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
// just run with unknown image
cmd := exec.Command(dockerBinary, "run", "asdfsg")
@ -2496,7 +2452,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
c.Assert(err, check.IsNil)
if !strings.Contains(out, volumesStoragePath) {
if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /foo\n%q", out)
}
@ -2507,7 +2463,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
c.Assert(err, check.IsNil)
if !strings.Contains(out, volumesStoragePath) {
if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /bar\n%q", out)
}
}

View file

@ -126,32 +126,6 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
}
// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s
func (s *DockerSuite) TestStartVolumesFromFailsCleanly(c *check.C) {
// Create the first data volume
dockerCmd(c, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
// Expect this to fail because the data test after contaienr doesn't exist yet
if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
c.Fatal("Expected error but got none")
}
// Create the second data volume
dockerCmd(c, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
// Now, all the volumes should be there
dockerCmd(c, "start", "consumer")
// Check that we have the volumes we want
out, _ := dockerCmd(c, "inspect", "--format='{{ len .Volumes }}'", "consumer")
nVolumes := strings.Trim(out, " \r\n'")
if nVolumes != "2" {
c.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
}
}
func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
defer unpauseAllContainers()

View file

@ -0,0 +1,150 @@
// +build !windows
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/go-check/check"
)
// init wires the external-volume test suite into the go-check runner,
// embedding the standard DockerSuite so its per-test hooks run as well.
func init() {
	check.Suite(&ExternalVolumeSuite{
		ds: &DockerSuite{},
	})
}
// ExternalVolumeSuite drives the docker daemon against an out-of-process
// volume plugin implemented by a local httptest server.
type ExternalVolumeSuite struct {
	server *httptest.Server // fake volume plugin endpoint
	ds     *DockerSuite     // embedded suite providing common per-test hooks
}

// SetUpTest delegates to the standard DockerSuite per-test setup.
func (s *ExternalVolumeSuite) SetUpTest(c *check.C) {
	s.ds.SetUpTest(c)
}

// TearDownTest delegates to the standard DockerSuite per-test cleanup.
func (s *ExternalVolumeSuite) TearDownTest(c *check.C) {
	s.ds.TearDownTest(c)
}
// SetUpSuite starts an in-process HTTP server implementing the volume
// plugin protocol (activation plus create/remove/path/mount/unmount) and
// registers it with the daemon by writing a .spec file into the plugin
// discovery directory. Mounted volumes are materialized under
// /var/lib/docker/volumes/<name> with a "test" file containing the server
// URL so the tests can verify the mount from inside a container.
func (s *ExternalVolumeSuite) SetUpSuite(c *check.C) {
	mux := http.NewServeMux()
	s.server = httptest.NewServer(mux)

	// The field must be exported so encoding/json can decode the daemon's
	// {"Name": ...} payload into it; an unexported field is silently
	// ignored by the decoder and every handler would see an empty name.
	type pluginRequest struct {
		Name string `json:"Name"`
	}

	hostVolumePath := func(name string) string {
		return fmt.Sprintf("/var/lib/docker/volumes/%s", name)
	}

	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
	})

	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		p := hostVolumePath(pr.Name)
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
	})

	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		p := hostVolumePath(pr.Name)

		if err := os.MkdirAll(p, 0755); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		// Drop a marker file whose content is the plugin URL; the tests
		// cat this file from inside the container to prove the volume
		// really came from this plugin.
		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
	})

	// NOTE(review): the plugin API documentation names this endpoint
	// /VolumeDriver.Unmount — confirm which spelling the daemon sends
	// before relying on unmount behavior here.
	mux.HandleFunc("/VolumeDriver.Umount", func(w http.ResponseWriter, r *http.Request) {
		var pr pluginRequest
		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		p := hostVolumePath(pr.Name)
		if err := os.RemoveAll(p); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
		fmt.Fprintln(w, `{}`)
	})

	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
		c.Fatal(err)
	}

	if err := ioutil.WriteFile("/usr/share/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil {
		c.Fatal(err)
	}
}
// TearDownSuite stops the fake plugin HTTP server and removes the plugin
// discovery directory that SetUpSuite created under /usr/share/docker/plugins.
func (s *ExternalVolumeSuite) TearDownSuite(c *check.C) {
	s.server.Close()
	// Delete the spec file (and the whole discovery dir) so later suites do
	// not see a stale plugin pointing at a closed server.
	if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
		c.Fatal(err)
	}
}
// TestStartExternalVolumeDriver runs a container using --volume-driver and
// verifies that the file written by the fake plugin (containing the test
// server URL) is visible inside the container.
func (s *ExternalVolumeSuite) TestStartExternalVolumeDriver(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
	stdout, stderr, code, err := runCommandWithStdoutStderr(cmd)
	if err != nil && code != 0 {
		c.Fatal(stdout, stderr, err)
	}
	if !strings.Contains(stdout, s.server.URL) {
		c.Fatalf("External volume mount failed. Output: %s\n", stdout)
	}
}
// TestStartExternalVolumeNamedDriver exercises the "driver/name" volume
// syntax (-v test-external-volume-driver/volume-1:...) and verifies the
// plugin-provided data is visible in the container.
func (s *ExternalVolumeSuite) TestStartExternalVolumeNamedDriver(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "--name", "test-data", "-v", "test-external-volume-driver/volume-1:/tmp/external-volume-test", "busybox:latest", "cat", "/tmp/external-volume-test/test")
	stdout, stderr, code, err := runCommandWithStdoutStderr(cmd)
	if err != nil && code != 0 {
		c.Fatal(stdout, stderr, err)
	}
	if !strings.Contains(stdout, s.server.URL) {
		c.Fatalf("External volume mount failed. Output: %s\n", stdout)
	}
}

View file

@ -18,7 +18,6 @@ var (
dockerBasePath = "/var/lib/docker"
volumesConfigPath = dockerBasePath + "/volumes"
volumesStoragePath = dockerBasePath + "/vfs/dir"
containerStoragePath = dockerBasePath + "/containers"
runtimePath = "/var/run/docker"

View file

@ -31,6 +31,10 @@ type Client struct {
}
func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
return c.callWithRetry(serviceMethod, args, ret, true)
}
func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(args); err != nil {
return err
@ -50,12 +54,16 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
for {
resp, err := c.http.Do(req)
if err != nil {
if !retry {
return err
}
timeOff := backoff(retries)
if timeOff+time.Since(start) > defaultTimeOut {
if abort(start, timeOff) {
return err
}
retries++
logrus.Warn("Unable to connect to plugin: %s, retrying in %ds\n", c.addr, timeOff)
logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
time.Sleep(timeOff)
continue
}
@ -73,7 +81,7 @@ func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) e
}
func backoff(retries int) time.Duration {
b, max := float64(1), float64(defaultTimeOut)
b, max := 1, defaultTimeOut
for b < max && retries > 0 {
b *= 2
retries--
@ -81,7 +89,11 @@ func backoff(retries int) time.Duration {
if b > max {
b = max
}
return time.Duration(b)
return time.Duration(b) * time.Second
}
func abort(start time.Time, timeOff time.Duration) bool {
return timeOff+time.Since(start) > time.Duration(defaultTimeOut)*time.Second
}
func configureTCPTransport(tr *http.Transport, proto, addr string) {

View file

@ -6,6 +6,7 @@ import (
"net/http/httptest"
"reflect"
"testing"
"time"
)
var (
@ -27,7 +28,7 @@ func teardownRemotePluginServer() {
func TestFailedConnection(t *testing.T) {
c := NewClient("tcp://127.0.0.1:1")
err := c.Call("Service.Method", nil, nil)
err := c.callWithRetry("Service.Method", nil, nil, false)
if err == nil {
t.Fatal("Unexpected successful connection")
}
@ -61,3 +62,44 @@ func TestEchoInputOutput(t *testing.T) {
t.Fatalf("Expected %v, was %v\n", m, output)
}
}
// TestBackoff checks that the retry delay doubles per retry and is capped
// at the 30-second default timeout.
func TestBackoff(t *testing.T) {
	for _, tc := range []struct {
		retries    int
		expTimeOff time.Duration
	}{
		{0, time.Duration(1)},
		{1, time.Duration(2)},
		{2, time.Duration(4)},
		{4, time.Duration(16)},
		{6, time.Duration(30)},
		{10, time.Duration(30)},
	} {
		want := tc.expTimeOff * time.Second
		if got := backoff(tc.retries); got != want {
			t.Fatalf("Retry %v, expected %v, was %v\n", tc.retries, want, got)
		}
	}
}
// TestAbortRetry checks that abort triggers once the elapsed time plus the
// pending backoff would exceed the 30-second default timeout.
func TestAbortRetry(t *testing.T) {
	cases := []struct {
		timeOff  time.Duration
		expAbort bool
	}{
		{time.Duration(1), false},
		{time.Duration(2), false},
		{time.Duration(10), false},
		{time.Duration(30), true},
		{time.Duration(40), true},
	}
	for _, c := range cases {
		s := c.timeOff * time.Second
		if a := abort(time.Now(), s); a != c.expAbort {
			// Fix: the original message passed s for "expected %v", printing
			// the duration twice instead of the expected abort decision.
			t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, c.expAbort, a)
		}
	}
}

View file

@ -122,6 +122,7 @@ type Config struct {
Cmd *Command
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumeDriver string
WorkingDir string
Entrypoint *Entrypoint
NetworkDisabled bool

View file

@ -77,6 +77,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only")
flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container")
flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
flVolumeDriver = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
)
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR")
@ -317,6 +318,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
Labels: convertKVStringsToMap(labels),
VolumeDriver: *flVolumeDriver,
}
hostConfig := &HostConfig{

22
utils/tcp.go Normal file
View file

@ -0,0 +1,22 @@
package utils
import (
"net"
"net/http"
"time"
)
func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
// Why 32? See https://github.com/docker/docker/pull/8035.
timeout := 32 * time.Second
if proto == "unix" {
// No need for compression in local communications.
tr.DisableCompression = true
tr.Dial = func(_, _ string) (net.Conn, error) {
return net.DialTimeout(proto, addr, timeout)
}
} else {
tr.Proxy = http.ProxyFromEnvironment
tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
}
}

51
volume/drivers/adapter.go Normal file
View file

@ -0,0 +1,51 @@
package volumedrivers
import "github.com/docker/docker/volume"
// volumeDriverAdapter exposes a remote plugin (reached through a
// volumeDriverProxy) as a volume.Driver.
type volumeDriverAdapter struct {
	name  string
	proxy *volumeDriverProxy
}

// Name returns the name the driver was registered under.
func (a *volumeDriverAdapter) Name() string {
	return a.name
}

// Create asks the remote driver to create a volume named name and, on
// success, wraps it in a volumeAdapter.
func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) {
	err := a.proxy.Create(name)
	if err != nil {
		return nil, err
	}
	return &volumeAdapter{a.proxy, name, a.name}, nil
}

// Remove tells the remote driver to delete the volume.
func (a *volumeDriverAdapter) Remove(v volume.Volume) error {
	return a.proxy.Remove(v.Name())
}
// volumeAdapter is the volume.Volume implementation backed by a remote
// plugin; every operation is forwarded over the proxy.
type volumeAdapter struct {
	proxy      *volumeDriverProxy
	name       string
	driverName string
}

// Name returns the volume's name.
func (a *volumeAdapter) Name() string {
	return a.name
}

// DriverName returns the name of the driver that owns this volume.
func (a *volumeAdapter) DriverName() string {
	return a.driverName
}

// Path returns the host mountpoint reported by the remote driver.
// NOTE(review): the proxy error is discarded here, so a failing Path call
// silently yields "" — confirm callers tolerate that.
func (a *volumeAdapter) Path() string {
	m, _ := a.proxy.Path(a.name)
	return m
}

// Mount mounts the volume via the remote driver and returns its mountpoint.
func (a *volumeAdapter) Mount() (string, error) {
	return a.proxy.Mount(a.name)
}

// Unmount unmounts the volume via the remote driver.
func (a *volumeAdapter) Unmount() error {
	return a.proxy.Unmount(a.name)
}

20
volume/drivers/api.go Normal file
View file

@ -0,0 +1,20 @@
package volumedrivers
import "github.com/docker/docker/volume"
// client is the minimal RPC surface the proxy needs from a plugin client.
type client interface {
	Call(string, interface{}, interface{}) error
}

// NewVolumeDriver returns a volume.Driver that forwards every call to the
// plugin reachable through c.
func NewVolumeDriver(name string, c client) volume.Driver {
	proxy := &volumeDriverProxy{c}
	return &volumeDriverAdapter{name, proxy}
}

// VolumeDriver is the API a volume plugin must implement.
type VolumeDriver interface {
	// Create makes a volume with the given name available on the host.
	Create(name string) (err error)
	// Remove deletes the named volume.
	Remove(name string) (err error)
	// Path returns the named volume's mountpoint on the host.
	Path(name string) (mountpoint string, err error)
	// Mount makes the named volume usable and returns its mountpoint.
	Mount(name string) (mountpoint string, err error)
	// Unmount signals that the named volume is no longer in use.
	Unmount(name string) (err error)
}

View file

@ -0,0 +1,61 @@
package volumedrivers
import (
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/volume"
)
// currently created by hand. generation tool would generate this like:
// $ extpoint-gen Driver > volume/extpoint.go

// drivers is the process-wide registry of volume drivers, keyed by name.
var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}

// driverExtpoint guards the driver map with its embedded mutex.
type driverExtpoint struct {
	extensions map[string]volume.Driver
	sync.Mutex
}
// Register adds extension to the registry under name. It reports false when
// the name is empty or already taken.
func Register(extension volume.Driver, name string) bool {
	drivers.Lock()
	defer drivers.Unlock()
	if name == "" {
		return false
	}
	if _, taken := drivers.extensions[name]; taken {
		return false
	}
	drivers.extensions[name] = extension
	return true
}
// Unregister drops the driver registered under name, reporting whether an
// entry was actually removed.
func Unregister(name string) bool {
	drivers.Lock()
	defer drivers.Unlock()
	if _, found := drivers.extensions[name]; !found {
		return false
	}
	delete(drivers.extensions, name)
	return true
}
// Lookup returns the driver registered under name, lazily loading it from
// the plugin subsystem (and caching it) on first use. It returns nil when
// the plugin cannot be found or activated.
func Lookup(name string) volume.Driver {
	drivers.Lock()
	defer drivers.Unlock()
	ext, ok := drivers.extensions[name]
	if ok {
		return ext
	}
	// NOTE(review): plugins.Get may do I/O to discover/activate the plugin
	// while the registry lock is held, blocking every other
	// Register/Unregister/Lookup caller in the meantime — confirm this is
	// acceptable.
	pl, err := plugins.Get(name, "VolumeDriver")
	if err != nil {
		logrus.Errorf("Error: %v", err)
		return nil
	}
	d := NewVolumeDriver(name, pl.Client)
	drivers.extensions[name] = d
	return d
}

65
volume/drivers/proxy.go Normal file
View file

@ -0,0 +1,65 @@
package volumedrivers
// currently created by hand. generation tool would generate this like:
// $ rpc-gen volume/drivers/api.go VolumeDriver > volume/drivers/proxy.go
// volumeDriverRequest is the JSON body sent with every VolumeDriver call.
type volumeDriverRequest struct {
	Name string
}
type volumeDriverResponse struct {
Mountpoint string `json:",ommitempty"`
Err error `json:",ommitempty"`
}
// volumeDriverProxy turns VolumeDriver method calls into RPCs over c.
type volumeDriverProxy struct {
	c client
}
// Create asks the remote driver to create the named volume.
func (pp *volumeDriverProxy) Create(name string) error {
	var ret volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Create", volumeDriverRequest{name}, &ret); err != nil {
		return err
	}
	return ret.Err
}
// Remove asks the remote driver to delete the named volume.
func (pp *volumeDriverProxy) Remove(name string) error {
	var ret volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Remove", volumeDriverRequest{name}, &ret); err != nil {
		return err
	}
	return ret.Err
}
// Path asks the remote driver where the named volume is mounted on the host.
func (pp *volumeDriverProxy) Path(name string) (string, error) {
	args := volumeDriverRequest{name}
	var ret volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Path", args, &ret); err != nil {
		return "", err
	}
	return ret.Mountpoint, ret.Err
}
// Mount asks the remote driver to mount the named volume and returns the
// resulting host mountpoint.
func (pp *volumeDriverProxy) Mount(name string) (string, error) {
	args := volumeDriverRequest{name}
	var ret volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Mount", args, &ret); err != nil {
		return "", err
	}
	return ret.Mountpoint, ret.Err
}
// Unmount tells the remote driver the named volume is no longer in use.
func (pp *volumeDriverProxy) Unmount(name string) error {
	var ret volumeDriverResponse
	if err := pp.c.Call("VolumeDriver.Unmount", volumeDriverRequest{name}, &ret); err != nil {
		return err
	}
	return ret.Err
}

126
volume/local/local.go Normal file
View file

@ -0,0 +1,126 @@
package local
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/docker/docker/volume"
)
// New initializes the built-in "local" volume driver rooted at
// rootDirectory, creating that directory if needed and re-registering any
// volume directories left over from a previous daemon run.
func New(rootDirectory string) (*Root, error) {
	if err := os.MkdirAll(rootDirectory, 0700); err != nil {
		return nil, err
	}
	r := &Root{
		path:    rootDirectory,
		volumes: make(map[string]*Volume),
	}
	dirs, err := ioutil.ReadDir(rootDirectory)
	if err != nil {
		return nil, err
	}
	for _, d := range dirs {
		// Only directories can be volumes; without this check a stray file
		// in the root would be resurrected as a bogus volume entry.
		if !d.IsDir() {
			continue
		}
		name := filepath.Base(d.Name())
		r.volumes[name] = &Volume{
			driverName: r.Name(),
			name:       name,
			path:       filepath.Join(rootDirectory, name),
		}
	}
	return r, nil
}
// Root manages the set of local volumes living under a single host directory.
type Root struct {
	m       sync.Mutex // guards volumes
	path    string     // host directory all volumes live under
	volumes map[string]*Volume
}

// Name returns the driver's name, "local".
func (r *Root) Name() string {
	return "local"
}
// Create returns the volume registered under name, creating its backing
// directory (0755) on first use. Each successful Create adds one reference
// to the volume; Remove drops it.
func (r *Root) Create(name string) (volume.Volume, error) {
	r.m.Lock()
	defer r.m.Unlock()
	v, exists := r.volumes[name]
	if !exists {
		path := filepath.Join(r.path, name)
		if err := os.Mkdir(path, 0755); err != nil {
			// Directory exists on disk but is not tracked in memory:
			// surface the conflict instead of silently adopting it.
			if os.IsExist(err) {
				return nil, fmt.Errorf("volume already exists under %s", path)
			}
			return nil, err
		}
		v = &Volume{
			driverName: r.Name(),
			name:       name,
			path:       path,
		}
		r.volumes[name] = v
	}
	v.use()
	return v, nil
}
// Remove drops one reference to v and deletes its data from disk once the
// reference count reaches zero.
func (r *Root) Remove(v volume.Volume) error {
	r.m.Lock()
	defer r.m.Unlock()
	lv, ok := v.(*Volume)
	if !ok {
		return errors.New("unknown volume type")
	}
	lv.release()
	// NOTE(review): release is unconditional, so calling Remove more times
	// than Create drives usedCount negative and the data is then never
	// deleted — confirm callers always pair Create/Remove.
	if lv.usedCount == 0 {
		delete(r.volumes, lv.name)
		return os.RemoveAll(lv.path)
	}
	return nil
}
// Volume is a directory on the host backing a "local" volume. It is
// reference-counted so use by several containers is tracked.
type Volume struct {
	m         sync.Mutex // guards usedCount
	usedCount int        // outstanding Create calls (see Root.Create/Remove)
	// unique name of the volume
	name string
	// path is the path on the host where the data lives
	path string
	// driverName is the name of the driver that created the volume.
	driverName string
}

// Name returns the volume's unique name.
func (v *Volume) Name() string {
	return v.name
}

// DriverName returns the name of the driver that created the volume.
func (v *Volume) DriverName() string {
	return v.driverName
}

// Path returns the host directory holding the volume's data.
func (v *Volume) Path() string {
	return v.path
}

// Mount returns the host path directly; local volumes need no mount step.
func (v *Volume) Mount() (string, error) {
	return v.path, nil
}

// Unmount is a no-op for local volumes.
func (v *Volume) Unmount() error {
	return nil
}

// use records one more consumer of the volume.
func (v *Volume) use() {
	v.m.Lock()
	v.usedCount++
	v.m.Unlock()
}

// release records that one consumer is done with the volume.
func (v *Volume) release() {
	v.m.Lock()
	v.usedCount--
	v.m.Unlock()
}

26
volume/volume.go Normal file
View file

@ -0,0 +1,26 @@
package volume
// DefaultDriverName is the driver used when a container does not request a
// specific one: the built-in "local" driver.
const DefaultDriverName = "local"

// Driver is implemented by every volume driver, built-in or plugin.
type Driver interface {
	// Name returns the name of the volume driver.
	Name() string
	// Create makes a new volume with the given id.
	Create(string) (Volume, error)
	// Remove deletes the volume.
	Remove(Volume) error
}

// Volume is a single named volume as seen by the daemon.
type Volume interface {
	// Name returns the name of the volume
	Name() string
	// DriverName returns the name of the driver which owns this volume.
	DriverName() string
	// Path returns the absolute path to the volume.
	Path() string
	// Mount mounts the volume and returns the absolute path to
	// where it can be consumed.
	Mount() (string, error)
	// Unmount unmounts the volume when it is no longer in use.
	Unmount() error
}

View file

@ -1,193 +0,0 @@
package volumes
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/stringid"
)
type Repository struct {
configPath string
driver graphdriver.Driver
volumes map[string]*Volume
lock sync.Mutex
}
func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
abspath, err := filepath.Abs(configPath)
if err != nil {
return nil, err
}
// Create the config path
if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
repo := &Repository{
driver: driver,
configPath: abspath,
volumes: make(map[string]*Volume),
}
return repo, repo.restore()
}
func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
var (
isBindMount bool
err error
id = stringid.GenerateRandomID()
)
if path != "" {
isBindMount = true
}
if path == "" {
path, err = r.createNewVolumePath(id)
if err != nil {
return nil, err
}
}
path = filepath.Clean(path)
// Ignore the error here since the path may not exist
// Really just want to make sure the path we are using is real(or nonexistent)
if cleanPath, err := filepath.EvalSymlinks(path); err == nil {
path = cleanPath
}
v := &Volume{
ID: id,
Path: path,
repository: r,
Writable: writable,
containers: make(map[string]struct{}),
configPath: r.configPath + "/" + id,
IsBindMount: isBindMount,
}
if err := v.initialize(); err != nil {
return nil, err
}
r.add(v)
return v, nil
}
func (r *Repository) restore() error {
dir, err := ioutil.ReadDir(r.configPath)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
vol := &Volume{
ID: id,
configPath: r.configPath + "/" + id,
containers: make(map[string]struct{}),
}
if err := vol.FromDisk(); err != nil {
if !os.IsNotExist(err) {
logrus.Debugf("Error restoring volume: %v", err)
continue
}
if err := vol.initialize(); err != nil {
logrus.Debugf("%s", err)
continue
}
}
r.add(vol)
}
return nil
}
// Get returns the volume registered for path, or nil if none is known.
func (r *Repository) Get(path string) *Volume {
	r.lock.Lock()
	vol := r.get(path)
	r.lock.Unlock()
	return vol
}

// get is the lock-free lookup; callers must hold r.lock. Symlinks in path
// are resolved so the lookup uses the same canonical key as add.
func (r *Repository) get(path string) *Volume {
	path, err := filepath.EvalSymlinks(path)
	if err != nil {
		return nil
	}
	return r.volumes[filepath.Clean(path)]
}
// add indexes the volume by its path, keeping the first volume registered
// for a given path; callers must hold r.lock.
func (r *Repository) add(volume *Volume) {
	if vol := r.get(volume.Path); vol != nil {
		return
	}
	r.volumes[volume.Path] = volume
}
// Delete removes the volume registered for path from the repository and
// from disk. It fails while the volume is still referenced by containers.
// For bind mounts only the volume's config is removed; the user's data at
// the bound path is left untouched.
func (r *Repository) Delete(path string) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	path, err := filepath.EvalSymlinks(path)
	if err != nil {
		return err
	}
	volume := r.get(filepath.Clean(path))
	if volume == nil {
		return fmt.Errorf("Volume %s does not exist", path)
	}
	containers := volume.Containers()
	if len(containers) > 0 {
		return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
	}
	if err := os.RemoveAll(volume.configPath); err != nil {
		return err
	}
	if !volume.IsBindMount {
		if err := r.driver.Remove(volume.ID); err != nil {
			// The graph driver may have already lost the data; tolerate that.
			if !os.IsNotExist(err) {
				return err
			}
		}
	}
	delete(r.volumes, volume.Path)
	return nil
}
func (r *Repository) createNewVolumePath(id string) (string, error) {
if err := r.driver.Create(id, ""); err != nil {
return "", err
}
path, err := r.driver.Get(id, "")
if err != nil {
return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
}
return path, nil
}
// FindOrCreateVolume returns the volume registered for path, creating one
// when path is unknown. An empty path always produces a brand-new anonymous
// volume.
func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
	r.lock.Lock()
	defer r.lock.Unlock()
	if path != "" {
		if v := r.get(path); v != nil {
			return v, nil
		}
	}
	return r.newVolume(path, writable)
}

View file

@ -1,164 +0,0 @@
package volumes
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/daemon/graphdriver"
_ "github.com/docker/docker/daemon/graphdriver/vfs"
)
func TestRepositoryFindOrCreate(t *testing.T) {
root, err := ioutil.TempDir(os.TempDir(), "volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
repo, err := newRepo(root)
if err != nil {
t.Fatal(err)
}
// no path
v, err := repo.FindOrCreateVolume("", true)
if err != nil {
t.Fatal(err)
}
// FIXME: volumes are heavily dependent on the vfs driver, but this should not be so!
expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID)
if v.Path != expected {
t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path)
}
// with a non-existant path
dir := filepath.Join(root, "doesntexist")
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
if v.Path != dir {
t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
}
if _, err := os.Stat(v.Path); err != nil {
t.Fatal(err)
}
// with a pre-existing path
// can just use the same path from above since it now exists
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
if v.Path != dir {
t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
}
}
func TestRepositoryGet(t *testing.T) {
root, err := ioutil.TempDir(os.TempDir(), "volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
repo, err := newRepo(root)
if err != nil {
t.Fatal(err)
}
v, err := repo.FindOrCreateVolume("", true)
if err != nil {
t.Fatal(err)
}
v2 := repo.Get(v.Path)
if v2 == nil {
t.Fatalf("expected to find volume but didn't")
}
if v2 != v {
t.Fatalf("expected get to return same volume")
}
}
func TestRepositoryDelete(t *testing.T) {
root, err := ioutil.TempDir(os.TempDir(), "volumes")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
repo, err := newRepo(root)
if err != nil {
t.Fatal(err)
}
// with a normal volume
v, err := repo.FindOrCreateVolume("", true)
if err != nil {
t.Fatal(err)
}
if err := repo.Delete(v.Path); err != nil {
t.Fatal(err)
}
if v := repo.Get(v.Path); v != nil {
t.Fatalf("expected volume to not exist")
}
if _, err := os.Stat(v.Path); err == nil {
t.Fatalf("expected volume files to be removed")
}
// with a bind mount
dir := filepath.Join(root, "test")
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
if err := repo.Delete(v.Path); err != nil {
t.Fatal(err)
}
if v := repo.Get(v.Path); v != nil {
t.Fatalf("expected volume to not exist")
}
if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
t.Fatalf("expected bind volume data to persist after destroying volume")
}
// with container refs
dir = filepath.Join(root, "test")
v, err = repo.FindOrCreateVolume(dir, true)
if err != nil {
t.Fatal(err)
}
v.AddContainer("1234")
if err := repo.Delete(v.Path); err == nil {
t.Fatalf("expected volume delete to fail due to container refs")
}
v.RemoveContainer("1234")
if err := repo.Delete(v.Path); err != nil {
t.Fatal(err)
}
}
func newRepo(root string) (*Repository, error) {
configPath := filepath.Join(root, "repo-config")
graphDir := filepath.Join(root, "repo-graph")
driver, err := graphdriver.GetDriver("vfs", graphDir, []string{})
if err != nil {
return nil, err
}
return NewRepository(configPath, driver)
}

View file

@ -1,152 +0,0 @@
package volumes
import (
"encoding/json"
"os"
"path/filepath"
"sync"
"github.com/docker/docker/pkg/symlink"
)
type Volume struct {
ID string
Path string
IsBindMount bool
Writable bool
containers map[string]struct{}
configPath string
repository *Repository
lock sync.Mutex
}
// IsDir reports whether the volume's path exists and is a directory.
func (v *Volume) IsDir() (bool, error) {
	fi, err := os.Stat(v.Path)
	if err != nil {
		return false, err
	}
	return fi.IsDir(), nil
}
// Containers returns the ids of the containers currently referencing the
// volume.
func (v *Volume) Containers() []string {
	v.lock.Lock()
	defer v.lock.Unlock()
	var ids []string
	for id := range v.containers {
		ids = append(ids, id)
	}
	return ids
}
// RemoveContainer drops a container's reference to the volume.
// (Parameter renamed containerId -> containerID per Go initialism style.)
func (v *Volume) RemoveContainer(containerID string) {
	v.lock.Lock()
	delete(v.containers, containerID)
	v.lock.Unlock()
}

// AddContainer records that a container references the volume.
func (v *Volume) AddContainer(containerID string) {
	v.lock.Lock()
	v.containers[containerID] = struct{}{}
	v.lock.Unlock()
}
func (v *Volume) initialize() error {
v.lock.Lock()
defer v.lock.Unlock()
if _, err := os.Stat(v.Path); err != nil {
if !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(v.Path, 0755); err != nil {
return err
}
}
if err := os.MkdirAll(v.configPath, 0755); err != nil {
return err
}
return v.toDisk()
}
func (v *Volume) ToDisk() error {
v.lock.Lock()
defer v.lock.Unlock()
return v.toDisk()
}
func (v *Volume) toDisk() error {
jsonPath, err := v.jsonPath()
if err != nil {
return err
}
f, err := os.OpenFile(jsonPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
if err := json.NewEncoder(f).Encode(v); err != nil {
f.Close()
return err
}
return f.Close()
}
func (v *Volume) FromDisk() error {
v.lock.Lock()
defer v.lock.Unlock()
pth, err := v.jsonPath()
if err != nil {
return err
}
jsonSource, err := os.Open(pth)
if err != nil {
return err
}
defer jsonSource.Close()
dec := json.NewDecoder(jsonSource)
return dec.Decode(v)
}
func (v *Volume) jsonPath() (string, error) {
return v.GetRootResourcePath("config.json")
}
// Evaluates `path` in the scope of the volume's root path, with proper path
// sanitisation. Symlinks are all scoped to the root of the volume, as
// though the volume's root was `/`.
//
// The volume's root path is the host-facing path of the root of the volume's
// mountpoint inside a container.
//
// NOTE: The returned path is *only* safely scoped inside the volume's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (v *Volume) GetResourcePath(path string) (string, error) {
	cleanPath := filepath.Join("/", path)
	return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path)
}
// Evaluates `path` in the scope of the volume's config path, with proper path
// sanitisation. Symlinks are all scoped to the root of the config path, as
// though the config path was `/`.
//
// The config path of a volume is not exposed to the container and is just used
// to store volume configuration options and other internal information. If in
// doubt, you probably want to just use v.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the volume's config
// path if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (v *Volume) GetRootResourcePath(path string) (string, error) {
	cleanPath := filepath.Join("/", path)
	return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
}

View file

@ -1,55 +0,0 @@
package volumes
import (
"os"
"testing"
"github.com/docker/docker/pkg/stringutils"
)
func TestContainers(t *testing.T) {
v := &Volume{containers: make(map[string]struct{})}
id := "1234"
v.AddContainer(id)
if v.Containers()[0] != id {
t.Fatalf("adding a container ref failed")
}
v.RemoveContainer(id)
if len(v.Containers()) != 0 {
t.Fatalf("removing container failed")
}
}
// os.Stat(v.Path) is returning ErrNotExist, initialize catch it and try to
// mkdir v.Path but it dies and correctly returns the error
func TestInitializeCannotMkdirOnNonExistentPath(t *testing.T) {
v := &Volume{Path: "nonexistentpath"}
err := v.initialize()
if err == nil {
t.Fatal("Expected not to initialize volume with a non existent path")
}
if !os.IsNotExist(err) {
t.Fatalf("Expected to get ErrNotExist error, got %s", err)
}
}
// os.Stat(v.Path) is NOT returning ErrNotExist so skip and return error from
// initialize
func TestInitializeCannotStatPathFileNameTooLong(t *testing.T) {
// ENAMETOOLONG
v := &Volume{Path: stringutils.GenerateRandomAlphaOnlyString(300)}
err := v.initialize()
if err == nil {
t.Fatal("Expected not to initialize volume with a non existent path")
}
if os.IsNotExist(err) {
t.Fatal("Expected to not get ErrNotExist")
}
}