diff --git a/CHANGELOG.md b/CHANGELOG.md index 78fba74fb4..2d19c2094a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,141 @@ information on the list of deprecated flags and APIs please have a look at https://docs.docker.com/misc/deprecated/ where target removal dates can also be found.
+## 1.10.0 (2016-02-04)
+
+**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers.
+A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present.
+Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime.
+The Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/
+
+### Runtime
+
++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078)
++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587)
++ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692)
++ Allow setting daemon configuration in a file and hot-reloading it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587)
++ Update `docker events` to include more metadata and event types [#18888](https://github.com/docker/docker/pull/18888)
+  This change is backward compatible in the API, but not on the CLI.
++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959)
++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466)
++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879)
++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277)
++ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666)
++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168)
++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249)
++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478)
++ Add `--cgroup-parent` flag on `daemon` to set the cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062)
++ Add `-L` flag to `docker cp` to follow symlinks [#16613](https://github.com/docker/docker/pull/16613)
++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908)
+* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012)
+* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
+* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742)
+* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704)
+* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115)
+* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208)
+* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780)
+* Permit `OPTIONS` requests against any URL, fixing an issue with CORS [#19569](https://github.com/docker/docker/pull/19569)
+- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428)
+- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326)
+- Fix race condition causing autorestart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629)
+- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216)
+- Fix obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751)
+- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722)
+- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123)
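A quick sketch of a few of the new runtime features in action (container names and size values are hypothetical):

```bash
# Raise resource limits on a running container (new `docker update` command)
docker update --memory 512m --memory-swap 1g web

# Mount a tmpfs and size /dev/shm at run time
docker run -d --tmpfs /run:rw,size=64m --shm-size 128m nginx

# Override the key sequence used to detach from a container
docker attach --detach-keys="ctrl-x,x" web

# Hot-reload a file-based daemon configuration
kill -SIGHUP $(pidof docker)
```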
Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115) +* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) +* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) +* Permit `OPTIONS` request against any url, thus fixing issue with CORS [#19569](https://github.com/docker/docker/pull/19569) +- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) +- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) +- Fix race condition causing autorestart turning off on restart [#17629](https://github.com/docker/docker/pull/17629) +- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) +- Fix obscure bug preventing to start containers [#19751](https://github.com/docker/docker/pull/19751) +- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) +- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) + +### Security + ++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) ++ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) ++ Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780) ++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) ++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) + This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. + Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. +* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) + +### Distribution + +* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) + Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. + Images no longer depend on the parent chain but contain a list of layer references. + `docker load`/`docker save` tarballs now also contain content-addressable image configurations. 
+
+### Distribution
+
+* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924)
+  Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present.
+  Images no longer depend on the parent chain but contain a list of layer references.
+  `docker load`/`docker save` tarballs now also contain content-addressable image configurations.
+  For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785)
+* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109)
+* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590)
+- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743)
+
+### Networking
+
++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198)
++ Support for network-scoped aliases using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242)
++ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001)
++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316)
++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276)
++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167)
++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure the discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204)
++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481)
++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229)
++ Support for capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775)
++ Add `--force` to `network disconnect` to force a container to be disconnected from a network [#19317](https://github.com/docker/docker/pull/19317)
+* Support for multi-host networking using the built-in overlay driver for all engine-supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775)
+* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229)
+* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489)
+* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615)
+* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316)
+* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782)
+* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906)
+* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323)
+- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108)
+- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106)
+- Fix issue that sometimes prevented docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338)
+- Do not substitute a 127.0.0.1 nameserver when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573)
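A short illustration of the new network flags above (network and container names are made up):

```bash
# Create an isolated user-defined network with a custom subnet
docker network create --internal --subnet 10.10.0.0/24 backend

# Attach a container with a static IP and a network-scoped alias
docker run -d --net backend --ip 10.10.0.5 --net-alias db postgres

# Connect an already-running container with a local alias,
# and force-disconnect it later
docker network connect --alias cache backend worker
docker network disconnect --force backend worker
```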
+
+### Logging
+
++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488)
++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998)
+* Enhance `docker logs --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
+* Enhance AWS logs to auto-detect the region [#16640](https://github.com/docker/docker/pull/16640)
+
+### Volumes
+
++ Add support for setting the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034)
+* Add `ls` and `inspect` endpoints to the volume plugin API [#16534](https://github.com/docker/docker/pull/16534)
+  Existing plugins need to make use of these new APIs to satisfy users' expectations.
+  For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549)
+- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175)
+- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500)
+- Fix `docker volume ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671)
+- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568)
+- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190)
+
+### Builder
+
++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090)
+- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055)
+- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283)
+
+### Client
+
++ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable [#15964](https://github.com/docker/docker/pull/15964)
+- Fix a bug preventing Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891)
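For example, a newer client can be pinned to an older daemon's API (version number illustrative):

```bash
# Force the client to speak API version 1.21 instead of its default
DOCKER_API_VERSION=1.21 docker version
```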
+
+### Misc
+
+* systemd: Set TasksMax in addition to LimitNPROC in the systemd service file [#19391](https://github.com/docker/docker/pull/19391)
+
+### Deprecations
+
+* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700)
+* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700)
+* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724)
+* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799)
+* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809)
+* Deprecate `-f` flag for `docker tag` [#18350](https://github.com/docker/docker/pull/18350)
+
 ## 1.9.1 (2015-11-21)  ### Runtime
diff --git a/Dockerfile b/Dockerfile index d498eb6dd3..4d2c63d7eb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -155,7 +155,7 @@ RUN set -x \ # both. This allows integration-cli tests to cover push/pull with both schema1 # and schema2 manifests. ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ @@ -168,7 +168,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install notary server -ENV NOTARY_VERSION docker-v1.10-3 +ENV NOTARY_VERSION docker-v1.10-5 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ diff --git a/Dockerfile.armhf b/Dockerfile.armhf index e0203b6d83..9fb485b0cb 100644 --- a/Dockerfile.armhf +++ b/Dockerfile.armhf @@ -145,7 +145,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install notary server -ENV NOTARY_VERSION docker-v1.10-2 +ENV NOTARY_VERSION docker-v1.10-5 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le index 1c3804bba2..1c843b347b 100644 --- a/Dockerfile.ppc64le +++ b/Dockerfile.ppc64le @@ -116,14 +116,14 @@ RUN set -x \ && rm -rf "$GOPATH" # Install notary server -ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && rm -rf "$GOPATH" +#ENV NOTARY_VERSION docker-v1.10-5 +#RUN set -x \ +# && export GOPATH="$(mktemp -d)" \ +# && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ +# && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ +# && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ +# go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ +# && rm -rf "$GOPATH" # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece diff --git a/Dockerfile.s390x b/Dockerfile.s390x index ce335c973b..b58d908faf 100644 --- a/Dockerfile.s390x +++ b/Dockerfile.s390x @@ -116,11 
+116,11 @@ RUN set -x \ && rm -rf "$GOPATH" # Install notary server -ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7 +ENV NOTARY_VERSION docker-v1.10-5 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_COMMIT") \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ && rm -rf "$GOPATH" diff --git a/README.md b/README.md index ae82959b8a..e10fbce9df 100644 --- a/README.md +++ b/README.md @@ -167,7 +167,7 @@ Under the hood Under the hood, Docker is built on the following components: * The - [cgroups](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt) + [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) and [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) capabilities of the Linux kernel diff --git a/VERSION b/VERSION index a01185b4d6..81c871de46 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.10.0-dev +1.10.0 diff --git a/api/client/attach.go b/api/client/attach.go index 6bfe1be187..efdad108ce 100644 --- a/api/client/attach.go +++ b/api/client/attach.go @@ -75,6 +75,12 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return err } defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } if err := cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp); err != nil { return err diff --git a/api/client/build.go b/api/client/build.go index 7fb3391ec8..5ede8f23c1 100644 --- a/api/client/build.go +++ b/api/client/build.go @@ -82,9 +82,6 @@ func (cli *DockerCli) CmdBuild(args ...string) error { err error ) - _, err = exec.LookPath("git") - hasGit := err == nil - specifiedContext := cmd.Arg(0) var ( @@ -105,7 +102,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { switch { case specifiedContext == "-": context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName) - case urlutil.IsGitURL(specifiedContext) && hasGit: + case urlutil.IsGitURL(specifiedContext): tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName) case urlutil.IsURL(specifiedContext): context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName) @@ -510,6 +507,9 @@ func getContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCl // path of the dockerfile in that context directory, and a non-nil error on // success. 
func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", fmt.Errorf("unable to find 'git': %v", err) + } if absContextDir, err = gitutils.Clone(gitURL); err != nil { return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) } diff --git a/api/client/cli.go b/api/client/cli.go index 2ad3afabb0..fd47fccfa4 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -44,6 +44,8 @@ type DockerCli struct { isTerminalOut bool // client is the http client that performs all API operations client client.APIClient + // state holds the terminal state + state *term.State } // Initialize calls the init function that will setup the configuration for the client @@ -79,6 +81,31 @@ func (cli *DockerCli) ImagesFormat() string { return cli.configFile.ImagesFormat } +func (cli *DockerCli) setRawTerminal() error { + if cli.isTerminalIn && os.Getenv("NORAW") == "" { + state, err := term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + cli.state = state + } + return nil +} + +func (cli *DockerCli) restoreTerminal(in io.Closer) error { + if cli.state != nil { + term.RestoreTerminal(cli.inFd, cli.state) + } + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin... + // As the client exits right after, simply discard the close + // until we find a better solution. + if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} + // NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. // The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config // is set the client scheme will be set to https. 
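The patch centralizes the raw-terminal handling that `attach`, `exec`, `run`, and `start` now share via `setRawTerminal`/`restoreTerminal`. A minimal standalone sketch of that save/set/restore pattern, assuming the `pkg/term` helpers used above:

```go
package main

import (
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// Resolve the file descriptor and check that we are actually on a
	// terminal, mirroring what DockerCli does for its input stream.
	fd, isTerminal := term.GetFdInfo(os.Stdin)
	if !isTerminal {
		return
	}

	// Put the terminal into raw mode, keeping the previous state...
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	// ...and always restore it on the way out, as restoreTerminal does.
	defer term.RestoreTerminal(fd, state)

	// interactive I/O would happen here
}
```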
diff --git a/api/client/create.go b/api/client/create.go index 3cf8a7a7b1..7bddf26cd2 100644 --- a/api/client/create.go +++ b/api/client/create.go @@ -40,8 +40,8 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { return err } - // Resolve the Auth config relevant for this server - encodedAuth, err := cli.encodeRegistryAuth(repoInfo.Index) + authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + encodedAuth, err := encodeAuthToBase64(authConfig) if err != nil { return err } diff --git a/api/client/exec.go b/api/client/exec.go index 0ce9e81fe3..68b7c16e5d 100644 --- a/api/client/exec.go +++ b/api/client/exec.go @@ -87,6 +87,12 @@ func (cli *DockerCli) CmdExec(args ...string) error { return err } defer resp.Close() + if in != nil && execConfig.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } errCh = promise.Go(func() error { return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) }) diff --git a/api/client/hijack.go b/api/client/hijack.go index ea4b5e3875..4c80fe1cd9 100644 --- a/api/client/hijack.go +++ b/api/client/hijack.go @@ -2,41 +2,19 @@ package client import ( "io" - "os" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/term" "github.com/docker/engine-api/types" ) -func (cli *DockerCli) holdHijackedConnection(setRawTerminal bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { - var ( - err error - oldState *term.State - ) - if inputStream != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { - oldState, err = term.SetRawTerminal(cli.inFd) - if err != nil { - return err - } - defer term.RestoreTerminal(cli.inFd, oldState) - } - +func (cli *DockerCli) holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var err error receiveStdout := make(chan error, 1) if outputStream != nil || errorStream != nil { go func() { - defer func() { - if inputStream != nil { - if setRawTerminal && cli.isTerminalIn { - term.RestoreTerminal(cli.inFd, oldState) - } - inputStream.Close() - } - }() - // When TTY is ON, use regular copy - if setRawTerminal && outputStream != nil { + if tty && outputStream != nil { _, err = io.Copy(outputStream, resp.Reader) } else { _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) diff --git a/api/client/info.go b/api/client/info.go index 42f06826e3..3cb7f23153 100644 --- a/api/client/info.go +++ b/api/client/info.go @@ -42,6 +42,11 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1]) + } + } ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) diff --git a/api/client/login.go b/api/client/login.go index a396450de1..8156917512 100644 --- a/api/client/login.go +++ b/api/client/login.go @@ -11,7 +11,6 @@ import ( Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" "github.com/docker/engine-api/client" "github.com/docker/engine-api/types" ) @@ -22,14 +21,12 @@ import ( // // Usage: docker login SERVER func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := Cli.Subcmd("login", 
[]string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) cmd.Require(flag.Max, 1) - var username, password, email string - - cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") - cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") - cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + flUser := cmd.String([]string{"u", "-username"}, "", "Username") + flPassword := cmd.String([]string{"p", "-password"}, "", "Password") + flEmail := cmd.String([]string{"e", "-email"}, "", "Email") cmd.ParseFlags(args, true) @@ -38,89 +35,19 @@ func (cli *DockerCli) CmdLogin(args ...string) error { cli.in = os.Stdin } - serverAddress := registry.IndexServer + var serverAddress string if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) - } - - promptDefault := func(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } - } - - readInput := func(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) - } - - authconfig, ok := cli.configFile.AuthConfigs[serverAddress] - if !ok { - authconfig = types.AuthConfig{} - } - - if username == "" { - promptDefault("Username", authconfig.Username) - username = readInput(cli.in, cli.out) - username = strings.TrimSpace(username) - if username == "" { - username = authconfig.Username - } - } - // Assume that a different username means they may not want to use - // the password or email from the config file, so prompt them - if username != authconfig.Username { - if password == "" { - oldState, err := term.SaveState(cli.inFd) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.inFd, oldState) - - password = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.inFd, oldState) - if password == "" { - return fmt.Errorf("Error : Password Required") - } - } - - if email == "" { - promptDefault("Email", authconfig.Email) - email = readInput(cli.in, cli.out) - if email == "" { - email = authconfig.Email - } - } } else { - // However, if they don't override the username use the - // password or email from the cmd line if specified. IOW, allow - // then to change/override them. 
And if not specified, just - use what's in the config file - if password == "" { - password = authconfig.Password - } - if email == "" { - email = authconfig.Email - } + serverAddress = cli.electAuthServer() } - authconfig.Username = username - authconfig.Password = password - authconfig.Email = email - authconfig.ServerAddress = serverAddress - cli.configFile.AuthConfigs[serverAddress] = authconfig - auth := cli.configFile.AuthConfigs[serverAddress] - response, err := cli.client.RegistryLogin(auth) + authConfig, err := cli.configureAuth(*flUser, *flPassword, *flEmail, serverAddress) + if err != nil { + return err + } + + response, err := cli.client.RegistryLogin(authConfig) if err != nil { if client.IsErrUnauthorized(err) { delete(cli.configFile.AuthConfigs, serverAddress) @@ -141,3 +68,80 @@ func (cli *DockerCli) CmdLogin(args ...string) error { } return nil } + +func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } +} + +func (cli *DockerCli) configureAuth(flUser, flPassword, flEmail, serverAddress string) (types.AuthConfig, error) { + authconfig, ok := cli.configFile.AuthConfigs[serverAddress] + if !ok { + authconfig = types.AuthConfig{} + } + + if flUser == "" { + cli.promptWithDefault("Username", authconfig.Username) + flUser = readInput(cli.in, cli.out) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + + if flPassword == "" { + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + flPassword = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error : Password Required") + } + } + + // Assume that a different username means they may not want to use + // the email from the config file, so prompt for it + if flUser != authconfig.Username { + if flEmail == "" { + cli.promptWithDefault("Email", authconfig.Email) + flEmail = readInput(cli.in, cli.out) + if flEmail == "" { + flEmail = authconfig.Email + } + } + } else { + // However, if they don't override the username use the + // email from the cmd line if specified. IOW, allow + // them to change/override them. And if not specified, just + // use what's in the config file + if flEmail == "" { + flEmail = authconfig.Email + } + } + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.Email = flEmail + authconfig.ServerAddress = serverAddress + cli.configFile.AuthConfigs[serverAddress] = authconfig + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} diff --git a/api/client/logout.go b/api/client/logout.go index 3753cbbe74..f81eb8dd12 100644 --- a/api/client/logout.go +++ b/api/client/logout.go @@ -5,7 +5,6 @@ import ( Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/registry" ) // CmdLogout logs a user out from a Docker registry. 
@@ -14,14 +13,16 @@ import ( // // Usage: docker logout [SERVER] func (cli *DockerCli) CmdLogout(args ...string) error { - cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true) + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) cmd.Require(flag.Max, 1) cmd.ParseFlags(args, true) - serverAddress := registry.IndexServer + var serverAddress string if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() } if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { diff --git a/api/client/pull.go b/api/client/pull.go index 19220b96c0..61ff02cd81 100644 --- a/api/client/pull.go +++ b/api/client/pull.go @@ -54,7 +54,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { return err } - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") if isTrusted() && !ref.HasDigest() { diff --git a/api/client/push.go b/api/client/push.go index 9e4972c308..01e7d9397f 100644 --- a/api/client/push.go +++ b/api/client/push.go @@ -42,7 +42,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { return err } // Resolve the Auth config relevant for this server - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") if isTrusted() { diff --git a/api/client/run.go b/api/client/run.go index 3b3a1a2c77..16f4230e97 100644 --- a/api/client/run.go +++ b/api/client/run.go @@ -207,6 +207,12 @@ func (cli *DockerCli) CmdRun(args ...string) error { if err != nil { return err } + if in != nil && config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } errCh = promise.Go(func() error { return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) }) diff --git a/api/client/search.go b/api/client/search.go index 64bbb67ea7..2e1fcdafd3 100644 --- a/api/client/search.go +++ b/api/client/search.go @@ -36,7 +36,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { return err } - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, indexInfo) + authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, indexInfo) requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") encodedAuth, err := encodeAuthToBase64(authConfig) diff --git a/api/client/start.go b/api/client/start.go index 3a2bc12636..0d44217d19 100644 --- a/api/client/start.go +++ b/api/client/start.go @@ -96,6 +96,12 @@ func (cli *DockerCli) CmdStart(args ...string) error { return err } defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } cErr := promise.Go(func() error { return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) diff --git a/api/client/trust.go b/api/client/trust.go index d06db5d938..5aa3b18bf2 100644 --- a/api/client/trust.go +++ b/api/client/trust.go @@ -234,7 +234,7 @@ func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Can } 
// Resolve the Auth config relevant for this server - authConfig := registry.ResolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) + authConfig := cli.resolveAuthConfig(cli.configFile.AuthConfigs, repoInfo.Index) notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig) if err != nil { diff --git a/api/client/update.go b/api/client/update.go index 764a995293..7083048859 100644 --- a/api/client/update.go +++ b/api/client/update.go @@ -23,7 +23,7 @@ func (cli *DockerCli) CmdUpdate(args ...string) error { flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") - flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") cmd.Require(flag.Min, 1) diff --git a/api/client/utils.go b/api/client/utils.go index 687add5bc2..a92f3bf951 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -7,6 +7,7 @@ import ( "os" gosignal "os/signal" "runtime" + "strings" "time" "github.com/Sirupsen/logrus" @@ -18,6 +19,20 @@ import ( registrytypes "github.com/docker/engine-api/types/registry" ) +func (cli *DockerCli) electAuthServer() string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platform environments, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.client.Info(); err != nil { + fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + // encodeAuthToBase64 serializes the auth configuration as JSON base64 payload func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { buf, err := json.Marshal(authConfig) @@ -35,10 +50,12 @@ func (cli *DockerCli) encodeRegistryAuth(index *registrytypes.IndexInfo) (string func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { return func() (string, error) { fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) - if err := cli.CmdLogin(registry.GetAuthConfigKey(index)); err != nil { + indexServer := registry.GetAuthConfigKey(index) + authConfig, err := cli.configureAuth("", "", "", indexServer) + if err != nil { return "", err } - return cli.encodeRegistryAuth(index) + return encodeAuthToBase64(authConfig) } } @@ -138,3 +155,42 @@ func (cli *DockerCli) getTtySize() (int, int) { } return int(ws.Height), int(ws.Width) } + 
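The legacy-key fallback in `resolveAuthConfig` (below) compares bare hostnames. As a standalone illustration of that normalization (example registry addresses are made up):

```go
package main

import (
	"fmt"
	"strings"
)

// convertToHostname mirrors the inline helper in resolveAuthConfig below:
// legacy config keys may carry a scheme and path, so they are reduced to a
// bare hostname before comparison.
func convertToHostname(url string) string {
	stripped := url
	if strings.HasPrefix(url, "http://") {
		stripped = strings.Replace(url, "http://", "", 1)
	} else if strings.HasPrefix(url, "https://") {
		stripped = strings.Replace(url, "https://", "", 1)
	}
	return strings.SplitN(stripped, "/", 2)[0]
}

func main() {
	for _, key := range []string{
		"https://index.docker.io/v1/",
		"http://myregistry.example.com:5000/v2/",
		"myregistry.example.com:5000",
	} {
		fmt.Printf("%-40s -> %s\n", key, convertToHostname(key))
	}
}
```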
+// resolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func (cli *DockerCli) resolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = cli.electAuthServer() + } + + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + convertToHostname := func(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + stripped = strings.Replace(url, "https://", "", 1) + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] + } + + // Maybe they have a legacy config file; iterate the keys, converting + // them to the new format and testing + for registry, ac := range authConfigs { + if configKey == convertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return types.AuthConfig{} +} diff --git a/api/server/middleware.go b/api/server/middleware.go index c978478e09..2a473e9b8a 100644 --- a/api/server/middleware.go +++ b/api/server/middleware.go @@ -147,7 +147,7 @@ func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc { return errors.ErrorCodeNewerClientVersion.WithArgs(apiVersion, api.DefaultVersion) } if apiVersion.LessThan(api.MinVersion) { - return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.DefaultVersion) + return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.MinVersion) } w.Header().Set("Server", "Docker/"+dockerversion.Version+" ("+runtime.GOOS+")") diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go index 2962d87f35..25e8fbcfff 100644 --- a/api/server/router/build/build_routes.go +++ b/api/server/router/build/build_routes.go @@ -241,10 +241,11 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r * if closeNotifier, ok := w.(http.CloseNotifier); ok { finished := make(chan struct{}) defer close(finished) + clientGone := closeNotifier.CloseNotify() go func() { select { case <-finished: - case <-closeNotifier.CloseNotify(): + case <-clientGone: logrus.Infof("Client disconnected, cancelling job: build") b.Cancel() } diff --git a/api/server/router/local/image.go b/api/server/router/local/image.go index c1d1e830db..f8fc04b2dd 100644 --- a/api/server/router/local/image.go +++ b/api/server/router/local/image.go @@ -7,9 +7,11 @@ import ( "fmt" "io" "net/http" + "net/url" "strings" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/builder/dockerfile" derr "github.com/docker/docker/errors" @@ -137,6 +139,12 @@ func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r err = s.daemon.PullImage(ref, metaHeaders, authConfig, output) } } + // Check the error from pulling an image to make sure the request + // was authorized. Modify the status if the request was + // unauthorized to respond with 401 rather than 500. 
+ if err != nil && isAuthorizedError(err) { + err = errcode.ErrorCodeUnauthorized.WithMessage(fmt.Sprintf("Authentication is required: %s", err)) + } } else { //import var newRef reference.Named if repo != "" { @@ -373,3 +381,16 @@ func (s *router) getImagesSearch(ctx context.Context, w http.ResponseWriter, r * } return httputils.WriteJSON(w, http.StatusOK, query.Results) } + +func isAuthorizedError(err error) bool { + if urlError, ok := err.(*url.Error); ok { + err = urlError.Err + } + + if dError, ok := err.(errcode.Error); ok { + if dError.ErrorCode() == errcode.ErrorCodeUnauthorized { + return true + } + } + return false +} diff --git a/api/server/router/system/system.go b/api/server/router/system/system.go index 0f46eda20a..1bcea2b4f2 100644 --- a/api/server/router/system/system.go +++ b/api/server/router/system/system.go @@ -20,7 +20,7 @@ func NewRouter(b Backend) router.Router { } r.routes = []router.Route{ - local.NewOptionsRoute("/", optionsHandler), + local.NewOptionsRoute("/{anyroute:.*}", optionsHandler), local.NewGetRoute("/_ping", pingHandler), local.NewGetRoute("/events", r.getEvents), local.NewGetRoute("/info", r.getInfo), diff --git a/container/container_unix.go b/container/container_unix.go index 282d889304..6fa72151ea 100644 --- a/container/container_unix.go +++ b/container/container_unix.go @@ -21,7 +21,7 @@ import ( runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/docker/utils" "github.com/docker/docker/volume" - "github.com/docker/engine-api/types/container" + containertypes "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/network" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" @@ -129,18 +129,26 @@ func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { return derr.ErrorCodeEmptyNetwork } + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} driverInfo, err := ep.DriverInfo() if err != nil { - return err + return pm, err } if driverInfo == nil { // It is not an error for epInfo to be nil - return nil - } - - if networkSettings.Ports == nil { - networkSettings.Ports = nat.PortMap{} + return pm, nil } if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { @@ -148,30 +156,45 @@ func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { for _, tp := range exposedPorts { natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) if err != nil { - return derr.ErrorCodeParsingPort.WithArgs(tp.Port, err) + return pm, derr.ErrorCodeParsingPort.WithArgs(tp.Port, err) } - networkSettings.Ports[natPort] = nil + pm[natPort] = nil } } } mapData, ok := driverInfo[netlabel.PortMap] if !ok { - return nil + return pm, nil } if portMapping, ok := mapData.([]types.PortBinding); ok { for _, pp := range portMapping { natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) if err != nil { - return err + return pm, err } natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} - networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg) + pm[natPort] = append(pm[natPort], natBndg) } } - return nil + return pm, nil +} + +func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range 
sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm } // BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. @@ -265,7 +288,7 @@ func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork } // BuildCreateEndpointOptions builds endpoint options from a given network. -func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { +func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox) ([]libnetwork.EndpointOption, error) { var ( portSpecs = make(nat.PortSet) bindings = make(nat.PortMap) @@ -278,7 +301,7 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network) ([] createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) } - if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + if epConfig != nil { ipam := epConfig.IPAMConfig if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "") { createOptions = append(createOptions, @@ -290,14 +313,33 @@ } } - if !container.HostConfig.NetworkMode.IsUserDefined() { + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) } - // Other configs are applicable only for the endpoint in the network + // configs that apply only to the endpoint in the network // to which container was connected to on docker run. - if n.Name() != container.HostConfig.NetworkMode.NetworkName() && - !(n.Name() == "bridge" && container.HostConfig.NetworkMode.IsDefault()) { + // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == "bridge" && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + } + + // Port-mapping rules belong to the container and apply only to non-internal networks + portmaps := getSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { return createOptions, nil } @@ -357,19 +399,6 @@ ... libnetwork.CreateOptionPortMapping(pbList), libnetwork.CreateOptionExposedPorts(exposeList)) - if container.Config.MacAddress != "" { - mac, err := net.ParseMAC(container.Config.MacAddress) - if err != nil { - return nil, err - } - - genericOption := options.Generic{ - netlabel.MacAddress: mac, - } - - createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) - } - return createOptions, nil } @@ -577,7 +606,7 @@ func (container *Container) IpcMounts() []execdriver.Mount { return mounts } -func updateCommand(c *execdriver.Command, resources container.Resources) { +func updateCommand(c *execdriver.Command, resources containertypes.Resources) { c.Resources.BlkioWeight = resources.BlkioWeight c.Resources.CPUShares = resources.CPUShares c.Resources.CPUPeriod = resources.CPUPeriod @@ -591,7 +620,7 @@ func 
updateCommand(c *execdriver.Command, resources container.Resources) { } // UpdateContainer updates resources of a container. -func (container *Container) UpdateContainer(hostConfig *container.HostConfig) error { +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { container.Lock() resources := hostConfig.Resources diff --git a/daemon/history.go b/container/history.go similarity index 54% rename from daemon/history.go rename to container/history.go index 451f7b17e6..afce1d4a79 100644 --- a/daemon/history.go +++ b/container/history.go @@ -1,34 +1,35 @@ -package daemon +package container -import ( - "sort" - - "github.com/docker/docker/container" -) +import "sort" // History is a convenience type for storing a list of containers, -// ordered by creation date. -type History []*container.Container +// sorted by creation date in descending order. +type History []*Container +// Len returns the number of containers in the history. func (history *History) Len() int { return len(*history) } +// Less compares two containers and returns true if the second one +// was created before the first one. func (history *History) Less(i, j int) bool { containers := *history return containers[j].Created.Before(containers[i].Created) } +// Swap switches containers i and j positions in the history. func (history *History) Swap(i, j int) { containers := *history containers[i], containers[j] = containers[j], containers[i] } // Add the given container to history. -func (history *History) Add(container *container.Container) { +func (history *History) Add(container *Container) { *history = append(*history, container) } +// sort orders the history by creation date in descending order. func (history *History) sort() { sort.Sort(history) } diff --git a/container/memory_store.go b/container/memory_store.go new file mode 100644 index 0000000000..153242fdb4 --- /dev/null +++ b/container/memory_store.go @@ -0,0 +1,91 @@ +package container + +import "sync" + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.Mutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + c.Lock() + res := c.s[id] + c.Unlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := new(History) + c.Lock() + for _, cont := range c.s { + containers.Add(cont) + } + c.Unlock() + containers.sort() + return *containers } + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.Lock() + defer c.Unlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + c.Lock() + defer c.Unlock() + for _, cont := range c.s { + if filter(cont) { + return cont + } + } + return nil +} + 
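As a usage sketch of the store introduced above (a hypothetical caller, not code from this patch):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/container"
)

func main() {
	// NewMemoryStore and NewBaseContainer come from the container package in this tree.
	s := container.NewMemoryStore()
	s.Add("abc123", container.NewBaseContainer("abc123", "/var/lib/docker/containers/abc123"))

	// First returns the first container matching a predicate.
	if c := s.First(func(c *container.Container) bool { return c.ID == "abc123" }); c != nil {
		fmt.Println("found", c.ID)
	}

	// ApplyAll runs the function once per container, one goroutine each,
	// and blocks until every call has returned.
	s.ApplyAll(func(c *container.Container) {
		fmt.Println("visiting", c.ID)
	})
}
```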
+// ApplyAll calls the reducer function with every container in the store. +// This operation is applied concurrently in the memory store. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + c.Lock() + defer c.Unlock() + + wg := new(sync.WaitGroup) + for _, cont := range c.s { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +var _ Store = &memoryStore{} diff --git a/container/memory_store_test.go b/container/memory_store_test.go new file mode 100644 index 0000000000..f81738fae1 --- /dev/null +++ b/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + + s.Add("id", cont) + s.Add("id2", cont2) + + list := s.List() + if len(list) != 2 { + t.Fatalf("expected list size 2, got %v", len(list)) + } + if list[0].ID != "id2" { + t.Fatalf("expected newest container to be first, got %v", list[0].ID) + } +} + +func TestFirstContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + first := s.First(func(cont *Container) bool { + return cont.ID == "id2" + }) + + if first == nil { + t.Fatal("expected container to not be nil") + } + if first.ID != "id2" { + t.Fatalf("expected id2, got %v", first) + } +} + +func TestApplyAllContainer(t *testing.T) { + s := NewMemoryStore() + + s.Add("id", NewBaseContainer("id", "root")) + s.Add("id2", NewBaseContainer("id2", "root")) + + s.ApplyAll(func(cont *Container) { + if cont.ID == "id2" { + cont.ID = "newID" + } + }) + + cont := s.Get("id2") + if cont == nil { + t.Fatal("expected container to not be nil") + } + if cont.ID != "newID" { + t.Fatalf("expected newID, got %v", cont) + } +} diff --git a/container/monitor.go b/container/monitor.go index 2f3368f405..09b447d947 100644 --- a/container/monitor.go +++ b/container/monitor.go @@ -369,6 +369,9 @@ func (m *containerMonitor) resetContainer(lock bool) { select { case <-time.After(loggerCloseTimeout): logrus.Warnf("Logger didn't exit in time: logs may be truncated") + container.LogCopier.Close() + // always wait for the LogCopier to finish before closing + <-exit case <-exit: } } diff --git a/container/state.go b/container/state.go index 138d79874f..4a923aa968 100644 --- a/container/state.go +++ b/container/state.go @@ -247,6 +247,14 @@ func (s *State) IsPaused() bool { return res } +// IsRestarting returns whether the container is 
restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + // SetRemovalInProgress sets the container state as being removed. func (s *State) SetRemovalInProgress() error { s.Lock() diff --git a/container/store.go b/container/store.go new file mode 100644 index 0000000000..042fb1a349 --- /dev/null +++ b/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// containers in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store. +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/contrib/builder/deb/debian-jessie/Dockerfile b/contrib/builder/deb/debian-jessie/Dockerfile index 2c142cc06d..759ad18c53 100644 --- a/contrib/builder/deb/debian-jessie/Dockerfile +++ b/contrib/builder/deb/debian-jessie/Dockerfile @@ -4,7 +4,7 @@ FROM debian:jessie -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local diff --git a/contrib/builder/deb/debian-stretch/Dockerfile b/contrib/builder/deb/debian-stretch/Dockerfile index 5ea789aad9..083af65aa9 100644 --- a/contrib/builder/deb/debian-stretch/Dockerfile +++ b/contrib/builder/deb/debian-stretch/Dockerfile @@ -4,7 +4,7 @@ FROM debian:stretch -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local diff --git a/contrib/builder/deb/debian-wheezy/Dockerfile b/contrib/builder/deb/debian-wheezy/Dockerfile index df2a38c99b..077a74b84b 
100644 --- a/contrib/builder/deb/debian-wheezy/Dockerfile +++ b/contrib/builder/deb/debian-wheezy/Dockerfile @@ -4,7 +4,8 @@ FROM debian:wheezy-backports -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools/wheezy-backports build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local diff --git a/contrib/builder/deb/generate.sh b/contrib/builder/deb/generate.sh index 2f18104771..903b8833c4 100755 --- a/contrib/builder/deb/generate.sh +++ b/contrib/builder/deb/generate.sh @@ -57,12 +57,13 @@ for version in "${versions[@]}"; do libapparmor-dev # for "sys/apparmor.h" libdevmapper-dev # for "libdevmapper.h" libltdl-dev # for pkcs11 "ltdl.h" - libsqlite3-dev # for "sqlite3.h" libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically ) # packaging for "sd-journal.h" and libraries varies case "$suite" in - precise) ;; + precise|wheezy) ;; sid|stretch|wily) packages+=( libsystemd-dev );; *) packages+=( libsystemd-journal-dev );; esac @@ -96,9 +97,13 @@ for version in "${versions[@]}"; do fi if [ "$suite" = 'wheezy' ]; then - # pull btrfs-toold from backports - backports="/$suite-backports" - packages=( "${packages[@]/btrfs-tools/btrfs-tools$backports}" ) + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools libsystemd-journal-dev ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" fi echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" diff --git a/contrib/builder/deb/ubuntu-precise/Dockerfile b/contrib/builder/deb/ubuntu-precise/Dockerfile index 9bc3d31b43..d255535544 100644 --- a/contrib/builder/deb/ubuntu-precise/Dockerfile +++ b/contrib/builder/deb/ubuntu-precise/Dockerfile @@ -4,7 +4,7 @@ FROM ubuntu:precise -RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local diff --git a/contrib/builder/deb/ubuntu-trusty/Dockerfile b/contrib/builder/deb/ubuntu-trusty/Dockerfile index 426f6e3678..d422f5d3e6 100644 --- 
a/contrib/builder/deb/ubuntu-trusty/Dockerfile +++ b/contrib/builder/deb/ubuntu-trusty/Dockerfile @@ -4,7 +4,7 @@ FROM ubuntu:trusty -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local diff --git a/contrib/builder/deb/ubuntu-wily/Dockerfile b/contrib/builder/deb/ubuntu-wily/Dockerfile index f6a8a648aa..7c5f124ad4 100644 --- a/contrib/builder/deb/ubuntu-wily/Dockerfile +++ b/contrib/builder/deb/ubuntu-wily/Dockerfile @@ -4,7 +4,7 @@ FROM ubuntu:wily -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local diff --git a/contrib/builder/rpm/centos-7/Dockerfile b/contrib/builder/rpm/centos-7/Dockerfile index 8725d07e4f..215f2836d6 100644 --- a/contrib/builder/rpm/centos-7/Dockerfile +++ b/contrib/builder/rpm/centos-7/Dockerfile @@ -6,7 +6,7 @@ FROM centos:7 RUN yum groupinstall -y "Development Tools" RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs -RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local @@ -15,3 +15,4 @@ ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux + diff --git a/contrib/builder/rpm/fedora-22/Dockerfile b/contrib/builder/rpm/fedora-22/Dockerfile index 912be90a46..c5890789cc 100644 --- a/contrib/builder/rpm/fedora-22/Dockerfile +++ b/contrib/builder/rpm/fedora-22/Dockerfile @@ -5,7 +5,7 @@ FROM fedora:22 RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV SECCOMP_VERSION v2.2.3 RUN 
buildDeps=' \ @@ -35,3 +35,4 @@ ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/fedora-23/Dockerfile b/contrib/builder/rpm/fedora-23/Dockerfile index bfa70617de..c36194d6bf 100644 --- a/contrib/builder/rpm/fedora-23/Dockerfile +++ b/contrib/builder/rpm/fedora-23/Dockerfile @@ -5,7 +5,7 @@ FROM fedora:23 RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV SECCOMP_VERSION v2.2.3 RUN buildDeps=' \ @@ -35,3 +35,4 @@ ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS seccomp selinux + diff --git a/contrib/builder/rpm/generate.sh b/contrib/builder/rpm/generate.sh index b6d7287cef..3759594e24 100755 --- a/contrib/builder/rpm/generate.sh +++ b/contrib/builder/rpm/generate.sh @@ -51,6 +51,7 @@ for version in "${versions[@]}"; do ;; oraclelinux:*) # get "Development Tools" packages and dependencies + # we also need yum-utils for yum-config-manager to pull the latest repo file echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" ;; opensuse:*) @@ -70,9 +71,11 @@ for version in "${versions[@]}"; do libseccomp-devel # for "seccomp.h" & "libseccomp.so" libselinux-devel # for "libselinux.so" libtool-ltdl-devel # for pkcs11 "ltdl.h" + pkgconfig # for the pkg-config command selinux-policy selinux-policy-devel sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries tar # older versions of dev-tools do not have tar ) @@ -83,6 +86,13 @@ for version in "${versions[@]}"; do ;; esac + case "$from" in + oraclelinux:6) + # doesn't use systemd, doesn't have a devel package for it + packages=( "${packages[@]/systemd-devel}" ) + ;; + esac + # opensuse & oraclelinux:6 do not have the right libseccomp libs # centos:7 and oraclelinux:7 have a libseccomp < 2.2.1 :( case "$from" in @@ -97,6 +107,11 @@ for version in "${versions[@]}"; do case "$from" in opensuse:*) packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + if [[ "$from" == "opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + # use zypper echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" ;; @@ -140,6 +155,18 @@ for version in "${versions[@]}"; do *) ;; esac + case "$from" in + oraclelinux:6) + # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version + # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo + echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" + echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" @@ -154,4
+181,22 @@ for version in "${versions[@]}"; do buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. + echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + done diff --git a/contrib/builder/rpm/opensuse-13.2/Dockerfile b/contrib/builder/rpm/opensuse-13.2/Dockerfile index 2f5f7cb1a9..67c0b6bc08 100644 --- a/contrib/builder/rpm/opensuse-13.2/Dockerfile +++ b/contrib/builder/rpm/opensuse-13.2/Dockerfile @@ -5,7 +5,7 @@ FROM opensuse:13.2 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build -RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar systemd-rpm-macros ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local @@ -14,3 +14,4 @@ ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux + diff --git a/contrib/builder/rpm/oraclelinux-6/Dockerfile b/contrib/builder/rpm/oraclelinux-6/Dockerfile new file mode 100644 index 0000000000..13468e74fb --- /dev/null +++ b/contrib/builder/rpm/oraclelinux-6/Dockerfile @@ -0,0 +1,27 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+# + +FROM oraclelinux:6 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +ENV GO_VERSION 1.5.3 +RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/contrib/builder/rpm/oraclelinux-7/Dockerfile b/contrib/builder/rpm/oraclelinux-7/Dockerfile index 153495d0f5..e27f589679 100644 --- a/contrib/builder/rpm/oraclelinux-7/Dockerfile +++ b/contrib/builder/rpm/oraclelinux-7/Dockerfile @@ -5,7 +5,7 @@ FROM oraclelinux:7 RUN yum groupinstall -y "Development Tools" -RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar ENV GO_VERSION 1.5.3 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local @@ -14,3 +14,4 @@ ENV PATH $PATH:/usr/local/go/bin ENV AUTO_GOPATH 1 ENV DOCKER_BUILDTAGS selinux + diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 4b12ce6894..e731de813f 100644 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -220,6 +220,32 @@ __docker_pos_first_nonflag() { echo $counter } +# If we are currently completing the value of a map option (key=value) +# which matches the extglob given as an argument, returns key. +# This function is needed for key-specific completions. +# TODO use this in all "${words[$cword-2]}$prev=" occurrences +__docker_map_key_of_current_option() { + local glob="$1" + + local key glob_pos + if [ "$cur" = "=" ] ; then # key= case + key="$prev" + glob_pos=$((cword - 2)) + elif [[ $cur == *=* ]] ; then # key=value case (OSX) + key=${cur%=*} + glob_pos=$((cword - 1)) + elif [ "$prev" = "=" ] ; then + key=${words[$cword - 2]} # key=value case + glob_pos=$((cword - 3)) + else + return + fi + + [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax + + [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" +} + # Returns the value of the first option matching option_glob. 
# Valid values for option_glob are option names like '--log-level' and # globs like '--log-level|-l' @@ -383,7 +409,7 @@ __docker_complete_log_options() { local gelf_options="env gelf-address labels tag" local journald_options="env labels" local json_file_options="env labels max-file max-size" - local syslog_options="syslog-address syslog-facility tag" + local syslog_options="syslog-address syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify syslog-facility tag" local splunk_options="env labels splunk-caname splunk-capath splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url tag" local all_options="$fluentd_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options" @@ -431,8 +457,9 @@ __docker_complete_log_driver_options() { return ;; *syslog-address=*) - COMPREPLY=( $( compgen -W "tcp udp unix" -S "://" -- "${cur#=}" ) ) + COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur#=}" ) ) __docker_nospace + __ltrim_colon_completions "${cur}" return ;; *syslog-facility=*) @@ -460,15 +487,23 @@ __docker_complete_log_driver_options() { " -- "${cur#=}" ) ) return ;; + *syslog-tls-@(ca-cert|cert|key)=*) + _filedir + return + ;; + *syslog-tls-skip-verify=*) + COMPREPLY=( $( compgen -W "true" -- "${cur#=}" ) ) + return + ;; *splunk-url=*) COMPREPLY=( $( compgen -W "http:// https://" -- "${cur#=}" ) ) - compopt -o nospace + __docker_nospace __ltrim_colon_completions "${cur}" return ;; *splunk-insecureskipverify=*) COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) - compopt -o nospace + __docker_nospace return ;; esac @@ -644,7 +679,7 @@ _docker_commit() { _docker_cp() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) @@ -735,6 +770,7 @@ _docker_daemon() { --registry-mirror --storage-driver -s --storage-opt + --userns-remap " case "$prev" in @@ -748,7 +784,7 @@ _docker_daemon() { return ;; --cluster-store-opt) - COMPREPLY=( $( compgen -W "kv.cacertfile kv.certfile kv.keyfile" -S = -- "$cur" ) ) + COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) __docker_nospace return ;; @@ -810,6 +846,15 @@ _docker_daemon() { __docker_complete_log_options return ;; + --userns-remap) + if [[ $cur == *:* ]] ; then + COMPREPLY=( $(compgen -g -- "${cur#*:}") ) + else + COMPREPLY=( $(compgen -u -S : -- "$cur") ) + __docker_nospace + fi + return + ;; $(__docker_to_extglob "$options_with_args") ) return ;; @@ -860,37 +905,30 @@ _docker_diff() { } _docker_events() { - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "container event image" -- "$cur" ) ) - __docker_nospace - return - ;; - --since|--until) - return - ;; - esac - - case "${words[$cword-2]}$prev=" in - *container=*) - cur="${cur#=}" + local filter=$(__docker_map_key_of_current_option '-f|--filter') + case "$filter" in + container) + cur="${cur##*=}" __docker_complete_containers_all return ;; - *event=*) + event) COMPREPLY=( $( compgen -W " attach commit + connect copy create delete destroy die + disconnect exec_create exec_start export import kill + mount oom pause pull @@ -902,16 +940,43 @@ _docker_events() { stop tag top + unmount unpause untag - " -- "${cur#=}" ) ) + update + " -- "${cur##*=}" ) ) return ;; - *image=*) - cur="${cur#=}" + image) + cur="${cur##*=}" __docker_complete_images return ;; + network) + 
cur="${cur##*=}" + __docker_complete_networks + return + ;; + type) + COMPREPLY=( $( compgen -W "container image network volume" -- "${cur##*=}" ) ) + return + ;; + volume) + cur="${cur##*=}" + __docker_complete_volumes + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container event image label network type volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --since|--until) + return + ;; esac case "$cur" in @@ -978,10 +1043,8 @@ _docker_history() { _docker_images() { case "$prev" in --filter|-f) - COMPREPLY=( $( compgen -W "dangling=true label=" -- "$cur" ) ) - if [ "$COMPREPLY" = "label=" ]; then - __docker_nospace - fi + COMPREPLY=( $( compgen -S = -W "dangling label" -- "$cur" ) ) + __docker_nospace return ;; --format) @@ -1153,12 +1216,41 @@ _docker_logs() { } _docker_network_connect() { + local options_with_args=" + --alias + --ip + --ip6 + --link + " + + local boolean_options=" + --help + " + + case "$prev" in + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) ;; *) - local counter=$(__docker_pos_first_nonflag) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) if [ $cword -eq $counter ]; then __docker_complete_networks elif [ $cword -eq $(($counter + 1)) ]; then @@ -1170,7 +1262,7 @@ _docker_network_connect() { _docker_network_create() { case "$prev" in - --aux-address|--gateway|--ip-range|--opt|-o|--subnet) + --aux-address|--gateway|--ip-range|--ipam-opt|--opt|-o|--subnet) return ;; --ipam-driver) @@ -1189,7 +1281,7 @@ _docker_network_create() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--aux-address --driver -d --gateway --help --ip-range --ipam-driver --opt -o --subnet" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --opt -o --subnet" -- "$cur" ) ) ;; esac } @@ -1350,7 +1442,7 @@ _docker_ps() { return ;; *status=*) - COMPREPLY=( $( compgen -W "exited paused restarting running" -- "${cur#=}" ) ) + COMPREPLY=( $( compgen -W "created dead exited paused restarting running" -- "${cur#=}" ) ) return ;; esac @@ -1365,7 +1457,7 @@ _docker_ps() { _docker_pull() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--all-tags -a --help" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) @@ -1387,7 +1479,7 @@ _docker_pull() { _docker_push() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) @@ -1488,6 +1580,8 @@ _docker_run() { --expose --group-add --hostname -h + --ip + --ip6 --ipc --isolation --kernel-memory @@ -1503,6 +1597,7 @@ _docker_run() { --memory-reservation --name --net + --net-alias --oom-score-adj --pid --publish -p @@ -1903,7 +1998,15 @@ _docker_volume_inspect() { _docker_volume_ls() { case "$prev" in --filter|-f) - COMPREPLY=( $( compgen -W "dangling=true" -- "$cur" ) ) + COMPREPLY=( $( compgen -S = -W "dangling" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case 
"${words[$cword-2]}$prev=" in + *dangling=*) + COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) return ;; esac diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index df344a069f..25be25e02d 100644 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -204,7 +204,7 @@ __docker_get_log_options() { gelf_options=("env" "gelf-address" "labels" "tag") journald_options=("env" "labels") json_file_options=("env" "labels" "max-file" "max-size") - syslog_options=("syslog-address" "syslog-facility" "tag") + syslog_options=("syslog-address" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "syslog-facility" "tag") splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "tag") [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 @@ -231,6 +231,17 @@ __docker_log_options() { return ret } +__docker_complete_detach_keys() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + compset -P "*," + keys=(${:-{a-z}}) + ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) + _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 + _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 +} + __docker_networks() { [[ $PREFIX = -* ]] && return 1 integer ret=1 @@ -291,24 +302,46 @@ __docker_network_subcommand() { opts_help=("(: -)--help[Print usage]") case "$words[1]" in - (connect|disconnect) + (connect) _arguments $(__docker_arguments) \ $opts_help \ + "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ + "($help)--ip=[Container IPv4 address]:IPv4: " \ + "($help)--ip6=[Container IPv6 address]:IPv6: " \ + "($help)*--link=[Add a link to another container]:link:->link" \ "($help -)1:network:__docker_networks" \ - "($help -)2:containers:__docker_runningcontainers" && ret=0 + "($help -)2:containers:__docker_containers" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_runningcontainers -qS ":" && ret=0 + fi + ;; + esac ;; (create) _arguments $(__docker_arguments) -A '-*' \ $opts_help \ - "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ - "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ - "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ - "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ - "($help)*--gateway=[ipv4 or ipv6 Gateway for the master subnet]:IP: " \ "($help)*--aux-address[Auxiliary ipv4 or ipv6 addresses used by network driver]:key=IP: " \ - "($help)*"{-o=,--opt=}"[Set driver specific options]:key=value: " \ + "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ + "($help)*--gateway=[ipv4 or ipv6 Gateway for the master subnet]:IP: " \ + "($help)--internal[Restricts external access to the network]" \ + "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ + "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ + "($help)*--ipam-opt=[Set custom IPAM plugin options]:opt=value: " \ + "($help)*"{-o=,--opt=}"[Set driver specific options]:opt=value: " \ + "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ "($help -)1:Network Name: " && ret=0 ;; + 
(disconnect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:network:__docker_networks" \ + "($help -)2:containers:__docker_containers" && ret=0 + ;; (inspect) _arguments $(__docker_arguments) \ $opts_help \ @@ -485,6 +518,8 @@ __docker_subcommand() { "($help)*--group-add=[Add additional groups to run as]:group:_groups" "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" + "($help)--ip=[Container IPv4 address]:IPv4: " + "($help)--ip6=[Container IPv6 address]:IPv6: " "($help)--ipc=[IPC namespace to use]:IPC namespace: " "($help)*--link=[Add link to another container]:link:->link" "($help)*"{-l=,--label=}"[Set meta data on a container]:label: " @@ -493,6 +528,7 @@ __docker_subcommand() { "($help)--mac-address=[Container MAC address]:MAC address: " "($help)--name=[Container name]:name: " "($help)--net=[Connect a container to a network]:network mode:(bridge none container host)" + "($help)*--net-alias=[Add network-scoped alias for the container]:alias: " "($help)--oom-kill-disable[Disable OOM Killer]" "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" @@ -515,11 +551,15 @@ __docker_subcommand() { "($help)--kernel-memory=[Kernel memory limit in bytes.]:Memory limit: " "($help)--memory-reservation=[Memory soft limit]:Memory limit: " ) + opts_attach_exec_run_start=( + "($help)--detach-keys=[Specify the escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" + ) case "$words[1]" in (attach) _arguments $(__docker_arguments) \ $opts_help \ + $opts_attach_exec_run_start \ "($help)--no-stdin[Do not attach stdin]" \ "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ "($help -):containers:__docker_runningcontainers" && ret=0 @@ -552,6 +592,7 @@ __docker_subcommand() { (cp) _arguments $(__docker_arguments) \ $opts_help \ + "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link in SRC_PATH]" \ "($help -)1:container:->container" \ "($help -)2:hostpath:_files" && ret=0 case $state in @@ -650,7 +691,7 @@ __docker_subcommand() { if compset -P '*='; then _files && ret=0 else - opts=('kv.cacertfile' 'kv.certfile' 'kv.keyfile') + opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 fi ;; @@ -680,6 +721,7 @@ __docker_subcommand() { local state _arguments $(__docker_arguments) \ $opts_help \ + $opts_attach_exec_run_start \ "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ "($help)--privileged[Give extended Linux capabilities to the command]" \ @@ -874,6 +916,7 @@ __docker_subcommand() { $opts_build_create_run_update \ $opts_create_run \ $opts_create_run_update \ + $opts_attach_exec_run_start \ "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ "($help)--rm[Remove intermediate containers when it exits]" \ "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ @@ -910,6 +953,7 @@ __docker_subcommand() { (start) _arguments $(__docker_arguments) \ $opts_help \ + $opts_attach_exec_run_start \ "($help -a --attach)"{-a,--attach}"[Attach container's 
stdout/stderr and forward all signals]" \ "($help -i --interactive)"{-i,--interactive}"[Attach container's stdin]" \ "($help -)*:containers:__docker_stoppedcontainers" && ret=0 @@ -924,7 +968,6 @@ __docker_subcommand() { (tag) _arguments $(__docker_arguments) \ $opts_help \ - "($help -f --force)"{-f,--force}"[force]"\ "($help -):source:__docker_images"\ "($help -):destination:__docker_repositories_with_tags" && ret=0 ;; diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service index 6015b7441f..d43658e700 100644 --- a/contrib/init/systemd/docker.service +++ b/contrib/init/systemd/docker.service @@ -11,6 +11,7 @@ MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity +TasksMax=1048576 TimeoutStartSec=0 [Install] diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default index 14e660175b..da23c57ce9 100644 --- a/contrib/init/sysvinit-debian/docker.default +++ b/contrib/init/sysvinit-debian/docker.default @@ -1,5 +1,12 @@ # Docker Upstart and SysVinit configuration file +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/articles/systemd/ +# + # Customize location of Docker binary (especially for development testing). #DOCKER="/usr/local/bin/docker" diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim index 3cb1ecfddf..bb75da8581 100644 --- a/contrib/syntax/vim/syntax/dockerfile.vim +++ b/contrib/syntax/vim/syntax/dockerfile.vim @@ -11,7 +11,7 @@ let b:current_syntax = "dockerfile" syntax case ignore -syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY|STOPSIGNAL)\s/ +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY|STOPSIGNAL|ARG)\s/ highlight link dockerfileKeyword Keyword syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ diff --git a/daemon/config.go b/daemon/config.go index a75178faef..77bf6cf664 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -21,6 +21,15 @@ const ( disableNetworkBridge = "none" ) +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// from others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, +} + // LogConfig represents the default log configuration. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line use. @@ -45,7 +54,6 @@ type CommonTLSOptions struct { type CommonConfig struct { AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins AutoRestart bool `json:"-"` - Bridge bridgeConfig `json:"-"` // Bridge holds bridge network specific configuration.
Context map[string][]string `json:"-"` DisableBridge bool `json:"-"` DNS []string `json:"dns,omitempty"` @@ -56,7 +64,6 @@ type CommonConfig struct { GraphDriver string `json:"storage-driver,omitempty"` GraphOptions []string `json:"storage-opts,omitempty"` Labels []string `json:"labels,omitempty"` - LogConfig LogConfig `json:"log-config,omitempty"` Mtu int `json:"mtu,omitempty"` Pidfile string `json:"pidfile,omitempty"` Root string `json:"graph,omitempty"` @@ -76,14 +83,20 @@ type CommonConfig struct { // reachable by other hosts. ClusterAdvertise string `json:"cluster-advertise,omitempty"` - Debug bool `json:"debug,omitempty"` - Hosts []string `json:"hosts,omitempty"` - LogLevel string `json:"log-level,omitempty"` - TLS bool `json:"tls,omitempty"` - TLSVerify bool `json:"tls-verify,omitempty"` - TLSOptions CommonTLSOptions `json:"tls-opts,omitempty"` + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. reloadLock sync.Mutex + valuesSet map[string]interface{} } // InstallCommonFlags adds command-line options to the top-level flag parser for @@ -112,6 +125,16 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) } +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { if clusterAdvertise == "" { return "", errDiscoveryDisabled @@ -165,6 +188,7 @@ func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Conf return nil, err } + var config Config var reader io.Reader if flags != nil { var jsonConfig map[string]interface{} @@ -173,41 +197,78 @@ func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Conf return nil, err } - if err := findConfigurationConflicts(jsonConfig, flags); err != nil { + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { return nil, err } + + config.valuesSet = configSet } - var config Config reader = bytes.NewReader(b) err = json.NewDecoder(reader).Decode(&config) return &config, err } -// findConfigurationConflicts iterates over the provided flags searching for -// duplicated configurations. It returns an error with all the conflicts if -// it finds any. -func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { - var conflicts []string +// configValuesSet returns the configuration values explicitly set in the file. 
+func configValuesSet(config map[string]interface{}) map[string]interface{} { flatten := make(map[string]interface{}) for k, v := range config { - if m, ok := v.(map[string]interface{}); ok { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { for km, vm := range m { flatten[km] = vm } - } else { - flatten[k] = v + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + flagName := "-" + key + if flag := flags.Lookup(flagName); flag == nil { + unknownKeys[key] = value } } + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + unknownNamedConflicts := func(f *flag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string printConflict := func(name string, flagValue, fileValue interface{}) string { return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) } - collectConflicts := func(f *flag.Flag) { + // 3. Search keys that are present as a flag and as a file option. 
+ duplicatedConflicts := func(f *flag.Flag) { // search option name in the json configuration payload if the value is a named option if namedOption, ok := f.Value.(opts.NamedOption); ok { - if optsValue, ok := flatten[namedOption.Name()]; ok { + if optsValue, ok := config[namedOption.Name()]; ok { conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) } } else { @@ -215,7 +276,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagS for _, name := range f.Names { name = strings.TrimLeft(name, "-") - if value, ok := flatten[name]; ok { + if value, ok := config[name]; ok { conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) break } @@ -223,7 +284,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagS } } - flags.Visit(collectConflicts) + flags.Visit(duplicatedConflicts) if len(conflicts) > 0 { return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) diff --git a/daemon/config_test.go b/daemon/config_test.go index 69a199e162..dc1c3bc7da 100644 --- a/daemon/config_test.go +++ b/daemon/config_test.go @@ -89,21 +89,16 @@ func TestFindConfigurationConflicts(t *testing.T) { config := map[string]interface{}{"authorization-plugins": "foobar"} flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + flags.String([]string{"-authorization-plugins"}, "", "") + if err := flags.Set("-authorization-plugins", "asdf"); err != nil { + t.Fatal(err) + } + err := findConfigurationConflicts(config, flags) - if err != nil { - t.Fatal(err) - } - - flags.String([]string{"authorization-plugins"}, "", "") - if err := flags.Set("authorization-plugins", "asdf"); err != nil { - t.Fatal(err) - } - - err = findConfigurationConflicts(config, flags) if err == nil { t.Fatal("expected error, got nil") } - if !strings.Contains(err.Error(), "authorization-plugins") { + if !strings.Contains(err.Error(), "authorization-plugins: (from flag: asdf, from file: foobar)") { t.Fatalf("expected authorization-plugins conflict, got %v", err) } } @@ -175,3 +170,41 @@ func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { t.Fatalf("expected tlscacert conflict, got %v", err) } } + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + + flags.Bool([]string{"-tlsverify"}, false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + base := mflag.NewFlagSet("base", mflag.ContinueOnError) + base.Var(opts.NewNamedListOptsRef("hosts", &hosts, nil), []string{"H", "-host"}, "") + + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + mflag.Merge(flags, base) + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("-host", "unix:///var/run/docker.sock") + err = findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from 
file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} diff --git a/daemon/config_unix.go b/daemon/config_unix.go index 60fb3a9b54..daf236d6bb 100644 --- a/daemon/config_unix.go +++ b/daemon/config_unix.go @@ -37,19 +37,19 @@ type Config struct { // bridgeConfig stores all the bridge driver specific // configuration. type bridgeConfig struct { - EnableIPv6 bool - EnableIPTables bool - EnableIPForward bool - EnableIPMasq bool - EnableUserlandProxy bool - DefaultIP net.IP - Iface string - IP string - FixedCIDR string - FixedCIDRv6 string - DefaultGatewayIPv4 net.IP - DefaultGatewayIPv6 net.IP - InterContainerCommunication bool + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-mask,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + DefaultIP net.IP `json:"ip,omitempty"` + Iface string `json:"bridge,omitempty"` + IP string `json:"bip,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` } // InstallFlags adds command-line options to the top-level flag parser for @@ -65,19 +65,19 @@ func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) strin cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) config.Ulimits = make(map[string]*units.Ulimit) cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) - cmd.BoolVar(&config.Bridge.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) - cmd.BoolVar(&config.Bridge.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) - cmd.BoolVar(&config.Bridge.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) - cmd.BoolVar(&config.Bridge.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) - cmd.StringVar(&config.Bridge.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) - cmd.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) - cmd.StringVar(&config.Bridge.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) - cmd.StringVar(&config.Bridge.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) - cmd.Var(opts.NewIPOpt(&config.Bridge.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) - cmd.Var(opts.NewIPOpt(&config.Bridge.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) - cmd.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) - cmd.Var(opts.NewIPOpt(&config.Bridge.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) - cmd.BoolVar(&config.Bridge.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) + cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) + 
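To make the new conflict detection concrete, here is a hypothetical walk-through in the style of the tests above; the flag set and file payloads are invented for illustration:

```go
package daemon // same package as the tests above; a sketch, not shipped code

import (
	"fmt"

	"github.com/docker/docker/pkg/mflag"
)

// exampleConflictCheck exercises the two failure modes that
// findConfigurationConflicts now reports: file keys that match no
// flag, and directives set both in the file and on the command line.
func exampleConflictCheck() {
	flags := mflag.NewFlagSet("example", mflag.ContinueOnError)
	flags.Bool([]string{"-tlsverify"}, false, "")
	flags.String([]string{"-log-level"}, "", "")

	// Unknown key: "tls-verify" no longer matches the "tlsverify"
	// flag after the json tag change above.
	fmt.Println(findConfigurationConflicts(map[string]interface{}{"tls-verify": true}, flags))

	// Duplicate: the same directive from both sources.
	flags.Set("-log-level", "info")
	fmt.Println(findConfigurationConflicts(map[string]interface{}{"log-level": "debug"}, flags))
	// log-level: (from flag: info, from file: debug)
}
```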
cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) + cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) + cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) + cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) + cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) + cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) + cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) + cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) + cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) diff --git a/daemon/config_windows.go b/daemon/config_windows.go index bc0833aa9e..9918d45d00 100644 --- a/daemon/config_windows.go +++ b/daemon/config_windows.go @@ -15,7 +15,7 @@ var ( // bridgeConfig stores all the bridge driver specific // configuration. type bridgeConfig struct { - VirtualSwitchName string + VirtualSwitchName string `json:"bridge,omitempty"` } // Config defines the configuration of a docker daemon. @@ -37,5 +37,5 @@ func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) strin config.InstallCommonFlags(cmd, usageFn) // Then platform-specific install flags. 
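The switch to embedded structs above is what lets a flat configuration file populate nested option groups. A self-contained illustration of the mechanism with toy stand-ins (these types are not the daemon's own): encoding/json promotes the tagged fields of embedded structs, so keys like `bip` and `tlscacert` can live at the top level of the file.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Toy stand-ins for CommonTLSOptions and bridgeConfig.
type tlsOptions struct {
	CACert string `json:"tlscacert,omitempty"`
}

type bridgeOptions struct {
	IP         string `json:"bip,omitempty"`
	EnableIPv6 bool   `json:"ipv6,omitempty"`
}

// Embedding promotes the json-tagged fields of both structs
// to the top level of the enclosing config.
type config struct {
	LogLevel string `json:"log-level,omitempty"`
	tlsOptions
	bridgeOptions
}

func main() {
	payload := `{"log-level":"debug","tlscacert":"/certs/ca.pem","bip":"172.20.0.1/16","ipv6":true}`
	var c config
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
	// {LogLevel:debug tlsOptions:{CACert:/certs/ca.pem} bridgeOptions:{IP:172.20.0.1/16 EnableIPv6:true}}
}
```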
- cmd.StringVar(&config.Bridge.VirtualSwitchName, []string{"b", "-bridge"}, "", "Attach containers to a virtual switch") + cmd.StringVar(&config.bridgeConfig.VirtualSwitchName, []string{"b", "-bridge"}, "", "Attach containers to a virtual switch") } diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go index 0a361a6924..46096b849a 100644 --- a/daemon/container_operations_unix.go +++ b/daemon/container_operations_unix.go @@ -21,7 +21,6 @@ import ( "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" containertypes "github.com/docker/engine-api/types/container" @@ -209,10 +208,12 @@ func (daemon *Daemon) populateCommand(c *container.Container, env []string) erro BlkioThrottleWriteBpsDevice: writeBpsDevice, BlkioThrottleReadIOpsDevice: readIOpsDevice, BlkioThrottleWriteIOpsDevice: writeIOpsDevice, - OomKillDisable: *c.HostConfig.OomKillDisable, MemorySwappiness: -1, } + if c.HostConfig.OomKillDisable != nil { + resources.OomKillDisable = *c.HostConfig.OomKillDisable + } if c.HostConfig.MemorySwappiness != nil { resources.MemorySwappiness = *c.HostConfig.MemorySwappiness } @@ -249,16 +250,8 @@ func (daemon *Daemon) populateCommand(c *container.Container, env []string) erro defaultCgroupParent := "/docker" if daemon.configStore.CgroupParent != "" { defaultCgroupParent = daemon.configStore.CgroupParent - } else { - for _, option := range daemon.configStore.ExecOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { - continue - } - if val == "systemd" { - defaultCgroupParent = "system.slice" - } - } + } else if daemon.usingSystemd() { + defaultCgroupParent = "system.slice" } c.Command = &execdriver.Command{ CommonCommand: execdriver.CommonCommand{ @@ -513,7 +506,7 @@ func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Contain } if container.HostConfig.NetworkMode == containertypes.NetworkMode("bridge") { - container.NetworkSettings.Bridge = daemon.configStore.Bridge.Iface + container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface } return nil @@ -658,6 +651,9 @@ func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { // User specified ip address is acceptable only for networks with user specified subnets. 
func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } if !hasUserDefinedIPAddress(epConfig) { return nil } @@ -704,7 +700,7 @@ func cleanOperationalData(es *networktypes.EndpointSettings) { es.MacAddress = "" } -func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrName string, updateSettings bool) (libnetwork.Network, error) { +func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (libnetwork.Network, error) { if container.HostConfig.NetworkMode.IsContainer() { return nil, runconfig.ErrConflictSharedNetwork } @@ -715,11 +711,24 @@ func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrNa return nil, nil } + if !containertypes.NetworkMode(idOrName).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) { + return nil, runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 { + return nil, runconfig.ErrUnsupportedNetworkAndAlias + } + } + n, err := daemon.FindNetwork(idOrName) if err != nil { return nil, err } + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return nil, err + } + if updateSettings { if err := daemon.updateNetworkSettings(container, n); err != nil { return nil, err @@ -734,9 +743,12 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName if container.RemovalInProgress || container.Dead { return derr.ErrorCodeRemovalContainer.WithArgs(container.ID) } - if _, err := daemon.updateNetworkConfig(container, idOrName, true); err != nil { + if _, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, true); err != nil { return err } + if endpointConfig != nil { + container.NetworkSettings.Networks[idOrName] = endpointConfig + } } else { if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { return err @@ -749,7 +761,7 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName } func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { - n, err := daemon.updateNetworkConfig(container, idOrName, updateSettings) + n, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, updateSettings) if err != nil { return err } @@ -757,25 +769,10 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName return nil } - if !containertypes.NetworkMode(idOrName).IsUserDefined() && hasUserDefinedIPAddress(endpointConfig) { - return runconfig.ErrUnsupportedNetworkAndIP - } - - if !containertypes.NetworkMode(idOrName).IsUserDefined() && len(endpointConfig.Aliases) > 0 { - return runconfig.ErrUnsupportedNetworkAndAlias - } - controller := daemon.netController - if err := validateNetworkingConfig(n, endpointConfig); err != nil { - return err - } - - if endpointConfig != nil { - container.NetworkSettings.Networks[n.Name()] = endpointConfig - } - - createOptions, err := container.BuildCreateEndpointOptions(n) + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb) if err != nil { return err } @@ -793,11 +790,14 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName } }() + if endpointConfig != nil { + 
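Collected in one place for readability, this is the validation order the reworked `updateNetworkConfig` enforces before any endpoint is created; it is a paraphrase of the hunks above, not the literal patch code:

```go
package daemon // paraphrase of the guards above, not the literal patch

import (
	"github.com/docker/docker/runconfig"
	"github.com/docker/libnetwork"
	containertypes "github.com/docker/engine-api/types/container"
	networktypes "github.com/docker/engine-api/types/network"
)

// validateEndpointRequest restates the checks updateNetworkConfig now
// performs: static IPs and network-scoped aliases are only honored on
// user-defined networks, and validateNetworkingConfig is nil-safe so
// it can run unconditionally.
func validateEndpointRequest(n libnetwork.Network, idOrName string, epConfig *networktypes.EndpointSettings) error {
	if !containertypes.NetworkMode(idOrName).IsUserDefined() {
		if hasUserDefinedIPAddress(epConfig) {
			return runconfig.ErrUnsupportedNetworkAndIP
		}
		if epConfig != nil && len(epConfig.Aliases) > 0 {
			return runconfig.ErrUnsupportedNetworkAndAlias
		}
	}
	return validateNetworkingConfig(n, epConfig)
}
```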
container.NetworkSettings.Networks[n.Name()] = endpointConfig + } + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { return err } - sb := daemon.getNetworkSandbox(container) if sb == nil { options, err := daemon.buildSandboxOptions(container, n) if err != nil { @@ -1000,6 +1000,8 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) { sid := container.NetworkSettings.SandboxID settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + if sid == "" || len(settings) == 0 { return } diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go index 8c3ae27e60..23bc6459cd 100644 --- a/daemon/container_operations_windows.go +++ b/daemon/container_operations_windows.go @@ -54,7 +54,7 @@ func (daemon *Daemon) populateCommand(c *container.Container, env []string) erro if !c.Config.NetworkDisabled { en.Interface = &execdriver.NetworkInterface{ MacAddress: c.Config.MacAddress, - Bridge: daemon.configStore.Bridge.VirtualSwitchName, + Bridge: daemon.configStore.bridgeConfig.VirtualSwitchName, PortBindings: c.HostConfig.PortBindings, // TODO Windows. Include IPAddress. There already is a diff --git a/daemon/create.go b/daemon/create.go index 148f2b7a88..166af3bf73 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -26,6 +26,11 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types return types.ContainerCreateResponse{Warnings: warnings}, err } + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return types.ContainerCreateResponse{}, err + } + if params.HostConfig == nil { params.HostConfig = &containertypes.HostConfig{} } @@ -105,7 +110,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *containe } }() - if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil { + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { return nil, err } diff --git a/daemon/create_unix.go b/daemon/create_unix.go index e10f9e4aab..8eca648deb 100644 --- a/daemon/create_unix.go +++ b/daemon/create_unix.go @@ -9,15 +9,13 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/container" derr "github.com/docker/docker/errors" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" containertypes "github.com/docker/engine-api/types/container" "github.com/opencontainers/runc/libcontainer/label" ) // createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig, img *image.Image) error { +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { if err := daemon.Mount(container); err != nil { return err } @@ -46,17 +44,7 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain return derr.ErrorCodeMountOverFile.WithArgs(path) } - volumeDriver := hostConfig.VolumeDriver - if destination != "" && img != nil { - if _, ok := img.ContainerConfig.Volumes[destination]; ok { - // check for whether bind is not specified and then set to local - if _, ok := container.MountPoints[destination]; !ok { - volumeDriver = 
volume.DefaultDriverName - } - } - } - - v, err := daemon.volumes.CreateWithRef(name, volumeDriver, container.ID, nil) + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil) if err != nil { return err } diff --git a/daemon/create_windows.go b/daemon/create_windows.go index 54b0ab6ab7..d8fc059fd3 100644 --- a/daemon/create_windows.go +++ b/daemon/create_windows.go @@ -4,14 +4,13 @@ import ( "fmt" "github.com/docker/docker/container" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" containertypes "github.com/docker/engine-api/types/container" ) // createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig, img *image.Image) error { +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { for spec := range config.Volumes { mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver) @@ -31,14 +30,6 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain } volumeDriver := hostConfig.VolumeDriver - if mp.Destination != "" && img != nil { - if _, ok := img.ContainerConfig.Volumes[mp.Destination]; ok { - // check for whether bind is not specified and then set to local - if _, ok := container.MountPoints[mp.Destination]; !ok { - volumeDriver = volume.DefaultDriverName - } - } - } // Create the volume in the volume driver. If it doesn't exist, // a new one will be created. diff --git a/daemon/daemon.go b/daemon/daemon.go index 9e0e77e350..6367577cae 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -31,6 +31,7 @@ import ( containertypes "github.com/docker/engine-api/types/container" eventtypes "github.com/docker/engine-api/types/events" "github.com/docker/engine-api/types/filters" + networktypes "github.com/docker/engine-api/types/network" registrytypes "github.com/docker/engine-api/types/registry" "github.com/docker/engine-api/types/strslice" // register graph drivers @@ -99,46 +100,11 @@ func (e ErrImageDoesNotExist) Error() string { return fmt.Sprintf("no such id: %s", e.RefOrID) } -type contStore struct { - s map[string]*container.Container - sync.Mutex -} - -func (c *contStore) Add(id string, cont *container.Container) { - c.Lock() - c.s[id] = cont - c.Unlock() -} - -func (c *contStore) Get(id string) *container.Container { - c.Lock() - res := c.s[id] - c.Unlock() - return res -} - -func (c *contStore) Delete(id string) { - c.Lock() - delete(c.s, id) - c.Unlock() -} - -func (c *contStore) List() []*container.Container { - containers := new(History) - c.Lock() - for _, cont := range c.s { - containers.Add(cont) - } - c.Unlock() - containers.sort() - return *containers -} - // Daemon holds information about the Docker daemon. 
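The `contStore` deleted above was a plain mutex-guarded map, and the `container.Store` that replaces it keeps the same shape while adding the predicate helpers (`First`, `ApplyAll`) that later hunks in this patch rely on. A minimal standalone sketch of such a store, with invented names rather than docker's actual `container` package:

```go
package main

import (
	"fmt"
	"sync"
)

// Container stands in for docker's container.Container in this sketch.
type Container struct {
	ID      string
	Running bool
}

// MemoryStore is a mutex-guarded map, mirroring the shape of the
// contStore this patch replaces with container.NewMemoryStore().
type MemoryStore struct {
	mu sync.RWMutex
	s  map[string]*Container
}

func NewMemoryStore() *MemoryStore {
	return &MemoryStore{s: make(map[string]*Container)}
}

func (c *MemoryStore) Add(id string, cont *Container) {
	c.mu.Lock()
	c.s[id] = cont
	c.mu.Unlock()
}

func (c *MemoryStore) Get(id string) *Container {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.s[id]
}

// First returns the first container satisfying the predicate, the helper
// that getContainerUsingImage switches to later in this patch.
func (c *MemoryStore) First(filter func(*Container) bool) *Container {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, cont := range c.s {
		if filter(cont) {
			return cont
		}
	}
	return nil
}

// ApplyAll runs f on every container and waits for all calls to return,
// the way Shutdown and SystemInfo use the store in this patch. Whether
// the real implementation fans out to goroutines is an assumption here.
func (c *MemoryStore) ApplyAll(f func(*Container)) {
	c.mu.RLock()
	conts := make([]*Container, 0, len(c.s))
	for _, cont := range c.s {
		conts = append(conts, cont)
	}
	c.mu.RUnlock()

	var wg sync.WaitGroup
	for _, cont := range conts {
		wg.Add(1)
		go func(cont *Container) {
			defer wg.Done()
			f(cont)
		}(cont)
	}
	wg.Wait()
}

func main() {
	store := NewMemoryStore()
	store.Add("c1", &Container{ID: "c1", Running: true})
	store.Add("c2", &Container{ID: "c2"})
	fmt.Println(store.First(func(c *Container) bool { return c.Running }).ID) // c1
}
```

Snapshotting the map under the read lock before fanning out means `f` never runs with the lock held, so callbacks may themselves call back into the store.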
type Daemon struct { ID string repository string - containers *contStore + containers container.Store execCommands *exec.Store referenceStore reference.Store downloadManager *xfer.LayerDownloadManager @@ -282,10 +248,6 @@ func (daemon *Daemon) Register(container *container.Container) error { } } - if err := daemon.prepareMountPoints(container); err != nil { - return err - } - return nil } @@ -408,6 +370,23 @@ func (daemon *Daemon) restore() error { } group.Wait() + // Any containers that were started above have already had this done; + // we now need to prepare the mountpoints for the rest of the containers as well. + // Running this again on containers that already had it is harmless. + // This must run after any containers with a restart policy, so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + if !debug { if logrus.GetLevel() == logrus.InfoLevel { fmt.Println() @@ -601,6 +580,10 @@ func (daemon *Daemon) parents(c *container.Container) map[string]*container.Cont func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { fullName := path.Join(parent.Name, alias) if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } return err } daemon.linkIndex.link(parent, child, fullName) @@ -612,8 +595,8 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) { setDefaultMtu(config) - // Ensure we have compatible configuration options - if err := checkConfigOptions(config); err != nil { + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { return nil, err } @@ -794,7 +777,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo d.ID = trustKey.PublicKey().KeyID() d.repository = daemonRepo - d.containers = &contStore{s: make(map[string]*container.Container)} + d.containers = container.NewMemoryStore() d.execCommands = exec.NewStore() d.referenceStore = referenceStore d.distributionMetadataStore = distributionMetadataStore @@ -873,24 +856,18 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error { func (daemon *Daemon) Shutdown() error { daemon.shutdown = true if daemon.containers != nil { - group := sync.WaitGroup{} logrus.Debug("starting clean shutdown of all containers...") - for _, cont := range daemon.List() { - if !cont.IsRunning() { - continue + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return } - logrus.Debugf("stopping %s", cont.ID) - group.Add(1) - go func(c *container.Container) { - defer group.Done() - if err := daemon.shutdownContainer(c); err != nil { - logrus.Errorf("Stop container error: %v", err) - return - } - logrus.Debugf("container stopped %s", c.ID) - }(cont) - } - group.Wait() + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + logrus.Debugf("container stopped %s", c.ID) + }) } // trigger libnetwork
Stop only if it's initialized @@ -1252,6 +1229,9 @@ func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { // Treat as an ID if id, err := digest.ParseDigest(refOrID); err == nil { + if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } return image.ID(id), nil } @@ -1314,35 +1294,45 @@ func (daemon *Daemon) GetRemappedUIDGID() (int, int) { return uid, gid } -// ImageGetCached returns the earliest created image that is a child +// ImageGetCached returns the most recently created image that is a child // of the image with imgID, that had the same config when it was // created. nil is returned if a child cannot be found. An error is // returned if the parent image cannot be found. func (daemon *Daemon) ImageGetCached(imgID image.ID, config *containertypes.Config) (*image.Image, error) { - // Retrieve all images - imgs := daemon.Map() - - var siblings []image.ID - for id, img := range imgs { - if img.Parent == imgID { - siblings = append(siblings, id) - } - } - // Loop on the children of the given image and check the config - var match *image.Image - for _, id := range siblings { - img, ok := imgs[id] - if !ok { - return nil, fmt.Errorf("unable to find image %q", id) - } - if runconfig.Compare(&img.ContainerConfig, config) { - if match == nil || match.Created.Before(img.Created) { - match = img + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up-to-date match + if match == nil || match.Created.Before(img.Created) { + match = img + } } } + return match, nil } - return match, nil + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) } // tempDir returns the default directory to use for temporary files. @@ -1440,6 +1430,18 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon return verifyPlatformContainerSettings(daemon, hostConfig, config) } +// Checks if the client set configurations for more than one network while creating a container +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + return derr.ErrorCodeMultipleNetworkConnect.WithArgs(fmt.Sprintf("%v", l)) +} + func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) { volumesDriver, err := local.New(config.Root, rootUID, rootGID) if err != nil { @@ -1536,13 +1538,12 @@ func (daemon *Daemon) initDiscovery(config *Config) error { // daemon according to those changes. // These are the settings that Reload changes: // - Daemon labels. -// - Cluster discovery (reconfigure and restart).
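The `ImageGetCached` rewrite above also flips the build-cache rule: among child images whose config matches, the most recently created one now wins. A self-contained sketch of just that selection rule, with the `runconfig.Compare` call reduced to a string comparison for illustration:

```go
package main

import (
	"fmt"
	"time"
)

// image stands in for docker's image.Image in this sketch.
type image struct {
	ID      string
	Config  string // placeholder for the full container config
	Created time.Time
}

// getMatch mirrors the selection rule above: among candidate children
// whose config matches, prefer the most recently created image.
func getMatch(siblings []image, config string) *image {
	var match *image
	for i := range siblings {
		img := &siblings[i]
		if img.Config != config {
			continue
		}
		// check for the most up-to-date match
		if match == nil || match.Created.Before(img.Created) {
			match = img
		}
	}
	return match
}

func main() {
	now := time.Now()
	siblings := []image{
		{ID: "older", Config: "RUN apt-get update", Created: now.Add(-2 * time.Hour)},
		{ID: "newer", Config: "RUN apt-get update", Created: now.Add(-1 * time.Hour)},
		{ID: "other", Config: "RUN true", Created: now},
	}
	fmt.Println(getMatch(siblings, "RUN apt-get update").ID) // newer
}
```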
func (daemon *Daemon) Reload(config *Config) error { daemon.configStore.reloadLock.Lock() - defer daemon.configStore.reloadLock.Unlock() - daemon.configStore.Labels = config.Labels - return daemon.reloadClusterDiscovery(config) + daemon.configStore.reloadLock.Unlock() + + return nil } func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go index 26e9c2f743..5b7d4cf177 100644 --- a/daemon/daemon_test.go +++ b/daemon/daemon_test.go @@ -61,15 +61,12 @@ func TestGetContainer(t *testing.T) { }, } - store := &contStore{ - s: map[string]*container.Container{ - c1.ID: c1, - c2.ID: c2, - c3.ID: c3, - c4.ID: c4, - c5.ID: c5, - }, - } + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) index := truncindex.NewTruncIndex([]string{}) index.Add(c1.ID) @@ -440,7 +437,7 @@ func TestDaemonDiscoveryReload(t *testing.T) { &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.reloadClusterDiscovery(newConfig); err != nil { t.Fatal(err) } ch, errCh = daemon.discoveryWatcher.Watch(stopCh) @@ -472,7 +469,7 @@ func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, } - if err := daemon.Reload(newConfig); err != nil { + if err := daemon.reloadClusterDiscovery(newConfig); err != nil { t.Fatal(err) } stopCh := make(chan struct{}) diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go index 4233a7e90e..cfa9ef1ddb 100644 --- a/daemon/daemon_unix.go +++ b/daemon/daemon_unix.go @@ -18,6 +18,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/reference" @@ -361,6 +362,24 @@ func verifyContainerResources(resources *containertypes.Resources) ([]string, er return warnings, nil } +func usingSystemd(config *Config) bool { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + if val == "systemd" { + return true + } + } + + return false +} + +func (daemon *Daemon) usingSystemd() bool { + return usingSystemd(daemon.configStore) +} + // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config) ([]string, error) { @@ -407,20 +426,31 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. 
return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled.") } } + if hostConfig.CgroupParent != "" && daemon.usingSystemd() { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } return warnings, nil } -// checkConfigOptions checks for mutually incompatible config options -func checkConfigOptions(config *Config) error { +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { // Check for mutually incompatible config options - if config.Bridge.Iface != "" && config.Bridge.IP != "" { + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.") } - if !config.Bridge.EnableIPTables && !config.Bridge.InterContainerCommunication { + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true.") } - if !config.Bridge.EnableIPTables && config.Bridge.EnableIPMasq { - config.Bridge.EnableIPMasq = false + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if config.CgroupParent != "" && usingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } } return nil } @@ -452,7 +482,7 @@ func configureKernelSecuritySupport(config *Config, driverName string) error { } func isBridgeNetworkDisabled(config *Config) bool { - return config.Bridge.Iface == disableNetworkBridge + return config.bridgeConfig.Iface == disableNetworkBridge } func (daemon *Daemon) networkOptions(dconfig *Config) ([]nwconfig.Option, error) { @@ -526,9 +556,9 @@ func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkC func driverOptions(config *Config) []nwconfig.Option { bridgeConfig := options.Generic{ - "EnableIPForwarding": config.Bridge.EnableIPForward, - "EnableIPTables": config.Bridge.EnableIPTables, - "EnableUserlandProxy": config.Bridge.EnableUserlandProxy} + "EnableIPForwarding": config.bridgeConfig.EnableIPForward, + "EnableIPTables": config.bridgeConfig.EnableIPTables, + "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy} bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} dOptions := []nwconfig.Option{} @@ -544,20 +574,20 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e } bridgeName := bridge.DefaultBridgeName - if config.Bridge.Iface != "" { - bridgeName = config.Bridge.Iface + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), - bridge.EnableIPMasquerade: strconv.FormatBool(config.Bridge.EnableIPMasq), - bridge.EnableICC: strconv.FormatBool(config.Bridge.InterContainerCommunication), + bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), + bridge.EnableICC: 
strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), } // --ip processing - if config.Bridge.DefaultIP != nil { - netOption[bridge.DefaultBindingIP] = config.Bridge.DefaultIP.String() + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() } var ( @@ -576,9 +606,9 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e } } - if config.Bridge.IP != "" { - ipamV4Conf.PreferredPool = config.Bridge.IP - ip, _, err := net.ParseCIDR(config.Bridge.IP) + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) if err != nil { return err } @@ -587,8 +617,8 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } - if config.Bridge.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.Bridge.FixedCIDR) + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) if err != nil { return err } @@ -596,13 +626,13 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e ipamV4Conf.SubPool = fCIDR.String() } - if config.Bridge.DefaultGatewayIPv4 != nil { - ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.Bridge.DefaultGatewayIPv4.String() + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() } var deferIPv6Alloc bool - if config.Bridge.FixedCIDRv6 != "" { - _, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6) + if config.bridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) if err != nil { return err } @@ -632,11 +662,11 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e } } - if config.Bridge.DefaultGatewayIPv6 != nil { + if config.bridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } - ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.Bridge.DefaultGatewayIPv6.String() + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} @@ -648,7 +678,7 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e _, err = controller.NewNetwork("bridge", "bridge", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, - netlabel.EnableIPv6: config.Bridge.EnableIPv6, + netlabel.EnableIPv6: config.bridgeConfig.EnableIPv6, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go index 3b571b6c1b..e5db1a8b74 100644 --- a/daemon/daemon_windows.go +++ b/daemon/daemon_windows.go @@ -88,8 +88,8 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. 
return nil, nil } -// checkConfigOptions checks for mutually incompatible config options -func checkConfigOptions(config *Config) error { +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { return nil } @@ -121,8 +121,8 @@ func isBridgeNetworkDisabled(config *Config) bool { func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkController, error) { // Set the name of the virtual switch if not specified by -b on daemon start - if config.Bridge.VirtualSwitchName == "" { - config.Bridge.VirtualSwitchName = defaultVirtualSwitch + if config.bridgeConfig.VirtualSwitchName == "" { + config.bridgeConfig.VirtualSwitchName = defaultVirtualSwitch } return nil, nil } diff --git a/daemon/delete.go b/daemon/delete.go index c8be1bc506..a086aed616 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -43,15 +43,14 @@ func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) return daemon.rmLink(container, name) } - if err := daemon.cleanupContainer(container, config.ForceRemove); err != nil { - return err + err = daemon.cleanupContainer(container, config.ForceRemove) + if err == nil || config.ForceRemove { + if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { + logrus.Error(e) + } } - if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil { - logrus.Error(err) - } - - return nil + return err } func (daemon *Daemon) rmLink(container *container.Container, name string) error { diff --git a/daemon/delete_test.go b/daemon/delete_test.go index e0e6466a9c..0d39b4d68f 100644 --- a/daemon/delete_test.go +++ b/daemon/delete_test.go @@ -20,7 +20,7 @@ func TestContainerDoubleDelete(t *testing.T) { repository: tmp, root: tmp, } - daemon.containers = &contStore{s: make(map[string]*container.Container)} + daemon.containers = container.NewMemoryStore() container := &container.Container{ CommonContainer: container.CommonContainer{ diff --git a/daemon/exec.go b/daemon/exec.go index 464dd5db66..7772790d6e 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -52,6 +52,9 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { if container.IsPaused() { return nil, derr.ErrorCodeExecPaused.WithArgs(container.ID) } + if container.IsRestarting() { + return nil, derr.ErrorCodeExecRestarting.WithArgs(container.ID) + } return ec, nil } } @@ -76,6 +79,9 @@ func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { if container.IsPaused() { return nil, derr.ErrorCodeExecPaused.WithArgs(name) } + if container.IsRestarting() { + return nil, derr.ErrorCodeExecRestarting.WithArgs(name) + } return container, nil } @@ -135,6 +141,11 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io. 
} ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + return derr.ErrorCodeExecExited.WithArgs(ec.ID) + } + if ec.Running { ec.Unlock() return derr.ErrorCodeExecRunning.WithArgs(ec.ID) @@ -214,7 +225,7 @@ func (d *Daemon) Exec(c *container.Container, execConfig *exec.Config, pipes *ex exitStatus = 128 } - execConfig.ExitCode = exitStatus + execConfig.ExitCode = &exitStatus execConfig.Running = false return exitStatus, err diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go index 5504ed3575..6941cde689 100644 --- a/daemon/exec/exec.go +++ b/daemon/exec/exec.go @@ -18,7 +18,7 @@ type Config struct { *runconfig.StreamConfig ID string Running bool - ExitCode int + ExitCode *int ProcessConfig *execdriver.ProcessConfig OpenStdin bool OpenStderr bool @@ -53,7 +53,13 @@ func NewStore() *Store { // Commands returns the exec configurations in the store. func (e *Store) Commands() map[string]*Config { - return e.commands + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands } // Add adds a new exec configuration to the store. diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 4f97ed93f8..4cc14532fd 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -436,7 +436,6 @@ func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e flags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV err error ) - fulldest := filepath.Join(c.Rootfs, m.Destination) if m.Data != "" { flags, data, err = mount.ParseTmpfsOptions(m.Data) if err != nil { @@ -449,8 +448,6 @@ func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) e Data: data, Device: "tmpfs", Flags: flags, - PremountCmds: genTmpfsPremountCmd(c.TmpDir, fulldest, m.Destination), - PostmountCmds: genTmpfsPostmountCmd(c.TmpDir, fulldest, m.Destination), PropagationFlags: []int{mountPropagationMap[volume.DefaultPropagationMode]}, }) continue diff --git a/daemon/execdriver/native/seccomp_default.go b/daemon/execdriver/native/seccomp_default.go index b1c353a004..a3b4028359 100644 --- a/daemon/execdriver/native/seccomp_default.go +++ b/daemon/execdriver/native/seccomp_default.go @@ -17,7 +17,7 @@ func arches() []string { var a = native.String() switch a { case "amd64": - return []string{"amd64", "x86"} + return []string{"amd64", "x86", "x32"} case "arm64": return []string{"arm64", "arm"} case "mips64": @@ -944,6 +944,11 @@ var defaultSeccompProfile = &configs.Seccomp{ Action: configs.Allow, Args: []*configs.Arg{}, }, + { + Name: "recv", + Action: configs.Allow, + Args: []*configs.Arg{}, + }, { Name: "recvfrom", Action: configs.Allow, @@ -1119,6 +1124,11 @@ var defaultSeccompProfile = &configs.Seccomp{ Action: configs.Allow, Args: []*configs.Arg{}, }, + { + Name: "send", + Action: configs.Allow, + Args: []*configs.Arg{}, + }, { Name: "sendfile", Action: configs.Allow, diff --git a/daemon/execdriver/native/tmpfs.go b/daemon/execdriver/native/tmpfs.go deleted file mode 100644 index 89f7f4ae7f..0000000000 --- a/daemon/execdriver/native/tmpfs.go +++ /dev/null @@ -1,56 +0,0 @@ -package native - -import ( - "fmt" - "os" - "os/exec" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/configs" -) - -func genTmpfsPremountCmd(tmpDir string, fullDest string, dest string) []configs.Command { - var premount []configs.Command - tarPath, err := exec.LookPath("tar") - if err != nil { - logrus.Warn("tar 
command is not available for tmpfs mount: %s", err) - return premount - } - if _, err = exec.LookPath("rm"); err != nil { - logrus.Warn("rm command is not available for tmpfs mount: %s", err) - return premount - } - tarFile := fmt.Sprintf("%s/%s.tar", tmpDir, strings.Replace(dest, "/", "_", -1)) - if _, err := os.Stat(fullDest); err == nil { - premount = append(premount, configs.Command{ - Path: tarPath, - Args: []string{"-cf", tarFile, "-C", fullDest, "."}, - }) - } - return premount -} - -func genTmpfsPostmountCmd(tmpDir string, fullDest string, dest string) []configs.Command { - var postmount []configs.Command - tarPath, err := exec.LookPath("tar") - if err != nil { - return postmount - } - rmPath, err := exec.LookPath("rm") - if err != nil { - return postmount - } - if _, err := os.Stat(fullDest); os.IsNotExist(err) { - return postmount - } - tarFile := fmt.Sprintf("%s/%s.tar", tmpDir, strings.Replace(dest, "/", "_", -1)) - postmount = append(postmount, configs.Command{ - Path: tarPath, - Args: []string{"-xf", tarFile, "-C", fullDest, "."}, - }) - return append(postmount, configs.Command{ - Path: rmPath, - Args: []string{"-f", tarFile}, - }) -} diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 9b8c4de92f..51054fa6ef 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -374,20 +374,10 @@ func (a *Driver) DiffPath(id string) (string, func() error, error) { } func (a *Driver) applyDiff(id string, diff archive.Reader) error { - dir := path.Join(a.rootPath(), "diff", id) - if err := chrootarchive.UntarUncompressed(diff, dir, &archive.TarOptions{ + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, - }); err != nil { - return err - } - - // show invalid whiteouts warning. 
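The warning deleted in this hunk detected aufs hardlink references by scanning the layer's whiteout link directory after the diff was unpacked. A standalone sketch of that scan; the `.wh..wh.plnk` directory name mirrors what docker's archive package calls `WhiteoutLinkDir`, an assumption here:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path"
)

// whiteoutLinkDir is the directory aufs uses to store hardlink targets;
// assumed to match archive.WhiteoutLinkDir (".wh..wh.plnk").
const whiteoutLinkDir = ".wh..wh.plnk"

// hasAufsHardlinks reports whether an unpacked layer contains aufs
// hardlink references, the condition the removed warning checked for.
func hasAufsHardlinks(layerDir string) bool {
	files, err := ioutil.ReadDir(path.Join(layerDir, whiteoutLinkDir))
	return err == nil && len(files) > 0
}

func main() {
	dir := "/var/lib/docker/aufs/diff/example" // hypothetical layer path
	fmt.Println("contains aufs hardlink references:", hasAufsHardlinks(dir))
}
```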
- files, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir)) - if err == nil && len(files) > 0 { - logrus.Warnf("Archive contains aufs hardlink references that are not supported.") - } - return nil + }) } // DiffSize calculates the changes between the specified id @@ -517,7 +507,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro } if firstMount { - opts := "dio,noplink,xino=/dev/shm/aufs.xino" + opts := "dio,xino=/dev/shm/aufs.xino" if useDirperm() { opts += ",dirperm1" } diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index ee70a8b72c..761b5b6872 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -638,88 +638,6 @@ func TestApplyDiff(t *testing.T) { } } -func TestHardlinks(t *testing.T) { - // Copy 2 layers that have linked files to new layers and check if hardlinks are preserved - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - origFile := "test_file" - linkedFile := "linked_file" - - if err := d.Create("source-1", "", ""); err != nil { - t.Fatal(err) - } - - mountPath, err := d.Get("source-1", "") - if err != nil { - t.Fatal(err) - } - - f, err := os.Create(path.Join(mountPath, origFile)) - if err != nil { - t.Fatal(err) - } - f.Close() - - layerTar1, err := d.Diff("source-1", "") - if err != nil { - t.Fatal(err) - } - - if err := d.Create("source-2", "source-1", ""); err != nil { - t.Fatal(err) - } - - mountPath, err = d.Get("source-2", "") - if err != nil { - t.Fatal(err) - } - - if err := os.Link(path.Join(mountPath, origFile), path.Join(mountPath, linkedFile)); err != nil { - t.Fatal(err) - } - - layerTar2, err := d.Diff("source-2", "source-1") - if err != nil { - t.Fatal(err) - } - - if err := d.Create("target-1", "", ""); err != nil { - t.Fatal(err) - } - - if _, err := d.ApplyDiff("target-1", "", layerTar1); err != nil { - t.Fatal(err) - } - - if err := d.Create("target-2", "target-1", ""); err != nil { - t.Fatal(err) - } - - if _, err := d.ApplyDiff("target-2", "target-1", layerTar2); err != nil { - t.Fatal(err) - } - - mountPath, err = d.Get("target-2", "") - if err != nil { - t.Fatal(err) - } - - fi1, err := os.Lstat(path.Join(mountPath, origFile)) - if err != nil { - t.Fatal(err) - } - fi2, err := os.Lstat(path.Join(mountPath, linkedFile)) - if err != nil { - t.Fatal(err) - } - - if !os.SameFile(fi1, fi2) { - t.Fatal("Target files are not linked") - } -} - func hash(c string) string { h := sha256.New() fmt.Fprint(h, c) diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go index 410a62ff7c..e64ab1bfa2 100644 --- a/daemon/graphdriver/driver_linux.go +++ b/daemon/graphdriver/driver_linux.go @@ -18,6 +18,8 @@ const ( FsMagicExtfs = FsMagic(0x0000EF53) // FsMagicF2fs filesystem id for F2fs FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) // FsMagicJffs2Fs filesystem id for Jffs2Fs FsMagicJffs2Fs = FsMagic(0x000072b6) // FsMagicJfs filesystem id for Jfs @@ -60,6 +62,7 @@ var ( FsMagicCramfs: "cramfs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", FsMagicJffs2Fs: "jffs2", FsMagicJfs: "jfs", FsMagicNfsFs: "nfs", diff --git a/daemon/image_delete.go b/daemon/image_delete.go index b6773f8832..0f32a903e6 100644 --- a/daemon/image_delete.go +++ b/daemon/image_delete.go @@ -179,13 +179,9 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool { // getContainerUsingImage returns a container that was created using the given //
imageID. Returns nil if there is no such container. func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { - for _, container := range daemon.List() { - if container.ImageID == imageID { - return container - } - } - - return nil + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) } // removeImageRef attempts to parse and remove the given image reference from @@ -328,19 +324,15 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType if mask&conflictRunningContainer != 0 { // Check if any running container is using the image. - for _, container := range daemon.List() { - if !container.IsRunning() { - // Skip this until we check for soft conflicts later. - continue - } - - if container.ImageID == imgID { - return &imageDeleteConflict{ - imgID: imgID, - hard: true, - used: true, - message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), - } + running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), } } } @@ -355,18 +347,14 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType if mask&conflictStoppedContainer != 0 { // Check if any stopped containers reference this image. - for _, container := range daemon.List() { - if container.IsRunning() { - // Skip this as it was checked above in hard conflict conditions. - continue - } - - if container.ImageID == imgID { - return &imageDeleteConflict{ - imgID: imgID, - used: true, - message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), - } + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), } } } diff --git a/daemon/info.go b/daemon/info.go index 804d6e4709..008ac20a5f 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -4,9 +4,11 @@ import ( "os" "runtime" "strings" + "sync/atomic" "time" "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/parsers/kernel" @@ -54,24 +56,24 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { initPath := utils.DockerInitPath("") sysInfo := sysinfo.New(true) - var cRunning, cPaused, cStopped int - for _, c := range daemon.List() { + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { switch c.StateString() { case "paused": - cPaused++ + atomic.AddInt32(&cPaused, 1) case "running": - cRunning++ + atomic.AddInt32(&cRunning, 1) default: - cStopped++ + atomic.AddInt32(&cStopped, 1) } - } + }) v := &types.Info{ ID: daemon.ID, - Containers: len(daemon.List()), - ContainersRunning: cRunning, - ContainersPaused: cPaused, - ContainersStopped: cStopped, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), Images: len(daemon.imageStore.Map()), Driver: 
daemon.GraphDriverName(), DriverStatus: daemon.layerStore.DriverStatus(), diff --git a/daemon/links_test.go b/daemon/links_test.go index 79a641563c..d7a3c2aea9 100644 --- a/daemon/links_test.go +++ b/daemon/links_test.go @@ -39,12 +39,9 @@ func TestMigrateLegacySqliteLinks(t *testing.T) { }, } - store := &contStore{ - s: map[string]*container.Container{ - c1.ID: c1, - c2.ID: c2, - }, - } + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) d := &Daemon{root: tmpDir, containers: store} db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) diff --git a/daemon/list.go b/daemon/list.go index f262fbe821..5aebb7472d 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -15,6 +15,10 @@ import ( "github.com/docker/go-connections/nat" ) +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, +} + // iterationAction represents possible outcomes happening during the container iteration. type iterationAction int @@ -410,21 +414,33 @@ func (daemon *Daemon) transformContainer(container *container.Container, ctx *li // Volumes lists known volumes, using the filter to restrict the range // of volumes returned. func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { - var volumesOut []*types.Volume + var ( + volumesOut []*types.Volume + danglingOnly = false + ) volFilters, err := filters.FromParam(filter) if err != nil { return nil, nil, err } - filterUsed := volFilters.Include("dangling") && - (volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1")) + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + if volFilters.Include("dangling") { + if volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !volFilters.ExactMatch("dangling", "false") && !volFilters.ExactMatch("dangling", "0") { + return nil, nil, fmt.Errorf("Invalid filter 'dangling=%s'", volFilters.Get("dangling")) + } + } volumes, warnings, err := daemon.volumes.List() if err != nil { return nil, nil, err } - if filterUsed { - volumes = daemon.volumes.FilterByUsed(volumes) + if volFilters.Include("dangling") { + volumes = daemon.volumes.FilterByUsed(volumes, !danglingOnly) } for _, v := range volumes { volumesOut = append(volumesOut, volumeToAPIType(v)) diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go index 95cd9eb355..436c0a8f9c 100644 --- a/daemon/logger/copier.go +++ b/daemon/logger/copier.go @@ -20,14 +20,16 @@ type Copier struct { srcs map[string]io.Reader dst Logger copyJobs sync.WaitGroup + closed chan struct{} } // NewCopier creates a new Copier func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) *Copier { return &Copier{ - cid: cid, - srcs: srcs, - dst: dst, + cid: cid, + srcs: srcs, + dst: dst, + closed: make(chan struct{}), } } @@ -44,24 +46,28 @@ func (c *Copier) copySrc(name string, src io.Reader) { reader := bufio.NewReader(src) for { - line, err := reader.ReadBytes('\n') - line = bytes.TrimSuffix(line, []byte{'\n'}) - - // ReadBytes can return full or partial output even when it failed. - // e.g. it can return a full entry and EOF. 
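The copier rewrite that follows wraps the read loop in a `select` on a `closed` channel, so a copier stuck feeding a slow logger can be abandoned once `Close` is called. A minimal sketch of the same pattern outside docker; `lineCopier` is an invented name, and `sync.Once` stands in for the non-blocking close guard used in the patch:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// lineCopier forwards newline-delimited lines from a source to a log
// callback until EOF or until the copier is closed.
type lineCopier struct {
	closed chan struct{}
	jobs   sync.WaitGroup
	once   sync.Once
}

func newLineCopier() *lineCopier {
	return &lineCopier{closed: make(chan struct{})}
}

func (c *lineCopier) run(src io.Reader, log func(line []byte)) {
	c.jobs.Add(1)
	go func() {
		defer c.jobs.Done()
		reader := bufio.NewReader(src)
		for {
			select {
			case <-c.closed:
				return // abandoned: stop feeding the logger
			default:
				line, err := reader.ReadBytes('\n')
				line = bytes.TrimSuffix(line, []byte{'\n'})
				// ReadBytes can return a full line together with an
				// error (e.g. a final entry plus EOF), so log first.
				if err == nil || len(line) > 0 {
					log(line)
				}
				if err != nil {
					return
				}
			}
		}
	}()
}

func (c *lineCopier) wait()  { c.jobs.Wait() }
func (c *lineCopier) close() { c.once.Do(func() { close(c.closed) }) }

func main() {
	c := newLineCopier()
	c.run(strings.NewReader("a\nb\nc\n"), func(line []byte) {
		fmt.Printf("log: %s\n", line)
	})
	c.wait() // the source drains, so the goroutine exits on EOF
	c.close()
}
```

The same caveat applies as in the patch: the `select` is only checked between reads, so a goroutine blocked inside `ReadBytes` is not interrupted until that read returns.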
- if err == nil || len(line) > 0 { - if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { - logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) - } - } - - if err != nil { - if err != io.EOF { - logrus.Errorf("Error scanning log stream: %s", err) - } + select { + case <-c.closed: return - } + default: + line, err := reader.ReadBytes('\n') + line = bytes.TrimSuffix(line, []byte{'\n'}) + // ReadBytes can return full or partial output even when it failed. + // e.g. it can return a full entry and EOF. + if err == nil || len(line) > 0 { + if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) + } + } + + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + } + return + } + } } } @@ -69,3 +75,12 @@ func (c *Copier) Wait() { c.copyJobs.Wait() } + +// Close closes the copier +func (c *Copier) Close() { + select { + case <-c.closed: + default: + close(c.closed) + } +} diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go index f8980b3615..30239f06bc 100644 --- a/daemon/logger/copier_test.go +++ b/daemon/logger/copier_test.go @@ -10,9 +10,15 @@ import ( type TestLoggerJSON struct { *json.Encoder + delay time.Duration } -func (l *TestLoggerJSON) Log(m *Message) error { return l.Encode(m) } +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + return l.Encode(m) +} func (l *TestLoggerJSON) Close() error { return nil } @@ -94,3 +100,33 @@ func TestCopier(t *testing.T) { } } } + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + c := NewCopier(cid, map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * time.Millisecond): + t.Fatalf("failed to exit in time after the copier is closed") + case <-wait: + } +} diff --git a/daemon/mounts.go b/daemon/mounts.go index 81a82753ec..276301d130 100644 --- a/daemon/mounts.go +++ b/daemon/mounts.go @@ -25,6 +25,11 @@ func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) } daemon.volumes.Dereference(m.Volume, container.ID) if rm { + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v <name>:/foo` + if m.Named { + continue + } err := daemon.volumes.Remove(m.Volume) // Ignore volume in use errors because having this // volume being referenced by other container is diff --git a/daemon/volumes.go b/daemon/volumes.go index c0097679f6..7e2c417b2b 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -90,6 +90,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo Driver: m.Driver, Destination: m.Destination, Propagation: m.Propagation, +
Named: m.Named, } if len(cp.Source) == 0 { @@ -126,6 +127,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo bind.Source = v.Path() // bind.Name is an already existing volume, we need to use that here bind.Driver = v.DriverName() + bind.Named = true bind = setBindModeIfNull(bind) } if label.RelabelNeeded(bind.Mode) { @@ -159,7 +161,6 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { if len(m.Driver) > 0 && m.Volume == nil { v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) - if err != nil { return err } diff --git a/distribution/pull.go b/distribution/pull.go index 5f38a67673..ab8c14ce81 100644 --- a/distribution/pull.go +++ b/distribution/pull.go @@ -3,7 +3,6 @@ package distribution import ( "fmt" "os" - "strings" "github.com/Sirupsen/logrus" "github.com/docker/docker/api" @@ -97,13 +96,12 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo } var ( - // use a slice to append the error strings and return a joined string to caller - errors []string + lastErr error // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport - // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in errors. + // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr. // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of - // any subsequent ErrNoSupport errors in errors. + // any subsequent ErrNoSupport errors in lastErr. // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant // error is the ones from v2 endpoints not v1. @@ -123,7 +121,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo puller, err := newPuller(endpoint, repoInfo, imagePullConfig) if err != nil { - errors = append(errors, err.Error()) + lastErr = err continue } if err := puller.Pull(ctx, ref); err != nil { @@ -144,34 +142,28 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. discardNoSupportErrors = true // append subsequent errors - errors = append(errors, err.Error()) + lastErr = err } else if !discardNoSupportErrors { // Save the ErrNoSupport error, because it's either the first error or all encountered errors // were also ErrNoSupport errors. 
// append subsequent errors - errors = append(errors, err.Error()) + lastErr = err } continue } - errors = append(errors, err.Error()) - logrus.Debugf("Not continuing with error: %v", fmt.Errorf(strings.Join(errors, "\n"))) - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } + logrus.Debugf("Not continuing with error: %v", err) + return err } imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") return nil } - if len(errors) == 0 { - return fmt.Errorf("no endpoints found for %s", ref.String()) + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil + return lastErr } // writeStatus writes a status message to out. If layersDownloaded is true, the diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go index 7bb171000d..71a1c4e5fd 100644 --- a/distribution/pull_v2.go +++ b/distribution/pull_v2.go @@ -171,6 +171,10 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre _, err = io.Copy(tmpFile, io.TeeReader(reader, verifier)) if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } return nil, 0, retryOnError(err) } @@ -179,8 +183,9 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre if !verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) + tmpFile.Close() - if err := os.RemoveAll(tmpFile.Name()); err != nil { + if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } @@ -191,7 +196,14 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) - tmpFile.Seek(0, 0) + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return nil, 0, xfer.DoNotRetry{Err: err} + } return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil } diff --git a/distribution/push.go b/distribution/push.go index 445f6bb6bd..ecf59ec20e 100644 --- a/distribution/push.go +++ b/distribution/push.go @@ -171,7 +171,14 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo // argument so that it can be used with httpBlobWriter's ReadFrom method. // Using httpBlobWriter's Write method would send a PATCH request for every // Write call. -func compress(in io.Reader) io.ReadCloser { +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + pipeReader, pipeWriter := io.Pipe() // Use a bufio.Writer to avoid excessive chunking in HTTP request. 
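The substantive change to `compress` in this hunk is the second return value: a channel that is closed when the compressing goroutine exits, which the matching `push_v2.go` change drains before closing the source reader. A runnable sketch of the whole pipe-plus-done-channel pattern; the buffer size is hardcoded where the patch uses `compressionBufSize`:

```go
package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// compress returns a reader of gzip-compressed data from in, plus a
// channel closed once the compressing goroutine has finished, so the
// caller knows when it is safe to release the underlying reader.
func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	compressionDone := make(chan struct{})

	pipeReader, pipeWriter := io.Pipe()
	// Buffer writes to avoid excessive chunking on the consumer side.
	bufWriter := bufio.NewWriterSize(pipeWriter, 32*1024)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, in)
		if err == nil {
			err = compressor.Close() // flush the gzip footer into bufWriter
		}
		if err == nil {
			err = bufWriter.Flush() // push buffered bytes into the pipe
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
		close(compressionDone) // the goroutine is done with in
	}()

	return pipeReader, compressionDone
}

func main() {
	rc, done := compress(strings.NewReader(strings.Repeat("layer data ", 1000)))
	n, _ := io.Copy(ioutil.Discard, rc)
	rc.Close()
	<-done // only now is it safe to release the source reader
	fmt.Println("compressed bytes:", n)
}
```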
bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) @@ -190,7 +197,8 @@ func compress(in io.Reader) io.ReadCloser { } else { pipeWriter.Close() } + close(compressionDone) }() - return pipeReader + return pipeReader, compressionDone } diff --git a/distribution/push_v2.go b/distribution/push_v2.go index 68c8f69be7..e5f9de81f6 100644 --- a/distribution/push_v2.go +++ b/distribution/push_v2.go @@ -311,6 +311,8 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. case distribution.ErrBlobMounted: progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + err.Descriptor.MediaType = schema2.MediaTypeLayer + pd.pushState.Lock() pd.pushState.confirmedV2 = true pd.pushState.remoteLayers[diffID] = err.Descriptor @@ -343,8 +345,11 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. size, _ := pd.layer.DiffSize() reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing") - defer reader.Close() - compressedReader := compress(reader) + compressedReader, compressionDone := compress(reader) + defer func() { + reader.Close() + <-compressionDone + }() digester := digest.Canonical.New() tee := io.TeeReader(compressedReader, digester.Hash()) diff --git a/distribution/registry.go b/distribution/registry.go index 1d4a2c4efe..1c2b4f3cc6 100644 --- a/distribution/registry.go +++ b/distribution/registry.go @@ -6,6 +6,7 @@ import ( "net/http" "net/url" "strings" + "syscall" "time" "github.com/docker/distribution" @@ -145,8 +146,14 @@ func retryOnError(err error) error { case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied: return xfer.DoNotRetry{Err: err} } + case *url.Error: + return retryOnError(v.Err) case *client.UnexpectedHTTPResponseError: return xfer.DoNotRetry{Err: err} + case error: + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } } // let's be nice and fallback if the error is a completely // unexpected one. diff --git a/distribution/xfer/transfer.go b/distribution/xfer/transfer.go index 7f323c1df5..6852225488 100644 --- a/distribution/xfer/transfer.go +++ b/distribution/xfer/transfer.go @@ -1,6 +1,7 @@ package xfer import ( + "runtime" "sync" "github.com/docker/docker/pkg/progress" @@ -38,7 +39,7 @@ type Transfer interface { Watch(progressOutput progress.Output) *Watcher Release(*Watcher) Context() context.Context - Cancel() + Close() Done() <-chan struct{} Released() <-chan struct{} Broadcast(masterProgressChan <-chan progress.Progress) @@ -61,11 +62,14 @@ type transfer struct { // running remains open as long as the transfer is in progress. running chan struct{} - // hasWatchers stays open until all watchers release the transfer. - hasWatchers chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} // broadcastDone is true if the master progress channel has closed. broadcastDone bool + // closed is true if Close has been called + closed bool // broadcastSyncChan allows watchers to "ping" the broadcasting // goroutine to wait for it to deplete its input channel.
This ensures // a detaching watcher won't miss an event that was sent before it @@ -78,7 +82,7 @@ func NewTransfer() Transfer { t := &transfer{ watchers: make(map[chan struct{}]*Watcher), running: make(chan struct{}), - hasWatchers: make(chan struct{}), + released: make(chan struct{}), broadcastSyncChan: make(chan struct{}), } @@ -144,13 +148,13 @@ func (t *transfer) Watch(progressOutput progress.Output) *Watcher { running: make(chan struct{}), } + t.watchers[w.releaseChan] = w + if t.broadcastDone { close(w.running) return w } - t.watchers[w.releaseChan] = w - go func() { defer func() { close(w.running) @@ -202,8 +206,19 @@ func (t *transfer) Release(watcher *Watcher) { delete(t.watchers, watcher.releaseChan) if len(t.watchers) == 0 { - close(t.hasWatchers) - t.cancel() + if t.closed { + // released may have been closed already if all + // watchers were released and then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } } t.mu.Unlock() @@ -223,9 +238,9 @@ func (t *transfer) Done() <-chan struct{} { } // Released returns a channel which is closed once all watchers release the -// transfer. +// transfer AND the transfer is no longer tracked by the transfer manager. func (t *transfer) Released() <-chan struct{} { - return t.hasWatchers + return t.released } // Context returns the context associated with the transfer. @@ -233,9 +248,15 @@ func (t *transfer) Context() context.Context { return t.ctx } -// Cancel cancels the context associated with the transfer. -func (t *transfer) Cancel() { - t.cancel() +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() } // DoFunc is a function called by the transfer manager to actually perform @@ -280,10 +301,33 @@ func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput tm.mu.Lock() defer tm.mu.Unlock() - if xfer, present := tm.transfers[key]; present { + for { + xfer, present := tm.transfers[key] + if !present { + break + } // Transfer is already in progress. watcher := xfer.Watch(progressOutput) - return xfer, watcher + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed from the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition.
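The comment ending here describes the awkward corner of the new lifecycle: `Released()` must fire only when the last watcher is gone *and* the manager has stopped tracking the transfer via `Close`. A reduced sketch of that bookkeeping, with invented names and none of the context or progress plumbing:

```go
package main

import (
	"fmt"
	"sync"
)

// transfer models only the release bookkeeping: the released channel
// closes once every watcher is gone AND the manager has called close.
type transfer struct {
	mu       sync.Mutex
	watchers int
	closed   bool
	released chan struct{}
}

func newTransfer() *transfer {
	return &transfer{released: make(chan struct{})}
}

func (t *transfer) watch() {
	t.mu.Lock()
	t.watchers++
	t.mu.Unlock()
}

func (t *transfer) release() {
	t.mu.Lock()
	t.watchers--
	if t.watchers == 0 && t.closed {
		t.closeReleased()
	}
	t.mu.Unlock()
}

// close is what the manager calls when it stops tracking the transfer.
func (t *transfer) close() {
	t.mu.Lock()
	t.closed = true
	if t.watchers == 0 {
		t.closeReleased()
	}
	t.mu.Unlock()
}

// closeReleased closes the channel at most once: a watcher can be added
// and released again after close, as the patch's comment describes.
func (t *transfer) closeReleased() {
	select {
	case <-t.released:
	default:
		close(t.released)
	}
}

func main() {
	t := newTransfer()
	t.watch()
	t.close()   // manager stops tracking while a watcher remains
	t.release() // last watcher leaves: released closes now
	<-t.released
	fmt.Println("transfer fully released")
}
```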
+ runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } } start := make(chan struct{}) @@ -318,6 +362,7 @@ func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput } delete(tm.transfers, key) tm.mu.Unlock() + xfer.Close() return } } diff --git a/distribution/xfer/transfer_test.go b/distribution/xfer/transfer_test.go index 7eeb304033..8fe24661bc 100644 --- a/distribution/xfer/transfer_test.go +++ b/distribution/xfer/transfer_test.go @@ -291,6 +291,44 @@ func TestWatchRelease(t *testing.T) { } } +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() +} + func TestDuplicateTransfer(t *testing.T) { ready := make(chan struct{}) diff --git a/docker/client.go b/docker/client.go index 8395805b10..3b4c4f6321 100644 --- a/docker/client.go +++ b/docker/client.go @@ -6,6 +6,7 @@ import ( "github.com/docker/docker/cli" "github.com/docker/docker/cliconfig" flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" ) var clientFlags = &cli.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} @@ -24,5 +25,9 @@ func init() { if clientFlags.Common.TrustKey == "" { clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) } + + if clientFlags.Common.Debug { + utils.EnableDebug() + } } } diff --git a/docker/common.go b/docker/common.go index 893de7109e..6028f79da0 100644 --- a/docker/common.go +++ b/docker/common.go @@ -18,6 +18,7 @@ const ( defaultCaFile = "ca.pem" defaultKeyFile = "key.pem" defaultCertFile = "cert.pem" + tlsVerifyKey = "tlsverify" ) var ( @@ -55,21 +56,12 @@ func init() { func postParseCommon() { cmd := commonFlags.FlagSet - if commonFlags.LogLevel != "" { - lvl, err := logrus.ParseLevel(commonFlags.LogLevel) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", commonFlags.LogLevel) - os.Exit(1) - } - logrus.SetLevel(lvl) - } else { - logrus.SetLevel(logrus.InfoLevel) - } + setDaemonLogLevel(commonFlags.LogLevel) // Regardless of whether the user sets it to true or false, if they // specify --tlsverify at all then we need to turn on tls // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well - if cmd.IsSet("-tlsverify") || commonFlags.TLSVerify { + if cmd.IsSet("-"+tlsVerifyKey) || commonFlags.TLSVerify { commonFlags.TLS = true } @@ -93,3 +85,16 @@ func postParseCommon() { } } } + +func setDaemonLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging 
level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/docker/daemon.go b/docker/daemon.go index a8422122f7..0f85419215 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -204,9 +204,9 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error { defaultHost := opts.DefaultHost if cli.Config.TLS { tlsOptions := tlsconfig.Options{ - CAFile: cli.Config.TLSOptions.CAFile, - CertFile: cli.Config.TLSOptions.CertFile, - KeyFile: cli.Config.TLSOptions.KeyFile, + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, } if cli.Config.TLSVerify { @@ -338,12 +338,12 @@ func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commo config.LogLevel = commonConfig.LogLevel config.TLS = commonConfig.TLS config.TLSVerify = commonConfig.TLSVerify - config.TLSOptions = daemon.CommonTLSOptions{} + config.CommonTLSOptions = daemon.CommonTLSOptions{} if commonConfig.TLSOptions != nil { - config.TLSOptions.CAFile = commonConfig.TLSOptions.CAFile - config.TLSOptions.CertFile = commonConfig.TLSOptions.CertFile - config.TLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile + config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile + config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile + config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile } if configFile != "" { @@ -360,5 +360,14 @@ func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commo } } + // Regardless of whether the user sets it to true or false, if they + // specify TLSVerify at all then we need to turn on TLS + if config.IsValueSet(tlsVerifyKey) { + config.TLS = true + } + + // ensure that the log level is the one set after merging configurations + setDaemonLogLevel(config.LogLevel) + return config, nil } diff --git a/docker/daemon_test.go b/docker/daemon_test.go index bc519e7467..5afdfb3bde 100644 --- a/docker/daemon_test.go +++ b/docker/daemon_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/Sirupsen/logrus" "github.com/docker/docker/cli" "github.com/docker/docker/daemon" "github.com/docker/docker/opts" @@ -50,8 +51,8 @@ func TestLoadDaemonCliConfigWithTLS(t *testing.T) { if loadedConfig == nil { t.Fatalf("expected configuration %v, got nil", c) } - if loadedConfig.TLSOptions.CAFile != "/tmp/ca.pem" { - t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.TLSOptions.CAFile, loadedConfig) + if loadedConfig.CommonTLSOptions.CAFile != "/tmp/ca.pem" { + t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.CommonTLSOptions.CAFile, loadedConfig) } } @@ -89,3 +90,204 @@ func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { t.Fatalf("expected labels conflict, got %v", err) } } + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{ + TLSOptions: &tlsconfig.Options{ + CAFile: "/tmp/ca.pem", + }, + } + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlsverify": true}`)) + f.Close() + + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + flags.Bool([]string{"-tlsverify"}, false, "") + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatalf("expected configuration %v, got nil", c) + } + + if !loadedConfig.TLS { + t.Fatalf("expected TLS enabled, got %q", 
loadedConfig) + } +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{ + TLSOptions: &tlsconfig.Options{ + CAFile: "/tmp/ca.pem", + }, + } + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlsverify": false}`)) + f.Close() + + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + flags.Bool([]string{"-tlsverify"}, false, "") + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatalf("expected configuration %v, got nil", c) + } + + if !loadedConfig.TLS { + t.Fatalf("expected TLS enabled, got %q", loadedConfig) + } +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{ + TLSOptions: &tlsconfig.Options{ + CAFile: "/tmp/ca.pem", + }, + } + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{}`)) + f.Close() + + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatalf("expected configuration %v, got nil", c) + } + + if loadedConfig.TLS { + t.Fatalf("expected TLS disabled, got %q", loadedConfig) + } +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{} + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"log-level": "warn"}`)) + f.Close() + + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + flags.String([]string{"-log-level"}, "", "") + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatalf("expected configuration %v, got nil", c) + } + if loadedConfig.LogLevel != "warn" { + t.Fatalf("expected warn log level, got %v", loadedConfig.LogLevel) + } + + if logrus.GetLevel() != logrus.WarnLevel { + t.Fatalf("expected warn log level, got %v", logrus.GetLevel()) + } +} + +func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{} + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + flags.String([]string{"-tlscacert"}, "", "") + flags.String([]string{"-log-driver"}, "", "") + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}`)) + f.Close() + + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatal("expected configuration, got nil") + } + if loadedConfig.CommonTLSOptions.CAFile != "/etc/certs/ca.pem" { + t.Fatalf("expected CA file path /etc/certs/ca.pem, got %v", loadedConfig.CommonTLSOptions.CAFile) + } + if loadedConfig.LogConfig.Type != "syslog" { + t.Fatalf("expected LogConfig type syslog, got %v", loadedConfig.LogConfig.Type) + } +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{} + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", c.ClusterOpts, nil), []string{"-cluster-store-opt"}, "") + 
flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "") + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}`)) + f.Close() + + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatal("expected configuration, got nil") + } + if loadedConfig.ClusterOpts == nil { + t.Fatal("expected cluster options, got nil") + } + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + if caPath := loadedConfig.ClusterOpts["kv.cacertfile"]; caPath != expectedPath { + t.Fatalf("expected %s, got %s", expectedPath, caPath) + } + + if loadedConfig.LogConfig.Config == nil { + t.Fatal("expected log config options, got nil") + } + if tag := loadedConfig.LogConfig.Config["tag"]; tag != "test" { + t.Fatalf("expected log tag `test`, got %s", tag) + } +} diff --git a/docker/daemon_unix_test.go b/docker/daemon_unix_test.go new file mode 100644 index 0000000000..889482b007 --- /dev/null +++ b/docker/daemon_unix_test.go @@ -0,0 +1,43 @@ +// +build daemon,!windows + +package main + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/cli" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/mflag" +) + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + c := &daemon.Config{} + common := &cli.CommonFlags{} + flags := mflag.NewFlagSet("test", mflag.ContinueOnError) + flags.String([]string{"-bip"}, "", "") + flags.String([]string{"-ip"}, "", "") + + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"bip": "127.0.0.2", "ip": "127.0.0.1"}`)) + f.Close() + + loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) + if err != nil { + t.Fatal(err) + } + if loadedConfig == nil { + t.Fatalf("expected configuration %v, got nil", c) + } + if loadedConfig.IP != "127.0.0.2" { + t.Fatalf("expected IP 127.0.0.2, got %v", loadedConfig.IP) + } + if loadedConfig.DefaultIP.String() != "127.0.0.1" { + t.Fatalf("expected DefaultIP 127.0.0.1, got %s", loadedConfig.DefaultIP) + } +} diff --git a/docs/articles/ambassador_pattern_linking.md b/docs/admin/ambassador_pattern_linking.md similarity index 98% rename from docs/articles/ambassador_pattern_linking.md rename to docs/admin/ambassador_pattern_linking.md index 7684f37961..b5c3041129 100644 --- a/docs/articles/ambassador_pattern_linking.md +++ b/docs/admin/ambassador_pattern_linking.md @@ -1,18 +1,17 @@ # Link via an ambassador container -## Introduction - Rather than hardcoding network links between a service consumer and provider, Docker encourages service portability, for example instead of: diff --git a/docs/articles/b2d_volume_images/add_cd.png b/docs/admin/b2d_volume_images/add_cd.png similarity index 100% rename from docs/articles/b2d_volume_images/add_cd.png rename to docs/admin/b2d_volume_images/add_cd.png diff --git a/docs/articles/b2d_volume_images/add_new_controller.png b/docs/admin/b2d_volume_images/add_new_controller.png similarity index 100% rename from docs/articles/b2d_volume_images/add_new_controller.png rename to docs/admin/b2d_volume_images/add_new_controller.png diff --git a/docs/articles/b2d_volume_images/add_volume.png b/docs/admin/b2d_volume_images/add_volume.png similarity index 100% rename from 
docs/articles/b2d_volume_images/add_volume.png rename to docs/admin/b2d_volume_images/add_volume.png diff --git a/docs/articles/b2d_volume_images/boot_order.png b/docs/admin/b2d_volume_images/boot_order.png similarity index 100% rename from docs/articles/b2d_volume_images/boot_order.png rename to docs/admin/b2d_volume_images/boot_order.png diff --git a/docs/articles/b2d_volume_images/gparted.png b/docs/admin/b2d_volume_images/gparted.png similarity index 100% rename from docs/articles/b2d_volume_images/gparted.png rename to docs/admin/b2d_volume_images/gparted.png diff --git a/docs/articles/b2d_volume_images/gparted2.png b/docs/admin/b2d_volume_images/gparted2.png similarity index 100% rename from docs/articles/b2d_volume_images/gparted2.png rename to docs/admin/b2d_volume_images/gparted2.png diff --git a/docs/articles/b2d_volume_images/verify.png b/docs/admin/b2d_volume_images/verify.png similarity index 100% rename from docs/articles/b2d_volume_images/verify.png rename to docs/admin/b2d_volume_images/verify.png diff --git a/docs/articles/b2d_volume_resize.md b/docs/admin/b2d_volume_resize.md similarity index 100% rename from docs/articles/b2d_volume_resize.md rename to docs/admin/b2d_volume_resize.md diff --git a/docs/articles/cfengine_process_management.md b/docs/admin/cfengine_process_management.md similarity index 98% rename from docs/articles/cfengine_process_management.md rename to docs/admin/cfengine_process_management.md index b2e5737307..be62e410c2 100644 --- a/docs/articles/cfengine_process_management.md +++ b/docs/admin/cfengine_process_management.md @@ -1,10 +1,11 @@ diff --git a/docs/articles/chef.md b/docs/admin/chef.md similarity index 96% rename from docs/articles/chef.md rename to docs/admin/chef.md index 42312c35a3..ba2f680919 100644 --- a/docs/articles/chef.md +++ b/docs/admin/chef.md @@ -1,10 +1,11 @@ diff --git a/docs/articles/configuring.md b/docs/admin/configuring.md similarity index 95% rename from docs/articles/configuring.md rename to docs/admin/configuring.md index 0146218ab8..b50d0d755e 100644 --- a/docs/articles/configuring.md +++ b/docs/admin/configuring.md @@ -1,10 +1,11 @@ @@ -65,7 +66,7 @@ with explanations. As of `14.04`, Ubuntu uses Upstart as a process manager. By default, Upstart jobs are located in `/etc/init` and the `docker` Upstart job can be found at `/etc/init/docker.conf`. -After successfully [installing Docker for Ubuntu](../installation/ubuntulinux.md), +After successfully [installing Docker for Ubuntu](../installation/linux/ubuntulinux.md), you can check the running status using Upstart in this way: $ sudo status docker @@ -154,7 +155,7 @@ can be located at `/var/log/upstart/docker.log` As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, Fedora uses `systemd` as its process manager. -After successfully installing Docker for [CentOS](../installation/centos.md)/[Red Hat Enterprise Linux](../installation/rhel.md)/[Fedora](../installation/fedora.md), you can check the running status in this way: +After successfully installing Docker for [CentOS](../installation/linux/centos.md)/[Red Hat Enterprise Linux](../installation/linux/rhel.md)/[Fedora](../installation/linux/fedora.md), you can check the running status in this way: $ sudo systemctl status docker @@ -176,13 +177,13 @@ If you want Docker to start at boot, you should also: For CentOS 7.x and RHEL 7.x you can [control and configure Docker with systemd](systemd.md). 
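For reference, the kind of drop-in override file the steps below produce might look like the following sketch (the path comes from the steps themselves; the extra `-D` debug flag is purely illustrative and not part of the original instructions):

```
# /etc/systemd/system/docker.service.d/docker.conf (illustrative)
[Service]
ExecStart=
ExecStart=/usr/bin/docker daemon -H fd:// -D
```

The empty `ExecStart=` line clears the value inherited from `docker.service`; systemd requires this explicit reset before a new `ExecStart` can be assigned.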
-Previously, for CentOS 6.x and RHEL 6.x you would configure the `docker` daemon in -the `/etc/sysconfig/docker` file on your system. You would do this by specifying -values in a `other_args` variable. For a short time in CentOS 7.x and RHEL 7.x you -would specify values in a `OPTIONS` variable. This is no longer recommended in favor -of using systemd directly. +Previously, for CentOS 6.x and RHEL 6.x you would configure the `docker` daemon in +the `/etc/sysconfig/docker` file on your system. You would do this by specifying +values in an `other_args` variable. For a short time in CentOS 7.x and RHEL 7.x you +would specify values in an `OPTIONS` variable. This is no longer recommended; +use systemd directly instead. -For this section, we will use CentOS 7.x as an example to configure the `docker` daemon. +For this section, we will use CentOS 7.x as an example to configure the `docker` daemon. To configure Docker options: @@ -202,8 +203,8 @@ To configure Docker options: $ sudo vi /etc/systemd/system/docker.service.d/docker.conf ``` -5. Override the `ExecStart` configuration from your `docker.service` file to customize -the `docker` daemon. To modify the `ExecStart` configuration you have to specify +5. Override the `ExecStart` configuration from your `docker.service` file to customize +the `docker` daemon. To modify the `ExecStart` configuration you have to specify an empty configuration followed by a new one as follows: ``` diff --git a/docs/articles/dsc.md b/docs/admin/dsc.md similarity index 98% rename from docs/articles/dsc.md rename to docs/admin/dsc.md index 2fe7553b78..84bd1d4f9e 100644 --- a/docs/articles/dsc.md +++ b/docs/admin/dsc.md @@ -1,10 +1,11 @@ diff --git a/docs/articles/host_integration.md b/docs/admin/host_integration.md similarity index 97% rename from docs/articles/host_integration.md rename to docs/admin/host_integration.md index c1712a0701..3f71592c46 100644 --- a/docs/articles/host_integration.md +++ b/docs/admin/host_integration.md @@ -1,10 +1,11 @@ diff --git a/docs/admin/index.md b/docs/admin/index.md new file mode 100644 index 0000000000..0f0ab7d430 --- /dev/null +++ b/docs/admin/index.md @@ -0,0 +1,11 @@ + diff --git a/docs/reference/logging/awslogs.md b/docs/admin/logging/awslogs.md similarity index 98% rename from docs/reference/logging/awslogs.md rename to docs/admin/logging/awslogs.md index 99f7db3da1..3f90c41426 100644 --- a/docs/reference/logging/awslogs.md +++ b/docs/admin/logging/awslogs.md @@ -1,5 +1,6 @@ @@ -12,7 +13,7 @@ parent = "smn_third_party" > *Note:* Please note this is a community contributed installation path. The > only `official` installation is using the -> [*Ubuntu*](../installation/ubuntulinux.md) installation +> [*Ubuntu*](../installation/linux/ubuntulinux.md) installation > path. This version may sometimes be out of date. ## Requirements diff --git a/docs/articles/registry_mirror.md b/docs/admin/registry_mirror.md similarity index 80% rename from docs/articles/registry_mirror.md rename to docs/admin/registry_mirror.md index 0d5fff5d16..2d67f9c5cd 100644 --- a/docs/articles/registry_mirror.md +++ b/docs/admin/registry_mirror.md @@ -1,10 +1,11 @@ @@ -15,4 +16,4 @@ The original content was deprecated. [An archived version](https://docs.docker.com/v1.6/articles/registry_mirror) is available in the 1.7 documentation. For information about configuring mirrors with the latest Docker Registry version, please file a support request with [the Distribution -project](https://github.com/docker/distribution/issues).
+project](https://github.com/docker/distribution/issues). diff --git a/docs/articles/runmetrics.md b/docs/admin/runmetrics.md similarity index 99% rename from docs/articles/runmetrics.md rename to docs/admin/runmetrics.md index dc494c6967..b1d88100fa 100644 --- a/docs/articles/runmetrics.md +++ b/docs/admin/runmetrics.md @@ -1,10 +1,11 @@ diff --git a/docs/articles/systemd.md b/docs/admin/systemd.md similarity index 98% rename from docs/articles/systemd.md rename to docs/admin/systemd.md index 633466dc79..82afb36e9f 100644 --- a/docs/articles/systemd.md +++ b/docs/admin/systemd.md @@ -1,11 +1,11 @@ @@ -88,6 +88,7 @@ In this example, we'll assume that your `docker.service` file looks something li ExecStart=/usr/bin/docker daemon -H fd:// LimitNOFILE=1048576 LimitNPROC=1048576 + TasksMax=1048576 [Install] Also=docker.socket diff --git a/docs/articles/using_supervisord.md b/docs/admin/using_supervisord.md similarity index 98% rename from docs/articles/using_supervisord.md rename to docs/admin/using_supervisord.md index 93c288b0b3..f8a2625012 100644 --- a/docs/articles/using_supervisord.md +++ b/docs/admin/using_supervisord.md @@ -1,10 +1,11 @@ diff --git a/docs/articles/certificates.md b/docs/articles/certificates.md deleted file mode 100644 index 84235f109c..0000000000 --- a/docs/articles/certificates.md +++ /dev/null @@ -1,17 +0,0 @@ - - -# Using certificates for repository client verification - -The original content was deprecated. For information about configuring -certificates, see [deploying a registry -server](http://docs.docker.com/registry/deploying). To reach an older version -of this content, refer to an older version of the documentation. diff --git a/docs/articles/index.md b/docs/articles/index.md new file mode 100644 index 0000000000..3eac9cda29 --- /dev/null +++ b/docs/articles/index.md @@ -0,0 +1,10 @@ + diff --git a/docs/breaking_changes.md b/docs/breaking_changes.md new file mode 100644 index 0000000000..6d34b9f802 --- /dev/null +++ b/docs/breaking_changes.md @@ -0,0 +1,52 @@ + + +# Breaking changes and incompatibilities + +Every Engine release strives to be backward compatible with its predecessors. +In all cases, the policy is that feature removal is communicated two releases +in advance and documented as part of the [deprecated features](deprecated.md) +page. + +Unfortunately, Docker is a fast moving project, and newly introduced features +may sometime introduce breaking changes and/or incompatibilities. This page +documents these by Engine version. + +# Engine 1.10 + +There were two breaking changes in the 1.10 release. + +## Registry + +Registry 2.3 includes improvements to the image manifest that have caused a +breaking change. Images pushed by Engine 1.10 to a Registry 2.3 cannot be +pulled by digest by older Engine versions. A `docker pull` that encounters this +situation returns the following error: + +``` + Error response from daemon: unsupported schema version 2 for tag TAGNAME +``` + +Docker Content Trust heavily relies on pull by digest. As a result, images +pushed from the Engine 1.10 CLI to a 2.3 Registry cannot be pulled by older +Engine CLIs (< 1.10) with Docker Content Trust enabled. + +If you are using an older Registry version (< 2.3), this problem does not occur +with any version of the Engine CLI; push, pull, with and without content trust +work as you would expect. + +## Docker Content Trust + +Engine older than the current 1.10 cannot pull images from repositories that +have enabled key delegation. 
Key delegation is a feature which requires a +manual action to enable. diff --git a/docs/misc/deprecated.md b/docs/deprecated.md similarity index 96% rename from docs/misc/deprecated.md rename to docs/deprecated.md index d455f1349e..4ed127f953 100644 --- a/docs/misc/deprecated.md +++ b/docs/deprecated.md @@ -1,16 +1,18 @@ -# Deprecated Features +# Deprecated Engine Features -The following list of features are deprecated. +The following features are deprecated in Engine. ### Ambiguous event fields in API **Deprecated In Release: v1.10** diff --git a/docs/examples/apt-cacher-ng.md b/docs/examples/apt-cacher-ng.md index a8e2957c5f..600157a17a 100644 --- a/docs/examples/apt-cacher-ng.md +++ b/docs/examples/apt-cacher-ng.md @@ -4,13 +4,13 @@ title = "Dockerizing an apt-cacher-ng service" description = "Installing and running an apt-cacher-ng service" keywords = ["docker, example, package installation, networking, debian, ubuntu"] [menu.main] -parent = "smn_applied" +parent = "engine_dockerize" +++ # Dockerizing an apt-cacher-ng service -> **Note**: +> **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access). > - **If you're using OS X or docker via TCP** then you shouldn't use diff --git a/docs/examples/couchbase.md b/docs/examples/couchbase.md new file mode 100644 index 0000000000..0144fc8da8 --- /dev/null +++ b/docs/examples/couchbase.md @@ -0,0 +1,235 @@ + + +# Dockerizing a Couchbase service + +This example shows how to start a [Couchbase](http://couchbase.com) server using Docker Compose, configure it using its [REST API](http://developer.couchbase.com/documentation/server/4.0/rest-api/rest-endpoints-all.html), and query it. + +Couchbase is an open source, document-oriented NoSQL database for modern web, mobile, and IoT applications. It is designed for ease of development and Internet-scale performance. + +## Start Couchbase server + +Couchbase Docker images are published at [Docker Hub](https://hub.docker.com/_/couchbase/). + +Start the Couchbase server as follows: + +``` +docker run -d --name db -p 8091-8093:8091-8093 -p 11210:11210 couchbase +``` + +The purpose of each exposed port is explained at [Couchbase Developer Portal - Network Configuration](http://developer.couchbase.com/documentation/server/4.1/install/install-ports.html). + +View the logs as follows: + +``` +docker logs db +Starting Couchbase Server -- Web UI available at http://<ip>:8091 +``` + +> **Note**: The examples on this page assume that the Docker Host +> is reachable on `192.168.99.100`. Substitute `192.168.99.100` with +> the actual IP address of your Docker Host. If you're running +> Docker using Docker Machine, you can obtain the IP address +> of the Docker host using `docker-machine ip <machine-name>`. + +The logs show that the Couchbase console can be accessed at http://192.168.99.100:8091. The default username is `Administrator` and the password is `password`. + +## Configure Couchbase Docker container + +By default, the Couchbase server needs to be configured using the console before it can be used. This can be simplified by configuring it using the REST API. + +### Configure memory for Data and Index services + +Data, Query, and Index are three different services that can be configured on a Couchbase instance. Each service has different operating needs. For example, Query is a CPU-intensive operation and so requires a faster processor. Index is disk-heavy and so requires a faster solid-state drive. Data needs to be read and written quickly and so requires more memory.
+ +Memory needs to be configured for the Data and Index services only. + +``` +curl -v -X POST http://192.168.99.100:8091/pools/default -d memoryQuota=300 -d indexMemoryQuota=300 +* Hostname was NOT found in DNS cache +* Trying 192.168.99.100... +* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) +> POST /pools/default HTTP/1.1 +> User-Agent: curl/7.37.1 +> Host: 192.168.99.100:8091 +> Accept: */* +> Content-Length: 36 +> Content-Type: application/x-www-form-urlencoded +> +* upload completely sent off: 36 out of 36 bytes +< HTTP/1.1 401 Unauthorized +< WWW-Authenticate: Basic realm="Couchbase Server Admin / REST" +* Server Couchbase Server is not blacklisted +< Server: Couchbase Server +< Pragma: no-cache +< Date: Wed, 25 Nov 2015 22:48:16 GMT +< Content-Length: 0 +< Cache-Control: no-cache +< +* Connection #0 to host 192.168.99.100 left intact +``` + +The command shows an HTTP POST request to the REST endpoint `/pools/default`. The host is the IP address of the Docker Machine. The port is the exposed port of the Couchbase server. The memory and index quota for the server are passed in the request. + +### Configure Data, Query, and Index services + +All three services, or only one of them, can be configured on each instance. This allows different Couchbase instances to use affinities and set up services accordingly. For example, if the Docker host is running on a machine with a solid-state drive, then only the Data service can be started. + +``` +curl -v http://192.168.99.100:8091/node/controller/setupServices -d 'services=kv%2Cn1ql%2Cindex' +* Hostname was NOT found in DNS cache +* Trying 192.168.99.100... +* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) +> POST /node/controller/setupServices HTTP/1.1 +> User-Agent: curl/7.37.1 +> Host: 192.168.99.100:8091 +> Accept: */* +> Content-Length: 26 +> Content-Type: application/x-www-form-urlencoded +> +* upload completely sent off: 26 out of 26 bytes +< HTTP/1.1 200 OK +* Server Couchbase Server is not blacklisted +< Server: Couchbase Server +< Pragma: no-cache +< Date: Wed, 25 Nov 2015 22:49:51 GMT +< Content-Length: 0 +< Cache-Control: no-cache +< +* Connection #0 to host 192.168.99.100 left intact +``` + +The command shows an HTTP POST request to the REST endpoint `/node/controller/setupServices`. Here, all three services are configured for the Couchbase server. The Data service is identified by `kv`, the Query service by `n1ql`, and the Index service by `index`. + +### Set up credentials for the Couchbase server + +Set the username and password credentials that will subsequently be used for managing the Couchbase server. + +``` +curl -v -X POST http://192.168.99.100:8091/settings/web -d port=8091 -d username=Administrator -d password=password +* Hostname was NOT found in DNS cache +* Trying 192.168.99.100... +* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) +> POST /settings/web HTTP/1.1 +> User-Agent: curl/7.37.1 +> Host: 192.168.99.100:8091 +> Accept: */* +> Content-Length: 50 +> Content-Type: application/x-www-form-urlencoded +> +* upload completely sent off: 50 out of 50 bytes +< HTTP/1.1 200 OK +* Server Couchbase Server is not blacklisted +< Server: Couchbase Server +< Pragma: no-cache +< Date: Wed, 25 Nov 2015 22:50:43 GMT +< Content-Type: application/json +< Content-Length: 44 +< Cache-Control: no-cache +< +* Connection #0 to host 192.168.99.100 left intact +{"newBaseUri":"http://192.168.99.100:8091/"} +``` + +The command shows an HTTP POST request to the REST endpoint `/settings/web`.
The user name and password credentials are passed in the request. + +### Install sample data + +Sample data can easily be loaded into the Couchbase instance. + +``` +curl -v -u Administrator:password -X POST http://192.168.99.100:8091/sampleBuckets/install -d '["travel-sample"]' +* Hostname was NOT found in DNS cache +* Trying 192.168.99.100... +* Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) +* Server auth using Basic with user 'Administrator' +> POST /sampleBuckets/install HTTP/1.1 +> Authorization: Basic QWRtaW5pc3RyYXRvcjpwYXNzd29yZA== +> User-Agent: curl/7.37.1 +> Host: 192.168.99.100:8091 +> Accept: */* +> Content-Length: 17 +> Content-Type: application/x-www-form-urlencoded +> +* upload completely sent off: 17 out of 17 bytes +< HTTP/1.1 202 Accepted +* Server Couchbase Server is not blacklisted +< Server: Couchbase Server +< Pragma: no-cache +< Date: Wed, 25 Nov 2015 22:51:51 GMT +< Content-Type: application/json +< Content-Length: 2 +< Cache-Control: no-cache +< +* Connection #0 to host 192.168.99.100 left intact +[] +``` + +The command shows an HTTP POST request to the REST endpoint `/sampleBuckets/install`. The name of the sample bucket is passed in the request. + +Congratulations, you are now running a Couchbase container, fully configured using the REST API. + +## Query Couchbase using CBQ + +[CBQ](http://developer.couchbase.com/documentation/server/4.1/cli/cbq-tool.html), short for Couchbase Query, is a CLI tool that allows you to create, read, update, and delete JSON documents on a Couchbase server. This tool is installed as part of the Couchbase Docker image. + +Run the CBQ tool: + +``` +docker run -it --link db:db couchbase cbq --engine http://db:8093 +Couchbase query shell connected to http://db:8093/ . Type Ctrl-D to exit. +cbq> +``` + +The `--engine` parameter to CBQ allows you to specify the host and port of the Couchbase server running on the Docker host. For the host, you typically provide the host name or IP address of the machine where the Couchbase server is running. In this case, the container name used when starting the container, `db`, can be used. Port `8093` listens for all incoming queries. + +Couchbase allows you to query JSON documents using [N1QL](http://developer.couchbase.com/documentation/server/4.1/n1ql/n1ql-language-reference/index.html). N1QL is a comprehensive, declarative query language that brings SQL-like query capabilities to JSON documents. + +Query the database by running an N1QL query: + +``` +cbq> select * from `travel-sample` limit 1; +{ + "requestID": "97816771-3c25-4a1d-9ea8-eb6ad8a51919", + "signature": { + "*": "*" + }, + "results": [ + { + "travel-sample": { + "callsign": "MILE-AIR", + "country": "United States", + "iata": "Q5", + "icao": "MLA", + "id": 10, + "name": "40-Mile Air", + "type": "airline" + } + } + ], + "status": "success", + "metrics": { + "elapsedTime": "60.872423ms", + "executionTime": "60.792258ms", + "resultCount": 1, + "resultSize": 300 + } +} +``` + +## Couchbase Web Console + +[Couchbase Web Console](http://developer.couchbase.com/documentation/server/4.1/admin/ui-intro.html) is a console that allows you to manage a Couchbase instance. It is available at: + +http://192.168.99.100:8091/ + +Make sure to replace the IP address with the IP address of your Docker Machine, or `localhost` if Docker is running locally.
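As an aside, the introduction above mentions Docker Compose: the `docker run` command from the start of this example could be expressed as a Compose service along these lines (a sketch only; the file name and the v1 Compose file format are assumptions, not part of the original walkthrough):

```
# docker-compose.yml (sketch; assumes the official couchbase image)
db:
  image: couchbase
  ports:
    - "8091-8093:8091-8093"
    - "11210:11210"
```

Running `docker-compose up -d` in the directory containing this file would start an equivalent container.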
+ +![Couchbase Web Console](couchbase/web-console.png) diff --git a/docs/examples/couchbase/web-console.png b/docs/examples/couchbase/web-console.png new file mode 100644 index 0000000000..7823c63cf8 Binary files /dev/null and b/docs/examples/couchbase/web-console.png differ diff --git a/docs/examples/couchdb_data_volumes.md b/docs/examples/couchdb_data_volumes.md index 5493af839d..972e78a7ad 100644 --- a/docs/examples/couchdb_data_volumes.md +++ b/docs/examples/couchdb_data_volumes.md @@ -4,7 +4,7 @@ title = "Dockerizing a CouchDB service" description = "Sharing data between 2 couchdb databases" keywords = ["docker, example, package installation, networking, couchdb, data volumes"] [menu.main] -parent = "smn_applied" +parent = "engine_dockerize" +++ diff --git a/docs/examples/index.md b/docs/examples/index.md index 907d5aaede..87dd2b3fd9 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -1,16 +1,16 @@ -# Examples +# Dockerize an application This section contains the following: @@ -20,4 +20,4 @@ This section contains the following: * [Dockerizing a Node.js web app](nodejs_web_app.md) * [Dockerizing a Redis service](running_redis_service.md) * [Dockerizing an apt-cacher-ng service](apt-cacher-ng.md) -* [Dockerizing applications: A 'Hello world'](../userguide/dockerizing.md) +* [Dockerizing applications: A 'Hello world'](../userguide/containers/dockerizing.md) diff --git a/docs/examples/mongodb.md b/docs/examples/mongodb.md index 88ba444502..f6498e0cc5 100644 --- a/docs/examples/mongodb.md +++ b/docs/examples/mongodb.md @@ -4,7 +4,7 @@ title = "Dockerizing MongoDB" description = "Creating a Docker image with MongoDB pre-installed using a Dockerfile and sharing the image on Docker Hub" keywords = ["docker, dockerize, dockerizing, article, example, docker.io, platform, package, installation, networking, mongodb, containers, images, image, sharing, dockerfile, build, auto-building, framework"] [menu.main] -parent = "smn_applied" +parent = "engine_dockerize" +++ @@ -173,5 +173,5 @@ the exposed port to two different ports on the host $ mongo --port 28002 - [Linking containers](../userguide/networking/default_network/dockerlinks.md) - - [Cross-host linking containers](../articles/ambassador_pattern_linking.md) + - [Cross-host linking containers](../admin/ambassador_pattern_linking.md) - [Creating an Automated Build](https://docs.docker.com/docker-hub/builds/) diff --git a/docs/examples/nodejs_web_app.md b/docs/examples/nodejs_web_app.md index 158432a217..55425c0672 100644 --- a/docs/examples/nodejs_web_app.md +++ b/docs/examples/nodejs_web_app.md @@ -4,7 +4,7 @@ title = "Dockerizing a Node.js web app" description = "Installing and running a Node.js app with Docker" keywords = ["docker, example, package installation, node, centos"] [menu.main] -parent = "smn_applied" +parent = "engine_dockerize" +++ diff --git a/docs/examples/postgresql_service.md b/docs/examples/postgresql_service.md index 9c0938d108..8d5f675260 100644 --- a/docs/examples/postgresql_service.md +++ b/docs/examples/postgresql_service.md @@ -4,7 +4,7 @@ title = "Dockerizing PostgreSQL" description = "Running and installing a PostgreSQL service" keywords = ["docker, example, package installation, postgresql"] [menu.main] -parent = "smn_applied" +parent = "engine_dockerize" +++ diff --git a/docs/examples/running_redis_service.md b/docs/examples/running_redis_service.md index d8b673bdcd..82daaa7830 100644 --- a/docs/examples/running_redis_service.md +++ b/docs/examples/running_redis_service.md @@ -4,7 +4,7 @@ title = 
"Dockerizing a Redis service" description = "Installing and running an redis service" keywords = ["docker, example, package installation, networking, redis"] [menu.main] -parent = "smn_applied" +parent = "engine_dockerize" +++ diff --git a/docs/examples/running_riak_service.md b/docs/examples/running_riak_service.md index 8b18ac9342..a6c3d3f4d4 100644 --- a/docs/examples/running_riak_service.md +++ b/docs/examples/running_riak_service.md @@ -4,7 +4,7 @@ title = "Dockerizing a Riak service" description = "Build a Docker image with Riak pre-installed" keywords = ["docker, example, package installation, networking, riak"] [menu.main] -parent = "smn_apps_servs" +parent = "engine_dockerize" +++ @@ -26,7 +26,7 @@ of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag: # Riak # # VERSION 0.1.1 - + # Use the Ubuntu base image provided by dotCloud FROM ubuntu:trusty MAINTAINER Hector Castro hector@basho.com @@ -51,9 +51,9 @@ Then we install and setup a few dependencies: apt-get install -y supervisor riak=2.0.5-1 RUN mkdir -p /var/log/supervisor - + RUN locale-gen en_US en_US.UTF-8 - + COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf After that, we modify Riak's configuration: @@ -82,7 +82,7 @@ Populate it with the following program definitions: [supervisord] nodaemon=true - + [program:riak] command=bash -c "/usr/sbin/riak console" numprocs=1 diff --git a/docs/examples/running_ssh_service.md b/docs/examples/running_ssh_service.md index c22e510cf6..284a53942a 100644 --- a/docs/examples/running_ssh_service.md +++ b/docs/examples/running_ssh_service.md @@ -4,7 +4,7 @@ title = "Dockerizing an SSH service" description = "Installing and running an SSHd service on Docker" keywords = ["docker, example, package installation, networking"] [menu.main] -parent = "smn_apps_servs" +parent = "engine_dockerize" +++ diff --git a/docs/extend/authorization.md b/docs/extend/authorization.md index 48790833cf..3512c56ccd 100644 --- a/docs/extend/authorization.md +++ b/docs/extend/authorization.md @@ -4,7 +4,7 @@ title = "Access authorization plugin" description = "How to create authorization plugins to manage access control to your Docker daemon." keywords = ["security, authorization, authentication, docker, documentation, plugin, extend"] [menu.main] -parent = "mn_extend" +parent = "engine_extend" weight = -1 +++ @@ -41,8 +41,8 @@ on both the current authentication context and the command context. The authentication context contains all user details and the authentication method. The command context contains all the relevant request data. -Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). -Each plugin must reside within directories described under the +Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). +Each plugin must reside within directories described under the [Plugin discovery](plugin_api.md#plugin-discovery) section. **Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication @@ -90,7 +90,7 @@ configure proper authentication and security policies. ## Docker client flows -To enable and configure the authorization plugin, the plugin developer must +To enable and configure the authorization plugin, the plugin developer must support the Docker client interactions detailed in this section. 
### Setting up Docker daemon @@ -132,7 +132,7 @@ docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZP ## API schema and implementation -In addition to Docker's standard plugin registration method, each plugin +In addition to Docker's standard plugin registration method, each plugin should implement the following two methods: * `/AuthzPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. @@ -198,7 +198,7 @@ should implement the following two methods: The modified response enables the authorization plugin to manipulate the content of the HTTP response. In case of more than one plugin, each subsequent plugin -receives a response (optionally) modified by a previous plugin. +receives a response (optionally) modified by a previous plugin. ### Request authorization diff --git a/docs/extend/index.md b/docs/extend/index.md index b533ea1936..f491926e9a 100644 --- a/docs/extend/index.md +++ b/docs/extend/index.md @@ -1,19 +1,19 @@ -## Extending Docker +## Extending Docker Engine -Currently, you can extend Docker by adding a plugin. This section contains the following topics: +Currently, you can extend Docker Engine by adding a plugin. This section contains the following topics: * [Understand Docker plugins](plugins.md) * [Write a volume plugin](plugins_volume.md) diff --git a/docs/extend/plugin_api.md b/docs/extend/plugin_api.md index 8e2862f6cb..c6793a3dda 100644 --- a/docs/extend/plugin_api.md +++ b/docs/extend/plugin_api.md @@ -4,7 +4,7 @@ title = "Plugins API" description = "How to write Docker plugins extensions " keywords = ["API, Usage, plugins, documentation, developer"] [menu.main] -parent = "mn_extend" +parent = "engine_extend" weight=1 +++ diff --git a/docs/extend/plugins.md b/docs/extend/plugins.md index 09362145a5..8aa0603f78 100644 --- a/docs/extend/plugins.md +++ b/docs/extend/plugins.md @@ -1,15 +1,15 @@ -# Understand Docker plugins +# Understand Engine plugins You can extend the capabilities of the Docker Engine by loading third-party plugins. This page explains the types of plugins and provides links to several @@ -53,6 +53,10 @@ The following plugins exist: another volume plugin that provides multi-host volumes management for Docker using GlusterFS. +* The [IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) + is an open source volume plugin that allows using an + [ipfs](https://ipfs.io/) filesystem as a volume. + * The [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) is a plugin that provides credentials and secret management using Keywhiz as a central repository. @@ -62,21 +66,28 @@ The following plugins exist: * The [OpenStorage Plugin](https://github.com/libopenstorage/openstorage) is a cluster aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. +* The [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. + * The [REX-Ray plugin](https://github.com/emccode/rexray) is a volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. 
- + * The [Contiv Volume Plugin](https://github.com/contiv/volplugin) is an open -source volume plugin that provides multi-tenant, persistent, distributed storage -with intent based consumption using ceph underneath. + source volume plugin that provides multi-tenant, persistent, distributed storage + with intent-based consumption, using Ceph underneath. * The [Contiv Networking](https://github.com/contiv/netplugin) is an open source -libnetwork plugin to provide infrastructure and security policies for a -multi-tenant micro services deployment, while providing an integration to -physical network for non-container workload. Contiv Networking implements the -remote driver and IPAM APIs available in Docker 1.9 onwards. + libnetwork plugin to provide infrastructure and security policies for a + multi-tenant microservices deployment, while providing integration with the + physical network for non-container workloads. Contiv Networking implements the + remote driver and IPAM APIs available in Docker 1.9 onwards. -* The [Weave Network Plugin](http://docs.weave.works/weave/latest_release/plugin.html) creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. +* The [Weave Network Plugin](http://docs.weave.works/weave/latest_release/plugin.html) + creates a virtual network that connects your Docker containers - + across multiple hosts or clouds and enables automatic discovery of + applications. Weave networks are resilient, partition tolerant, + secure and work in partially connected networks, and other adverse + environments - all configured with delightful simplicity. ## Troubleshooting a plugin diff --git a/docs/extend/plugins_network.md b/docs/extend/plugins_network.md index 01e8089f7e..ac07273219 100644 --- a/docs/extend/plugins_network.md +++ b/docs/extend/plugins_network.md @@ -4,19 +4,19 @@ title = "Docker network driver plugins" description = "Network driver plugins." keywords = ["Examples, Usage, plugins, docker, documentation, user guide"] [menu.main] -parent = "mn_extend" +parent = "engine_extend" +++ -# Docker network driver plugins +# Engine network driver plugins -Docker network plugins enable Docker deployments to be extended to support a -wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN or -something completely different. Network driver plugins are supported via the +Docker Engine network plugins enable Engine deployments to be extended to +support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN +or something completely different. Network driver plugins are supported via the LibNetwork project. Each plugin is implemented as a "remote driver" for -LibNetwork, which shares plugin infrastructure with Docker. Effectively, -network driver plugins are activated in the same way as other plugins, and use -the same kind of protocol. +LibNetwork, which shares plugin infrastructure with Engine. Effectively, network +driver plugins are activated in the same way as other plugins, and use the same +kind of protocol.
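For example, once such a plugin is installed, a network can be created with its driver by name; a sketch (`weave` stands in for whatever driver name the installed plugin registers with LibNetwork):

```
$ docker network create --driver weave mynet
$ docker run -it --rm --net=mynet busybox sh
```

The value passed to `--driver` (or `-d`) must match the name the plugin advertises.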
## Using network driver plugins diff --git a/docs/extend/plugins_volume.md b/docs/extend/plugins_volume.md index 1814708275..cb1bebf581 100644 --- a/docs/extend/plugins_volume.md +++ b/docs/extend/plugins_volume.md @@ -4,32 +4,32 @@ title = "Volume plugins" description = "How to manage data with external volume plugins" keywords = ["Examples, Usage, volume, docker, data, volumes, plugin, api"] [menu.main] -parent = "mn_extend" +parent = "engine_extend" +++ # Write a volume plugin -Docker volume plugins enable Docker deployments to be integrated with external -storage systems, such as Amazon EBS, and enable data volumes to persist beyond -the lifetime of a single Docker host. See the [plugin documentation](plugins.md) -for more information. +Docker Engine volume plugins enable Engine deployments to be integrated with +external storage systems, such as Amazon EBS, and enable data volumes to persist +beyond the lifetime of a single Engine host. See the [plugin +documentation](plugins.md) for more information. ## Command-line changes -A volume plugin makes use of the `-v`and `--volume-driver` flag on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example: +A volume plugin makes use of the `-v` and `--volume-driver` flags on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example: $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh This command passes the `volumename` through to the volume plugin as a -user-given name for the volume. The `volumename` must not begin with a `/`. +user-given name for the volume. The `volumename` must not begin with a `/`. By having the user specify a `volumename`, a plugin can associate the volume with an external volume beyond the lifetime of a single container or container host. This can be used, for example, to move a stateful container from one server to another. -By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS. +By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS. ## Create a VolumeDriver @@ -214,4 +214,3 @@ Get the list of volumes registered with the plugin. ``` Respond with a string error if an error occurred. - diff --git a/docs/misc/faq.md b/docs/faq.md similarity index 82% rename from docs/misc/faq.md rename to docs/faq.md index 5475cb0c19..b017557cb5 100644 --- a/docs/misc/faq.md +++ b/docs/faq.md @@ -1,11 +1,13 @@ @@ -17,9 +19,9 @@ repo](https://github.com/docker/docker) and contribute them yourself by editing the documentation sources. -### How much does Docker cost? +### How much does Engine cost? -Docker is 100% free. It is open source, so you can use it without paying. +Docker Engine is 100% free. It is open source, so you can use it without paying. ### What open source license are you using? @@ -29,14 +31,14 @@ https://github.com/docker/docker/blob/master/LICENSE) ### Does Docker run on Mac OS X or Windows? -Docker currently runs only on Linux, but you can use VirtualBox to run Docker -in a virtual machine on your box, and get the best of both worlds. Check out -the [*Mac OS X*](../installation/mac.md) and [*Microsoft -Windows*](../installation/windows.md) installation guides.
The small Linux +Docker Engine currently runs only on Linux, but you can use VirtualBox to run +Engine in a virtual machine on your box, and get the best of both worlds. Check +out the [*Mac OS X*](installation/mac.md) and [*Microsoft +Windows*](installation/windows.md) installation guides. The small Linux distribution boot2docker can be set up using the Docker Machine tool to be run inside virtual machines on these two operating systems. ->**Note:** if you are using a remote Docker daemon on a VM through Docker +>**Note:** if you are using a remote Docker Engine daemon on a VM through Docker >Machine, then _do not_ type the `sudo` before the `docker` commands shown in >the documentation's examples. @@ -46,13 +48,13 @@ They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery. -### What does Docker add to just plain LXC? +### What does Docker technology add to just plain LXC? -Docker is not a replacement for LXC. "LXC" refers to capabilities of the Linux -kernel (specifically namespaces and control groups) which allow sandboxing -processes from one another, and controlling their resource allocations. On top -of this low-level foundation of kernel features, Docker offers a high-level tool -with several powerful functionalities: +Docker technology is not a replacement for LXC. "LXC" refers to capabilities of +the Linux kernel (specifically namespaces and control groups) which allow +sandboxing processes from one another, and controlling their resource +allocations. On top of this low-level foundation of kernel features, Docker +offers a high-level tool with several powerful functionalities: - *Portable deployment across machines.* Docker defines a format for bundling an application and all its dependencies into a single object which can be @@ -77,7 +79,7 @@ with several powerful functionalities: - *Automatic build.* Docker includes [*a tool for developers to automatically assemble a container from their source - code*](../reference/builder.md), with full control over application + code*](reference/builder.md), with full control over application dependencies, build tools, packaging etc. They are free to use `make`, `maven`, `chef`, `puppet`, `salt`, Debian packages, RPMs, source tarballs, or any combination of the above, regardless of the configuration of the machines. @@ -90,7 +92,7 @@ with several powerful functionalities: uploads and downloads, similar to `git pull`, so new versions of a container can be transferred by only sending diffs. - - *Component re-use.* Any container can be used as a [*"base image"*](../reference/glossary.md#image) to create more specialized components. This can + - *Component re-use.* Any container can be used as a [*"base image"*](reference/glossary.md#image) to create more specialized components. This can be done manually or as part of an automated build. For example you can prepare the ideal Python environment, and use it as a base for 10 different applications. Your ideal PostgreSQL setup can be re-used for all your future @@ -135,10 +137,10 @@ thousands or even millions of containers running in parallel. ### How do I connect Docker containers? -Currently the recommended way to connect containers is via the Docker network feature. You can see details of how to [work with Docker networks here](../userguide/networking/work-with-networks.md).
+Currently the recommended way to connect containers is via the Docker network feature. You can see details of how to [work with Docker networks here](userguide/networking/work-with-networks.md). Also useful for more flexible service portability is the [Ambassador linking -pattern](../articles/ambassador_pattern_linking.md). +pattern](admin/ambassador_pattern_linking.md). ### How do I run more than one process in a Docker container? @@ -147,7 +149,7 @@ http://supervisord.org/), runit, s6, or daemontools can do the trick. Docker will start up the process management daemon which will then fork to run additional processes. As long as the process management daemon continues to run, the container will continue to run as well. You can see a more substantial example -[that uses supervisord here](../articles/using_supervisord.md). +[that uses supervisord here](admin/using_supervisord.md). ### What platforms does Docker run on? @@ -256,6 +258,26 @@ dropped. To correct this problem, change the service's configuration on your localhost so that the service accepts requests from all IPs. If you aren't sure how to do this, check the documentation for your OS. +### Why do I get `Cannot connect to the Docker daemon. Is the docker daemon running on this host?` when using docker-machine? + +This error indicates that the docker client cannot connect to the virtual machine. +This means that either the virtual machine that works underneath `docker-machine` +is not running or that the client doesn't correctly point at it. + +To verify that the docker machine is running, you can use the `docker-machine ls` +command and start it with `docker-machine start` if needed. + + $ docker-machine ls + NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS + default - virtualbox Stopped Unknown + + $ docker-machine start default + +You have to tell Docker to talk to that machine. You can do this with the +`docker-machine env` command. For example, + + $ eval "$(docker-machine env default)" + $ docker ps ### Where can I find more answers? @@ -269,4 +291,4 @@ You can find more answers on: - [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker) - [Join the conversation on Twitter](http://twitter.com/docker) -Looking for something else to read? Checkout the [User Guide](../userguide/index.md). +Looking for something else to read? Check out the [User Guide](userguide/index.md). diff --git a/docs/misc/index.md b/docs/index.md similarity index 90% rename from docs/misc/index.md rename to docs/index.md index 28b239ff97..616793cb56 100644 --- a/docs/misc/index.md +++ b/docs/index.md @@ -1,14 +1,16 @@ - -# About Docker +# About Docker Engine **Develop, Ship and Run Any Application, Anywhere** @@ -73,7 +75,7 @@ Docker consists of: ## About this guide -The [Understanding Docker section](../introduction/understanding-docker.md) will help you: +The [Understanding Docker section](understanding-docker.md) will help you: - See how Docker works at a high level - Understand the architecture of Docker @@ -83,14 +85,14 @@ The [Understanding Docker section](../introduction/understanding-docker.md) will ### Installation guides -The [installation section](../installation/index.md) will show you how to install Docker +The [installation section](installation/index.md) will show you how to install Docker on a variety of platforms. ### Docker user guide To learn about Docker in more detail and to answer questions about usage and -implementation, check out the [Docker User Guide](../userguide/index.md).
+implementation, check out the [Docker User Guide](userguide/index.md). ## Release notes @@ -117,4 +119,3 @@ The complete list of deprecated features can be found on the Docker is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full license text. - diff --git a/docs/installation/binaries.md b/docs/installation/binaries.md index b5f56d0f28..4c0f85c067 100644 --- a/docs/installation/binaries.md +++ b/docs/installation/binaries.md @@ -4,7 +4,7 @@ title = "Installation from binaries" description = "Instructions for installing Docker as a binary. Mostly meant for hackers who want to try out Docker on a variety of environments." keywords = ["binaries, installation, docker, documentation, linux"] [menu.main] -parent = "smn_engine" +parent = "engine_install" weight = 110 +++ @@ -95,7 +95,7 @@ To set the file's execute bit on Linux and OS X: $ chmod +x docker To get the list of stable release version numbers from GitHub, view the -`docker/docker` [releases page](https://github.com/docker/docker/releases). +`docker/docker` [releases page](https://github.com/docker/docker/releases). > **Note** > @@ -109,14 +109,14 @@ To download the latest version for Linux, use the following URLs: https://get.docker.com/builds/Linux/i386/docker-latest - + https://get.docker.com/builds/Linux/x86_64/docker-latest To download a specific version for Linux, use the following URL patterns: https://get.docker.com/builds/Linux/i386/docker-<version> - + https://get.docker.com/builds/Linux/x86_64/docker-<version> For example: @@ -143,19 +143,19 @@ For example: https://get.docker.com/builds/Darwin/x86_64/docker-1.9.1 ### Get the Windows binary - + You can only download the Windows client binary for version `1.9.1` onwards. Moreover, the binary is only a client; you cannot use it to run the `docker` daemon. To download the latest version for Windows, use the following URLs: https://get.docker.com/builds/Windows/i386/docker-latest.exe - + https://get.docker.com/builds/Windows/x86_64/docker-latest.exe To download a specific version for Windows, use the following URL pattern: https://get.docker.com/builds/Windows/i386/docker-<version>.exe - + https://get.docker.com/builds/Windows/x86_64/docker-<version>.exe For example: @@ -184,7 +184,7 @@ starts. The `docker` daemon must always run as the root user, but if you run the `docker` client as a user in the *docker* group then you don't need to add `sudo` to all the client commands. -> **Warning**: +> **Warning**: > The *docker* group (or the group specified with `-G`) is root-equivalent; > see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) for details. diff --git a/docs/installation/cloud-ex-aws.md b/docs/installation/cloud/cloud-ex-aws.md similarity index 97% rename from docs/installation/cloud-ex-aws.md rename to docs/installation/cloud/cloud-ex-aws.md index 70d3fc79c2..3163865ad6 100644 --- a/docs/installation/cloud-ex-aws.md +++ b/docs/installation/cloud/cloud-ex-aws.md @@ -4,7 +4,7 @@ title = "Example: Manual install on a cloud provider" description = "Example of a manual install of Docker Engine on a cloud provider, using Amazon Web Services (AWS) EC2. Shows how to create an EC2 instance, and install Docker Engine on it."
keywords = ["cloud, docker, machine, documentation, installation, AWS, EC2"] [menu.main] -parent = "smn_cloud" +parent = "install_cloud" +++ @@ -34,23 +34,23 @@ Launch an instance to create a virtual machine (VM) with a specified operating s On the AWS home page, click **EC2** to go to the dashboard, then click **Launch Instance**. - ![EC2 dashboard](images/ec2_launch_instance.png) + ![EC2 dashboard](../images/ec2_launch_instance.png) AWS EC2 virtual servers are called *instances* in Amazon parlance. Once you set up an account, IAM user and key pair, you are ready to launch an instance. It is at this point that you select the OS for the VM. 2. Choose an Amazon Machine Image (AMI) with the OS and applications you want. For this example, we select an Ubuntu server. - ![Launch Ubuntu](images/ec2-ubuntu.png) + ![Launch Ubuntu](../images/ec2-ubuntu.png) 3. Choose an instance type. - ![Choose a general purpose instance type](images/ec2_instance_type.png) + ![Choose a general purpose instance type](../images/ec2_instance_type.png) 4. Configure the instance. You can select the default network and subnet, which are inherently linked to a region and availability zone. - ![Configure the instance](images/ec2_instance_details.png) + ![Configure the instance](../images/ec2_instance_details.png) 5. Click **Review and Launch**. diff --git a/docs/installation/cloud-ex-machine-ocean.md b/docs/installation/cloud/cloud-ex-machine-ocean.md similarity index 96% rename from docs/installation/cloud-ex-machine-ocean.md rename to docs/installation/cloud/cloud-ex-machine-ocean.md index 678208208e..2164f4bf22 100644 --- a/docs/installation/cloud-ex-machine-ocean.md +++ b/docs/installation/cloud/cloud-ex-machine-ocean.md @@ -4,7 +4,7 @@ title = "Example: Use Docker Machine to provision cloud hosts" description = "Example of using Docker Machine to install Docker Engine on a cloud provider, using Digital Ocean." keywords = ["cloud, docker, machine, documentation, installation, digitalocean"] [menu.main] -parent = "smn_cloud" +parent = "install_cloud" +++ @@ -28,19 +28,19 @@ To generate your access token: 1. Go to the Digital Ocean administrator console and click **API** in the header. - ![Click API in Digital Ocean console](images/ocean_click_api.png) + ![Click API in Digital Ocean console](../images/ocean_click_api.png) 2. Click **Generate New Token** to get to the token generator. - ![Generate token](images/ocean_gen_token.png) + ![Generate token](../images/ocean_gen_token.png) 3. Give the token a clever name (e.g. "machine"), make sure the **Write (Optional)** checkbox is checked, and click **Generate Token**. - ![Name and generate token](images/ocean_token_create.png) + ![Name and generate token](../images/ocean_token_create.png) 4. Grab (copy to clipboard) the generated big long hex string and store it somewhere safe. - ![Copy and save personal access token](images/ocean_save_token.png) + ![Copy and save personal access token](../images/ocean_save_token.png) This is the personal access token you'll use in the next step to create your cloud server. @@ -133,7 +133,7 @@ To generate your access token: 2. Go to the Digital Ocean console to view the new Droplet. - ![Droplet in Digital Ocean created with Machine](images/ocean_droplet.png) + ![Droplet in Digital Ocean created with Machine](../images/ocean_droplet.png) 3. At the command terminal, run `docker-machine ls`. 
diff --git a/docs/installation/cloud.md b/docs/installation/cloud/cloud.md similarity index 91% rename from docs/installation/cloud.md rename to docs/installation/cloud/cloud.md index 693b9ae60a..d5ba411b9c 100644 --- a/docs/installation/cloud.md +++ b/docs/installation/cloud/cloud.md @@ -1,18 +1,10 @@ diff --git a/docs/installation/cloud/index.md b/docs/installation/cloud/index.md new file mode 100644 index 0000000000..96589c4608 --- /dev/null +++ b/docs/installation/cloud/index.md @@ -0,0 +1,25 @@ + + +# Install Engine in the cloud + +* [Understand cloud install options and choose one](cloud.md) +* [Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) +* [Example: Manual install on a cloud provider](cloud-ex-aws.md) diff --git a/docs/installation/index.md b/docs/installation/index.md index 383d9d4236..54d85c107c 100644 --- a/docs/installation/index.md +++ b/docs/installation/index.md @@ -1,8 +1,12 @@ @@ -11,24 +15,24 @@ keywords = ["Docker install "] Docker Engine is supported on Linux, Cloud, Windows, and OS X. Installation instructions are available for the following: ## On Linux -* [Arch Linux](archlinux.md) -* [CentOS](centos.md) -* [CRUX Linux](cruxlinux.md) -* [Debian](debian.md) -* [Fedora](fedora.md) -* [FrugalWare](frugalware.md) -* [Gentoo](gentoolinux.md) -* [Oracle Linux](oracle.md) -* [Red Hat Enterprise Linux](rhel.md) -* [openSUSE and SUSE Linux Enterprise](SUSE.md) -* [Ubuntu](ubuntulinux.md) +* [Arch Linux](linux/archlinux.md) +* [CentOS](linux/centos.md) +* [CRUX Linux](linux/cruxlinux.md) +* [Debian](linux/debian.md) +* [Fedora](linux/fedora.md) +* [FrugalWare](linux/frugalware.md) +* [Gentoo](linux/gentoolinux.md) +* [Oracle Linux](linux/oracle.md) +* [Red Hat Enterprise Linux](linux/rhel.md) +* [openSUSE and SUSE Linux Enterprise](linux/SUSE.md) +* [Ubuntu](linux/ubuntulinux.md) If your linux distribution is not listed above, don't give up yet. To try out Docker on a distribution that is not listed above, go here: [Installation from binaries](binaries.md). ## On Cloud -* [Choose how to Install](cloud.md) -* [Example: Manual install on a cloud provider](cloud-ex-aws.md) -* [Example: Use Docker Machine to provision cloud hosts](cloud-ex-machine-ocean.md) +* [Choose how to Install](cloud/cloud.md) +* [Example: Manual install on a cloud provider](cloud/cloud-ex-aws.md) +* [Example: Use Docker Machine to provision cloud hosts](cloud/cloud-ex-machine-ocean.md) ## On OSX and Windows * [Mac OS X](mac.md) @@ -38,7 +42,7 @@ If your linux distribution is not listed above, don't give up yet. To try out Do Instructions for installing prior releases of Docker can be found in the following docker archives: [Docker v1.7](http://docs.docker.com/v1.7/), [Docker v1.6](http://docs.docker.com/v1.6/), [Docker v1.5](http://docs.docker.com/v1.5/), and [Docker v1.4](http://docs.docker.com/v1.4/). 
-## Where to go After Installing -* [About Docker](../misc/index.md) +## Where to go after installing +* [About Docker Engine](../index.md) * [Support](https://www.docker.com/support/) * [Training](https://training.docker.com//) diff --git a/docs/installation/SUSE.md b/docs/installation/linux/SUSE.md similarity index 95% rename from docs/installation/SUSE.md rename to docs/installation/linux/SUSE.md index cc5cde0761..797a329e9a 100644 --- a/docs/installation/SUSE.md +++ b/docs/installation/linux/SUSE.md @@ -1,10 +1,11 @@ @@ -91,7 +92,7 @@ flag is set to `yes` like so: If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read the systemd article to -learn how to [customize your systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ## Uninstallation @@ -113,4 +114,4 @@ You can find more details about Docker on openSUSE or SUSE Linux Enterprise in t [Docker quick start guide](https://www.suse.com/documentation/sles-12/dockerquick/data/dockerquick.html) on the SUSE website. The document targets SUSE Linux Enterprise, but its contents apply also to openSUSE. -Continue to the [User Guide](../userguide/index.md). +Continue to the [User Guide](../../userguide/index.md). diff --git a/docs/installation/archlinux.md b/docs/installation/linux/archlinux.md similarity index 94% rename from docs/installation/archlinux.md rename to docs/installation/linux/archlinux.md index 9172f9066b..b62b21c674 100644 --- a/docs/installation/archlinux.md +++ b/docs/installation/linux/archlinux.md @@ -1,10 +1,11 @@ @@ -63,7 +64,7 @@ To start on system boot: If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to -learn how to [customize your systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ## Running Docker with a manually-defined network diff --git a/docs/installation/centos.md b/docs/installation/linux/centos.md similarity index 96% rename from docs/installation/centos.md rename to docs/installation/linux/centos.md index 84a9b791ab..d0569f61bb 100644 --- a/docs/installation/centos.md +++ b/docs/installation/linux/centos.md @@ -1,10 +1,11 @@ @@ -134,7 +135,7 @@ makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack ->Surface*](../security/security.md#docker-daemon-attack-surface) for details. +>Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: @@ -160,7 +161,7 @@ To ensure Docker starts when you boot your system, do the following: If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our Systemd article to -learn how to [customize your Systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your Systemd Docker daemon options](../../admin/systemd.md). 
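The systemd article linked above has the full details. As a minimal sketch of the kind of customization it describes, an HTTP proxy is typically added through a systemd drop-in file; the directory layout is standard systemd, but the proxy address here is an assumption:

    $ sudo mkdir -p /etc/systemd/system/docker.service.d
    $ echo -e '[Service]\nEnvironment="HTTP_PROXY=http://proxy.example.com:80/"' | \
        sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf
    $ sudo systemctl daemon-reload
    $ sudo systemctl restart docker

After the restart, `systemctl show --property=Environment docker` should echo the proxy setting back.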
## Uninstall diff --git a/docs/installation/cruxlinux.md b/docs/installation/linux/cruxlinux.md similarity index 97% rename from docs/installation/cruxlinux.md rename to docs/installation/linux/cruxlinux.md index f70fb9770a..583f3f45ba 100644 --- a/docs/installation/cruxlinux.md +++ b/docs/installation/linux/cruxlinux.md @@ -1,10 +1,11 @@ diff --git a/docs/installation/debian.md b/docs/installation/linux/debian.md similarity index 95% rename from docs/installation/debian.md rename to docs/installation/linux/debian.md index 2621507b76..c4adab8f26 100644 --- a/docs/installation/debian.md +++ b/docs/installation/linux/debian.md @@ -1,10 +1,11 @@ @@ -138,7 +139,7 @@ use the `-G` flag to specify an alternative group. > **Warning**: > The `docker` group (or the group specified with the `-G` flag) is -> `root`-equivalent; see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) details. +> `root`-equivalent; see [*Docker Daemon Attack Surface*](../../security/security.md#docker-daemon-attack-surface) details. **Example:** @@ -180,4 +181,4 @@ You must delete the user created configuration files manually. ## What next? -Continue with the [User Guide](../userguide/index.md). +Continue with the [User Guide](../../userguide/index.md). diff --git a/docs/installation/fedora.md b/docs/installation/linux/fedora.md similarity index 96% rename from docs/installation/fedora.md rename to docs/installation/linux/fedora.md index b45a5de048..3fd46e9a5a 100644 --- a/docs/installation/fedora.md +++ b/docs/installation/linux/fedora.md @@ -1,10 +1,11 @@ @@ -128,7 +129,7 @@ makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack ->Surface*](../security/security.md#docker-daemon-attack-surface) for details. +>Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: @@ -154,7 +155,7 @@ To ensure Docker starts when you boot your system, do the following: If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our Systemd article to -learn how to [customize your Systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your Systemd Docker daemon options](../../admin/systemd.md). ## Running Docker with a manually-defined network diff --git a/docs/installation/frugalware.md b/docs/installation/linux/frugalware.md similarity index 91% rename from docs/installation/frugalware.md rename to docs/installation/linux/frugalware.md index b93267b0ff..40368302c0 100644 --- a/docs/installation/frugalware.md +++ b/docs/installation/linux/frugalware.md @@ -1,10 +1,11 @@ @@ -53,7 +54,7 @@ To start on system boot: If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to -learn how to [customize your systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). 
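The same systemd article covers relocating the Docker runtime files. A sketch of a drop-in that overrides `ExecStart` to point `--graph` at another partition; the `/mnt/docker-data` path and the `/usr/bin/docker` binary location are illustrative, and the blank `ExecStart=` line is required to reset the unit's original value:

    [Service]
    ExecStart=
    ExecStart=/usr/bin/docker daemon --graph /mnt/docker-data

Save this as, for example, `/etc/systemd/system/docker.service.d/graph.conf`, then run `systemctl daemon-reload` and restart Docker.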
## Uninstallation diff --git a/docs/installation/gentoolinux.md b/docs/installation/linux/gentoolinux.md similarity index 92% rename from docs/installation/gentoolinux.md rename to docs/installation/linux/gentoolinux.md index 3295eef166..dac0497465 100644 --- a/docs/installation/gentoolinux.md +++ b/docs/installation/linux/gentoolinux.md @@ -1,10 +1,11 @@ @@ -19,9 +20,9 @@ The first and recommended way if you are looking for a stable experience is to use the official `app-emulation/docker` package directly from the tree. -If any issues arise from this ebuild including, missing kernel -configuration flags or dependencies, open a bug -on the Gentoo [Bugzilla](https://bugs.gentoo.org) assigned to `docker AT gentoo DOT org` +If any issues arise from this ebuild including, missing kernel +configuration flags or dependencies, open a bug +on the Gentoo [Bugzilla](https://bugs.gentoo.org) assigned to `docker AT gentoo DOT org` or join and ask in the official [IRC](http://webchat.freenode.net?channels=%23gentoo-containers&uio=d4) channel on the Freenode network. @@ -34,9 +35,9 @@ up-to-date documentation for properly installing and using the overlay can be found in the [overlay](https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay). If any issues arise from this ebuild or the resulting binary, including -and especially missing kernel configuration flags or dependencies, -open an [issue](https://github.com/tianon/docker-overlay/issues) on -the `docker-overlay` repository or ping `tianon` directly in the `#docker` +and especially missing kernel configuration flags or dependencies, +open an [issue](https://github.com/tianon/docker-overlay/issues) on +the `docker-overlay` repository or ping `tianon` directly in the `#docker` IRC channel on the Freenode network. ## Installation @@ -61,7 +62,7 @@ prompt for all necessary kernel options. $ sudo emerge -av app-emulation/docker ->Note: Sometimes there is a disparity between the latest versions +>Note: Sometimes there is a disparity between the latest versions >in the official **Gentoo tree** and the **docker-overlay**. >Please be patient, and the latest version should propagate shortly. @@ -72,11 +73,11 @@ modules and configuration (and optionally for device-mapper and AUFS or Btrfs, depending on the storage driver you've decided to use). To use Docker, the `docker` daemon must be running as **root**. -To use Docker as a **non-root** user, add yourself to the **docker** +To use Docker as a **non-root** user, add yourself to the **docker** group by running the following command: $ sudo usermod -a -G docker user - + ### OpenRC To start the `docker` daemon: @@ -96,10 +97,10 @@ To start the `docker` daemon: To start on system boot: $ sudo systemctl enable docker - + If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our systemd article to -learn how to [customize your systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your systemd Docker daemon options](../../admin/systemd.md). ## Uninstallation diff --git a/docs/installation/linux/index.md b/docs/installation/linux/index.md new file mode 100644 index 0000000000..2fb81ae2ed --- /dev/null +++ b/docs/installation/linux/index.md @@ -0,0 +1,29 @@ + + +# Install Docker Engine on Linux + +Docker Engine is supported on several Linux distributions. 
Installation instructions are available for the following:
+
+* [Arch Linux](archlinux.md)
+* [CentOS](centos.md)
+* [CRUX Linux](cruxlinux.md)
+* [Debian](debian.md)
+* [Fedora](fedora.md)
+* [FrugalWare](frugalware.md)
+* [Gentoo](gentoolinux.md)
+* [Oracle Linux](oracle.md)
+* [Red Hat Enterprise Linux](rhel.md)
+* [openSUSE and SUSE Linux Enterprise](SUSE.md)
+* [Ubuntu](ubuntulinux.md)
+
+If your Linux distribution is not listed above, don't give up yet. To try out
+Docker on an unlisted distribution, see [Installation from binaries](../binaries.md).
diff --git a/docs/installation/oracle.md b/docs/installation/linux/oracle.md
similarity index 85%
rename from docs/installation/oracle.md
rename to docs/installation/linux/oracle.md
index 56c96aa902..a154346494 100644
--- a/docs/installation/oracle.md
+++ b/docs/installation/linux/oracle.md
@@ -1,10 +1,11 @@
@@ -13,23 +14,30 @@ parent = "smn_linux"

Docker is supported on Oracle Linux 6 and 7. You do not require an Oracle Linux
Support subscription to install Docker on Oracle Linux.

-This page instructs you to install using Docker-managed release packages and
-installation mechanisms. Using these packages ensures you get the latest release
-of Docker. If you wish to install using Oracle-managed packages, consult your
-[Oracle Linux documentation](https://oracle.com/linux).
-
-
## Prerequisites

Due to current Docker limitations, Docker is only able to run on the x86_64
architecture. Docker requires the use of the Unbreakable Enterprise Kernel
-Release 3 (3.8.13) or higher on Oracle Linux. This kernel supports the Docker
+Release 4 (4.1.12) or higher on Oracle Linux. This kernel supports the Docker
btrfs storage engine on both Oracle Linux 6 and 7.
-
-
## Install
+
+> **Note**: The procedure below installs binaries built by Docker. These binaries
+> are not covered by Oracle Linux support. To ensure Oracle Linux support, please
+> follow the installation instructions provided in the
+> [Oracle Linux documentation](https://docs.oracle.com/en/operating-systems/?tab=2).
+>
+> The installation instructions for Oracle Linux 6 can be found in [Chapter 10 of
+> the Administrator's
+> Solutions Guide](https://docs.oracle.com/cd/E37670_01/E37355/html/ol_docker.html)
+>
+> The installation instructions for Oracle Linux 7 can be found in [Chapter 29 of
+> the Administrator's
+> Guide](https://docs.oracle.com/cd/E52668_01/E54669/html/ol7-docker.html)
+
+
1. Log into your machine as a user with `sudo` or `root` privileges.

2. Make sure your existing yum packages are up-to-date.

@@ -99,7 +107,7 @@ makes the ownership of the Unix socket read/writable by the `docker` group.

>**Warning**: The `docker` group is equivalent to the `root` user; For details
>on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../security/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../../security/security.md#docker-daemon-attack-surface) for details.

To create the `docker` group and add your user:

@@ -142,7 +150,7 @@ $ sudo systemctl enable docker.service

If you need to add an HTTP Proxy, set a different directory or partition for
the Docker runtime files, or make other customizations, read our systemd article to
-learn how to [customize your systemd Docker daemon options](../articles/systemd.md).
+learn how to [customize your systemd Docker daemon options](../../admin/systemd.md).
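A few hunks above, the context line "To create the `docker` group and add your user:" introduces steps the diff elides. For reference, the usual sequence is a sketch like this, where `your_username` is a placeholder and the new group membership takes effect at your next login:

    $ sudo groupadd docker
    $ sudo usermod -a -G docker your_username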
### Use the btrfs storage engine diff --git a/docs/installation/rhel.md b/docs/installation/linux/rhel.md similarity index 96% rename from docs/installation/rhel.md rename to docs/installation/linux/rhel.md index b550a3790d..d35d09a1b3 100644 --- a/docs/installation/rhel.md +++ b/docs/installation/linux/rhel.md @@ -1,10 +1,11 @@ @@ -126,7 +127,7 @@ makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack ->Surface*](../security/security.md#docker-daemon-attack-surface) for details. +>Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: @@ -152,7 +153,7 @@ To ensure Docker starts when you boot your system, do the following: If you need to add an HTTP Proxy, set a different directory or partition for the Docker runtime files, or make other customizations, read our Systemd article to -learn how to [customize your Systemd Docker daemon options](../articles/systemd.md). +learn how to [customize your Systemd Docker daemon options](../../admin/systemd.md). ## Uninstall diff --git a/docs/installation/ubuntulinux.md b/docs/installation/linux/ubuntulinux.md similarity index 97% rename from docs/installation/ubuntulinux.md rename to docs/installation/linux/ubuntulinux.md index b888e3310f..1e01366059 100644 --- a/docs/installation/ubuntulinux.md +++ b/docs/installation/linux/ubuntulinux.md @@ -1,10 +1,11 @@ @@ -230,7 +231,7 @@ makes the ownership of the Unix socket read/writable by the `docker` group. >**Warning**: The `docker` group is equivalent to the `root` user; For details >on how this impacts security in your system, see [*Docker Daemon Attack ->Surface*](../security/security.md#docker-daemon-attack-surface) for details. +>Surface*](../../security/security.md#docker-daemon-attack-surface) for details. To create the `docker` group and add your user: @@ -353,7 +354,7 @@ containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabling The instructions below describe how to configure the Docker daemon running on Ubuntu 14.10 or below. Ubuntu 15.04 and above use `systemd` as the boot and service manager. Refer to [control and configure Docker -with systemd](../articles/systemd.md#custom-docker-daemon-options) to +with systemd](../../admin/systemd.md#custom-docker-daemon-options) to configure a daemon controlled by `systemd`. To specify a DNS server for use by Docker: @@ -393,7 +394,7 @@ NetworkManager (this might slow your network). $ sudo nano /etc/NetworkManager/NetworkManager.conf -2. Comment out the `dns=dsnmasq` line: +2. Comment out the `dns=dnsmasq` line: dns=dnsmasq @@ -420,7 +421,7 @@ to start the docker daemon on boot To install the latest version of Docker with `apt-get`: - $ apt-get upgrade docker-engine + $ sudo apt-get upgrade docker-engine ## Uninstallation diff --git a/docs/installation/mac.md b/docs/installation/mac.md index 9ae6604c96..1df35ff791 100644 --- a/docs/installation/mac.md +++ b/docs/installation/mac.md @@ -4,7 +4,8 @@ title = "Installation on Mac OS X" description = "Instructions for installing Docker on OS X using boot2docker." 
keywords = ["Docker, Docker documentation, requirements, boot2docker, VirtualBox, SSH, Linux, OSX, OS X, Mac"] [menu.main] -parent = "smn_engine" +parent = "engine_install" +weight="-90" +++ diff --git a/docs/installation/windows.md b/docs/installation/windows.md index 59c1f93dd0..915d468faa 100644 --- a/docs/installation/windows.md +++ b/docs/installation/windows.md @@ -4,7 +4,8 @@ title = "Installation on Windows" description = "Docker installation on Microsoft Windows" keywords = ["Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker"] [menu.main] -parent = "smn_engine" +parent = "engine_install" +weight="-80" +++ @@ -31,7 +32,7 @@ run the Docker daemon. The VirtualBox VM runs completely from RAM, is a small ## Requirements -To run Docker, your machine must have a 64-bit operating system running Windows 7 or higher. Additionally, you must make sure that virtualization is enabled on your machine. +To run Docker, your machine must have a 64-bit operating system running Windows 7 or higher. Additionally, you must make sure that virtualization is enabled on your machine. To verify your machine meets these requirements, do the following: 1. Right click the Windows Start Menu and choose **System**. @@ -62,10 +63,10 @@ and virtualization support is enabled in BIOS and recognized by Windows. 3. Verify your Windows OS is 64-bit (x64) - How you do this verification depends on your Windows version. For details, see the Windows - article [How to determine whether a computer is running a 32-bit version or 64-bit version + How you do this verification depends on your Windows version. For details, see the Windows + article [How to determine whether a computer is running a 32-bit version or 64-bit version of the Windows operating system](https://support.microsoft.com/en-us/kb/827218). - + > **Note**: If you have Docker hosts running and you don't wish to do a Docker Toolbox installation, you can install the `docker.exe` using the *unofficial* Windows package manager Chocolately. For information on how to do this, see [Docker package on diff --git a/docs/migration.md b/docs/migration.md new file mode 100644 index 0000000000..f3e8da9815 --- /dev/null +++ b/docs/migration.md @@ -0,0 +1,84 @@ + + +# Migrate to Engine 1.10 + +Starting from version 1.10 of Docker Engine, we completely change the way image +data is addressed on disk. Previously, every image and layer used a randomly +assigned UUID. In 1.10 we implemented a content addressable method using an ID, +based on a secure hash of the image and layer data. + +The new method gives users more security, provides a built-in way to avoid ID +collisions and guarantee data integrity after pull, push, load, or save. It also +brings better sharing of layers by allowing many images to freely share their +layers even if they didn’t come from the same build. + +Addressing images by their content also lets us more easily detect if something +has already been downloaded. Because we have separated images and layers, you +don’t have to pull the configurations for every image that was part of the +original build chain. We also don’t need to create layers for the build +instructions that didn’t modify the filesystem. + +Content addressability is the foundation for the new distribution features. The +image pull and push code has been reworked to use a download/upload manager +concept that makes pushing and pulling images much more stable and mitigate any +parallel request issues. 
+parallel request issues. The download manager also brings retries on failed
+downloads and better prioritization for concurrent downloads.
+
+We are also introducing a new manifest format that is built on top of the
+content addressable base. It directly references the content addressable image
+configuration and layer checksums. The new manifest format also makes it
+possible for a manifest list to be used for targeting multiple
+architectures/platforms. Moving to the new manifest format will be completely
+transparent.
+
+## Preparing for upgrade
+
+To make your current images accessible to the new model we have to migrate them
+to content addressable storage. This means calculating the secure checksums for
+your current data.
+
+All your current images, tags and containers are automatically migrated to the
+new foundation the first time you start Docker Engine 1.10. Before loading your
+container, the daemon will calculate all needed checksums for your current data,
+and after it has completed, all your images and tags will have brand new secure
+IDs.
+
+**While this is a simple operation, calculating SHA256 checksums for your files
+can take time if you have lots of image data.** On average you should assume
+that the migrator can process data at a speed of 100MB/s. During this time your
+Docker daemon won’t be ready to respond to requests.
+
+## Minimizing migration time
+
+If you can accept this one-time hit, then upgrading Docker Engine and restarting
+the daemon will transparently migrate your images. However, if you want to
+minimize the daemon’s downtime, a migration utility can be run while your old
+daemon is still running.
+
+This tool will find all your current images and calculate the checksums for
+them. After you upgrade and restart the daemon, the checksum data of the
+migrated images will already exist, freeing the daemon from that computation
+work. If new images appeared between the migration and the upgrade, those will
+be processed at the time of upgrade to 1.10.
+
+[You can download the migration tool
+here.](https://github.com/docker/v1.10-migrator/releases)
+
+The migration tool can also be run as a Docker image. While running the migrator
+image, you need to expose your Docker data directory to the container. If you use
+the default path, then you would run:
+
+    $ docker run --rm -v /var/lib/docker:/var/lib/docker docker/v1.10-migrator
+
+If you use the
+devicemapper storage driver, you also need to pass the flag `--privileged` to
+give the tool access to your storage devices.
diff --git a/docs/misc/release-notes.md b/docs/misc/release-notes.md
deleted file mode 100644
index b4d1dc1050..0000000000
--- a/docs/misc/release-notes.md
+++ /dev/null
@@ -1,161 +0,0 @@
-
-
-# Deprecated Features
-
-To see the complete list of deprecated features please see the
-[Deprecated Features](deprecated.md) page.
-
-# Removed Features
-
-The following features have been removed in this release:
-
-* None!
-
-# Release notes version 1.6.0
-(2015-04-16)
-
-You can view release notes for earlier version of Docker by selecting the
-desired version from the drop-down list at the top right of this page. For the
-formal release announcement, see [the Docker
-blog](https://blog.docker.com/2015/04/docker-release-1-6/).
-
- -## Docker Engine 1.6.0 features - -For a complete list of engine patches, fixes, and other improvements, see the -[merge PR on GitHub](https://github.com/docker/docker/pull/11635). You'll also -find [a changelog in the project -repository](https://github.com/docker/docker/blob/master/CHANGELOG.md). - - -| Feature | Description | -|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Container and Image Labels | Labels allow you to attach user-defined metadata to containers and images that can be used by your tools. For additional information on using labels, see [Apply custom metadata](https://docs.docker.com/userguide/labels-custom-metadata.md#add-labels-to-images-the-label-instruction) in the documentation. | -| Windows Client preview | The Windows Client can be used just like the Mac OS X client is today with a remote host. Our testing infrastructure was scaled out to accommodate Windows Client testing on every PR to the Engine. See the Azure blog for [details on using this new client](http://azure.microsoft.com/blog/2015/04/16/docker-client-for-windows-is-now-available). | -| Logging drivers | The new logging driver follows the exec driver and storage driver concepts already available in Engine today. There is a new option `--log-driver` to `docker run` command. See the `run` reference for a [description on how to use this option](../reference/run.md#logging-drivers-log-driver). | -| Image digests | When you pull, build, or run images, you specify them in the form `namespace/repository:tag`, or even just `repository`. In this release, you are now able to pull, run, build and refer to images by a new content addressable identifier called a “digest” with the syntax `namespace/repo@digest`. See the the command line reference for [examples of using the digest](../reference/commandline/cli.md#listing-image-digests). | -| Custom cgroups | Containers are made from a combination of namespaces, capabilities, and cgroups. Docker already supports custom namespaces and capabilities. Additionally, in this release we’ve added support for custom cgroups. Using the `--cgroup-parent` flag, you can pass a specific `cgroup` to run a container in. See [the command line reference for more information](../reference/commandline/cli.md#create). | -| Ulimits | You can now specify the default `ulimit` settings for all containers when configuring the daemon. For example:`docker daemon --default-ulimit nproc=1024:2048` See [Default Ulimits](../reference/commandline/cli.md#default-ulimits) in this documentation. | -| Commit and import Dockerfile | You can now make changes to images on the fly without having to re-build the entire image. The feature `commit --change` and `import --change` allows you to apply standard changes to a new image. These are expressed in the Dockerfile syntax and used to modify the image. For details on how to use these, see the [commit](../reference/commandline/cli.md#commit) and [import](../reference/commandline/cli.md#import). | - -### Known issues in Engine - -This section lists significant known issues present in Docker as of release date. 
-For an exhaustive list of issues, see [the issues list on the project -repository](https://github.com/docker/docker/issues/). - -* *Unexpected File Permissions in Containers* -An idiosyncrasy in AUFS prevented permissions from propagating predictably -between upper and lower layers. This caused issues with accessing private -keys, database instances, etc. This issue was closed in this release: -[GitHub Issue 783](https://github.com/docker/docker/issues/783). - - -* *Docker Hub incompatible with Safari 8* -Docker Hub had multiple issues displaying on Safari 8, the default browser for -OS X 10.10 (Yosemite). Most notably, changes in the way Safari handled cookies -means that the user was repeatedly logged out. -Recently, Safari fixed the bug that was causing all the issues. If you upgrade -to Safari 8.0.5 which was just released last week and see if that fixes your -issues. You might have to flush your cookies if it doesn't work right away. -For more information, see the [Docker forum -post](https://forums.docker.com/t/new-safari-in-yosemite-issue/300). - -## Docker Registry 2.0 features - -This release includes Registry 2.0. The Docker Registry is a central server for -pushing and pulling images. In this release, it was completely rewritten in Go -around a new set of distribution APIs - -- **Webhook notifications**: You can now configure the Registry to send Webhooks -when images are pushed. Spin off a CI build, send a notification to IRC – -whatever you want! Included in the documentation is a detailed [notification -specification](https://docs.docker.com/registry/notifications/). - -- **Native TLS support**: This release makes it easier to secure a registry with -TLS. This documentation includes [expanded examples of secure -deployments](https://docs.docker.com/registry/deploying/). - -- **New Distribution APIs**: This release includes an expanded set of new -distribution APIs. You can read the [detailed specification -here](https://docs.docker.com/registry/spec/api/). - - -## Docker Compose 1.2 - -For a complete list of compose patches, fixes, and other improvements, see the -[changelog in the project -repository](https://github.com/docker/compose/blob/1.2.0/CHANGES.md). The -project also makes a [set of release -notes](https://github.com/docker/compose/releases/tag/1.2.0) on the project. - -- **extends**: You can use `extends` to share configuration between services -with the keyword “extends”. With extends, you can refer to a service defined -elsewhere and include its configuration in a locally-defined service, while also -adding or overriding configuration as necessary. The documentation describes -[how to use extends in your -configuration](https://docs.docker.com/compose/extends/#extending-services-in- -compose). - -- **Relative directory handling may cause breaking change**: Compose now treats -directories passed to build, filenames passed to `env_file` and volume host -paths passed to volumes as relative to the configuration file's directory. -Previously, they were treated as relative to the directory where you were -running `docker-compose`. In the majority of cases, the location of the -configuration file and where you ran `docker-compose` were the same directory. -Now, you can use the `-f|--file` argument to specify a configuration file in -another directory. - - -## Docker Swarm 0.2 - -You'll find the [release for download on -GitHub](https://github.com/docker/swarm/releases/tag/v0.2.0) and [the -documentation here](https://docs.docker.com/swarm/). 
This release includes the -following features: - -- **Spread strategy**: A new strategy for scheduling containers on your cluster -which evenly spreads them over available nodes. -- **More Docker commands supported**: More progress has been made towards -supporting the complete Docker API, such as pulling and inspecting images. -- **Clustering drivers**: There are not any third-party drivers yet, but the -first steps have been made towards making a pluggable driver interface that will -make it possible to use Swarm with clustering systems such as Mesos. - - -## Docker Machine 0.2 Pre-release - -You'll find the [release for download on -GitHub](https://github.com/docker/machine/releases) and [the documentation -here](https://docs.docker.com/machine/). For a complete list of machine changes -see [the changelog in the project -repository](https://github.com/docker/machine/blob/master/CHANGES.md#020-2015-03 --22). - -- **Cleaner driver interface**: It is now much easier to write drivers for providers. -- **More reliable and consistent provisioning**: Provisioning servers is now -handled centrally by Machine instead of letting each driver individually do it. -- **Regenerate TLS certificates**: A new command has been added to regenerate a -host’s TLS certificates for good security practice and for if a host’s IP -address changes. - -## Docker Hub Enterprise & Commercially Supported Docker Engine - -See the [DHE and CS Docker Engine release notes](https://docs.docker.com/docker-hub-enterprise/release-notes.md). diff --git a/docs/misc/search.md b/docs/misc/search.md deleted file mode 100644 index 277ba26ec6..0000000000 --- a/docs/misc/search.md +++ /dev/null @@ -1,16 +0,0 @@ - - -# Search - -*Please activate JavaScript to enable the search functionality.* - -## How To Search - -From here you can search these documents. Enter your search words into -the box below and click "search". Note that the search function will -automatically search for all of the words. Pages containing fewer words -won't appear in the result list. diff --git a/docs/userguide/basics.md b/docs/quickstart.md similarity index 88% rename from docs/userguide/basics.md rename to docs/quickstart.md index 863ece5d20..d8a93227f2 100644 --- a/docs/userguide/basics.md +++ b/docs/quickstart.md @@ -1,16 +1,18 @@ -# Quickstart containers +# Quickstart Docker Engine -This quickstart assumes you have a working installation of Docker. To verify Docker is installed, use the following command: +This quickstart assumes you have a working installation of Docker Engine. To verify Engine is installed, use the following command: # Check that you have a working install $ docker info @@ -18,17 +20,17 @@ This quickstart assumes you have a working installation of Docker. To verify Doc If you get `docker: command not found` or something like `/var/lib/docker/repositories: permission denied` you may have an incomplete Docker installation or insufficient privileges to access -Docker on your machine. With the default installation of Docker `docker` +Engine on your machine. With the default installation of Engine `docker` commands need to be run by a user that is in the `docker` group or by the `root` user. -Depending on your Docker system configuration, you may be required +Depending on your Engine system configuration, you may be required to preface each `docker` command with `sudo`. 
One way to avoid having to use `sudo` with the `docker` commands is to create a Unix group called `docker` and add users that will be entering `docker` commands to the 'docker' group. -For more information about installing Docker or `sudo` configuration, refer to -the [installation](../installation/index.md) instructions for your operating system. +For more information about installing Docker Engine or `sudo` configuration, refer to +the [installation](installation/index.md) instructions for your operating system. ## Download a pre-built image @@ -37,7 +39,7 @@ the [installation](../installation/index.md) instructions for your operating sys $ docker pull ubuntu This will find the `ubuntu` image by name on -[*Docker Hub*](../userguide/dockerrepos.md#searching-for-images) +[*Docker Hub*](userguide/containers/dockerrepos.md#searching-for-images) and download it from [Docker Hub](https://hub.docker.com) to a local image cache. @@ -194,6 +196,6 @@ You now have an image state from which you can create new instances. ## Where to go next -* Work your way through the [Docker User Guide](../userguide/index.md) -* Read more about [*Share Images via Repositories*](../userguide/dockerrepos.md) -* Review [*Command Line*](../reference/commandline/cli.md) +* Work your way through the [Docker User Guide](userguide/index.md) +* Read more about [*Share Images via Repositories*](userguide/containers/dockerrepos.md) +* Review [*Command Line*](reference/commandline/cli.md) diff --git a/docs/reference/api/docker-io_api.md b/docs/reference/api/docker-io_api.md index 1bbeea4906..5e3c684484 100644 --- a/docs/reference/api/docker-io_api.md +++ b/docs/reference/api/docker-io_api.md @@ -1,10 +1,11 @@ @@ -13,4 +14,3 @@ weight = 99 This API is deprecated as of 1.7. To view the old version, see the [Docker Hub API](https://docs.docker.com/v1.7/docker/reference/api/docker-io_api/) in the 1.7 documentation. - diff --git a/docs/reference/api/docker_io_accounts_api.md b/docs/reference/api/docker_io_accounts_api.md index fd9b2787ef..dfee194b19 100644 --- a/docs/reference/api/docker_io_accounts_api.md +++ b/docs/reference/api/docker_io_accounts_api.md @@ -4,7 +4,8 @@ title = "docker.io accounts API" description = "API Documentation for docker.io accounts." keywords = ["API, Docker, accounts, REST, documentation"] [menu.main] -parent = "mn_reference" +parent = "engine_remoteapi" +weight=90 +++ diff --git a/docs/reference/api/docker_remote_api.md b/docs/reference/api/docker_remote_api.md index a7d8fdb240..a4b6e7208f 100644 --- a/docs/reference/api/docker_remote_api.md +++ b/docs/reference/api/docker_remote_api.md @@ -4,8 +4,8 @@ title = "Remote API" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" -weight=-3 +parent = "engine_remoteapi" +weight=-99 +++ @@ -63,12 +63,20 @@ without protocol. Throughout this structure, double quotes are required. ## Using Docker Machine with the API -If you are using `docker-machine`, the Docker daemon is on a virtual host that uses an encrypted TCP socket. This means, for Docker Machine users, you need to add extra parameters to `curl` or `wget` when making test API requests, for example: +If you are using `docker-machine`, the Docker daemon is on a host that +uses an encrypted TCP socket using TLS. 
This means, for Docker Machine users, +you need to add extra parameters to `curl` or `wget` when making test +API requests, for example: ``` -curl --insecure --cert ~/.docker/cert.pem --key ~/.docker/key.pem https://YOUR_VM_IP:2376/images/json +curl --insecure \ + --cert $DOCKER_CERT_PATH/cert.pem \ + --key $DOCKER_CERT_PATH/key.pem \ + https://YOUR_VM_IP:2376/images/json -wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem --private-key=$DOCKER_CERT_PATH/key.pem https://your_vm_ip:2376/images/json -O - -q +wget --no-check-certificate --certificate=$DOCKER_CERT_PATH/cert.pem \ + --private-key=$DOCKER_CERT_PATH/key.pem \ + https://YOUR_VM_IP:2376/images/json -O - -q ``` ## Docker Events @@ -108,7 +116,7 @@ This section lists each version from latest to oldest. Each listing includes a * Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` will be cancelled if the HTTP connection making the API request is closed before the push or pull completes. -* `POST /containers/create` now allows you to set a read/write rate limit for a +* `POST /containers/create` now allows you to set a read/write rate limit for a device (in bytes per second or IO per second). * `GET /networks` now supports filtering by `name`, `id` and `type`. * `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container. @@ -117,11 +125,13 @@ This section lists each version from latest to oldest. Each listing includes a * `POST /networks/create` now supports restricting external access to the network by setting the `internal` field. * `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network * `GET /containers/(id)/json` now returns the `NetworkID` of containers. -* `POST /networks/create` Now supports an options field in the IPAM config that provides options +* `POST /networks/create` Now supports an options field in the IPAM config that provides options for custom IPAM plugins. * `GET /networks/{network-id}` Now returns IPAM config options for custom IPAM plugins if any are available. * `GET /networks/` now returns subnets info for user-defined networks. +* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications + that are built on top of engine. ### v1.21 API changes diff --git a/docs/reference/api/docker_remote_api_v1.14.md b/docs/reference/api/docker_remote_api_v1.14.md index 0183dbb8f1..3b8c9030b0 100644 --- a/docs/reference/api/docker_remote_api_v1.14.md +++ b/docs/reference/api/docker_remote_api_v1.14.md @@ -4,7 +4,7 @@ title = "Remote API v1.14" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" +parent = "engine_remoteapi" weight = 7 +++ @@ -15,7 +15,7 @@ weight = 7 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. 
diff --git a/docs/reference/api/docker_remote_api_v1.15.md b/docs/reference/api/docker_remote_api_v1.15.md index eb0a01ec64..d7c7abd076 100644 --- a/docs/reference/api/docker_remote_api_v1.15.md +++ b/docs/reference/api/docker_remote_api_v1.15.md @@ -4,7 +4,7 @@ title = "Remote API v1.15" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" +parent = "engine_remoteapi" weight = 6 +++ @@ -15,7 +15,7 @@ weight = 6 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. @@ -189,7 +189,7 @@ Json Parameters: for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). **CpuSet** - String value containing the cgroups Cpuset to use. diff --git a/docs/reference/api/docker_remote_api_v1.16.md b/docs/reference/api/docker_remote_api_v1.16.md index a985806d79..df94a7320a 100644 --- a/docs/reference/api/docker_remote_api_v1.16.md +++ b/docs/reference/api/docker_remote_api_v1.16.md @@ -4,7 +4,7 @@ title = "Remote API v1.16" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" +parent = "engine_remoteapi" weight = 5 +++ @@ -15,7 +15,7 @@ weight = 5 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. @@ -189,7 +189,7 @@ Json Parameters: for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). **CpuSet** - String value containing the cgroups Cpuset to use. 
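To make the recurring `Memory`/`MemorySwap` pairing concrete: `MemorySwap` is the combined total, so a container allowed 512 MB of RAM plus 512 MB of swap sets it to twice `Memory`. A sketch of such a create request, assuming a daemon reachable on `tcp://localhost:2375` and the `ubuntu` image:

    $ curl -X POST -H "Content-Type: application/json" \
        -d '{"Image": "ubuntu", "Memory": 536870912, "MemorySwap": 1073741824}' \
        http://localhost:2375/containers/create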
diff --git a/docs/reference/api/docker_remote_api_v1.17.md b/docs/reference/api/docker_remote_api_v1.17.md index 0656597bda..6f2cfe5f03 100644 --- a/docs/reference/api/docker_remote_api_v1.17.md +++ b/docs/reference/api/docker_remote_api_v1.17.md @@ -4,7 +4,7 @@ title = "Remote API v1.17" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" +parent = "engine_remoteapi" weight = 4 +++ @@ -15,7 +15,7 @@ weight = 4 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. @@ -190,8 +190,8 @@ Json Parameters: for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap, - always use this with `memory`, and make the value larger than `memory`. +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). **CpuSet** - String value containing the cgroups Cpuset to use. diff --git a/docs/reference/api/docker_remote_api_v1.18.md b/docs/reference/api/docker_remote_api_v1.18.md index b9b6cc1453..f117a471a6 100644 --- a/docs/reference/api/docker_remote_api_v1.18.md +++ b/docs/reference/api/docker_remote_api_v1.18.md @@ -4,7 +4,7 @@ title = "Remote API v1.18" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" +parent = "engine_remoteapi" weight = 3 +++ @@ -15,7 +15,7 @@ weight = 3 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `STDOUT`, `STDIN` and `STDERR`. @@ -207,8 +207,8 @@ Json Parameters: for the container. - **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap, - always use this with `memory`, and make the value larger than `memory`. +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container (ie. the relative weight vs other containers). - **Cpuset** - The same as CpusetCpus, but deprecated, please don't use. 
@@ -1207,7 +1207,7 @@ Query Parameters: - **rm** - remove intermediate containers after a successful build (default behavior) - **forcerm** - always remove intermediate containers (includes rm) - **memory** - set memory limit for build -- **memswap** - Total memory (memory + swap), `-1` to disable swap +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight) - **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1` diff --git a/docs/reference/api/docker_remote_api_v1.19.md b/docs/reference/api/docker_remote_api_v1.19.md index 71196dca63..196d9f75c6 100644 --- a/docs/reference/api/docker_remote_api_v1.19.md +++ b/docs/reference/api/docker_remote_api_v1.19.md @@ -4,7 +4,7 @@ title = "Remote API v1.19" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent = "smn_remoteapi" +parent = "engine_remoteapi" weight = 2 +++ @@ -15,7 +15,7 @@ weight = 2 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. @@ -214,7 +214,7 @@ Json Parameters: for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). @@ -1237,7 +1237,7 @@ Query Parameters: - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. -- **memswap** - Total memory (memory + swap), `-1` to disable swap. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. diff --git a/docs/reference/api/docker_remote_api_v1.20.md b/docs/reference/api/docker_remote_api_v1.20.md index 6eb9c4f973..f8ab5823d1 100644 --- a/docs/reference/api/docker_remote_api_v1.20.md +++ b/docs/reference/api/docker_remote_api_v1.20.md @@ -4,7 +4,7 @@ title = "Remote API v1.20" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent="smn_remoteapi" +parent="engine_remoteapi" weight = 1 +++ @@ -15,7 +15,7 @@ weight = 1 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. 
However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. @@ -221,7 +221,7 @@ Json Parameters: for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap** - Total memory limit (memory + swap); set `-1` to disable swap +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **CpuShares** - An integer value containing the container's CPU Shares (ie. the relative weight vs other containers). @@ -1362,7 +1362,7 @@ Query Parameters: - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. -- **memswap** - Total memory (memory + swap), `-1` to disable swap. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. diff --git a/docs/reference/api/docker_remote_api_v1.21.md b/docs/reference/api/docker_remote_api_v1.21.md index 9e18a17276..a1bcd62585 100644 --- a/docs/reference/api/docker_remote_api_v1.21.md +++ b/docs/reference/api/docker_remote_api_v1.21.md @@ -4,7 +4,7 @@ title = "Remote API v1.21" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent="smn_remoteapi" +parent="engine_remoteapi" weight=-2 +++ @@ -15,7 +15,7 @@ weight=-2 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. @@ -230,7 +230,7 @@ Json Parameters: for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap** - Total memory limit (memory + swap); set `-1` to disable swap +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **MemoryReservation** - Memory soft limit in bytes. - **KernelMemory** - Kernel memory limit in bytes. @@ -1445,7 +1445,7 @@ Query Parameters: - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. -- **memswap** - Total memory (memory + swap), `-1` to disable swap. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. 
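As a concrete illustration of the build parameters above, the following sketch posts a tar build context with a memory limit and unlimited swap; the socket path, context file, and tag are illustrative:

```bash
# Sketch: build an image with a 256 MB memory limit and unlimited
# swap (memswap=-1) via the Remote API v1.21.
curl --unix-socket /var/run/docker.sock \
     -X POST \
     -H "Content-Type: application/tar" \
     --data-binary @context.tar \
     "http://localhost/v1.21/build?t=myimage&memory=268435456&memswap=-1"
```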
diff --git a/docs/reference/api/docker_remote_api_v1.22.md b/docs/reference/api/docker_remote_api_v1.22.md index d523b95449..22edf39c20 100644 --- a/docs/reference/api/docker_remote_api_v1.22.md +++ b/docs/reference/api/docker_remote_api_v1.22.md @@ -4,7 +4,7 @@ title = "Remote API v1.22" description = "API Documentation for Docker" keywords = ["API, Docker, rcli, REST, documentation"] [menu.main] -parent="smn_remoteapi" +parent="engine_remoteapi" weight=-3 +++ @@ -15,7 +15,7 @@ weight=-3 - The Remote API has replaced `rcli`. - The daemon listens on `unix:///var/run/docker.sock` but you can - [Bind Docker to another host/port or a Unix socket](../../userguide/basics.md#bind-docker-to-another-host-port-or-a-unix-socket). + [Bind Docker to another host/port or a Unix socket](../../quickstart.md#bind-docker-to-another-host-port-or-a-unix-socket). - The API tends to be REST. However, for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout`, `stdin` and `stderr`. @@ -303,7 +303,7 @@ Json Parameters: for the container. - **User** - A string value specifying the user inside the container. - **Memory** - Memory limit in bytes. -- **MemorySwap** - Total memory limit (memory + swap); set `-1` to disable swap +- **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. You must use this with `memory` and make the swap value larger than `memory`. - **MemoryReservation** - Memory soft limit in bytes. - **KernelMemory** - Kernel memory limit in bytes. @@ -1034,20 +1034,16 @@ Update resource configs of one or more containers. Content-Type: application/json { - "UpdateConfig": { - "Resources": { - "BlkioWeight": 300, - "CpuShares": 512, - "CpuPeriod": 100000, - "CpuQuota": 50000, - "CpusetCpus": "0,1", - "CpusetMems": "0", - "Memory": 314572800, - "MemorySwap": 514288000, - "MemoryReservation": 209715200, - "KernelMemory": 52428800, - } - } + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, } **Example response**: @@ -1595,7 +1591,7 @@ Query Parameters: - **rm** - Remove intermediate containers after a successful build (default behavior). - **forcerm** - Always remove intermediate containers (includes `rm`). - **memory** - Set memory limit for build. -- **memswap** - Total memory (memory + swap), `-1` to disable swap. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. - **cpushares** - CPU shares (relative weight). - **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - **cpuperiod** - The length of a CPU period in microseconds. @@ -2102,6 +2098,7 @@ Display system-wide information "DockerRootDir": "/var/lib/docker", "Driver": "btrfs", "DriverStatus": [[""]], + "SystemStatus": [["State", "Healthy"]], "Plugins": { "Volume": [ "local" @@ -2321,7 +2318,7 @@ Docker networks report the following events: [ { "action": "pull", - "type": "image", + "type": "image", "actor": { "id": "busybox:latest", "attributes": {} @@ -2921,7 +2918,7 @@ Content-Type: application/json Query Parameters: -- **filters** - JSON encoded network list filter. The filter value is one of: +- **filters** - JSON encoded network list filter. The filter value is one of: - `name=` Matches all or part of a network name. - `id=` Matches all or part of a network id. - `type=["custom"|"builtin"]` Filters networks by type. 
The `custom` keyword returns all user-defined networks. diff --git a/docs/reference/api/hub_registry_spec.md b/docs/reference/api/hub_registry_spec.md index 581a01457d..f2517c2386 100644 --- a/docs/reference/api/hub_registry_spec.md +++ b/docs/reference/api/hub_registry_spec.md @@ -1,5 +1,6 @@ + + +# API Reference + +* [Docker Remote API](docker_remote_api.md) +* [Docker Remote API client libraries](remote_api_client_libraries.md) diff --git a/docs/reference/api/registry_api.md b/docs/reference/api/registry_api.md deleted file mode 100644 index 9da7cae683..0000000000 --- a/docs/reference/api/registry_api.md +++ /dev/null @@ -1,598 +0,0 @@ - - -# Docker Registry API v1 - -## Introduction - - - This is the REST API for the Docker Registry 1.0 - - It stores the images and the graph for a set of repositories - - It does not have user accounts data - - It has no notion of user accounts or authorization - - It delegates authentication and authorization to the Index Auth - service using tokens - - It supports different storage backends (S3, cloud files, local FS) - - It doesn't have a local database - - The registry is open source: [Docker Registry](https://github.com/docker/docker-registry) - - We expect that there will be multiple registries out there. To help to -grasp the context, here are some examples of registries: - - - **sponsor registry**: such a registry is provided by a third-party - hosting infrastructure as a convenience for their customers and the - Docker community as a whole. Its costs are supported by the third - party, but the management and operation of the registry are - supported by Docker. It features read/write access, and delegates - authentication and authorization to the Index. - - **mirror registry**: such a registry is provided by a third-party - hosting infrastructure but is targeted at their customers only. Some - mechanism (unspecified to date) ensures that public images are - pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can `docker pull` - those images locally. - - **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute Docker images. It would be operated - and managed by the vendor. Only users authorized by the vendor would - be able to get write access. Some images would be public (accessible - for anyone), others private (accessible only for authorized users). - Authentication and authorization would be delegated to the Index. - The goal of vendor registries is to let someone do `docker pull - basho/riak1.3` and automatically push from the vendor registry - (instead of a sponsor registry); i.e., get all the convenience of a - sponsor registry, while retaining control on the asset distribution. - - **private registry**: such a registry is located behind a firewall, - or protected by an additional security layer (HTTP authorization, - SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of Docker's - control. It can optionally delegate additional authorization to the - Index, but it is not mandatory. - -> **Note**: -> Mirror registries and private registries which do not use the Index -> don't even need to run the registry code. They can be implemented by any -> kind of transport implementing HTTP GET and PUT. Read-only registries -> can be powered by a simple static HTTPS server. 
- -> **Note**: -> The latter implies that while HTTP is the protocol of choice for a registry, -> multiple schemes are possible (and in some cases, trivial): -> -> - HTTP with GET (and PUT for read-write registries); -> - local mount point; -> - remote Docker addressed through SSH. - -The latter would only require two new commands in Docker, e.g., -`registryget` and `registryput`, wrapping access to the local filesystem -(and optionally doing consistency checks). Authentication and authorization -are then delegated to SSH (e.g., with public keys). - -> **Note**: -> Private registry servers that expose an HTTP endpoint need to be secured with -> TLS (preferably TLSv1.2, but at least TLSv1.0). Make sure to put the CA -> certificate at /etc/docker/certs.d/my.registry.com:5000/ca.crt on the Docker -> host, so that the daemon can securely access the private registry. -> Support for SSLv3 and lower is not available due to security issues. - -The default namespace for a private repository is `library`. - -# Endpoints - -## Images - -### Get image layer - -`GET /v1/images/(image_id)/layer` - -Get image layer for a given `image_id` - -**Example Request**: - - GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Token signature=123abc,repository="foo/bar",access=read - -Parameters: - -- **image_id** – the id for the layer you want to get - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - X-Docker-Registry-Version: 0.6.0 - Cookie: (Cookie provided by the Registry) - - {layer binary data stream} - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Image not found - -### Put image layer - -`PUT /v1/images/(image_id)/layer` - -Put image layer for a given `image_id` - -**Example Request**: - - PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1 - Host: registry-1.docker.io - Transfer-Encoding: chunked - Authorization: Token signature=123abc,repository="foo/bar",access=write - - {layer binary data stream} - -Parameters: - -- **image_id** – the id for the layer you want to get - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Image not found - -## Image - -### Put image layer - -`PUT /v1/images/(image_id)/json` - -Put image for a given `image_id` - -**Example Request**: - - PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - { - id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c", - parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f", - created: "2013-04-30T17:46:10.843673+03:00", - container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7", - container_config: { - Hostname: "host-test", - User: "", - Memory: 0, - MemorySwap: 0, - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Tty: false, - OpenStdin: false, - StdinOnce: false, - Env: null, - Cmd: [ - "/bin/bash", - "-c", - "apt-get -q -yy -f install libevent-dev" - ], - Dns: null, - Image: "imagename/blah", - Volumes: { }, - VolumesFrom: "" - }, - docker_version: "0.1.7" - } - -Parameters: - -- **image_id** – 
the id for the layer you want to get - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - -Status Codes: - -- **200** – OK -- **401** – Requires authorization - -### Get image layer - -`GET /v1/images/(image_id)/json` - -Get image for a given `image_id` - -**Example Request**: - - GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - -Parameters: - -- **image_id** – the id for the layer you want to get - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - X-Docker-Size: 456789 - X-Docker-Checksum: b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087 - - { - id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c", - parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f", - created: "2013-04-30T17:46:10.843673+03:00", - container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7", - container_config: { - Hostname: "host-test", - User: "", - Memory: 0, - MemorySwap: 0, - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Tty: false, - OpenStdin: false, - StdinOnce: false, - Env: null, - Cmd: [ - "/bin/bash", - "-c", - "apt-get -q -yy -f install libevent-dev" - ], - Dns: null, - Image: "imagename/blah", - Volumes: { }, - VolumesFrom: "" - }, - docker_version: "0.1.7" - } - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Image not found - -## Ancestry - -### Get image ancestry - -`GET /v1/images/(image_id)/ancestry` - -Get ancestry for an image given an `image_id` - -**Example Request**: - - GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/ancestry HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - -Parameters: - -- **image_id** – the id for the layer you want to get - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - ["088b4502f51920fbd9b7c503e87c7a2c05aa3adc3d35e79c031fa126b403200f", - "aeee63968d87c7da4a5cf5d2be6bee4e21bc226fd62273d180a49c96c62e4543", - "bfa4c5326bc764280b0863b46a4b20d940bc1897ef9c1dfec060604bdc383280", - "6ab5893c6927c15a15665191f2c6cf751f5056d8b95ceee32e43c5e8a3648544"] - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Image not found - -## Tags - -### List repository tags - -`GET /v1/repositories/(namespace)/(repository)/tags` - -Get all of the tags for the given repo. 
- -**Example Request**: - - GET /v1/repositories/reynholm/help-system-server/tags HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - Cookie: (Cookie provided by the Registry) - -Parameters: - -- **namespace** – namespace for the repo -- **repository** – name for the repo - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - { - "latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - "0.1.1": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087" - } - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Repository not found - -### Get image id for a particular tag - -`GET /v1/repositories/(namespace)/(repository)/tags/(tag*)` - -Get a tag for the given repo. - -**Example Request**: - - GET /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - Cookie: (Cookie provided by the Registry) - -Parameters: - -- **namespace** – namespace for the repo -- **repository** – name for the repo -- **tag** – name of tag you want to get - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Tag not found - -### Delete a repository tag - -`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*)` - -Delete the tag for the repo - -**Example Request**: - - DELETE /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - -Parameters: - -- **namespace** – namespace for the repo -- **repository** – name for the repo -- **tag** – name of tag you want to delete - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Tag not found - -### Set a tag for a specified image id - -`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*)` - -Put a tag for the given repo. 
- -**Example Request**: - - PUT /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" - -Parameters: - -- **namespace** – namespace for the repo -- **repository** – name for the repo -- **tag** – name of tag you want to add - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - -Status Codes: - -- **200** – OK -- **400** – Invalid data -- **401** – Requires authorization -- **404** – Image not found - -## Repositories - -### Delete a repository - -`DELETE /v1/repositories/(namespace)/(repository)/` - -Delete a repository - -**Example Request**: - - DELETE /v1/repositories/reynholm/help-system-server/ HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - "" - -Parameters: - -- **namespace** – namespace for the repo -- **repository** – name for the repo - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - -Status Codes: - -- **200** – OK -- **401** – Requires authorization -- **404** – Repository not found - -## Search - -If you need to search the index, this is the endpoint you would use. - -`GET /v1/search` - -Search the Index given a search term. It accepts - - [GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3) - only. - -**Example request**: - - GET /v1/search?q=search_term&page=1&n=25 HTTP/1.1 - Host: index.docker.io - Accept: application/json - -Query Parameters: - -- **q** – what you want to search for -- **n** - number of results you want returned per page (default: 25, min:1, max:100) -- **page** - page number of results - -**Example response**: - - HTTP/1.1 200 OK - Vary: Accept - Content-Type: application/json - - {"num_pages": 1, - "num_results": 3, - "results" : [ - {"name": "ubuntu", "description": "An ubuntu image..."}, - {"name": "centos", "description": "A centos image..."}, - {"name": "fedora", "description": "A fedora image..."} - ], - "page_size": 25, - "query":"search_term", - "page": 1 - } - -Response Items: -- **num_pages** - Total number of pages returned by query -- **num_results** - Total number of results returned by query -- **results** - List of results for the current page -- **page_size** - How many results returned per page -- **query** - Your search term -- **page** - Current page number - -Status Codes: - -- **200** – no error -- **500** – server error - -## Status - -### Status check for registry - -`GET /v1/_ping` - -Check status of the registry. This endpoint is also used to -determine if the registry supports SSL. - -**Example Request**: - - GET /v1/_ping HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - - "" - -**Example Response**: - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - -Status Codes: - -- **200** – OK - -## Authorization - -This is where we describe the authorization process, including the -tokens and cookies. 
- diff --git a/docs/reference/api/registry_api_client_libraries.md b/docs/reference/api/registry_api_client_libraries.md deleted file mode 100644 index f5d5b3e515..0000000000 --- a/docs/reference/api/registry_api_client_libraries.md +++ /dev/null @@ -1,49 +0,0 @@ - - -# Docker Registry v1 API client libraries - -These libraries have not been tested by the Docker maintainers for -compatibility. Please file issues with the library owners. If you find -more library implementations, please submit a PR with an update to this page -or open an issue in the [Docker](https://github.com/docker/docker/issues) -project and we will add the libraries here. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Language/FrameworkNameRepositoryStatus
JavaScript (AngularJS) WebUIdocker-registry-frontendhttps://github.com/kwk/docker-registry-frontendActive
Godocker-reg-clienthttps://github.com/CenturyLinkLabs/docker-reg-clientActive
diff --git a/docs/reference/api/remote_api_client_libraries.md b/docs/reference/api/remote_api_client_libraries.md index c86e6976c1..677145a111 100644 --- a/docs/reference/api/remote_api_client_libraries.md +++ b/docs/reference/api/remote_api_client_libraries.md @@ -4,7 +4,8 @@ title = "Remote API client libraries" description = "Various client libraries available to use with the Docker remote API" keywords = ["API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala"] [menu.main] -parent="mn_reference" +parent="engine_remoteapi" +weight = 90 +++ @@ -186,7 +187,7 @@ will add the libraries here. PHP Docker-PHP - http://stage1.github.io/docker-php/ + https://github.com/docker-php/docker-php Active diff --git a/docs/reference/builder.md b/docs/reference/builder.md index 355c5e3767..3b9a6d5ea4 100644 --- a/docs/reference/builder.md +++ b/docs/reference/builder.md @@ -4,7 +4,8 @@ title = "Dockerfile reference" description = "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." keywords = ["builder, docker, Dockerfile, automation, image creation"] [menu.main] -parent = "mn_reference" +parent = "engine_ref" +weight=-90 +++ @@ -18,7 +19,7 @@ instructions in succession. This page describes the commands you can use in a `Dockerfile`. When you are done reading this page, refer to the [`Dockerfile` Best -Practices](../articles/dockerfile_best-practices.md) for a tip-oriented guide. +Practices](../userguide/eng-image/dockerfile_best-practices.md) for a tip-oriented guide. ## Usage @@ -80,7 +81,7 @@ instructions. Whenever possible, Docker will re-use the intermediate images (cache), to accelerate the `docker build` process significantly. This is indicated by the `Using cache` message in the console output. -(For more information, see the [Build cache section](../articles/dockerfile_best-practices.md#build-cache)) in the +(For more information, see the [Build cache section](../userguide/eng-image/dockerfile_best-practices.md#build-cache)) in the `Dockerfile` best practices guide: $ docker build -t svendowideit/ambassador . @@ -99,7 +100,7 @@ the `Using cache` message in the console output. Successfully built 7ea8aef582cc When you're done with your build, you're ready to look into [*Pushing a -repository to its registry*](../userguide/dockerrepos.md#contributing-to-docker-hub). +repository to its registry*](../userguide/containers/dockerrepos.md#contributing-to-docker-hub). ## Format @@ -275,7 +276,7 @@ All of the README files are included. The middle line has no effect because You can even use the `.dockerignore` file to exclude the `Dockerfile` and `.dockerignore` files. These files are still sent to the daemon because it needs them to do its job. But the `ADD` and `COPY` commands -do not copy them to the the image. +do not copy them to the image. Finally, you may want to specify which files to include in the context, rather than which to exclude. To achieve this, specify `*` as @@ -298,7 +299,7 @@ Or The `FROM` instruction sets the [*Base Image*](glossary.md#base-image) for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as its first instruction. The image can be any valid image – it is especially easy -to start by **pulling an image** from the [*Public Repositories*](../userguide/dockerrepos.md). +to start by **pulling an image** from the [*Public Repositories*](../userguide/containers/dockerrepos.md). 
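As a minimal sketch of that workflow (image and tag names are illustrative), the following writes a two-instruction `Dockerfile` whose first instruction is `FROM`, then builds it; the rules that `FROM` must satisfy are listed next:

```bash
# Sketch: the smallest useful Dockerfile starts with FROM, pulling the
# base image from the public repositories, then builds and tags it.
printf 'FROM ubuntu:14.04\nCMD ["echo", "hello world"]\n' > Dockerfile
docker build -t my-test-image .
```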
- `FROM` must be the first non-comment instruction in the `Dockerfile`. @@ -369,7 +370,7 @@ cache for `RUN` instructions can be invalidated by using the `--no-cache` flag, for example `docker build --no-cache`. See the [`Dockerfile` Best Practices -guide](../articles/dockerfile_best-practices.md#build-cache) for more information. +guide](../userguide/eng-image/dockerfile_best-practices.md#build-cache) for more information. The cache for `RUN` instructions can be invalidated by `ADD` instructions. See [below](#add) for details. @@ -608,7 +609,7 @@ of whether or not the file has changed and the cache should be updated. > following instructions from the Dockerfile if the contents of `<src>` have > changed. This includes invalidating the cache for `RUN` instructions. > See the [`Dockerfile` Best Practices -guide](../articles/dockerfile_best-practices.md#build-cache) for more information. +guide](../userguide/eng-image/dockerfile_best-practices.md#build-cache) for more information. `ADD` obeys the following rules: @@ -643,6 +644,14 @@ guide](../articles/dockerfile_best-practices.md#build-cache) for more informatio 2. The contents of the source tree, with conflicts resolved in favor of "2." on a file-by-file basis. + > **Note**: + > Whether a file is identified as a recognized compression format or not + > is done solely based on the contents of the file, not the name of the file. + > For example, if an empty file happens to end with `.tar.gz` this will not + > be recognized as a compressed file and **will not** generate any kind of + > decompression error message; rather, the file will simply be copied to the + > destination. + - If `<src>` is any other kind of file, it is copied individually along with its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it will be considered a directory and the contents of `<src>` will be written @@ -951,7 +960,7 @@ containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log /var/db`. For more information/examples and mounting instructions via the Docker client, refer to -[*Share Directories via Volumes*](../userguide/dockervolumes.md#mount-a-host-directory-as-a-data-volume) +[*Share Directories via Volumes*](../userguide/containers/dockervolumes.md#mount-a-host-directory-as-a-data-volume) documentation. The `docker run` command initializes the newly created volume with any data diff --git a/docs/reference/commandline/commit.md b/docs/reference/commandline/commit.md index 49e37e416b..13dd3340b0 100644 --- a/docs/reference/commandline/commit.md +++ b/docs/reference/commandline/commit.md @@ -43,11 +43,11 @@ created. Supported `Dockerfile` instructions: ID IMAGE COMMAND CREATED STATUS PORTS c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - $ docker commit c3f279d17e0a SvenDowideit/testimage:version3 + $ docker commit c3f279d17e0a svendowideit/testimage:version3 f5283438590d $ docker images - REPOSITORY TAG ID CREATED VIRTUAL SIZE - SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB + REPOSITORY TAG ID CREATED SIZE + svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB ## Commit a container with new configurations @@ -57,7 +57,7 @@ created.
Supported `Dockerfile` instructions: 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] - $ docker commit --change "ENV DEBUG true" c3f279d17e0a SvenDowideit/testimage:version3 + $ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 f5283438590d $ docker inspect -f "{{ .Config.Env }}" f5283438590d [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] @@ -69,10 +69,10 @@ created. Supported `Dockerfile` instructions: c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a SvenDowideit/testimage:version4 + $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 f5283438590d - $ docker run -d SvenDowideit/testimage:version4 + $ docker run -d svendowideit/testimage:version4 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 $ docker ps diff --git a/docs/reference/commandline/daemon.md b/docs/reference/commandline/daemon.md index 856d913782..df823995cd 100644 --- a/docs/reference/commandline/daemon.md +++ b/docs/reference/commandline/daemon.md @@ -86,7 +86,7 @@ membership. If you need to access the Docker daemon remotely, you need to enable the `tcp` Socket. Beware that the default setup provides un-encrypted and un-authenticated direct access to the Docker daemon - and should be secured -either using the [built in HTTPS encrypted socket](../../articles/https/), or by +either using the [built in HTTPS encrypted socket](../../security/https/), or by putting a secure web proxy in front of it. You can listen on port `2375` on all network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network interface using its IP address: `-H tcp://192.168.59.103:2375`. It is @@ -220,15 +220,15 @@ options for `zfs` start with `zfs`. the empty case the larger the device is. The base device size can be increased at daemon restart which will allow - all future images and containers (based on those new images) to be of the + all future images and containers (based on those new images) to be of the new base device size. - Example use: + Example use: $ docker daemon --storage-opt dm.basesize=50G - This will increase the base device size to 50G. The Docker daemon will throw an - error if existing base device size is larger than 50G. A user can use + This will increase the base device size to 50G. The Docker daemon will throw an + error if existing base device size is larger than 50G. A user can use this option to expand the base device size however shrinking is not permitted. This value affects the system-wide "base" empty filesystem @@ -565,7 +565,7 @@ please check the [run](run.md) reference. ## Nodes discovery -The `--cluster-advertise` option specifies the 'host:port' or `interface:port` +The `--cluster-advertise` option specifies the `host:port` or `interface:port` combination that this particular daemon instance should use when advertising itself to the cluster. The daemon is reached by remote hosts through this value. If you specify an interface, make sure it includes the IP address of the actual @@ -727,7 +727,7 @@ when querying the system for the subordinate group ID range. 
### Detailed information on `subuid`/`subgid` ranges -Given potential advanced use of the subordinate ID ranges by power users, the +Given potential advanced use of the subordinate ID ranges by power users, the following paragraphs define how the Docker daemon currently uses the range entries found within the subordinate range files. @@ -737,7 +737,7 @@ range for the mapping of host uids and gids to the container process. This means that the first ID in the range will be the remapped root user, and the IDs above that initial ID will map host ID 1 through the end of the range. -From the example `/etc/subid` content shown above, the remapped root +From the example `/etc/subuid` content shown above, the remapped root user would be uid 165536. If the system administrator has set up multiple ranges for a single user or @@ -838,10 +838,8 @@ This is a full example of the allowed configuration options in the file: "storage-driver": "", "storage-opts": "", "labels": [], - "log-config": { - "log-driver": "", - "log-opts": [] - }, + "log-driver": "", + "log-opts": [], "mtu": 0, "pidfile": "", "graph": "", @@ -852,18 +850,29 @@ This is a full example of the allowed configuration options in the file: "hosts": [], "log-level": "", "tls": true, - "tls-verify": true, - "tls-opts": { - "tlscacert": "", - "tlscert": "", - "tlskey": "" - }, + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", "api-cors-headers": "", "selinux-enabled": false, "userns-remap": "", "group": "", "cgroup-parent": "", - "default-ulimits": {} + "default-ulimits": {}, + "ipv6": false, + "iptables": false, + "ip-forward": false, + "ip-mask": false, + "userland-proxy": false, + "ip": "0.0.0.0", + "bridge": "", + "bip": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "default-gateway": "", + "default-gateway-v6": "", + "icc": false } ``` @@ -879,7 +888,4 @@ if there are conflicts, but it won't stop execution. The list of currently supported options that can be reconfigured is this: - `debug`: it changes the daemon to debug mode when set to true. -- `label`: it replaces the daemon labels with a new set of labels. -- `cluster-store`: it reloads the discovery store with the new address. -- `cluster-store-opts`: it uses the new options to reload the discovery store. -- `cluster-advertise`: it modifies the address advertised after reloading. +- `labels`: it replaces the daemon labels with a new set of labels. diff --git a/docs/reference/commandline/export.md b/docs/reference/commandline/export.md index 87b691cd43..604ceab189 100644 --- a/docs/reference/commandline/export.md +++ b/docs/reference/commandline/export.md @@ -23,7 +23,7 @@ the container, `docker export` will export the contents of the *underlying* directory, not the contents of the volume. Refer to [Backup, restore, or migrate data -volumes](../../userguide/dockervolumes.md#backup-restore-or-migrate-data-volumes) in +volumes](../../userguide/containers/dockervolumes.md#backup-restore-or-migrate-data-volumes) in the user guide for examples on exporting data in a volume. ## Examples diff --git a/docs/reference/commandline/images.md b/docs/reference/commandline/images.md index cfee5a4bbf..8419fd0c52 100644 --- a/docs/reference/commandline/images.md +++ b/docs/reference/commandline/images.md @@ -22,25 +22,25 @@ parent = "smn_cli" -q, --quiet Only show numeric IDs The default `docker images` will show all top level -images, their repository and tags, and their virtual size. +images, their repository and tags, and their size. 
Docker images have intermediate layers that increase reusability, decrease disk usage, and speed up `docker build` by allowing each step to be cached. These intermediate layers are not shown by default. -The `VIRTUAL SIZE` is the cumulative space taken up by the image and all +The `SIZE` is the cumulative space taken up by the image and all its parent images. This is also the disk space used by the contents of the Tar file created when you `docker save` an image. An image will be listed more than once if it has multiple repository names or tags. This single image (identifiable by its matching `IMAGE ID`) -uses up the `VIRTUAL SIZE` listed only once. +uses up the `SIZE` listed only once. ### Listing the most recently created images $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE 77af4d6b9913 19 hours ago 1.089 GB committ latest b6fa739cedf5 19 hours ago 1.089 GB 78a85c484f71 19 hours ago 1.089 GB @@ -61,7 +61,7 @@ given repository. For example, to list all images in the "java" repository, run this command : $ docker images java - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE java 8 308e519aac60 6 days ago 824.5 MB java 7 493d82594c15 3 months ago 656.3 MB java latest 2711b1d6f3aa 5 months ago 603.9 MB @@ -74,18 +74,18 @@ repository and tag are listed. To find all local images in the "java" repository with tag "8" you can use: $ docker images java:8 - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE java 8 308e519aac60 6 days ago 824.5 MB If nothing matches `REPOSITORY[:TAG]`, the list is empty. $ docker images java:0 - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE ## Listing the full length image IDs $ docker images --no-trunc - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB @@ -104,7 +104,7 @@ unchanged, the digest value is predictable. To list image digest values, use the `--digests` flag: $ docker images --digests - REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB When pushing or pulling to a 2.0 registry, the `push` or `pull` command @@ -126,7 +126,7 @@ The currently supported filters are: $ docker images --filter "dangling=true" - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE 8abc22fbb042 4 weeks ago 0 B 48e5f45168b9 4 weeks ago 2.489 MB bf747efa0e2f 4 weeks ago 0 B @@ -163,20 +163,20 @@ The following filter matches images with the `com.example.version` label regardl $ docker images --filter "label=com.example.version" - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB match-me-2 latest eeae25ada2aa About a minute ago 188.3 MB The following filter matches images with the `com.example.version` label with the `1.0` value. 
$ docker images --filter "label=com.example.version=1.0" - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE match-me latest eeae25ada2aa About a minute ago 188.3 MB In this example, with the `0.1` value, it returns an empty set because no matches were found. $ docker images --filter "label=com.example.version=0.1" - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE ## Formatting diff --git a/docs/reference/commandline/index.md b/docs/reference/commandline/index.md index 775c327700..a5704da03a 100644 --- a/docs/reference/commandline/index.md +++ b/docs/reference/commandline/index.md @@ -1,11 +1,12 @@ diff --git a/docs/reference/commandline/inspect.md b/docs/reference/commandline/inspect.md index 995388c8f1..38d4098c0c 100644 --- a/docs/reference/commandline/inspect.md +++ b/docs/reference/commandline/inspect.md @@ -41,7 +41,7 @@ straightforward manner. For the most part, you can pick out any field from the JSON in a fairly straightforward manner. - $ docker inspect '{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID **Get an instance's log path:** diff --git a/docs/reference/commandline/load.md b/docs/reference/commandline/load.md index c2536fbdbf..888365fcf5 100644 --- a/docs/reference/commandline/load.md +++ b/docs/reference/commandline/load.md @@ -21,14 +21,14 @@ Loads a tarred repository from a file or the standard input stream. Restores both images and tags. $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE $ docker load < busybox.tar.gz $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB $ docker load --input fedora.tar $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB fedora rawhide 0d20aec6529d 7 weeks ago 387 MB fedora 20 58394af37342 7 weeks ago 385.5 MB diff --git a/docs/reference/commandline/network_connect.md b/docs/reference/commandline/network_connect.md index b08dec3225..49f0b3d7f4 100644 --- a/docs/reference/commandline/network_connect.md +++ b/docs/reference/commandline/network_connect.md @@ -56,12 +56,14 @@ $ docker network connect --alias db --alias mysql multi-host-network container2 You can pause, restart, and stop containers that are connected to a network. Paused containers remain connected and can be revealed by a `network inspect`. When the container is stopped, it does not appear on the network until you restart -it. If specified, the container's IP address(es) will be reapplied (if still available) -when a stopped container rejoins the network. One way to guarantee that the container -will be assigned the same IP addresses when it rejoins the network after a stop -or a disconnect, is to specify the `--ip-range` when creating the network, and choose -the static IP address(es) from outside the range. This will ensure that the IP address -will not be given to other dynamic containers while this container is not on the network. +it. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. 
One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. ```bash $ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network diff --git a/docs/reference/commandline/network_create.md b/docs/reference/commandline/network_create.md index a1bfdf51f7..bb85cc0462 100644 --- a/docs/reference/commandline/network_create.md +++ b/docs/reference/commandline/network_create.md @@ -21,8 +21,8 @@ parent = "smn_cli" --internal Restricts external access to the network --ip-range=[] Allocate container ip from a sub-range --ipam-driver=default IP Address Management Driver - -o --opt=map[] Set custom network plugin options - --ipam-opt=map[] Set custom IPAM plugin options + --ipam-opt=map[] Set custom IPAM driver specific options + -o --opt=map[] Set custom driver specific options --subnet=[] Subnet in CIDR format that represents a network segment Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the @@ -122,6 +122,26 @@ docker network create -d overlay ``` Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error. +### Bridge driver options + +When creating a custom network, the default network driver (i.e. `bridge`) accepts additional options. +The following table lists those options and the equivalent docker daemon flags used for the docker0 bridge: + +| Option | Equivalent | Description | +|--------------------------------------------------|-------------|-------------------------------------------------------| +| `com.docker.network.bridge.name` | - | Bridge name to be used when creating the Linux bridge | +| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | +| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or disable inter-container connectivity | +| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | +| `com.docker.network.mtu` | `--mtu` | Set the container network MTU | +| `com.docker.network.enable_ipv6` | `--ipv6` | Enable IPv6 networking | + +For example, let's use the `-o` or `--opt` option to specify an IP address binding when publishing ports: + +```bash +docker network create -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" simple-network +``` + ### Network internal mode By default, when you connect a container to an `overlay` network, Docker also connects a bridge network to it to provide external connectivity.
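Several of these bridge driver options can be combined on a single network. The following sketch (the network name and values are illustrative) names the Linux bridge, disables inter-container connectivity, and lowers the MTU, using only keys from the table above:

```bash
# Sketch: combine several bridge driver options on one user-defined network.
docker network create \
  -o "com.docker.network.bridge.name"="docker-apps" \
  -o "com.docker.network.bridge.enable_icc"="false" \
  -o "com.docker.network.mtu"="1400" \
  apps-network
```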
diff --git a/docs/reference/commandline/port.md b/docs/reference/commandline/port.md index 606969a170..dbfae61039 100644 --- a/docs/reference/commandline/port.md +++ b/docs/reference/commandline/port.md @@ -20,7 +20,7 @@ parent = "smn_cli" You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or just a specific mapping: - $ docker ps test + $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test $ docker port test diff --git a/docs/reference/commandline/ps.md b/docs/reference/commandline/ps.md index 432a9a31c4..328e674264 100644 --- a/docs/reference/commandline/ps.md +++ b/docs/reference/commandline/ps.md @@ -55,7 +55,7 @@ The currently supported filters are: * label (`label=` or `label==`) * name (container's name) * exited (int - the code of exited containers. Only useful with `--all`) -* status (created|restarting|running|paused|exited) +* status (created|restarting|running|paused|exited|dead) * ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. * isolation (default|process|hyperv) (Windows daemon only) @@ -109,7 +109,7 @@ that have exited successfully: #### Status -The `status` filter matches containers by status. You can filter using `created`, `restarting`, `running`, `paused` and `exited`. For example, to filter for `running` containers: +The `status` filter matches containers by status. You can filter using `created`, `restarting`, `running`, `paused`, `exited` and `dead`. For example, to filter for `running` containers: $ docker ps --filter status=running CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES diff --git a/docs/reference/commandline/rm.md b/docs/reference/commandline/rm.md index 3664f98ccd..514b92c27e 100644 --- a/docs/reference/commandline/rm.md +++ b/docs/reference/commandline/rm.md @@ -45,3 +45,17 @@ This command will delete all stopped containers. The command `docker ps -a -q` will return all existing container IDs and pass them to the `rm` command which will delete them. Any running containers will not be deleted. + + $ docker rm -v redis + redis + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + $ docker rm -v hello + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. diff --git a/docs/reference/commandline/rmi.md b/docs/reference/commandline/rmi.md index 022a415928..f02734e8b4 100644 --- a/docs/reference/commandline/rmi.md +++ b/docs/reference/commandline/rmi.md @@ -63,7 +63,7 @@ command untags and removes all images that match the specified ID. 
An image pulled by digest has no tag associated with it: $ docker images --digests - REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB To remove an image using its digest: diff --git a/docs/reference/commandline/run.md b/docs/reference/commandline/run.md index a36bbfb96a..dce1019f19 100644 --- a/docs/reference/commandline/run.md +++ b/docs/reference/commandline/run.md @@ -145,7 +145,7 @@ This will *not* work, because by default, most potentially dangerous kernel capabilities are dropped; including `cap_sys_admin` (which is required to mount filesystems). However, the `--privileged` flag will allow it to run: - $ docker run --privileged ubuntu bash + $ docker run -t -i --privileged ubuntu bash root@50e3f57e16e6:/# mount -t tmpfs none /mnt root@50e3f57e16e6:/# df -h Filesystem Size Used Avail Use% Mounted on @@ -163,13 +163,12 @@ flag exists to allow special use-cases, like running Docker within Docker. The `-w` lets the command being executed inside directory given, here `/path/to/dir/`. If the path does not exists it is created inside the container. -### mount tmpfs (--tmpfs) +### Mount tmpfs (--tmpfs) $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image - The --tmpfs flag mounts a tmpfs into the container with the rw,noexec,nosuid,size=65536k options. - - Underlying content from the /run in the my_image image is copied into tmpfs. +The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, `size=65536k` options. ### Mount volume (-v, --read-only) @@ -195,12 +194,13 @@ a container writes files. The `--read-only` flag mounts the container's root filesystem as read only prohibiting writes to locations other than the specified volumes for the container. - $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh + $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh By bind-mounting the docker unix socket and statically linked docker -binary (such as that provided by [https://get.docker.com]( -https://get.docker.com)), you give the container the full access to create and -manipulate the host's Docker daemon. +binary (refer to [get the linux binary]( +../../installation/binaries.md#get-the-linux-binary)), +you give the container the full access to create and manipulate the host's +Docker daemon. ### Publish or expose port (-p, --expose) @@ -422,12 +422,12 @@ flag: $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc Command (m for help): q - $ docker run --device=/dev/sda:/dev/xvdc:ro --rm -it ubuntu fdisk /dev/xvdc + $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc You will not be able to write the partition table. Command (m for help): q - $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc Command (m for help): q @@ -539,7 +539,7 @@ available in the default container, you can set these using the `--ulimit` flag. 
`--ulimit` is specified with a soft and hard limit as such: `<type>=<soft limit>[:<hard limit>]`, for example: - $ docker run --ulimit nofile=1024:1024 --rm debian ulimit -n + $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" 1024 > **Note:** diff --git a/docs/reference/commandline/search.md b/docs/reference/commandline/search.md index beb4ef0a49..893895e214 100644 --- a/docs/reference/commandline/search.md +++ b/docs/reference/commandline/search.md @@ -21,7 +21,7 @@ parent = "smn_cli" Search [Docker Hub](https://hub.docker.com) for images -See [*Find Public Images on Docker Hub*](../../userguide/dockerrepos.md#searching-for-images) for +See [*Find Public Images on Docker Hub*](../../userguide/containers/dockerrepos.md#searching-for-images) for more details on finding shared images from the command line. > **Note:** diff --git a/docs/reference/commandline/stats.md b/docs/reference/commandline/stats.md index b3a2ed6224..8ef7d6e152 100644 --- a/docs/reference/commandline/stats.md +++ b/docs/reference/commandline/stats.md @@ -28,9 +28,9 @@ Running `docker stats` on all running containers $ docker stats CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O - redis1 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB - redis2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B - nginx1 0.03% 4.583 MB / 64 MB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + 1285939c1fd3 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MB / 64 MB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B Running `docker stats` on multiple containers by name and id. diff --git a/docs/reference/commandline/tag.md b/docs/reference/commandline/tag.md index c1ca5aa18d..cd104e8c6a 100644 --- a/docs/reference/commandline/tag.md +++ b/docs/reference/commandline/tag.md @@ -17,4 +17,4 @@ parent = "smn_cli" --help Print usage You can group your images together using names and tags, and then upload them -to [*Share Images via Repositories*](../../userguide/dockerrepos.md#contributing-to-docker-hub). +to [*Share Images via Repositories*](../../userguide/containers/dockerrepos.md#contributing-to-docker-hub). diff --git a/docs/reference/commandline/update.md b/docs/reference/commandline/update.md index c1efea57ba..bcbfab6ab9 100644 --- a/docs/reference/commandline/update.md +++ b/docs/reference/commandline/update.md @@ -23,7 +23,7 @@ parent = "smn_cli" --cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1) -m, --memory="" Memory limit --memory-reservation="" Memory soft limit - --memory-swap="" Total memory (memory + swap), '-1' to disable swap + --memory-swap="" A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap --kernel-memory="" Kernel memory limit: container must be stopped The `docker update` command dynamically updates container resources.
Use this diff --git a/docs/reference/index.md b/docs/reference/index.md new file mode 100644 index 0000000000..a207ee7d05 --- /dev/null +++ b/docs/reference/index.md @@ -0,0 +1,18 @@ + + +# Engine reference + +* [Dockerfile reference](builder.md) +* [Docker run reference](run.md) +* [Command line reference](commandline/index.md) +* [API Reference](api/index.md) diff --git a/docs/reference/run.md b/docs/reference/run.md index 95e0e0a605..e38fd1f18d 100644 --- a/docs/reference/run.md +++ b/docs/reference/run.md @@ -4,7 +4,8 @@ title = "Docker run reference" description = "Configure containers at runtime" keywords = ["docker, run, configure, runtime"] [menu.main] -parent = "mn_reference" +parent = "engine_ref" +weight=-80 +++ @@ -409,7 +410,7 @@ The following example creates a network using the built-in `bridge` network driver and running a container in the created network ``` -$ docker network create -d overlay my-net +$ docker network create -d bridge my-net $ docker run --net=my-net -itd --name=container3 busybox ``` @@ -550,7 +551,7 @@ The exit code from `docker run` gives information about why the container failed to run or why it exited. When `docker run` exits with a non-zero code, the exit codes follow the `chroot` standard, see below: -**_125_** if the error is with Docker daemon **_itself_** +**_125_** if the error is with Docker daemon **_itself_** $ docker run --foo busybox; echo $? # flag provided but not defined: --foo @@ -573,7 +574,7 @@ the exit codes follow the `chroot` standard, see below: **_Exit code_** of **_contained command_** otherwise - $ docker run busybox /bin/sh -c 'exit 3' + $ docker run busybox /bin/sh -c 'exit 3' # 3 ## Clean up (--rm) @@ -590,7 +591,11 @@ the container exits**, you can add the `--rm` flag: > **Note**: When you set the `--rm` flag, Docker also removes the volumes associated with the container when the container is removed. This is similar -to running `docker rm -v my-container`. +to running `docker rm -v my-container`. Only volumes that are specified without a +name are removed. For example, with +`docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, +but the volume for `/bar` will not. Volumes inherited via `--volumes-from` will be removed +with the same logic -- if the original volume was specified with a name it will **not** be removed. ## Security configuration --security-opt="label:user:USER" : Set the label user for the container @@ -996,9 +1001,9 @@ For example, to set `/dev/sda` device weight to `200`: ubuntu If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker -uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` -to override this default with a new value on a specific device. -The following example uses a default weight of `300` and overrides this default +uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` +to override this default with a new value on a specific device. +The following example uses a default weight of `300` and overrides this default on `/dev/sda` setting that weight to `200`: $ docker run -it \ @@ -1014,7 +1019,7 @@ per second from `/dev/sda`: The `--device-write-bps` flag limits the write rate (bytes per second) to a device.
For example, this command creates a container and limits the write rate to `1mb`
-per second for `/dev/sda`:
+per second for `/dev/sda`:

    $ docker run -it --device-write-bps /dev/sda:1mb ubuntu

@@ -1057,7 +1062,7 @@ one can use this flag:

By default, Docker containers are "unprivileged" and cannot, for
example, run a Docker daemon inside a Docker container. This is because
by default a container is not allowed to access any devices, but a
-"privileged" container is given access to all devices (see
+"privileged" container is given access to all devices (see
the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroups/devices.txt)).

When the operator executes `docker run --privileged`, Docker will enable

@@ -1191,7 +1196,7 @@ container's logging driver. The following options are supported:

The `docker logs` command is available only for the `json-file` and `journald`
logging drivers. For detailed information on working with logging drivers, see
-[Configure a logging driver](logging/overview.md).
+[Configure a logging driver](../admin/logging/overview.md).

## Overriding Dockerfile image defaults

@@ -1360,9 +1365,14 @@ Similarly the operator can set the **hostname** with `-h`.

### TMPFS (mount tmpfs filesystems)

-    --tmpfs=[]: Create a tmpfs mount with: container-dir[:<options>], where the options are identical to the Linux `mount -t tmpfs -o` command.
+```bash
+--tmpfs=[]: Create a tmpfs mount with: container-dir[:<options>],
+            where the options are identical to the Linux
+            'mount -t tmpfs -o' command.
+```

-    Underlying content from the "container-dir" is copied into tmpfs.
+The example below mounts an empty tmpfs into the container with the `rw`,
+`noexec`, `nosuid`, and `size=65536k` options.

    $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image

@@ -1379,11 +1389,19 @@ Similarly the operator can set the **hostname** with `-h`.

    --volumes-from="": Mount all volumes from the given container(s)

> **Note**:
-> The auto-creation of the host path has been [*deprecated*](../misc/deprecated.md#auto-creating-missing-host-paths-for-bind-mounts).
+> The auto-creation of the host path has been [*deprecated*](../deprecated.md#auto-creating-missing-host-paths-for-bind-mounts).
+
+> **Note**:
+> When using systemd to manage the Docker daemon's start and stop, in the systemd
+> unit file there is an option to control mount propagation for the Docker daemon
+> itself, called `MountFlags`. The value of this setting may cause Docker to not
+> see mount propagation changes made on the mount point. For example, if this value
+> is `slave`, you may not be able to use the `shared` or `rshared` propagation on
+> a volume.

The volumes commands are complex enough to have their own
documentation in section [*Managing data in
-containers*](../userguide/dockervolumes.md). A developer can define
+containers*](../userguide/containers/dockervolumes.md). A developer can define
one or more `VOLUME`'s associated with an image, but only the
operator can give access from one container to another (or from a
container to a volume mounted on the host).
diff --git a/docs/security/apparmor.md b/docs/security/apparmor.md
index c33240dbfb..7fdf0171d5 100644
--- a/docs/security/apparmor.md
+++ b/docs/security/apparmor.md
@@ -5,6 +5,7 @@ description = "Enabling AppArmor in Docker"
keywords = ["AppArmor, security, docker, documentation"]
[menu.main]
parent= "smn_secure_docker"
+weight=5
+++

@@ -15,10 +16,15 @@ operating system and its applications from security threats.
To use it, a system administrator associates an AppArmor security profile
with each program. Docker expects to find an AppArmor policy loaded and
enforced.

-Docker automatically loads container profiles. A profile for the Docker Engine
-itself also exists and is installed with the official *.deb* packages in
-`/etc/apparmor.d/docker` file.
+Docker automatically loads container profiles. The Docker binary installs
+a `docker-default` profile in the `/etc/apparmor.d/docker` file. This profile
+is used on containers, _not_ on the Docker Daemon.
+A profile for the Docker Engine Daemon exists but it is not currently installed
+with the deb packages. If you are interested in the source for the Daemon
+profile, it is located in
+[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
+in the Docker Engine source repository.

## Understand the policies

@@ -66,9 +72,112 @@ explicitly specifies the default policy:

$ docker run --rm -it --security-opt apparmor:docker-default hello-world
```

+## Loading and Unloading Profiles
+
+To load a new profile into AppArmor, for use with containers:
+
+```
+$ apparmor_parser -r -W /path/to/your_profile
+```
+
+Then you can run the custom profile with `--security-opt` like so:
+
+```bash
+$ docker run --rm -it --security-opt apparmor:your_profile hello-world
+```
+
+To unload a profile from AppArmor:
+
+```bash
+# stop apparmor
+$ /etc/init.d/apparmor stop
+# unload the profile
+$ apparmor_parser -R /path/to/profile
+# start apparmor
+$ /etc/init.d/apparmor start
+```
+
+## Debugging AppArmor
+
+### Using `dmesg`
+
+Here are some helpful tips for debugging any problems you might be facing with
+regard to AppArmor.
+
+AppArmor sends quite verbose messaging to `dmesg`. Usually an AppArmor line
+will look like the following:
+
+```
+[ 5442.864673] audit: type=1400 audit(1453830992.845:37): apparmor="ALLOWED" operation="open" profile="/usr/bin/docker" name="/home/jessie/docker/man/man1/docker-attach.1" pid=10923 comm="docker" requested_mask="r" denied_mask="r" fsuid=1000 ouid=0
+```
+
+In the above example, you can see `profile=/usr/bin/docker`. This means the
+user has the `docker-engine` (Docker Engine Daemon) profile loaded.
+
+> **Note:** On versions of Ubuntu newer than 14.04 this is all fine and well, but
+> Trusty (14.04) users might run into some issues when trying to `docker exec`.
+
+Let's look at another log line:
+
+```
+[ 3256.689120] type=1400 audit(1405454041.341:73): apparmor="DENIED" operation="ptrace" profile="docker-default" pid=17651 comm="docker" requested_mask="receive" denied_mask="receive"
+```
+
+This time the profile is `docker-default`, which is run on containers by
+default unless in `privileged` mode. It is telling us that AppArmor has denied
+`ptrace` in the container. This is great.
+
+### Using `aa-status`
+
+If you need to check which profiles are loaded you can use `aa-status`. The
+output looks like:
+
+```bash
+$ sudo aa-status
+apparmor module is loaded.
+14 profiles are loaded.
+1 profiles are in enforce mode.
+   docker-default
+13 profiles are in complain mode.
+   /usr/bin/docker
+   /usr/bin/docker///bin/cat
+   /usr/bin/docker///bin/ps
+   /usr/bin/docker///sbin/apparmor_parser
+   /usr/bin/docker///sbin/auplink
+   /usr/bin/docker///sbin/blkid
+   /usr/bin/docker///sbin/iptables
+   /usr/bin/docker///sbin/mke2fs
+   /usr/bin/docker///sbin/modprobe
+   /usr/bin/docker///sbin/tune2fs
+   /usr/bin/docker///sbin/xtables-multi
+   /usr/bin/docker///sbin/zfs
+   /usr/bin/docker///usr/bin/xz
+38 processes have profiles defined.
+37 processes are in enforce mode.
+   docker-default (6044)
+   ...
+   docker-default (31899)
+1 processes are in complain mode.
+   /usr/bin/docker (29756)
+0 processes are unconfined but have a profile defined.
+```
+
+In the above output you can tell that the `docker-default` profile running on
+various container PIDs is in `enforce` mode. This means AppArmor will actively
+block and audit in `dmesg` anything outside the bounds of the `docker-default`
+profile.
+
+The output above also shows the `/usr/bin/docker` (Docker Engine Daemon)
+profile is running in `complain` mode. This means AppArmor will _only_ log to
+`dmesg` activity outside the bounds of the profile. (Except in the case of
+Ubuntu Trusty, where we have seen some interesting behaviors being enforced.)
+
## Contributing to AppArmor code in Docker

Advanced users and package managers can find a profile for `/usr/bin/docker`
-underneath
+(Docker Engine Daemon) underneath
[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
in the Docker Engine source repository.
+
+The `docker-default` profile for containers lives in
+[profiles/apparmor](https://github.com/docker/docker/tree/master/profiles/apparmor).
diff --git a/docs/security/certificates.md b/docs/security/certificates.md
new file mode 100644
index 0000000000..5684e331e3
--- /dev/null
+++ b/docs/security/certificates.md
@@ -0,0 +1,85 @@
+
+
+# Using certificates for repository client verification
+
+In [Running Docker with HTTPS](https.md), you learned that, by default,
+Docker runs via a non-networked Unix socket and TLS must be enabled in order
+to have the Docker client and the daemon communicate securely over HTTPS. TLS ensures authenticity of the registry endpoint and that traffic to/from the registry is encrypted.
+
+This article demonstrates how to ensure that traffic between the Docker registry (i.e., *a server*) and the Docker daemon (i.e., *a client*) is encrypted and properly authenticated using *certificate-based client-server authentication*.
+
+We will show you how to install a Certificate Authority (CA) root certificate
+for the registry and how to set the client TLS certificate for verification.
+
+## Understanding the configuration
+
+A custom certificate is configured by creating a directory under
+`/etc/docker/certs.d` using the same name as the registry's hostname (e.g.,
+`localhost`). All `*.crt` files are added to this directory as CA roots.
+
+> **Note:**
+> In the absence of any root certificate authorities, Docker
+> will use the system default (i.e., host's root CA set).
+
+The presence of one or more `.key/cert` pairs indicates to Docker
+that there are custom certificates required for access to the desired
+repository.
+
+> **Note:**
+> If there are multiple certificates, each will be tried in alphabetical
+> order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker
+> will continue to try with the next certificate.
+
+The following illustrates a configuration with multiple certs:
+
+```
+    /etc/docker/certs.d/        <-- Certificate directory
+    └── localhost               <-- Hostname
+       ├── client.cert          <-- Client certificate
+       ├── client.key           <-- Client key
+       └── localhost.crt        <-- Certificate authority that signed
+                                    the registry certificate
+```
+
+The preceding example is operating-system specific and is for illustrative
+purposes only. You should consult your operating system documentation for
+creating an OS-provided bundled certificate chain.
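+
+For example, assuming a registry reachable as `localhost`, a sketch of the
+setup might look like the following (the certificate file names are
+illustrative; only the `certs.d/<hostname>` layout itself matters):
+
+    # create the certificate directory for the registry hostname
+    $ sudo mkdir -p /etc/docker/certs.d/localhost
+    # install the CA root that signed the registry certificate
+    $ sudo cp localhost.crt /etc/docker/certs.d/localhost/localhost.crt
+    # install the client certificate/key pair
+    $ sudo cp client.cert client.key /etc/docker/certs.d/localhost/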
+
+
+## Creating the client certificates
+
+You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA
+key and then use the key to create the certificate.
+
+    $ openssl genrsa -out client.key 4096
+    $ openssl req -new -x509 -text -key client.key -out client.cert
+
+> **Note:**
+> These TLS commands will only generate a working set of certificates on Linux.
+> The version of OpenSSL in Mac OS X is incompatible with the type of
+> certificate Docker requires.
+
+## Troubleshooting tips
+
+The Docker daemon interprets `.crt` files as CA certificates and `.cert` files
+as client certificates. If a CA certificate is accidentally given the extension
+`.cert` instead of the correct `.crt` extension, the Docker daemon logs the
+following error message:
+
+```
+Missing key KEY_NAME for client certificate CERT_NAME. Note that CA certificates should use the extension .crt.
+```
+
+## Related Information
+
+* [Use trusted images](index.md)
+* [Protect the Docker daemon socket](https.md)
diff --git a/docs/articles/https.md b/docs/security/https.md
similarity index 97%
rename from docs/articles/https.md
rename to docs/security/https.md
index d7f016d8e0..1b2619cb21 100644
--- a/docs/articles/https.md
+++ b/docs/security/https.md
@@ -1,11 +1,11 @@

@@ -209,3 +209,8 @@ flags:
        --cert ~/.docker/cert.pem \
        --key ~/.docker/key.pem \
        --cacert ~/.docker/ca.pem
+
+## Related information
+
+* [Using certificates for repository client verification](certificates.md)
+* [Use trusted images](trust/index.md)
diff --git a/docs/articles/https/Dockerfile b/docs/security/https/Dockerfile
similarity index 100%
rename from docs/articles/https/Dockerfile
rename to docs/security/https/Dockerfile
diff --git a/docs/articles/https/Makefile b/docs/security/https/Makefile
similarity index 100%
rename from docs/articles/https/Makefile
rename to docs/security/https/Makefile
diff --git a/docs/articles/https/README.md b/docs/security/https/README.md
similarity index 100%
rename from docs/articles/https/README.md
rename to docs/security/https/README.md
diff --git a/docs/articles/https/make_certs.sh b/docs/security/https/make_certs.sh
similarity index 100%
rename from docs/articles/https/make_certs.sh
rename to docs/security/https/make_certs.sh
diff --git a/docs/articles/https/parsedocs.sh b/docs/security/https/parsedocs.sh
similarity index 100%
rename from docs/articles/https/parsedocs.sh
rename to docs/security/https/parsedocs.sh
diff --git a/docs/security/index.md b/docs/security/index.md
index 6948b097f7..9524a93ef9 100644
--- a/docs/security/index.md
+++ b/docs/security/index.md
@@ -1,20 +1,24 @@

-# Work with Docker security
+# Secure Engine

This section discusses the security features you can configure and use within
your Docker Engine installation.

* You can configure Docker's trust features so that your users can push and
pull trusted images. To learn how to do this, see [Use trusted
images](trust/index.md) in this section.

+* You can protect the Docker daemon socket and ensure only trusted Docker client connections. For more information, see [Protect the Docker daemon socket](https.md).
+
+* You can use certificate-based client-server authentication to verify a Docker daemon has the rights to access images on a registry. For more information, see [Using certificates for repository client verification](certificates.md).
+
* You can configure secure computing mode (Seccomp) policies to secure system
calls in a container. For more information, see [Seccomp security profiles for
Docker](seccomp.md).
* An AppArmor profile for Docker is installed with the official *.deb* packages. For information about this profile and overriding it, see [AppArmor security profiles for Docker](apparmor.md). diff --git a/docs/security/seccomp.md b/docs/security/seccomp.md index b683be026b..c2f9968a54 100644 --- a/docs/security/seccomp.md +++ b/docs/security/seccomp.md @@ -5,6 +5,7 @@ description = "Enabling seccomp in Docker" keywords = ["seccomp, security, docker, documentation"] [menu.main] parent= "smn_secure_docker" +weight=90 +++ diff --git a/docs/security/security.md b/docs/security/security.md index d6b11e466d..ec24d879d8 100644 --- a/docs/security/security.md +++ b/docs/security/security.md @@ -116,7 +116,7 @@ However, if you do that, being aware of the above mentioned security implication, you should ensure that it will be reachable only from a trusted network or VPN; or protected with e.g., `stunnel` and client SSL certificates. You can also secure them with [HTTPS and -certificates](../articles/https/). +certificates](https.md). The daemon is also potentially vulnerable to other inputs, such as image loading from either disk with 'docker load', or from the network with diff --git a/docs/security/trust/index.md b/docs/security/trust/index.md index cace5e51a0..9c2119da0a 100644 --- a/docs/security/trust/index.md +++ b/docs/security/trust/index.md @@ -5,7 +5,7 @@ description = "Use trusted images" keywords = ["trust, security, docker, index"] [menu.main] identifier="smn_content_trust" -parent= "mn_docker_hub" +parent= "smn_secure_docker" weight=4 +++ @@ -14,8 +14,7 @@ weight=4 The following topics are available: -* [Content trust in Docker](content_trust.md) +* [Content trust in Docker](content_trust.md) * [Manage keys for content trust](trust_key_mng.md) * [Automation with content trust](trust_automation.md) * [Play in a content trust sandbox](trust_sandbox.md) - diff --git a/docs/introduction/understanding-docker.md b/docs/understanding-docker.md similarity index 94% rename from docs/introduction/understanding-docker.md rename to docs/understanding-docker.md index ec1eed6260..1278f3902c 100644 --- a/docs/introduction/understanding-docker.md +++ b/docs/understanding-docker.md @@ -1,10 +1,12 @@ @@ -88,23 +90,23 @@ run on the same system, or you can connect a Docker client to a remote Docker daemon. The Docker client and daemon communicate via sockets or through a RESTful API. -![Docker Architecture Diagram](../article-img/architecture.svg) +![Docker Architecture Diagram](article-img/architecture.svg) ### The Docker daemon As shown in the diagram above, the Docker daemon runs on a host machine. The user does not directly interact with the daemon, but instead through the Docker client. -### The Docker client +### The Docker client The Docker client, in the form of the `docker` binary, is the primary user interface to Docker. It accepts commands from the user and communicates back and forth with a Docker daemon. -### Inside Docker +### Inside Docker To understand Docker's internals, you need to know about three components: -* Docker images. -* Docker registries. +* Docker images. +* Docker registries. * Docker containers. #### Docker images @@ -130,7 +132,7 @@ image. Docker containers can be run, started, stopped, moved, and deleted. Each container is an isolated and secure application platform. Docker containers are the **run** component of Docker. -## So how does Docker work? +## So how does Docker work? So far, we've learned that: 1. You can build Docker images that hold your applications. 
@@ -141,7 +143,7 @@ So far, we've learned that: Let's look at how these elements combine together to make Docker work. -### How does a Docker image work? +### How does a Docker image work? We've already seen that Docker images are read-only templates from which Docker containers are launched. Each image consists of a series of layers. Docker makes use of [union file systems](http://en.wikipedia.org/wiki/UnionFS) to @@ -168,8 +170,8 @@ Docker images are then built from these base images using a simple, descriptive set of steps we call *instructions*. Each instruction creates a new layer in our image. Instructions include actions like: -* Run a command. -* Add a file or directory. +* Run a command. +* Add a file or directory. * Create an environment variable. * What process to run when launching a container from this image. @@ -213,7 +215,7 @@ minimum the Docker client needs to tell the Docker daemon to run the container is: * What Docker image to build the container from, here `ubuntu`, a base Ubuntu -image; +image; * The command you want to run inside the container when it is launched, here `/bin/bash`, to start the Bash shell inside the new container. @@ -224,16 +226,16 @@ In order, Docker does the following: - **Pulls the `ubuntu` image:** Docker checks for the presence of the `ubuntu` image and, if it doesn't exist locally on the host, then Docker downloads it from [Docker Hub](https://hub.docker.com). If the image already exists, then Docker -uses it for the new container. +uses it for the new container. - **Creates a new container:** Once Docker has the image, it uses it to create a -container. -- **Allocates a filesystem and mounts a read-write _layer_:** The container is created in +container. +- **Allocates a filesystem and mounts a read-write _layer_:** The container is created in the file system and a read-write layer is added to the image. -- **Allocates a network / bridge interface:** Creates a network interface that allows the -Docker container to talk to the local host. -- **Sets up an IP address:** Finds and attaches an available IP address from a pool. -- **Executes a process that you specify:** Runs your application, and; -- **Captures and provides application output:** Connects and logs standard input, outputs +- **Allocates a network / bridge interface:** Creates a network interface that allows the +Docker container to talk to the local host. +- **Sets up an IP address:** Finds and attaches an available IP address from a pool. +- **Executes a process that you specify:** Runs your application, and; +- **Captures and provides application output:** Connects and logs standard input, outputs and errors for you to see how your application is running. You now have a running container! From here you can manage your container, interact with @@ -241,7 +243,7 @@ your application and then, when finished, stop and remove your container. ## The underlying technology Docker is written in Go and makes use of several kernel features to -deliver the functionality we've seen. +deliver the functionality we've seen. ### Namespaces Docker takes advantage of a technology called `namespaces` to provide the @@ -253,12 +255,12 @@ namespace and does not have access outside it. Some of the namespaces that Docker uses on Linux are: - - **The `pid` namespace:** Used for process isolation (PID: Process ID). + - **The `pid` namespace:** Used for process isolation (PID: Process ID). - **The `net` namespace:** Used for managing network interfaces (NET: - Networking). + Networking). 
- **The `ipc` namespace:** Used for managing access to IPC - resources (IPC: InterProcess Communication). - - **The `mnt` namespace:** Used for managing mount-points (MNT: Mount). + resources (IPC: InterProcess Communication). + - **The `mnt` namespace:** Used for managing mount-points (MNT: Mount). - **The `uts` namespace:** Used for isolating kernel and version identifiers. (UTS: Unix Timesharing System). @@ -276,7 +278,7 @@ making them very lightweight and fast. Docker uses union file systems to provide the building blocks for containers. Docker can make use of several union file system variants including: AUFS, btrfs, vfs, and DeviceMapper. -### Container format +### Container format Docker combines these components into a wrapper we call a container format. The default container format is called `libcontainer`. In the future, Docker may support other container formats, for example, by integrating with BSD Jails @@ -284,9 +286,7 @@ or Solaris Zones. ## Next steps ### Installing Docker -Visit the [installation section](../installation/index.md#installation). +Visit the [installation section](installation/index.md#installation). ### The Docker user guide -[Learn Docker in depth](../userguide/index.md). - - +[Learn Docker in depth](userguide/index.md). diff --git a/docs/userguide/dockerimages.md b/docs/userguide/containers/dockerimages.md similarity index 98% rename from docs/userguide/dockerimages.md rename to docs/userguide/containers/dockerimages.md index 9146f30c68..74387a5166 100644 --- a/docs/userguide/dockerimages.md +++ b/docs/userguide/containers/dockerimages.md @@ -1,10 +1,11 @@ @@ -33,7 +34,7 @@ Let's start with listing the images you have locally on our host. You can do this using the `docker images` command like so: $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE ubuntu 14.04 1d073211c498 3 days ago 187.9 MB busybox latest 2c5ac3f849df 5 days ago 1.113 MB training/webapp latest 54bb4e8718e8 5 months ago 348.7 MB @@ -213,7 +214,7 @@ You can then look at our new `ouruser/sinatra` image using the `docker images` command. $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE training/sinatra latest 5bc342fa0b91 10 hours ago 446.7 MB ouruser/sinatra v2 3c59e02ddd1a 10 hours ago 446.7 MB ouruser/sinatra latest 5db5f8471261 10 hours ago 446.7 MB @@ -471,10 +472,10 @@ You can then create a container from our new image. > This is just a brief introduction to creating images. We've > skipped a whole bunch of other instructions that you can use. We'll see more of > those instructions in later sections of the Guide or you can refer to the -> [`Dockerfile`](../reference/builder.md) reference for a +> [`Dockerfile`](../../reference/builder.md) reference for a > detailed description and examples of every instruction. > To help you write a clear, readable, maintainable `Dockerfile`, we've also -> written a [`Dockerfile` Best Practices guide](../articles/dockerfile_best-practices.md). +> written a [`Dockerfile` Best Practices guide](../eng-image/dockerfile_best-practices.md). ## Setting tags on an image @@ -491,7 +492,7 @@ user name, the repository name and the new tag. Now, see your new tag using the `docker images` command. 
$ docker images ouruser/sinatra - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE ouruser/sinatra latest 5db5f8471261 11 hours ago 446.7 MB ouruser/sinatra devel 5db5f8471261 11 hours ago 446.7 MB ouruser/sinatra v2 5db5f8471261 11 hours ago 446.7 MB @@ -504,7 +505,7 @@ unchanged, the digest value is predictable. To list image digest values, use the `--digests` flag: $ docker images --digests | head - REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE ouruser/sinatra latest sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 5db5f8471261 11 hours ago 446.7 MB When pushing or pulling to a 2.0 registry, the `push` or `pull` command diff --git a/docs/userguide/dockerizing.md b/docs/userguide/containers/dockerizing.md similarity index 99% rename from docs/userguide/dockerizing.md rename to docs/userguide/containers/dockerizing.md index c73d7b33a1..4c9e3e8175 100644 --- a/docs/userguide/dockerizing.md +++ b/docs/userguide/containers/dockerizing.md @@ -1,10 +1,11 @@ diff --git a/docs/userguide/dockerrepos.md b/docs/userguide/containers/dockerrepos.md similarity index 99% rename from docs/userguide/dockerrepos.md rename to docs/userguide/containers/dockerrepos.md index f04254d860..257f87635b 100644 --- a/docs/userguide/dockerrepos.md +++ b/docs/userguide/containers/dockerrepos.md @@ -1,10 +1,11 @@ diff --git a/docs/userguide/dockervolumes.md b/docs/userguide/containers/dockervolumes.md similarity index 94% rename from docs/userguide/dockervolumes.md rename to docs/userguide/containers/dockervolumes.md index 5ddc9c187c..0fb7fe2571 100644 --- a/docs/userguide/dockervolumes.md +++ b/docs/userguide/containers/dockervolumes.md @@ -1,18 +1,19 @@ # Manage data in containers -So far we've been introduced to some [basic Docker concepts](usingdocker.md), -seen how to work with [Docker images](dockerimages.md) as well as learned about -[networking and links between containers](networking/default_network/dockerlinks.md). In this section we're +So far we've been introduced to some [basic Docker concepts](../containers/usingdocker.md), +seen how to work with [Docker images](../containers/dockerimages.md) as well as learned about +[networking and links between containers](../networking/default_network/dockerlinks.md). In this section we're going to discuss how you can manage data inside and between your Docker containers. @@ -25,7 +26,7 @@ Docker. ## Data volumes A *data volume* is a specially-designated directory within one or more -containers that bypasses the [*Union File System*](../reference/glossary.md#union-file-system). Data volumes provide several useful features for persistent or shared data: +containers that bypasses the [*Union File System*](../../reference/glossary.md#union-file-system). Data volumes provide several useful features for persistent or shared data: - Volumes are initialized when a container is created. If the container's base image contains data at the specified mount point, that existing data is @@ -219,7 +220,7 @@ from the `dbstore` container are visible. You can use multiple `--volumes-from` parameters to combine data volumes from several containers. To find detailed information about `--volumes-from` see the -[Mount volumes from container](../reference/commandline/run.md#mount-volumes-from-container-volumes-from) +[Mount volumes from container](../../reference/commandline/run.md#mount-volumes-from-container-volumes-from) in the `run` command reference. 
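
As a quick illustration, assuming the `dbstore` container from above has a
volume mounted at `/dbdata` (an illustrative path), a throwaway container
that borrows its volumes could be run like so:

    $ docker run --rm --volumes-from dbstore busybox ls /dbdata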
You can also extend the chain by mounting the volume that came from the @@ -282,4 +283,4 @@ combine Docker with the services available on [Docker Hub](https://hub.docker.com) including Automated Builds and private repositories. -Go to [Working with Docker Hub](dockerrepos.md). +Go to [Working with Docker Hub](../containers/dockerrepos.md). diff --git a/docs/userguide/containers/index.md b/docs/userguide/containers/index.md new file mode 100644 index 0000000000..2de0dccea4 --- /dev/null +++ b/docs/userguide/containers/index.md @@ -0,0 +1,19 @@ + + +# Learn by example + +* [Hello world in a container](dockerizing.md) +* [Run a simple application](usingdocker.md) +* [Build your own images](dockerimages.md) +* [Network containers](networkingcontainers.md) +* [Manage data in containers](dockervolumes.md) +* [Store images on Docker Hub](dockerrepos.md) diff --git a/docs/userguide/networkingcontainers.md b/docs/userguide/containers/networkingcontainers.md similarity index 97% rename from docs/userguide/networkingcontainers.md rename to docs/userguide/containers/networkingcontainers.md index bf0b71e894..dfe6d4553d 100644 --- a/docs/userguide/networkingcontainers.md +++ b/docs/userguide/containers/networkingcontainers.md @@ -1,16 +1,17 @@ -# Networking containers +# Network containers If you are working your way through the user guide, you just built and ran a simple application. You've also built in your own images. This section teaches @@ -167,7 +168,10 @@ If you inspect the network, you'll find that it has nothing in it. "IPAM": { "Driver": "default", "Config": [ - {} + { + "Subnet": "172.18.0.0/16", + "Gateway": "172.18.0.1/16" + } ] }, "Containers": {}, diff --git a/docs/userguide/search.png b/docs/userguide/containers/search.png similarity index 100% rename from docs/userguide/search.png rename to docs/userguide/containers/search.png diff --git a/docs/userguide/usingdocker.md b/docs/userguide/containers/usingdocker.md similarity index 98% rename from docs/userguide/usingdocker.md rename to docs/userguide/containers/usingdocker.md index 51b8e7d23b..be6daa2780 100644 --- a/docs/userguide/usingdocker.md +++ b/docs/userguide/containers/usingdocker.md @@ -4,7 +4,7 @@ title = "Run a simple application" description = "Learn how to manage and operate Docker containers." keywords = ["docker, the docker guide, documentation, docker.io, monitoring containers, docker top, docker inspect, docker port, ports, docker logs, log, Logs"] [menu.main] -parent="smn_containers" +parent="engine_learn" weight=-5 +++ @@ -76,7 +76,7 @@ To see usage for a specific command, specify the command with the `--help` flag: > **Note:** > For further details and examples of each command, see the -> [command reference](../reference/commandline/cli.md) in this guide. +> [command reference](../../reference/commandline/cli.md) in this guide. ## Running a web application in Docker @@ -104,8 +104,8 @@ Lastly, you've specified a command for our container to run: `python app.py`. Th > **Note:** > You can see more detail on the `docker run` command in the [command -> reference](../reference/commandline/run.md) and the [Docker Run -> Reference](../reference/run.md). +> reference](../../reference/commandline/run.md) and the [Docker Run +> Reference](../../reference/run.md). 
## Viewing our web application container diff --git a/docs/userguide/webapp1.png b/docs/userguide/containers/webapp1.png similarity index 100% rename from docs/userguide/webapp1.png rename to docs/userguide/containers/webapp1.png diff --git a/docs/articles/baseimages.md b/docs/userguide/eng-image/baseimages.md similarity index 90% rename from docs/articles/baseimages.md rename to docs/userguide/eng-image/baseimages.md index 99cfc94744..172d65a9c2 100644 --- a/docs/articles/baseimages.md +++ b/docs/userguide/eng-image/baseimages.md @@ -1,16 +1,17 @@ # Create a base image -So you want to create your own [*Base Image*](../reference/glossary.md#base-image)? Great! +So you want to create your own [*Base Image*](../../reference/glossary.md#base-image)? Great! The specific process will depend heavily on the Linux distribution you want to package. We have some examples below, and you are encouraged to @@ -64,7 +65,7 @@ If you want to test it out, you can clone [the image repo](https://github.com/do There are lots more resources available to help you write your 'Dockerfile`. -* There's a [complete guide to all the instructions](../reference/builder.md) available for use in a `Dockerfile` in the reference section. +* There's a [complete guide to all the instructions](../../reference/builder.md) available for use in a `Dockerfile` in the reference section. * To help you write a clear, readable, maintainable `Dockerfile`, we've also written a [`Dockerfile` Best Practices guide](dockerfile_best-practices.md). * If your goal is to create a new Official Repository, be sure to read up on Docker's [Official Repositories](https://docs.docker.com/docker-hub/official_repos/). diff --git a/docs/articles/dockerfile_best-practices.md b/docs/userguide/eng-image/dockerfile_best-practices.md similarity index 92% rename from docs/articles/dockerfile_best-practices.md rename to docs/userguide/eng-image/dockerfile_best-practices.md index e15052395f..1c51af7037 100644 --- a/docs/articles/dockerfile_best-practices.md +++ b/docs/userguide/eng-image/dockerfile_best-practices.md @@ -1,22 +1,21 @@ # Best practices for writing Dockerfiles -## Overview - Docker can build images automatically by reading the instructions from a `Dockerfile`, a text file that contains all the commands, in order, needed to build a given image. `Dockerfile`s adhere to a specific format and use a specific set of instructions. You can learn the basics on the -[Dockerfile Reference](../reference/builder.md) page. If +[Dockerfile Reference](../../reference/builder.md) page. If you’re new to writing `Dockerfile`s, you should start there. This document covers the best practices and methods recommended by Docker, @@ -27,7 +26,7 @@ if you’re creating an Official Image, you *must* adhere to these practices). You can see many of these practices and recommendations in action in the [buildpack-deps `Dockerfile`](https://github.com/docker-library/buildpack-deps/blob/master/jessie/Dockerfile). > Note: for more detailed explanations of any of the Dockerfile commands ->mentioned here, visit the [Dockerfile Reference](../reference/builder.md) page. +>mentioned here, visit the [Dockerfile Reference](../../reference/builder.md) page. ## General guidelines and recommendations @@ -45,7 +44,7 @@ add to that directory only the files needed for building the Dockerfile. To increase the build's performance, you can exclude files and directories by adding a `.dockerignore` file to that directory as well. This file supports exclusion patterns similar to `.gitignore` files. 
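
As a hypothetical illustration, a `.dockerignore` file that excludes
version-control metadata and local build output might contain:

    # lines starting with # are comments
    .git
    *.log
    tmp/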
For information on creating one, -see the [.dockerignore file](../reference/builder.md#dockerignore-file). +see the [.dockerignore file](../../reference/builder.md#dockerignore-file). ### Avoid installing unnecessary packages @@ -59,7 +58,7 @@ in a database image. In almost all cases, you should only run a single process in a single container. Decoupling applications into multiple containers makes it much easier to scale horizontally and reuse containers. If that service depends on -another service, make use of [container linking](../userguide/networking/default_network/dockerlinks.md). +another service, make use of [container linking](../../userguide/networking/default_network/dockerlinks.md). ### Minimize the number of layers @@ -128,7 +127,7 @@ various instructions available for use in a `Dockerfile`. ### FROM -[Dockerfile reference for the FROM instruction](../reference/builder.md#from) +[Dockerfile reference for the FROM instruction](../../reference/builder.md#from) Whenever possible, use current Official Repositories as the basis for your image. We recommend the [Debian image](https://registry.hub.docker.com/_/debian/) @@ -137,7 +136,7 @@ since it’s very tightly controlled and kept extremely minimal (currently under ### RUN -[Dockerfile reference for the RUN instruction](../reference/builder.md#run) +[Dockerfile reference for the RUN instruction](../../reference/builder.md#run) As always, to make your `Dockerfile` more readable, understandable, and maintainable, split long or complex `RUN` statements on multiple lines separated @@ -234,7 +233,7 @@ keep the image size down. Since the `RUN` statement starts with ### CMD -[Dockerfile reference for the CMD instruction](../reference/builder.md#cmd) +[Dockerfile reference for the CMD instruction](../../reference/builder.md#cmd) The `CMD` instruction should be used to run the software contained by your image, along with any arguments. `CMD` should almost always be used in the @@ -248,13 +247,13 @@ perl, etc), for example, `CMD ["perl", "-de0"]`, `CMD ["python"]`, or `CMD [“php”, “-a”]`. Using this form means that when you execute something like `docker run -it python`, you’ll get dropped into a usable shell, ready to go. `CMD` should rarely be used in the manner of `CMD [“param”, “param”]` in -conjunction with [`ENTRYPOINT`](../reference/builder.md#entrypoint), unless +conjunction with [`ENTRYPOINT`](../../reference/builder.md#entrypoint), unless you and your expected users are already quite familiar with how `ENTRYPOINT` works. ### EXPOSE -[Dockerfile reference for the EXPOSE instruction](../reference/builder.md#expose) +[Dockerfile reference for the EXPOSE instruction](../../reference/builder.md#expose) The `EXPOSE` instruction indicates the ports on which a container will listen for connections. Consequently, you should use the common, traditional port for @@ -269,7 +268,7 @@ the recipient container back to the source (ie, `MYSQL_PORT_3306_TCP`). ### ENV -[Dockerfile reference for the ENV instruction](../reference/builder.md#env) +[Dockerfile reference for the ENV instruction](../../reference/builder.md#env) In order to make new software easier to run, you can use `ENV` to update the `PATH` environment variable for the software your container installs. For @@ -294,8 +293,8 @@ auto-magically bump the version of the software in your container. ### ADD or COPY -[Dockerfile reference for the ADD instruction](../reference/builder.md#add)
-[Dockerfile reference for the COPY instruction](../reference/builder.md#copy) +[Dockerfile reference for the ADD instruction](../../reference/builder.md#add)
+[Dockerfile reference for the COPY instruction](../../reference/builder.md#copy) Although `ADD` and `COPY` are functionally similar, generally speaking, `COPY` is preferred. That’s because it’s more transparent than `ADD`. `COPY` only @@ -340,7 +339,7 @@ auto-extraction capability, you should always use `COPY`. ### ENTRYPOINT -[Dockerfile reference for the ENTRYPOINT instruction](../reference/builder.md#entrypoint) +[Dockerfile reference for the ENTRYPOINT instruction](../../reference/builder.md#entrypoint) The best use for `ENTRYPOINT` is to set the image's main command, allowing that image to be run as though it was that command (and then use `CMD` as the @@ -390,7 +389,7 @@ exec "$@" > This script uses [the `exec` Bash command](http://wiki.bash-hackers.org/commands/builtin/exec) > so that the final running application becomes the container's PID 1. This allows > the application to receive any Unix signals sent to the container. -> See the [`ENTRYPOINT`](../reference/builder.md#entrypoint) +> See the [`ENTRYPOINT`](../../reference/builder.md#entrypoint) > help for more details. @@ -416,7 +415,7 @@ Lastly, it could also be used to start a totally different tool, such as Bash: ### VOLUME -[Dockerfile reference for the VOLUME instruction](../reference/builder.md#volume) +[Dockerfile reference for the VOLUME instruction](../../reference/builder.md#volume) The `VOLUME` instruction should be used to expose any database storage area, configuration storage, or files/folders created by your docker container. You @@ -425,7 +424,7 @@ parts of your image. ### USER -[Dockerfile reference for the USER instruction](../reference/builder.md#user) +[Dockerfile reference for the USER instruction](../../reference/builder.md#user) If a service can run without privileges, use `USER` to change to a non-root user. Start by creating the user and group in the `Dockerfile` with something @@ -446,7 +445,7 @@ and forth frequently. ### WORKDIR -[Dockerfile reference for the WORKDIR instruction](../reference/builder.md#workdir) +[Dockerfile reference for the WORKDIR instruction](../../reference/builder.md#workdir) For clarity and reliability, you should always use absolute paths for your `WORKDIR`. Also, you should use `WORKDIR` instead of proliferating @@ -455,7 +454,7 @@ troubleshoot, and maintain. ### ONBUILD -[Dockerfile reference for the ONBUILD instruction](../reference/builder.md#onbuild) +[Dockerfile reference for the ONBUILD instruction](../../reference/builder.md#onbuild) An `ONBUILD` command executes after the current `Dockerfile` build completes. `ONBUILD` executes in any child image derived `FROM` the current image. Think @@ -489,7 +488,7 @@ These Official Repositories have exemplary `Dockerfile`s: ## Additional resources: -* [Dockerfile Reference](../reference/builder.md) +* [Dockerfile Reference](../../reference/builder.md) * [More about Base Images](baseimages.md) * [More about Automated Builds](https://docs.docker.com/docker-hub/builds/) * [Guidelines for Creating Official diff --git a/docs/userguide/image_management.md b/docs/userguide/eng-image/image_management.md similarity index 92% rename from docs/userguide/image_management.md rename to docs/userguide/eng-image/image_management.md index 169ffade06..035b6b681a 100644 --- a/docs/userguide/image_management.md +++ b/docs/userguide/eng-image/image_management.md @@ -1,12 +1,12 @@ @@ -48,6 +48,6 @@ operates on. You use Docker to push and pull images (data) to a registry. 
Content trust gives you the ability to both verify the integrity and the publisher of all the data received from a registry over any channel. -[Content trust](../security/trust/index.md) is currently only available for users of the +[Content trust](../../security/trust/index.md) is currently only available for users of the public Docker Hub. It is currently not available for the Docker Trusted Registry or for private registries. diff --git a/docs/userguide/eng-image/index.md b/docs/userguide/eng-image/index.md new file mode 100644 index 0000000000..c46eec6b59 --- /dev/null +++ b/docs/userguide/eng-image/index.md @@ -0,0 +1,16 @@ + + +# Work with images + +* [Create a base image](baseimages.md) +* [Best practices for writing Dockerfiles](dockerfile_best-practices.md) +* [Image management](image_management.md) diff --git a/docs/userguide/index.md b/docs/userguide/index.md index 2f581976be..1bffafc4c8 100644 --- a/docs/userguide/index.md +++ b/docs/userguide/index.md @@ -1,127 +1,13 @@ -# Welcome to the Docker user guide - -In the [Introduction](../misc/index.md) you got a taste of what Docker is and how it -works. This guide takes you through the fundamentals of using Docker and -integrating it into your environment. You'll learn how to use Docker to: - -* Dockerize your applications. -* Run your own containers. -* Build Docker images. -* Share your Docker images with others. -* And a whole lot more! - -This guide is broken into major sections that take you through the Docker life -cycle: - -## Getting started with Docker Hub - -*How do I use Docker Hub?* - -Docker Hub is the central hub for Docker. It hosts public Docker images -and provides services to help you build and manage your Docker -environment. To learn more: - -Go to [Using Docker Hub](https://docs.docker.com/docker-hub). - -## Dockerizing applications: A "Hello world" - -*How do I run applications inside containers?* - -Docker offers a containerization platform to power your applications. To learn -how to Dockerize applications and run them: - -Go to [Dockerizing Applications](dockerizing.md). - - -## Working with containers - -*How do I manage my containers?* - -Once you get a grip on running your applications in Docker containers -we're going to show you how to manage those containers. To find out -about how to inspect, monitor and manage containers: - -Go to [Working With Containers](usingdocker.md). - -## Working with Docker images - -*How can I access, share and build my own images?* - -Once you've learnt how to use Docker it's time to take the next step and -learn how to build your own application images with Docker. - -Go to [Working with Docker Images](dockerimages.md). - -## Networking containers - -Until now we've seen how to build individual applications inside Docker -containers. Now learn how to build whole application stacks with Docker -networking. - -Go to [Networking Containers](networkingcontainers.md). - -## Managing data in containers - -Now we know how to link Docker containers together the next step is -learning how to manage data, volumes and mounts inside our containers. - -Go to [Managing Data in Containers](dockervolumes.md). - -## Working with Docker Hub - -Now we've learned a bit more about how to use Docker we're going to see -how to combine Docker with the services available on Docker Hub including -Trusted Builds and private repositories. - -Go to [Working with Docker Hub](dockerrepos.md). 
- -## Docker Compose - -Docker Compose allows you to define a application's components -- their containers, -configuration, links and volumes -- in a single file. Then a single command -will set everything up and start your application running. - -Go to [Docker Compose user guide](https://docs.docker.com/compose/). - -## Docker Machine - -Docker Machine helps you get Docker Engines up and running quickly. Machine -can set up hosts for Docker Engines on your computer, on cloud providers, -and/or in your data center, and then configure your Docker client to securely -talk to them. - -Go to [Docker Machine user guide](https://docs.docker.com/machine/). - -## Docker Swarm - -Docker Swarm pools several Docker Engines together and exposes them as a single -virtual Docker Engine. It serves the standard Docker API, so any tool that already -works with Docker can now transparently scale up to multiple hosts. - -Go to [Docker Swarm user guide](https://docs.docker.com/swarm/). - -## Getting help - -* [Docker homepage](https://www.docker.com/) -* [Docker Hub](https://hub.docker.com) -* [Docker blog](https://blog.docker.com/) -* [Docker documentation](https://docs.docker.com/) -* [Docker Getting Started Guide](https://docs.docker.com/mac/started/) -* [Docker code on GitHub](https://github.com/docker/docker) -* [Docker mailing - list](https://groups.google.com/forum/#!forum/docker-user) -* Docker on IRC: irc.freenode.net and channel #docker -* [Docker on Twitter](https://twitter.com/docker) -* Get [Docker help](https://stackoverflow.com/search?q=docker) on - StackOverflow -* [Docker.com](https://www.docker.com/) +# User guide diff --git a/docs/userguide/intro.md b/docs/userguide/intro.md new file mode 100644 index 0000000000..a25217c56f --- /dev/null +++ b/docs/userguide/intro.md @@ -0,0 +1,119 @@ + + +# Introduction to Engine user guide + +This guide takes you through the fundamentals of using Docker Engine and +integrating it into your environment. You'll learn how to use Engine to: + +* Dockerize your applications. +* Run your own containers. +* Build Docker images. +* Share your Docker images with others. +* And a whole lot more! + +This guide is broken into major sections that take you through learning the basics of Docker Engine and the other Docker products that support it. + +## Dockerizing applications: A "Hello world" + +*How do I run applications inside containers?* + +Docker Engine offers a containerization platform to power your applications. To +learn how to Dockerize applications and run them: + +Go to [Dockerizing Applications](containers/dockerizing.md). + + +## Working with containers + +*How do I manage my containers?* + +Once you get a grip on running your applications in Docker containers, you'll learn how to manage those containers. To find out +about how to inspect, monitor and manage containers: + +Go to [Working With Containers](containers/usingdocker.md). + +## Working with Docker images + +*How can I access, share and build my own images?* + +Once you've learnt how to use Docker it's time to take the next step and +learn how to build your own application images with Docker. + +Go to [Working with Docker Images](containers/dockerimages.md). + +## Networking containers + +Until now we've seen how to build individual applications inside Docker +containers. Now learn how to build whole application stacks with Docker +networking. + +Go to [Networking Containers](containers/networkingcontainers.md). 
+
+## Managing data in containers
+
+Now we know how to link Docker containers together, the next step is
+learning how to manage data, volumes and mounts inside our containers.
+
+Go to [Managing Data in Containers](containers/dockervolumes.md).
+
+## Docker products that complement Engine
+
+Often, one powerful technology spawns many other inventions that make that technology easier to get to, easier to use, and more powerful. These spawned things share one common characteristic: they augment the central technology. The following Docker products expand on the core Docker Engine functions.
+
+### Docker Hub
+
+Docker Hub is the central hub for Docker. It hosts public Docker images
+and provides services to help you build and manage your Docker
+environment. To learn more:
+
+Go to [Using Docker Hub](https://docs.docker.com/docker-hub).
+
+### Docker Machine
+
+Docker Machine helps you get Docker Engines up and running quickly. Machine
+can set up hosts for Docker Engines on your computer, on cloud providers,
+and/or in your data center, and then configure your Docker client to securely
+talk to them.
+
+Go to [Docker Machine user guide](https://docs.docker.com/machine/).
+
+### Docker Compose
+
+Docker Compose allows you to define an application's components -- their containers,
+configuration, links and volumes -- in a single file. Then a single command
+will set everything up and start your application running.
+
+Go to [Docker Compose user guide](https://docs.docker.com/compose/).
+
+
+### Docker Swarm
+
+Docker Swarm pools several Docker Engines together and exposes them as a single
+virtual Docker Engine. It serves the standard Docker API, so any tool that already
+works with Docker can now transparently scale up to multiple hosts.
+
+Go to [Docker Swarm user guide](https://docs.docker.com/swarm/).
+
+## Getting help
+
+* [Docker homepage](https://www.docker.com/)
+* [Docker Hub](https://hub.docker.com)
+* [Docker blog](https://blog.docker.com/)
+* [Docker documentation](https://docs.docker.com/)
+* [Docker Getting Started Guide](https://docs.docker.com/mac/started/)
+* [Docker code on GitHub](https://github.com/docker/docker)
+* [Docker mailing
+  list](https://groups.google.com/forum/#!forum/docker-user)
+* Docker on IRC: irc.freenode.net and channel #docker
+* [Docker on Twitter](https://twitter.com/docker)
+* Get [Docker help](https://stackoverflow.com/search?q=docker) on
+  StackOverflow
+* [Docker.com](https://www.docker.com/)
diff --git a/docs/userguide/labels-custom-metadata.md b/docs/userguide/labels-custom-metadata.md
index ae6a3c5e9d..4c9a1a114c 100644
--- a/docs/userguide/labels-custom-metadata.md
+++ b/docs/userguide/labels-custom-metadata.md
@@ -4,7 +4,8 @@ title = "Apply custom metadata"
description = "Learn how to work with custom metadata in Docker, using labels."
keywords = ["Usage, user guide, labels, metadata, docker, documentation, examples, annotating"]
[menu.main]
-parent = "mn_use_docker"
+parent = "engine_guide"
+weight=90
+++

diff --git a/docs/userguide/networking/configure-dns.md b/docs/userguide/networking/configure-dns.md
new file mode 100644
index 0000000000..b87436fada
--- /dev/null
+++ b/docs/userguide/networking/configure-dns.md
@@ -0,0 +1,138 @@
+
+
+# Embedded DNS server in user-defined networks
+
+The information in this section covers the embedded DNS server operation for
+containers in user-defined networks. DNS lookup for containers connected to
+user-defined networks works differently than it does for containers connected
+to the default `bridge` network.
+
+> **Note**: In order to maintain backward compatibility, the DNS configuration
+> in the default `bridge` network is retained with no behavioral change.
+> Please refer to the [DNS in default bridge network](default_network/configure-dns.md)
+> for more information on DNS configuration in the default `bridge` network.
+
+As of Docker 1.10, the docker daemon implements an embedded DNS server which
+provides built-in service discovery for any container created with a valid
+`name` or `net-alias` or aliased by `link`. The exact details of how Docker
+manages the DNS configurations inside the container can change from one Docker
+version to the next, so you should not make assumptions about the way files
+such as `/etc/hosts` and `/etc/resolv.conf` are managed inside the containers.
+Leave those files alone and use the following Docker options instead.
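+
+As a minimal sketch (the container and network names here are illustrative),
+the name-based discovery this section describes looks like the following:
+
+    $ docker network create -d bridge my-net
+    $ docker run -d --net=my-net --name=web busybox top
+    $ docker run --rm --net=my-net busybox ping -c 1 web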
+
+The following container options affect container domain name services:
+
+<table>
+  <tr>
+    <td>
+      --name=CONTAINER-NAME
+    </td>
+    <td>
+      The container name configured using --name is used to discover a container within
+      a user-defined docker network. The embedded DNS server maintains the mapping between
+      the container name and its IP address (on the network the container is connected to).
+    </td>
+  </tr>
+  <tr>
+    <td>
+      --net-alias=ALIAS
+    </td>
+    <td>
+      In addition to --name as described above, a container is discovered by one or more
+      of its configured --net-alias (or --alias in the docker network connect command)
+      within the user-defined network. The embedded DNS server maintains the mapping between
+      all of the container aliases and its IP address on a specific user-defined network.
+      A container can have different aliases in different networks by using the --alias
+      option in the docker network connect command.
+    </td>
+  </tr>
+  <tr>
+    <td>
+      --link=CONTAINER_NAME:ALIAS
+    </td>
+    <td>
+      Using this option as you run a container gives the embedded DNS
+      an extra entry named ALIAS that points to the IP address
+      of the container identified by CONTAINER_NAME. When using --link,
+      the embedded DNS guarantees that the localized lookup result is visible only in the
+      container where the --link is used. This lets processes inside the new container
+      connect to the container without having to know its name or IP.
+    </td>
+  </tr>
+  <tr>
+    <td>
+      --dns=[IP_ADDRESS...]
+    </td>
+    <td>
+      The IP addresses passed via the --dns option are used by the embedded DNS
+      server to forward the DNS query if the embedded DNS server is unable to resolve
+      a name resolution request from the containers.
+      These --dns IP addresses are managed by the embedded DNS server and
+      will not be updated in the container's /etc/resolv.conf file.
+    </td>
+  </tr>
+  <tr>
+    <td>
+      --dns-search=DOMAIN...
+    </td>
+    <td>
+      Sets the domain names that are searched when a bare unqualified hostname is
+      used inside of the container. These --dns-search options are managed by the
+      embedded DNS server and will not be updated in the container's /etc/resolv.conf file.
+      When a container process attempts to access host and the search
+      domain example.com is set, for instance, the DNS logic will not only
+      look up host but also host.example.com.
+    </td>
+  </tr>
+  <tr>
+    <td>
+      --dns-opt=OPTION...
+    </td>
+    <td>
+      Sets the options used by DNS resolvers. These options are managed by the embedded
+      DNS server and will not be updated in the container's /etc/resolv.conf file.
+      See the documentation for resolv.conf for a list of valid options.
+    </td>
+  </tr>
+</table>
+
+In the absence of the `--dns=IP_ADDRESS...`, `--dns-search=DOMAIN...`, or
+`--dns-opt=OPTION...` options, Docker uses the `/etc/resolv.conf` of the
+host machine (where the `docker` daemon runs). While doing so, the daemon
+filters out all localhost IP address `nameserver` entries from the host's
+original file.
+
+Filtering is necessary because all localhost addresses on the host are
+unreachable from the container's network. After this filtering, if there are
+no more `nameserver` entries left in the container's `/etc/resolv.conf` file,
+the daemon adds public Google DNS nameservers (8.8.8.8 and 8.8.4.4) to the
+container's DNS configuration. If IPv6 is enabled on the daemon, the public
+IPv6 Google DNS nameservers will also be added (2001:4860:4860::8888 and
+2001:4860:4860::8844).
+
+> **Note**: If you need access to a host's localhost resolver, you must modify
+> your DNS service on the host to listen on a non-localhost address that is
+> reachable from within the container.
diff --git a/docs/userguide/networking/default_network/configure-dns.md b/docs/userguide/networking/default_network/configure-dns.md
index 5fe0d0e114..ab87c82a79 100644
--- a/docs/userguide/networking/default_network/configure-dns.md
+++ b/docs/userguide/networking/default_network/configure-dns.md
@@ -14,7 +14,7 @@ The information in this section explains configuring container DNS within
the Docker default bridge. This is a `bridge` network named `bridge` created
automatically when you install Docker.

-**Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network.
+> **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. Please refer to the [Docker Embedded DNS](../configure-dns.md) section for more information on DNS configurations in user-defined networks.

How can Docker supply each container with a hostname and DNS configuration, without having to build a custom image with the hostname written inside? Its trick is to overlay three crucial `/etc` files inside the container with virtual files where it can write fresh information. You can see this by running `mount` inside a container:
diff --git a/docs/userguide/networking/default_network/container-communication.md b/docs/userguide/networking/default_network/container-communication.md
index 0dc7016f4f..76593ff9d4 100644
--- a/docs/userguide/networking/default_network/container-communication.md
+++ b/docs/userguide/networking/default_network/container-communication.md
@@ -20,7 +20,7 @@ automatically when you install Docker.

Whether a container can talk to the world is governed by two factors. The first
factor is whether the host machine is forwarding its IP packets. The second is
-whether the hosts `iptables` allow this particular connections
+whether the host's `iptables` allow this particular connection.

IP packet forwarding is governed by the `ip_forward` system parameter. Packets
can only pass between containers if this parameter is `1`.
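
As a quick check (a sketch; exact `sysctl` usage varies by distribution), you
can inspect and, if needed, enable the parameter on the host:

    # check whether IP forwarding is enabled (1) or disabled (0)
    $ sysctl net.ipv4.ip_forward
    # enable it for the running system
    $ sudo sysctl -w net.ipv4.ip_forward=1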
Usually you will
diff --git a/docs/userguide/networking/default_network/dockerlinks.md b/docs/userguide/networking/default_network/dockerlinks.md
index 0c71d970e8..cee84cbd0b 100644
--- a/docs/userguide/networking/default_network/dockerlinks.md
+++ b/docs/userguide/networking/default_network/dockerlinks.md
@@ -11,7 +11,7 @@ weight=-2
 
 # Legacy container links
 
-The information in this section explains legacy container links within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker.
+The information in this section explains legacy container links within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. 
 
 Before the [Docker networks feature](../dockernetworks.md), you could use the
 Docker link feature to allow containers to discover each other and securely
@@ -25,7 +25,7 @@ detail on container linking in default `bridge` network.
 
 ## Connect using network port mapping
 
-In [the Using Docker section](../../usingdocker.md), you created a
+In [the Using Docker section](../../containers/usingdocker.md), you created a
 container that ran a Python Flask application:
 
     $ docker run -d -P training/webapp python app.py
 
@@ -33,7 +33,7 @@ container that ran a Python Flask application:
 > **Note:**
 > Containers have an internal network and an IP address
 > (as we saw when we used the `docker inspect` command to show the container's
-> IP address in the [Using Docker](../../usingdocker.md) section).
+> IP address in the [Using Docker](../../containers/usingdocker.md) section).
 > Docker can have a variety of network configurations. You can see more
 > information on Docker networking [here](../index.md).
 
@@ -95,6 +95,12 @@ configurations. For example, if you've bound the container port to the
 
 ## Connect with the linking system
 
+> **Note**:
+> This section covers the legacy link feature in the default `bridge` network.
+> Please refer to [linking containers in user-defined networks](../work-with-networks.md#linking-containers-in-user-defined-networks)
+> for more information on links in user-defined networks.
+
 Network port mappings are not the only way Docker containers can connect to one
 another. Docker also has a linking system that allows you to link multiple
 containers together and send connection information from one to another. When
diff --git a/docs/userguide/networking/dockernetworks.md b/docs/userguide/networking/dockernetworks.md
index 6e7688413f..b9f1a63b44 100644
--- a/docs/userguide/networking/dockernetworks.md
+++ b/docs/userguide/networking/dockernetworks.md
@@ -284,7 +284,7 @@ The default `docker0` bridge network supports the use of port mapping and `dock
 
 ## User-defined networks
 
 You can create your own user-defined networks that better isolate containers.
-Docker provides some default **network drivers** for use creating these
+Docker provides some default **network drivers** for creating these
 networks. You can create a new **bridge network** or **overlay network**. You
 can also create a **network plugin** or **remote network** written to your own
 specifications.
 
@@ -414,7 +414,7 @@ You should open the following ports between each of your hosts.
 
 | udp     | 4789 | Data plane (VXLAN) |
 | tcp/udp | 7946 | Control plane      |
 
-Your key-value store service may require additional ports.
+Your key-value store service may require additional ports. Check your vendor's documentation and open any required ports.
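As a concrete sketch, on hosts managed with `ufw` the port table above might translate into the following commands; the Consul port `8500` is purely illustrative of a vendor-specific key-value store port and is not part of the original table:

```bash
# Control plane (gossip) uses both TCP and UDP on 7946
$ sudo ufw allow 7946/tcp
$ sudo ufw allow 7946/udp
# Data plane (VXLAN) uses UDP on 4789
$ sudo ufw allow 4789/udp
# Hypothetical example: Consul's default HTTP port, if Consul is your store
$ sudo ufw allow 8500/tcp
```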
Once you have several machines provisioned, you can use Docker Swarm to quickly @@ -439,6 +439,10 @@ Docker Engine for use with `overlay` network. There are two options to set:
--cluster-advertise=HOST_IP|HOST_IFACE:PORT
The IP address or interface of the HOST used for clustering. + +
--cluster-store-opt=KEY-VALUE OPTIONS
+        Options such as TLS certificates or tuning discovery timers
+
+
@@ -485,23 +489,34 @@ networks can include features not present in Docker's default networks. For
 more information on writing plugins, see [Extending Docker](../../extend/index.md) and
 [Writing a network driver plugin](../../extend/plugins_network.md).
 
-## Legacy links
+### Docker embedded DNS server
+
+The Docker daemon runs an embedded DNS server to provide automatic service discovery
+for containers connected to user-defined networks. Name resolution requests from
+the containers are handled first by the embedded DNS server. If the embedded DNS
+server is unable to resolve the request, it will be forwarded to any external DNS
+servers configured for the container. To facilitate this, when the container is
+created, only the embedded DNS server reachable at `127.0.0.11` will be listed
+in the container's `resolv.conf` file. More information on the embedded DNS server
+in user-defined networks can be found in the
+[embedded DNS server in user-defined networks](configure-dns.md) section.
+
+## Links
 
 Before the Docker network feature, you could use the Docker link feature to
-allow containers to discover each other and securely transfer information about
-one container to another container. With the introduction of Docker networks,
-you can still create links but they are only supported on the default `bridge`
-network named `bridge` and appearing in your network stack as `docker0`.
-
-While links are still supported in this limited capacity, you should avoid them
-in preference of Docker networks. The link feature is expected to be deprecated
-and removed in a future release.
+allow containers to discover each other. With the introduction of Docker networks,
+containers can be discovered by name automatically. You can still create
+links, but they behave differently when used in the default `docker0` bridge network
+compared to user-defined networks. For more information, please refer to
+[Legacy Links](default_network/dockerlinks.md) for the link feature in the default `bridge` network
+and to [linking containers in user-defined networks](work-with-networks.md#linking-containers-in-user-defined-networks) for link
+functionality in user-defined networks.
 
 ## Related information
 
 - [Work with network commands](work-with-networks.md)
 - [Get started with multi-host networking](get-started-overlay.md)
-- [Managing Data in Containers](../dockervolumes.md)
+- [Managing Data in Containers](../containers/dockervolumes.md)
 - [Docker Machine overview](https://docs.docker.com/machine)
 - [Docker Swarm overview](https://docs.docker.com/swarm)
 - [Investigate the LibNetwork project](https://github.com/docker/libnetwork)
diff --git a/docs/userguide/networking/get-started-overlay.md b/docs/userguide/networking/get-started-overlay.md
index 17e840c52f..39d7da9169 100644
--- a/docs/userguide/networking/get-started-overlay.md
+++ b/docs/userguide/networking/get-started-overlay.md
@@ -158,10 +158,16 @@ To create an overlay network
 
 3. Create your `overlay` network.
 
-        $ docker network create --driver overlay my-net
+        $ docker network create --driver overlay --subnet=10.0.9.0/24 my-net
 
     You only need to create the network on a single host in the cluster. In this
    case, you used the Swarm master but you could easily have run it on any host
    in the cluster.
 
+> **Note**: It is highly recommended to use the `--subnet` option when creating
+> a network.
+> If the `--subnet` is not specified, the docker daemon automatically
+> chooses and assigns a subnet for the network and it could overlap with another subnet
+> in your infrastructure that is not managed by Docker. Such overlaps can cause
+> connectivity issues or failures when containers are connected to that network.
+
 4. Check that the network is running:
 
        $ docker network ls
@@ -308,41 +314,9 @@ to have external connectivity outside of their cluster.
 
 ## Step 6: Extra Credit with Docker Compose
 
-You can try starting a second network on your existing Swarm cluster using Docker Compose.
-
-1. If you haven't already, install Docker Compose.
-
-2. Change your environment to the Swarm master.
-
-        $ eval $(docker-machine env --swarm mhs-demo0)
-
-3. Create a `docker-compose.yml` file.
-
-4. Add the following content to the file.
-
-        web:
-            image: bfirsh/compose-mongodb-demo
-            environment:
-                - "MONGO_HOST=counter_mongo_1"
-                - "constraint:node==mhs-demo0"
-            ports:
-                - "80:5000"
-        mongo:
-            image: mongo
-
-5. Save and close the file.
-
-6. Start the application with Compose.
-
-        $ docker-compose --x-networking --project-name=counter up -d
-
-7. Get the Swarm master's IP address.
-
-        $ docker-machine ip mhs-demo0
-
-8. Put the IP address into your web browser.
-
-    Upon success, the browser should display the web application.
+Please refer to the Networking feature introduced in the
+[Compose V2 format](https://docs.docker.com/compose/networking/) and execute the
+multi-host networking scenario in the Swarm cluster used above.
 
 ## Related information
 
diff --git a/docs/userguide/networking/index.md b/docs/userguide/networking/index.md
index 7680b199fa..eb613c6d16 100644
--- a/docs/userguide/networking/index.md
+++ b/docs/userguide/networking/index.md
@@ -5,7 +5,7 @@ description = "Docker networking feature is introduced"
 keywords = ["network, networking, bridge, docker, documentation"]
 [menu.main]
 identifier="smn_networking"
-parent= "mn_use_docker"
+parent= "engine_guide"
 weight=7
 +++
 
diff --git a/docs/userguide/networking/work-with-networks.md b/docs/userguide/networking/work-with-networks.md
index d5fac70449..b668bc1c77 100644
--- a/docs/userguide/networking/work-with-networks.md
+++ b/docs/userguide/networking/work-with-networks.md
@@ -79,7 +79,13 @@ management that can assist your implementation.
 When you create a network, Engine creates a non-overlapping subnetwork for the
 network by default. You can override this default and specify a subnetwork
 directly using the `--subnet` option. On a `bridge` network you can only
-create a single subnet. An `overlay` network supports multiple subnets.
+specify a single subnet. An `overlay` network supports multiple subnets.
+
+> **Note**: It is highly recommended to use the `--subnet` option while creating
+> a network. If the `--subnet` is not specified, the docker daemon automatically
+> chooses and assigns a subnet for the network and it could overlap with another subnet
+> in your infrastructure that is not managed by Docker. Such overlaps can cause
+> connectivity issues or failures when containers are connected to that network.
 
 In addition to the `--subnet` option, you can also specify the `--gateway`,
 `--ip-range`, and `--aux-address` options.
 
@@ -95,6 +101,53 @@ $ docker network create -d overlay
 
 Be sure that your subnetworks do not overlap. If they do, the network create
 fails and Engine returns an error.
 
+When creating a custom network, the default network driver (i.e. `bridge`) has additional options that can be passed.
+The following are those options and the equivalent docker daemon flags used for the docker0 bridge:
+
+| Option                                            | Equivalent  | Description                                            |
+|---------------------------------------------------|-------------|--------------------------------------------------------|
+| `com.docker.network.bridge.name`                  | -           | Bridge name to be used when creating the Linux bridge  |
+| `com.docker.network.bridge.enable_ip_masquerade`  | `--ip-masq` | Enable IP masquerading                                 |
+| `com.docker.network.bridge.enable_icc`            | `--icc`     | Enable or disable inter-container connectivity         |
+| `com.docker.network.bridge.host_binding_ipv4`     | `--ip`      | Default IP when binding container ports                |
+| `com.docker.network.mtu`                          | `--mtu`     | Set the container's network MTU                        |
+| `com.docker.network.enable_ipv6`                  | `--ipv6`    | Enable IPv6 networking                                 |
+
+For example, let's use the `-o` or `--opt` option to specify an IP address binding when publishing ports:
+
+```bash
+$ docker network create -o "com.docker.network.bridge.host_binding_ipv4"="172.23.0.1" my-network
+b1a086897963e6a2e7fc6868962e55e746bee8ad0c97b54a5831054b5f62672a
+$ docker network inspect my-network
+[
+    {
+        "Name": "my-network",
+        "Id": "b1a086897963e6a2e7fc6868962e55e746bee8ad0c97b54a5831054b5f62672a",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Options": {},
+            "Config": [
+                {
+                    "Subnet": "172.23.0.0/16",
+                    "Gateway": "172.23.0.1/16"
+                }
+            ]
+        },
+        "Containers": {},
+        "Options": {
+            "com.docker.network.bridge.host_binding_ipv4": "172.23.0.1"
+        }
+    }
+]
+$ docker run -d -P --name redis --net my-network redis
+bafb0c808c53104b2c90346f284bda33a69beadcab4fc83ab8f2c5a4410cd129
+$ docker ps
+CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                        NAMES
+bafb0c808c53        redis               "/entrypoint.sh redis"   4 seconds ago       Up 3 seconds        172.23.0.1:32770->6379/tcp   redis
+```
+
 ## Connect containers
 
 You can connect containers dynamically to one or more networks. These networks
@@ -731,6 +784,25 @@ round-trip min/avg/max = 0.119/0.146/0.174 ms
 / #
 ```
 
+There are certain scenarios, such as ungraceful docker daemon restarts in a multi-host network,
+where the daemon is unable to clean up stale connectivity endpoints. Such stale endpoints
+may cause an error `container already connected to network` when a new container is
+connected to that network with the same name as the stale endpoint. To clean up
+these stale endpoints, first remove the container and force disconnect
+(`docker network disconnect -f`) the endpoint from the network. Once the endpoint is
+cleaned up, the container can be connected to the network.
+
+```
+$ docker run -d --name redis_db --net multihost redis
+ERROR: Cannot start container bc0b19c089978f7845633027aa3435624ca3d12dd4f4f764b61eac4c0610f32e: container already connected to network multihost
+
+$ docker rm -f redis_db
+$ docker network disconnect -f multihost redis_db
+
+$ docker run -d --name redis_db --net multihost redis
+7d986da974aeea5e9f7aca7e510bdb216d58682faa83a9040c2f2adc0544795a
+```
+
 ## Remove a network
 
 When all the containers in a network are stopped or disconnected, you can remove a network.
diff --git a/docs/userguide/storagedriver/aufs-driver.md b/docs/userguide/storagedriver/aufs-driver.md
index 8edf7a78f4..76271cf0e1 100644
--- a/docs/userguide/storagedriver/aufs-driver.md
+++ b/docs/userguide/storagedriver/aufs-driver.md
@@ -4,190 +4,209 @@
 title = "AUFS storage driver in practice"
 description = "Learn how to optimize your use of AUFS driver."
keywords = ["container, storage, driver, AUFS "] [menu.main] -parent = "mn_storage_docker" +parent = "engine_driver" +++ # Docker and AUFS in practice -AUFS was the first storage driver in use with Docker. As a result, it has a long and close history with Docker, is very stable, has a lot of real-world deployments, and has strong community support. AUFS has several features that make it a good choice for Docker. These features enable: +AUFS was the first storage driver in use with Docker. As a result, it has a +long and close history with Docker, is very stable, has a lot of real-world +deployments, and has strong community support. AUFS has several features that +make it a good choice for Docker. These features enable: - Fast container startup times. - Efficient use of storage. - Efficient use of memory. -Despite its capabilities and long history with Docker, some Linux distributions do not support AUFS. This is usually because AUFS is not included in the mainline (upstream) Linux kernel. +Despite its capabilities and long history with Docker, some Linux distributions + do not support AUFS. This is usually because AUFS is not included in the +mainline (upstream) Linux kernel. -The following sections examine some AUFS features and how they relate to Docker. +The following sections examine some AUFS features and how they relate to +Docker. ## Image layering and sharing with AUFS -AUFS is a *unification filesystem*. This means that it takes multiple directories on a single Linux host, stacks them on top of each other, and provides a single unified view. To achieve this, AUFS uses *union mount*. +AUFS is a *unification filesystem*. This means that it takes multiple +directories on a single Linux host, stacks them on top of each other, and +provides a single unified view. To achieve this, AUFS uses a *union mount*. -AUFS stacks multiple directories and exposes them as a unified view through a single mount point. All of the directories in the stack, as well as the union mount point, must all exist on the same Linux host. AUFS refers to each directory that it stacks as a *branch*. +AUFS stacks multiple directories and exposes them as a unified view through a +single mount point. All of the directories in the stack, as well as the union +mount point, must all exist on the same Linux host. AUFS refers to each +directory that it stacks as a *branch*. -Within Docker, AUFS union mounts enable image layering. The AUFS storage driver implements Docker image layers using this union mount system. AUFS branches correspond to Docker image layers. The diagram below shows a Docker container based on the `ubuntu:latest` image. +Within Docker, AUFS union mounts enable image layering. The AUFS storage driver + implements Docker image layers using this union mount system. AUFS branches +correspond to Docker image layers. The diagram below shows a Docker container +based on the `ubuntu:latest` image. ![](images/aufs_layers.jpg) -This diagram shows the relationship between the Docker image layers and the AUFS branches (directories) in `/var/lib/docker/aufs`. Each image layer and the container layer correspond to an AUFS branch (directory) in the Docker host's local storage area. The union mount point gives the unified view of all layers. +This diagram shows that each image layer, and the container layer, is +represented in the Docker hosts filesystem as a directory under +`/var/lib/docker/`. The union mount point provides the unified view of all +layers. 
 
-AUFS also supports the copy-on-write technology (CoW). Not all storage drivers do.
+AUFS also supports the copy-on-write technology (CoW). Not all storage drivers
+do.
 
 ## Container reads and writes with AUFS
 
-Docker leverages AUFS CoW technology to enable image sharing and minimize the use of disk space. AUFS works at the file level. This means that all AUFS CoW operations copy entire files - even if only a small part of the file is being modified. This behavior can have a noticeable impact on container performance, especially if the files being copied are large, below a lot of image layers, or the CoW operation must search a deep directory tree.
+Docker leverages AUFS CoW technology to enable image sharing and minimize the
+use of disk space. AUFS works at the file level. This means that all AUFS CoW
+operations copy entire files - even if only a small part of the file is being
+modified. This behavior can have a noticeable impact on container performance,
+especially if the files being copied are large, exist below a lot of image
+layers, or the CoW operation must search a deep directory tree.
 
-Consider, for example, an application running in a container needs to add a single new value to a large key-value store (file). If this is the first time the file is modified it does not yet exist in the container's top writable layer. So, the CoW must *copy up* the file from the underlying image. The AUFS storage driver searches each image layer for the file. The search order is from top to bottom. When it is found, the entire file is *copied up* to the container's top writable layer. From there, it can be opened and modified.
-
-Larger files obviously take longer to *copy up* than smaller files, and files that exist in lower image layers take longer to locate than those in higher layers. However, a *copy up* operation only occurs once per file on any given container. Subsequent reads and writes happen against the file's copy already *copied-up* to the container's top layer.
+Consider, for example, an application running in a container that needs to add
+a single new value to a large key-value store (file). If this is the first time
+the file is modified, it does not yet exist in the container's top writable
+layer. So, the CoW must *copy up* the file from the underlying image. The AUFS
+storage driver searches each image layer for the file. The search order is from
+top to bottom. When it is found, the entire file is *copied up* to the
+container's top writable layer. From there, it can be opened and modified.
+
+Larger files obviously take longer to *copy up* than smaller files, and files
+that exist in lower image layers take longer to locate than those in higher
+layers. However, a *copy up* operation only occurs once per file on any given
+container. Subsequent reads and writes happen against the file's copy already
+*copied-up* to the container's top layer.
 
 ## Deleting files with the AUFS storage driver
 
 The AUFS storage driver deletes a file from a container by placing a *whiteout
 file* in the container's top layer. The whiteout file effectively obscures the
-existence of the file in image's lower, read-only layers. The simplified
+existence of the file in the read-only image layers below. The simplified
 diagram below shows a container based on an image with three image layers.
 
 ![](images/aufs_delete.jpg)
 
 The `file3` was deleted from the container.
So, the AUFS storage driver
 placed a whiteout file in the container's top layer. This whiteout file effectively
-"deletes" `file3` from the container by obscuring any of the original file's
-existence in the image's read-only base layer. Of course, the image could have
-been in any of the other layers instead or in addition depending on how the
-layers are built.
+"deletes" `file3` from the container by obscuring any of the original file's
+existence in the image's read-only layers. This works the same no matter which
+of the image's read-only layers the file exists in.
 
 ## Configure Docker with AUFS
 
-You can only use the AUFS storage driver on Linux systems with AUFS installed. Use the following command to determine if your system supports AUFS.
+You can only use the AUFS storage driver on Linux systems with AUFS installed.
+Use the following command to determine if your system supports AUFS.
 
-```bash
-$ grep aufs /proc/filesystems
-nodev   aufs
-```
+    $ grep aufs /proc/filesystems
+    nodev   aufs
 
-This output indicates the system supports AUFS. Once you've verified your
+This output indicates the system supports AUFS. Once you've verified your
 system supports AUFS, you must instruct the Docker daemon to use it. You
 do this from the command line with the `docker daemon` command:
 
-```bash
-$ sudo docker daemon --storage-driver=aufs &
-```
+    $ sudo docker daemon --storage-driver=aufs &
+
 
 Alternatively, you can edit the Docker config file and add the
 `--storage-driver=aufs` option to the `DOCKER_OPTS` line.
 
-```bash
-# Use DOCKER_OPTS to modify the daemon startup options.
-DOCKER_OPTS="--storage-driver=aufs"
-```
+    # Use DOCKER_OPTS to modify the daemon startup options.
+    DOCKER_OPTS="--storage-driver=aufs"
 
-Once your daemon is running, verify the storage driver with the `docker info` command.
+Once your daemon is running, verify the storage driver with the `docker info`
+command.
 
-```bash
-$ sudo docker info
-Containers: 1
-Images: 4
-Storage Driver: aufs
- Root Dir: /var/lib/docker/aufs
- Backing Filesystem: extfs
- Dirs: 6
- Dirperm1 Supported: false
-Execution Driver: native-0.2
-...output truncated...
-```
+    $ sudo docker info
+    Containers: 1
+    Images: 4
+    Storage Driver: aufs
+     Root Dir: /var/lib/docker/aufs
+     Backing Filesystem: extfs
+     Dirs: 6
+     Dirperm1 Supported: false
+    Execution Driver: native-0.2
+    ...output truncated...
 
-The output above shows that the Docker daemon is running the AUFS storage driver on top of an existing ext4 backing filesystem.
+The output above shows that the Docker daemon is running the AUFS storage
+driver on top of an existing `ext4` backing filesystem.
 
 ## Local storage and AUFS
 
-As the `docker daemon` runs with the AUFS driver, the driver stores images and containers on within the Docker host's local storage area in the `/var/lib/docker/aufs` directory.
+As the `docker daemon` runs with the AUFS driver, the driver stores images and
+containers within the Docker host's local storage area under
+`/var/lib/docker/aufs/`.
 
 ### Images
 
 Image layers and their contents are stored under
-`/var/lib/docker/aufs/mnt/diff/` directory. The contents of an image
-layer in this location includes all the files and directories belonging in that
-image layer.
+`/var/lib/docker/aufs/diff/`. With Docker 1.10 and higher, image layer IDs do
+not correspond to directory names.
 
 The `/var/lib/docker/aufs/layers/` directory contains metadata about how image
 layers are stacked. This directory contains one file for every image or
-container layer on the Docker host. Inside each file are the image layers names
-that exist below it. The diagram below shows an image with 4 layers.
+container layer on the Docker host (though file names no longer match image
+layer IDs). Inside each file are the names of the directories that exist below
+it in the stack.
 
-![](images/aufs_metadata.jpg)
+The command below shows the contents of a metadata file in
+`/var/lib/docker/aufs/layers/` that lists the three directories that are
+stacked below it in the union mount. Remember, these directory names do not map
+to image layer IDs with Docker 1.10 and higher.
 
-Inspecting the contents of the file relating to the top layer of the image
-shows the three image layers below it. They are listed in the order they are
-stacked.
-
-```bash
-$ cat /var/lib/docker/aufs/layers/91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c
-
-d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82
-
-c22013c8472965aa5b62559f2b540cd440716ef149756e7b958a1b2aba421e87
-
-d3a1f33e8a5a513092f01bb7eb1c2abf4d711e5105390a3fe1ae2248cfde1391
-```
+    $ cat /var/lib/docker/aufs/layers/91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c
+    d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82
+    c22013c8472965aa5b62559f2b540cd440716ef149756e7b958a1b2aba421e87
+    d3a1f33e8a5a513092f01bb7eb1c2abf4d711e5105390a3fe1ae2248cfde1391
 
 The base layer in an image has no image layers below it, so its file is empty.
 
 ### Containers
 
-Running containers are mounted at locations in the
-`/var/lib/docker/aufs/mnt/` directory. This is the AUFS union
-mount point that exposes the container and all underlying image layers as a
-single unified view. If a container is not running, its directory still exists
-but is empty. This is because containers are only mounted when they are running.
+Running containers are mounted below `/var/lib/docker/aufs/mnt/`. This is
+where the AUFS union mount point exists, exposing the container and all
+underlying image layers as a single unified view. If a container is not
+running, it still has a directory here but it is empty. This is because AUFS
+only mounts a container when it is running. With Docker 1.10 and higher,
+container IDs no longer correspond to directory names under
+`/var/lib/docker/aufs/mnt/`.
 
 Container metadata and various config files that are placed into the running
-container are stored in `/var/lib/containers/`. Files in this
-directory exist for all containers on the system, including ones that are
-stopped. However, when a container is running the container's log files are also
-in this directory.
-
-A container's thin writable layer is stored under
-`/var/lib/docker/aufs/diff/`. This directory is stacked by AUFS as
-the containers top writable layer and is where all changes to the container are
-stored. The directory exists even if the container is stopped. This means that
-restarting a container will not lose changes made to it. Once a container is
-deleted this directory is deleted.
-
-Information about which image layers are stacked below a container's top
-writable layer is stored in the following file
-`/var/lib/docker/aufs/layers/`. The command below shows that the
-container with ID `b41a6e5a508d` has 4 image layers below it:
-
-```bash
-$ cat /var/lib/docker/aufs/layers/b41a6e5a508dfa02607199dfe51ed9345a675c977f2cafe8ef3e4b0b5773404e-init
-91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c
-d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82
-c22013c8472965aa5b62559f2b540cd440716ef149756e7b958a1b2aba421e87
-d3a1f33e8a5a513092f01bb7eb1c2abf4d711e5105390a3fe1ae2248cfde1391
-```
-
-The image layers are shown in order. In the output above, the layer starting
-with image ID "d3a1..." is the image's base layer. The image layer starting
-with "91e5..." is the image's topmost layer.
+container are stored in `/var/lib/docker/containers/`. Files in
+this directory exist for all containers on the system, including ones that are
+stopped. However, when a container is running, the container's log files are
+also in this directory.
+
+A container's thin writable layer is stored in a directory under
+`/var/lib/docker/aufs/diff/`. With Docker 1.10 and higher, container IDs no
+longer correspond to directory names. However, the container's thin writable
+layer still exists here and is stacked by AUFS as the top writable layer,
+and is where all changes to the container are stored. The directory exists even
+if the container is stopped. This means that restarting a container will not
+lose changes made to it. Once a container is deleted, its thin writable layer
+in this directory is deleted.
 
 ## AUFS and Docker performance
 
 To summarize some of the performance related aspects already mentioned:
 
-- The AUFS storage driver is a good choice for PaaS and other similar use-cases where container density is important. This is because AUFS efficiently shares images between multiple running containers, enabling fast container start times and minimal use of disk space.
+- The AUFS storage driver is a good choice for PaaS and other similar use-cases
+where container density is important. This is because AUFS efficiently shares
+images between multiple running containers, enabling fast container start times
+and minimal use of disk space.
 
-- The underlying mechanics of how AUFS shares files between image layers and containers uses the systems page cache very efficiently.
+- The underlying mechanics of how AUFS shares files between image layers and
+containers uses the system's page cache very efficiently.
 
-- The AUFS storage driver can introduce significant latencies into container write performance. This is because the first time a container writes to any file, the file has be located and copied into the containers top writable layer. These latencies increase and are compounded when these files exist below many image layers and the files themselves are large.
+- The AUFS storage driver can introduce significant latencies into container
+write performance. This is because the first time a container writes to any
+file, the file has to be located and copied into the container's top writable
+layer. These latencies increase and are compounded when these files exist below
+many image layers and the files themselves are large.
 
-One final point. Data volumes provide the best and most predictable performance.
+One final point. Data volumes provide the best and most predictable
+performance.
This is because they bypass the storage driver and do not incur +any of the potential overheads introduced by thin provisioning and +copy-on-write. For this reason, you may want to place heavy write workloads on +data volumes. ## Related information diff --git a/docs/userguide/storagedriver/btrfs-driver.md b/docs/userguide/storagedriver/btrfs-driver.md index 4cce021100..e8fffb535d 100644 --- a/docs/userguide/storagedriver/btrfs-driver.md +++ b/docs/userguide/storagedriver/btrfs-driver.md @@ -4,7 +4,7 @@ title = "Btrfs storage in practice" description = "Learn how to optimize your use of Btrfs driver." keywords = ["container, storage, driver, Btrfs "] [menu.main] -parent = "mn_storage_docker" +parent = "engine_driver" +++ @@ -13,126 +13,118 @@ parent = "mn_storage_docker" Btrfs is a next generation copy-on-write filesystem that supports many advanced storage technologies that make it a good fit for Docker. Btrfs is included in the mainline Linux kernel and its on-disk-format is now considered stable. -However, many of its features are still under heavy development and users should -consider it a fast-moving target. +However, many of its features are still under heavy development and users +should consider it a fast-moving target. Docker's `btrfs` storage driver leverages many Btrfs features for image and -container management. Among these features are thin provisioning, copy-on-write, -and snapshotting. +container management. Among these features are thin provisioning, +copy-on-write, and snapshotting. -This article refers to Docker's Btrfs storage driver as `btrfs` and the overall Btrfs Filesystem as Btrfs. +This article refers to Docker's Btrfs storage driver as `btrfs` and the overall + Btrfs Filesystem as Btrfs. >**Note**: The [Commercially Supported Docker Engine (CS-Engine)](https://www.docker.com/compatibility-maintenance) does not currently support the `btrfs` storage driver. ## The future of Btrfs -Btrfs has been long hailed as the future of Linux filesystems. With full support in the mainline Linux kernel, a stable on-disk-format, and active development with a focus on stability, this is now becoming more of a reality. +Btrfs has been long hailed as the future of Linux filesystems. With full +support in the mainline Linux kernel, a stable on-disk-format, and active +development with a focus on stability, this is now becoming more of a reality. -As far as Docker on the Linux platform goes, many people see the `btrfs` storage driver as a potential long-term replacement for the `devicemapper` storage driver. However, at the time of writing, the `devicemapper` storage driver should be considered safer, more stable, and more *production ready*. You should only consider the `btrfs` driver for production deployments if you understand it well and have existing experience with Btrfs. +As far as Docker on the Linux platform goes, many people see the `btrfs` +storage driver as a potential long-term replacement for the `devicemapper` +storage driver. However, at the time of writing, the `devicemapper` storage +driver should be considered safer, more stable, and more *production ready*. +You should only consider the `btrfs` driver for production deployments if you +understand it well and have existing experience with Btrfs. ## Image layering and sharing with Btrfs -Docker leverages Btrfs *subvolumes* and *snapshots* for managing the on-disk components of image and container layers. Btrfs subvolumes look and feel like a normal Unix filesystem. 
As such, they can have their own internal directory structure that hooks into the wider Unix filesystem.
+Docker leverages Btrfs *subvolumes* and *snapshots* for managing the on-disk
+components of image and container layers. Btrfs subvolumes look and feel like
+a normal Unix filesystem. As such, they can have their own internal directory
+structure that hooks into the wider Unix filesystem.
 
-Subvolumes are natively copy-on-write and have space allocated to them on-demand
-from an underlying storage pool. They can also be nested and snapped. The
-diagram blow shows 4 subvolumes. 'Subvolume 2' and 'Subvolume 3' are nested,
-whereas 'Subvolume 4' shows its own internal directory tree.
+Subvolumes are natively copy-on-write and have space allocated to them
+on-demand from an underlying storage pool. They can also be nested and snapped.
+The diagram below shows 4 subvolumes. 'Subvolume 2' and 'Subvolume 3' are
+nested, whereas 'Subvolume 4' shows its own internal directory tree.
 
 ![](images/btfs_subvolume.jpg)
 
-Snapshots are a point-in-time read-write copy of an entire subvolume. They exist directly below the subvolume they were created from. You can create snapshots of snapshots as shown in the diagram below.
+Snapshots are a point-in-time read-write copy of an entire subvolume. They
+exist directly below the subvolume they were created from. You can create
+snapshots of snapshots as shown in the diagram below.
 
 ![](images/btfs_snapshots.jpg)
 
-Btfs allocates space to subvolumes and snapshots on demand from an underlying pool of storage. The unit of allocation is referred to as a *chunk* and *chunks* are normally ~1GB in size.
+Btrfs allocates space to subvolumes and snapshots on demand from an underlying
+pool of storage. The unit of allocation is referred to as a *chunk*, and
+*chunks* are normally ~1GB in size.
 
-Snapshots are first-class citizens in a Btrfs filesystem. This means that they look, feel, and operate just like regular subvolumes. The technology required to create them is built directly into the Btrfs filesystem thanks to its native copy-on-write design. This means that Btrfs snapshots are space efficient with little or no performance overhead. The diagram below shows a subvolume and its snapshot sharing the same data.
+Snapshots are first-class citizens in a Btrfs filesystem. This means that they
+look, feel, and operate just like regular subvolumes. The technology required
+to create them is built directly into the Btrfs filesystem thanks to its
+native copy-on-write design. This means that Btrfs snapshots are space
+efficient with little or no performance overhead. The diagram below shows a
+subvolume and its snapshot sharing the same data.
 
 ![](images/btfs_pool.jpg)
 
-Docker's `btrfs` storage driver stores every image layer and container in its own Btrfs subvolume or snapshot. The base layer of an image is stored as a subvolume whereas child image layers and containers are stored as snapshots. This is shown in the diagram below.
+Docker's `btrfs` storage driver stores every image layer and container in its
+own Btrfs subvolume or snapshot. The base layer of an image is stored as a
+subvolume whereas child image layers and containers are stored as snapshots.
+This is shown in the diagram below.
 
 ![](images/btfs_container_layer.jpg)
 
-The high level process for creating images and containers on Docker hosts running the `btrfs` driver is as follows:
+The high level process for creating images and containers on Docker hosts
+running the `btrfs` driver is as follows:
 
-1.
The image's base layer is stored in a Btrfs subvolume under +1. The image's base layer is stored in a Btrfs *subvolume* under `/var/lib/docker/btrfs/subvolumes`. - The image ID is used as the subvolume name. E.g., a base layer with image ID - "f9a9f253f6105141e0f8e091a6bcdb19e3f27af949842db93acba9048ed2410b" will be - stored in - `/var/lib/docker/btrfs/subvolumes/f9a9f253f6105141e0f8e091a6bcdb19e3f27af949842db93acba9048ed2410b` +2. Subsequent image layers are stored as a Btrfs *snapshot* of the parent +layer's subvolume or snapshot. -2. Subsequent image layers are stored as a Btrfs snapshot of the parent layer's subvolume or snapshot. - - The diagram below shows a three-layer image. The base layer is a subvolume. Layer 1 is a snapshot of the base layer's subvolume. Layer 2 is a snapshot of Layer 1's snapshot. + The diagram below shows a three-layer image. The base layer is a subvolume. + Layer 1 is a snapshot of the base layer's subvolume. Layer 2 is a snapshot of +Layer 1's snapshot. ![](images/btfs_constructs.jpg) +As of Docker 1.10, image layer IDs no longer correspond to directory names +under `/var/lib/docker/`. + ## Image and container on-disk constructs Image layers and containers are visible in the Docker host's filesystem at -`/var/lib/docker/btrfs/subvolumes/ OR `. Directories for +`/var/lib/docker/btrfs/subvolumes/`. However, as previously stated, directory +names no longer correspond to image layer IDs. That said, directories for containers are present even for containers with a stopped status. This is because the `btrfs` storage driver mounts a default, top-level subvolume at `/var/lib/docker/subvolumes`. All other subvolumes and snapshots exist below that as Btrfs filesystem objects and not as individual mounts. -The following example shows a single Docker image with four image layers. - -```bash -$ sudo docker images -a -REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE -ubuntu latest 0a17decee413 2 weeks ago 188.3 MB - 3c9a9d7cc6a2 2 weeks ago 188.3 MB - eeb7cb91b09d 2 weeks ago 188.3 MB - f9a9f253f610 2 weeks ago 188.1 MB -``` - -Each image layer exists as a Btrfs subvolume or snapshot with the same name as its image ID as illustrated by the `btrfs subvolume list` command shown below: - -```bash -$ sudo btrfs subvolume list /var/lib/docker -ID 257 gen 9 top level 5 path btrfs/subvolumes/f9a9f253f6105141e0f8e091a6bcdb19e3f27af949842db93acba9048ed2410b -ID 258 gen 10 top level 5 path btrfs/subvolumes/eeb7cb91b09d5de9edb2798301aeedf50848eacc2123e98538f9d014f80f243c -ID 260 gen 11 top level 5 path btrfs/subvolumes/3c9a9d7cc6a235eb2de58ca9ef3551c67ae42a991933ba4958d207b29142902b -ID 261 gen 12 top level 5 path btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751 -``` - -Under the `/var/lib/docker/btrfs/subvolumes` directory, each of these subvolumes and snapshots are visible as a normal Unix directory: - -```bash -$ ls -l /var/lib/docker/btrfs/subvolumes/ -total 0 -drwxr-xr-x 1 root root 132 Oct 16 14:44 0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751 -drwxr-xr-x 1 root root 132 Oct 16 14:44 3c9a9d7cc6a235eb2de58ca9ef3551c67ae42a991933ba4958d207b29142902b -drwxr-xr-x 1 root root 132 Oct 16 14:44 eeb7cb91b09d5de9edb2798301aeedf50848eacc2123e98538f9d014f80f243c -drwxr-xr-x 1 root root 132 Oct 16 14:44 f9a9f253f6105141e0f8e091a6bcdb19e3f27af949842db93acba9048ed2410b -``` - Because Btrfs works at the filesystem level and not the block level, each image -and container layer can be browsed in the filesystem using normal Unix commands. 
-
-The example below shows a truncated output of an `ls -l` command against the
-image's top layer:
+and container layer can be browsed in the filesystem using normal Unix
+commands. The example below shows a truncated output of an `ls -l` command
+against an image layer:
 
-```bash
-$ ls -l /var/lib/docker/btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751/
-total 0
-drwxr-xr-x 1 root root 1372 Oct 9 08:39 bin
-drwxr-xr-x 1 root root 0 Apr 10 2014 boot
-drwxr-xr-x 1 root root 882 Oct 9 08:38 dev
-drwxr-xr-x 1 root root 2040 Oct 12 17:27 etc
-drwxr-xr-x 1 root root 0 Apr 10 2014 home
-...output truncated...
-```
+    $ ls -l /var/lib/docker/btrfs/subvolumes/0a17decee4139b0de68478f149cc16346f5e711c5ae3bb969895f22dd6723751/
+    total 0
+    drwxr-xr-x 1 root root 1372 Oct 9 08:39 bin
+    drwxr-xr-x 1 root root 0 Apr 10 2014 boot
+    drwxr-xr-x 1 root root 882 Oct 9 08:38 dev
+    drwxr-xr-x 1 root root 2040 Oct 12 17:27 etc
+    drwxr-xr-x 1 root root 0 Apr 10 2014 home
+    ...output truncated...
 
 ## Container reads and writes with Btrfs
 
 A container is a space-efficient snapshot of an image. Metadata in the snapshot
-points to the actual data blocks in the storage pool. This is the same as with a
-subvolume. Therefore, reads performed against a snapshot are essentially the
+points to the actual data blocks in the storage pool. This is the same as with
+a subvolume. Therefore, reads performed against a snapshot are essentially the
 same as reads performed against a subvolume. As a result, no performance
 overhead is incurred from the Btrfs driver.
 
@@ -145,28 +137,34 @@ new files to a container's snapshot operate at native Btrfs speeds.
 Updating an existing file in a container causes a copy-on-write operation
 (technically *redirect-on-write*). The driver leaves the original data and
 allocates new space to the snapshot. The updated data is written to this new
-space. Then, the driver updates the filesystem metadata in the snapshot to point
-to this new data. The original data is preserved in-place for subvolumes and
-snapshots further up the tree. This behavior is native to copy-on-write
+space. Then, the driver updates the filesystem metadata in the snapshot to
+point to this new data. The original data is preserved in-place for subvolumes
+and snapshots further up the tree. This behavior is native to copy-on-write
 filesystems like Btrfs and incurs very little overhead.
 
-With Btfs, writing and updating lots of small files can result in slow performance. More on this later.
+With Btrfs, writing and updating lots of small files can result in slow
+performance. More on this later.
 
 ## Configuring Docker with Btrfs
 
-The `btrfs` storage driver only operates on a Docker host where `/var/lib/docker` is mounted as a Btrfs filesystem. The following procedure shows how to configure Btrfs on Ubuntu 14.04 LTS.
+The `btrfs` storage driver only operates on a Docker host where
+`/var/lib/docker` is mounted as a Btrfs filesystem. The following procedure
+shows how to configure Btrfs on Ubuntu 14.04 LTS.
 
 ### Prerequisites
 
-If you have already used the Docker daemon on your Docker host and have images you want to keep, `push` them to Docker Hub or your private Docker Trusted Registry before attempting this procedure.
+If you have already used the Docker daemon on your Docker host and have images
+you want to keep, `push` them to Docker Hub or your private Docker Trusted
+Registry before attempting this procedure.
 
-Stop the Docker daemon. Then, ensure that you have a spare block device at `/dev/xvdb`.
The device identifier may be different in your environment and you should substitute your own values throughout the procedure. +Stop the Docker daemon. Then, ensure that you have a spare block device at +`/dev/xvdb`. The device identifier may be different in your environment and you + should substitute your own values throughout the procedure. -The procedure also assumes your kernel has the appropriate Btrfs modules loaded. To verify this, use the following command: +The procedure also assumes your kernel has the appropriate Btrfs modules +loaded. To verify this, use the following command: -```bash -$ cat /proc/filesystems | grep btrfs -``` + $ cat /proc/filesystems | grep btrfs ### Configure Btrfs on Ubuntu 14.04 LTS @@ -181,7 +179,9 @@ Assuming your system meets the prerequisites, do the following: 2. Create the Btrfs storage pool. - Btrfs storage pools are created with the `mkfs.btrfs` command. Passing multiple devices to the `mkfs.btrfs` command creates a pool across all of those devices. Here you create a pool with a single device at `/dev/xvdb`. + Btrfs storage pools are created with the `mkfs.btrfs` command. Passing +multiple devices to the `mkfs.btrfs` command creates a pool across all of those + devices. Here you create a pool with a single device at `/dev/xvdb`. $ sudo mkfs.btrfs -f /dev/xvdb WARNING! - Btrfs v3.12 IS EXPERIMENTAL @@ -199,7 +199,8 @@ Assuming your system meets the prerequisites, do the following: noted earlier, Btrfs is not currently recommended for production deployments unless you already have extensive experience. -3. If it does not already exist, create a directory for the Docker host's local storage area at `/var/lib/docker`. +3. If it does not already exist, create a directory for the Docker host's local + storage area at `/var/lib/docker`. $ sudo mkdir /var/lib/docker @@ -210,7 +211,10 @@ Assuming your system meets the prerequisites, do the following: $ sudo blkid /dev/xvdb /dev/xvdb: UUID="a0ed851e-158b-4120-8416-c9b072c8cf47" UUID_SUB="c3927a64-4454-4eef-95c2-a7d44ac0cf27" TYPE="btrfs" - b. Create a `/etc/fstab` entry to automatically mount `/var/lib/docker` each time the system boots. + b. Create an `/etc/fstab` entry to automatically mount `/var/lib/docker` +each time the system boots. Either of the following lines will work, just +remember to substitute the UUID value with the value obtained from the previous + command. /dev/xvdb /var/lib/docker btrfs defaults 0 0 UUID="a0ed851e-158b-4120-8416-c9b072c8cf47" /var/lib/docker btrfs defaults 0 0 @@ -223,10 +227,11 @@ Assuming your system meets the prerequisites, do the following: /dev/xvdb on /var/lib/docker type btrfs (rw) - The last line in the output above shows the `/dev/xvdb` mounted at `/var/lib/docker` as Btrfs. + The last line in the output above shows the `/dev/xvdb` mounted at +`/var/lib/docker` as Btrfs. - -Now that you have a Btrfs filesystem mounted at `/var/lib/docker`, the daemon should automatically load with the `btrfs` storage driver. +Now that you have a Btrfs filesystem mounted at `/var/lib/docker`, the daemon +should automatically load with the `btrfs` storage driver. 1. Start the Docker daemon. @@ -236,9 +241,10 @@ Now that you have a Btrfs filesystem mounted at `/var/lib/docker`, the daemon sh The procedure for starting the Docker daemon may differ depending on the Linux distribution you are using. 
-    You can start the Docker daemon with the `btrfs` storage driver by passing
-    the `--storage-driver=btrfs` flag to the `docker daemon` command or you can
-    add the `DOCKER_OPTS` line to the Docker config file.
+    You can force the Docker daemon to start with the `btrfs` storage
+driver by either passing the `--storage-driver=btrfs` flag to the `docker
+daemon` command at startup, or adding it to the `DOCKER_OPTS` line in the
+Docker config file.
 
 2. Verify the storage driver with the `docker info` command.
 
@@ -252,25 +258,54 @@ Your Docker host is now configured to use the `btrfs` storage driver.
 
 ## Btrfs and Docker performance
 
-There are several factors that influence Docker's performance under the `btrfs` storage driver.
+There are several factors that influence Docker's performance under the `btrfs`
+storage driver.
 
-- **Page caching**. Btrfs does not support page cache sharing. This means that *n* containers accessing the same file require *n* copies to be cached. As a result, the `btrfs` driver may not be the best choice for PaaS and other high density container use cases.
+- **Page caching**. Btrfs does not support page cache sharing. This means that
+*n* containers accessing the same file require *n* copies to be cached. As a
+result, the `btrfs` driver may not be the best choice for PaaS and other high
+density container use cases.
 
-- **Small writes**. Containers performing lots of small writes (including Docker hosts that start and stop many containers) can lead to poor use of Btrfs chunks. This can ultimately lead to out-of-space conditions on your Docker host and stop it working. This is currently a major drawback to using current versions of Btrfs.
+- **Small writes**. Containers performing lots of small writes (including
+Docker hosts that start and stop many containers) can lead to poor use of Btrfs
+chunks. This can ultimately lead to out-of-space conditions on your Docker
+host and stop it working. This is currently a major drawback to using current
+versions of Btrfs.
 
-    If you use the `btrfs` storage driver, closely monitor the free space on your Btrfs filesystem using the `btrfs filesys show` command. Do not trust the output of normal Unix commands such as `df`; always use the Btrfs native commands.
+    If you use the `btrfs` storage driver, closely monitor the free space on
+your Btrfs filesystem using the `btrfs filesys show` command. Do not trust the
+output of normal Unix commands such as `df`; always use the Btrfs native
+commands.
 
-- **Sequential writes**. Btrfs writes data to disk via journaling technique. This can impact sequential writes, where performance can be up to half.
+- **Sequential writes**. Btrfs writes data to disk via a journaling technique.
+This can impact sequential writes, reducing performance by up to half.
 
-- **Fragmentation**. Fragmentation is a natural byproduct of copy-on-write filesystems like Btrfs. Many small random writes can compound this issue. It can manifest as CPU spikes on Docker hosts using SSD media and head thrashing on Docker hosts using spinning media. Both of these result in poor performance.
+- **Fragmentation**. Fragmentation is a natural byproduct of copy-on-write
+filesystems like Btrfs. Many small random writes can compound this issue. It
+can manifest as CPU spikes on Docker hosts using SSD media and head thrashing
+on Docker hosts using spinning media. Both of these result in poor performance.
 
-    Recent versions of Btrfs allow you to specify `autodefrag` as a mount option.
This mode attempts to detect random writes and defragment them. You should perform your own tests before enabling this option on your Docker hosts. Some tests have shown this option has a negative performance impact on Docker hosts performing lots of small writes (including systems that start and stop many containers).
+    Recent versions of Btrfs allow you to specify `autodefrag` as a mount
+option. This mode attempts to detect random writes and defragment them. You
+should perform your own tests before enabling this option on your Docker hosts.
+Some tests have shown this option has a negative performance impact on Docker
+hosts performing lots of small writes (including systems that start and stop
+many containers).
 
-- **Solid State Devices (SSD)**. Btrfs has native optimizations for SSD media. To enable these, mount with the `-o ssd` mount option. These optimizations include enhanced SSD write performance by avoiding things like *seek optimizations* that have no use on SSD media.
+- **Solid State Devices (SSD)**. Btrfs has native optimizations for SSD media.
+To enable these, mount with the `-o ssd` mount option. These optimizations
+include enhanced SSD write performance by avoiding things like *seek
+optimizations* that have no use on SSD media.
 
-    Btfs also supports the TRIM/Discard primitives. However, mounting with the `-o discard` mount option can cause performance issues. Therefore, it is recommended you perform your own tests before using this option.
+    Btrfs also supports the TRIM/Discard primitives. However, mounting with the
+`-o discard` mount option can cause performance issues. Therefore, it is
+recommended you perform your own tests before using this option.
 
-- **Use Data Volumes**. Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you may want to place heavy write workloads on data volumes.
+- **Use Data Volumes**. Data volumes provide the best and most predictable
+performance. This is because they bypass the storage driver and do not incur
+any of the potential overheads introduced by thin provisioning and
+copy-on-write. For this reason, you should place heavy write workloads on data
+volumes.
 
 ## Related Information
 
diff --git a/docs/userguide/storagedriver/device-mapper-driver.md b/docs/userguide/storagedriver/device-mapper-driver.md
index 4d81d714bc..e9530b0e10 100644
--- a/docs/userguide/storagedriver/device-mapper-driver.md
+++ b/docs/userguide/storagedriver/device-mapper-driver.md
@@ -4,7 +4,7 @@
 title="Device mapper storage in practice"
 description="Learn how to optimize your use of device mapper driver."
 keywords=["container, storage, driver, device mapper"]
 [menu.main]
-parent="mn_storage_docker"
+parent="engine_driver"
 +++
 
@@ -51,56 +51,84 @@ Device Mapper technology works at the block level rather than the file level.
 This means that `devicemapper` storage driver's thin provisioning and
 copy-on-write operations work with blocks rather than entire files.
 
->**Note**: Snapshots are also referred to as *thin devices* or *virtual devices*. They all mean the same thing in the context of the `devicemapper` storage driver.
+>**Note**: Snapshots are also referred to as *thin devices* or *virtual
+>devices*. They all mean the same thing in the context of the `devicemapper`
+>storage driver.
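As a quick diagnostic sketch, you can list the thin devices that Device Mapper creates for Docker; this assumes the `devicemapper` driver is active and uses only the standard `dmsetup` tool:

```bash
# List Device Mapper devices on the host; Docker's thin pool and per-image
# snapshots typically show up with a "docker-" name prefix.
$ sudo dmsetup ls | grep docker
```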
-With the `devicemapper` the high level process for creating images is as follows:
+With `devicemapper` the high level process for creating images is as follows:
 
 1. The `devicemapper` storage driver creates a thin pool.
 
-    The pool is created from block devices or loop mounted sparse files (more on this later).
+    The pool is created from block devices or loop mounted sparse files (more
+on this later).
 
 2. Next it creates a *base device*.
 
-    A base device is a thin device with a filesystem. You can see which filesystem is in use by running the `docker info` command and checking the `Backing filesystem` value.
+    A base device is a thin device with a filesystem. You can see which
+filesystem is in use by running the `docker info` command and checking the
+`Backing filesystem` value.
 
 3. Each new image (and image layer) is a snapshot of this base device.
 
-    These are thin provisioned copy-on-write snapshots. This means that they are initially empty and only consume space from the pool when data is written to them.
+    These are thin provisioned copy-on-write snapshots. This means that they
+are initially empty and only consume space from the pool when data is written
+to them.
 
-With `devicemapper`, container layers are snapshots of the image they are created from. Just as with images, container snapshots are thin provisioned copy-on-write snapshots. The container snapshot stores all updates to the container. The `devicemapper` allocates space to them on-demand from the pool as and when data is written to the container.
+With `devicemapper`, container layers are snapshots of the image they are
+created from. Just as with images, container snapshots are thin provisioned
+copy-on-write snapshots. The container snapshot stores all updates to the
+container. The `devicemapper` allocates space to them on-demand from the pool
+as and when data is written to the container.
 
-The high level diagram below shows a thin pool with a base device and two images.
+The high level diagram below shows a thin pool with a base device and two
+images.
 
 ![](images/base_device.jpg)
 
-If you look closely at the diagram you'll see that it's snapshots all the way down. Each image layer is a snapshot of the layer below it. The lowest layer of each image is a snapshot of the the base device that exists in the pool. This base device is a `Device Mapper` artifact and not a Docker image layer.
+If you look closely at the diagram you'll see that it's snapshots all the way
+down. Each image layer is a snapshot of the layer below it. The lowest layer of
+each image is a snapshot of the base device that exists in the pool. This
+base device is a `Device Mapper` artifact and not a Docker image layer.
 
-A container is a snapshot of the image it is created from. The diagram below shows two containers - one based on the Ubuntu image and the other based on the Busybox image.
+A container is a snapshot of the image it is created from. The diagram below
+shows two containers - one based on the Ubuntu image and the other based on the
+Busybox image.
 
 ![](images/two_dm_container.jpg)
 
 ## Reads with the devicemapper
 
-Let's look at how reads and writes occur using the `devicemapper` storage driver. The diagram below shows the high level process for reading a single block (`0x44f`) in an example container.
+Let's look at how reads and writes occur using the `devicemapper` storage
+driver. The diagram below shows the high level process for reading a single
+block (`0x44f`) in an example container.
 
 ![](images/dm_container.jpg)
 
-1.
An application makes a read request for block 0x44f in the container. +1. An application makes a read request for block `0x44f` in the container. - Because the container is a thin snapshot of an image it does not have the data. Instead, it has a pointer (PTR) to where the data is stored in the image snapshot lower down in the image stack. + Because the container is a thin snapshot of an image it does not have the +data. Instead, it has a pointer (PTR) to where the data is stored in the image +snapshot lower down in the image stack. -2. The storage driver follows the pointer to block `0xf33` in the snapshot relating to image layer `a005...`. +2. The storage driver follows the pointer to block `0xf33` in the snapshot +relating to image layer `a005...`. -3. The `devicemapper` copies the contents of block `0xf33` from the image snapshot to memory in the container. +3. The `devicemapper` copies the contents of block `0xf33` from the image +snapshot to memory in the container. 4. The storage driver returns the data to the requesting application. ### Write examples -With the `devicemapper` driver, writing new data to a container is accomplished by an *allocate-on-demand* operation. Updating existing data uses a copy-on-write operation. Because Device Mapper is a block-based technology these operations occur at the block level. +With the `devicemapper` driver, writing new data to a container is accomplished + by an *allocate-on-demand* operation. Updating existing data uses a +copy-on-write operation. Because Device Mapper is a block-based technology +these operations occur at the block level. -For example, when making a small change to a large file in a container, the `devicemapper` storage driver does not copy the entire file. It only copies the blocks to be modified. Each block is 64KB. +For example, when making a small change to a large file in a container, the +`devicemapper` storage driver does not copy the entire file. It only copies the + blocks to be modified. Each block is 64KB. #### Writing new data @@ -108,9 +136,11 @@ To write 56KB of new data to a container: 1. An application makes a request to write 56KB of new data to the container. -2. The allocate-on-demand operation allocates a single new 64KB block to the containers snapshot. +2. The allocate-on-demand operation allocates a single new 64KB block to the +container's snapshot. - If the write operation is larger than 64KB, multiple new blocks are allocated to the container snapshot. + If the write operation is larger than 64KB, multiple new blocks are +allocated to the container's snapshot. 3. The data is written to the newly allocated block. @@ -122,7 +152,8 @@ To modify existing data for the first time: 2. A copy-on-write operation locates the blocks that need updating. -3. The operation allocates new blocks to the container snapshot and copies the data into those blocks. +3. The operation allocates new empty blocks to the container snapshot and +copies the data into those blocks. 4. The modified data is written into the newly allocated blocks. @@ -133,7 +164,8 @@ to the application's read and write operations. ## Configuring Docker with Device Mapper The `devicemapper` is the default Docker storage driver on some Linux -distributions. This includes RHEL and most of its forks. Currently, the following distributions support the driver: +distributions. This includes RHEL and most of its forks. Currently, the +following distributions support the driver: * RHEL/CentOS/Fedora * Ubuntu 12.04 @@ -142,9 +174,9 @@ distributions. 
This includes RHEL and most of its forks. Currently, the followin

Docker hosts running the `devicemapper` storage driver default to a
configuration mode known as `loop-lvm`. This mode uses sparse files to build
-the thin pool used by image and container snapshots. The mode is designed to work out-of-the-box
-with no additional configuration. However, production deployments should not run
-under `loop-lvm` mode.
+the thin pool used by image and container snapshots. The mode is designed to
+work out-of-the-box with no additional configuration. However, production
+deployments should not run under `loop-lvm` mode.

You can detect the mode by viewing the `docker info` command:

@@ -161,56 +193,84 @@ You can detect the mode by viewing the `docker info` command:
     Library Version: 1.02.93-RHEL7 (2015-01-28)
     ...

-The output above shows a Docker host running with the `devicemapper` storage driver operating in `loop-lvm` mode. This is indicated by the fact that the `Data loop file` and a `Metadata loop file` are on files under `/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse files.
+The output above shows a Docker host running with the `devicemapper` storage
+driver operating in `loop-lvm` mode. This is indicated by the fact that the
+`Data loop file` and the `Metadata loop file` are files under
+`/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse
+files.

### Configure direct-lvm mode for production

The preferred configuration for production deployments is `direct lvm`. This
mode uses block devices to create the thin pool. The following procedure shows
-you how to configure a Docker host to use the `devicemapper` storage driver in a
-`direct-lvm` configuration.
+you how to configure a Docker host to use the `devicemapper` storage driver in
+a `direct-lvm` configuration.

-> **Caution:** If you have already run the Docker daemon on your Docker host and have images you want to keep, `push` them Docker Hub or your private Docker Trusted Registry before attempting this procedure.
+> **Caution:** If you have already run the Docker daemon on your Docker host
+> and have images you want to keep, `push` them to Docker Hub or your private
+> Docker Trusted Registry before attempting this procedure.

-The procedure below will create a 90GB data volume and 4GB metadata volume to use as backing for the storage pool. It assumes that you have a spare block device at `/dev/xvdf` with enough free space to complete the task. The device identifier and volume sizes may be be different in your environment and you should substitute your own values throughout the procedure. The procedure also assumes that the Docker daemon is in the `stopped` state.
+The procedure below will create a 90GB data volume and 4GB metadata volume to
+use as backing for the storage pool. It assumes that you have a spare block
+device at `/dev/xvdf` with enough free space to complete the task. The device
+identifier and volume sizes may be different in your environment and you
+should substitute your own values throughout the procedure. The procedure also
+assumes that the Docker daemon is in the `stopped` state.

1. Log in to the Docker host you want to configure and stop the Docker daemon.

-2. If it exists, delete your existing image store by removing the `/var/lib/docker` directory.
+2. If it exists, delete your existing image store by removing the
+`/var/lib/docker` directory.

        $ sudo rm -rf /var/lib/docker

-3. 
Create an LVM physical volume (PV) on your spare block device using the `pvcreate` command.
+3. Create an LVM physical volume (PV) on your spare block device using the
+`pvcreate` command.

        $ sudo pvcreate /dev/xvdf
        Physical volume `/dev/xvdf` successfully created

-    The device identifier may be different on your system. Remember to substitute your value in the command above.
+    The device identifier may be different on your system. Remember to
+substitute your value in the command above.

-4. Create a new volume group (VG) called `vg-docker` using the PV created in the previous step.
+4. Create a new volume group (VG) called `vg-docker` using the PV created in
+the previous step.

        $ sudo vgcreate vg-docker /dev/xvdf
        Volume group `vg-docker` successfully created

-5. Create a new 90GB logical volume (LV) called `data` from space in the `vg-docker` volume group.
+5. Create a new 90GB logical volume (LV) called `data` from space in the
+`vg-docker` volume group.

        $ sudo lvcreate -L 90G -n data vg-docker
        Logical volume `data` created.

-    The command creates an LVM logical volume called `data` and an associated block device file at `/dev/vg-docker/data`. In a later step, you instruct the `devicemapper` storage driver to use this block device to store image and container data.
+    The command creates an LVM logical volume called `data` and an associated
+block device file at `/dev/vg-docker/data`. In a later step, you instruct the
+`devicemapper` storage driver to use this block device to store image and
+container data.

-    If you receive a signature detection warning, make sure you are working on the correct devices before continuing. Signature warnings indicate that the device you're working on is currently in use by LVM or has been used by LVM in the past.
+    If you receive a signature detection warning, make sure you are working on
+the correct devices before continuing. Signature warnings indicate that the
+device you're working on is currently in use by LVM or has been used by LVM in
+the past.

-6. Create a new logical volume (LV) called `metadata` from space in the `vg-docker` volume group.
+6. Create a new 4GB logical volume (LV) called `metadata` from space in the
+`vg-docker` volume group.

        $ sudo lvcreate -L 4G -n metadata vg-docker
        Logical volume `metadata` created.

-    This creates an LVM logical volume called `metadata` and an associated block device file at `/dev/vg-docker/metadata`. In the next step you instruct the `devicemapper` storage driver to use this block device to store image and container metadata.
+    This creates an LVM logical volume called `metadata` and an associated
+block device file at `/dev/vg-docker/metadata`. In the next step you instruct
+the `devicemapper` storage driver to use this block device to store image and
+container metadata.

-5. Start the Docker daemon with the `devicemapper` storage driver and the `--storage-opt` flags.
+7. Start the Docker daemon with the `devicemapper` storage driver and the
+`--storage-opt` flags.

-    The `data` and `metadata` devices that you pass to the `--storage-opt` options were created in the previous steps.
+    The `data` and `metadata` devices that you pass to the `--storage-opt`
+options were created in the previous steps.
$ sudo docker daemon --storage-driver=devicemapper --storage-opt dm.datadev=/dev/vg-docker/data --storage-opt dm.metadatadev=/dev/vg-docker/metadata & [1] 2163 @@ -221,11 +281,12 @@ The procedure below will create a 90GB data volume and 4GB metadata volume to us INFO[0027] Daemon has completed initialization INFO[0027] Docker daemon commit=0a8c2e3 execdriver=native-0.2 graphdriver=devicemapper version=1.8.2 - It is also possible to set the `--storage-driver` and `--storage-opt` flags in - the Docker config file and start the daemon normally using the `service` or - `systemd` commands. + It is also possible to set the `--storage-driver` and `--storage-opt` flags + in the Docker config file and start the daemon normally using the `service` or + `systemd` commands. -6. Use the `docker info` command to verify that the daemon is using `data` and `metadata` devices you created. +8. Use the `docker info` command to verify that the daemon is using `data` and +`metadata` devices you created. $ sudo docker info INFO[0180] GET /v1.20/info @@ -239,11 +300,14 @@ The procedure below will create a 90GB data volume and 4GB metadata volume to us Metadata file: /dev/vg-docker/metadata [...] - The output of the command above shows the storage driver as `devicemapper`. The last two lines also confirm that the correct devices are being used for the `Data file` and the `Metadata file`. + The output of the command above shows the storage driver as `devicemapper`. + The last two lines also confirm that the correct devices are being used for +the `Data file` and the `Metadata file`. ### Examine devicemapper structures on the host -You can use the `lsblk` command to see the device files created above and the `pool` that the `devicemapper` storage driver creates on top of them. +You can use the `lsblk` command to see the device files created above and the +`pool` that the `devicemapper` storage driver creates on top of them. $ sudo lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT @@ -255,11 +319,14 @@ You can use the `lsblk` command to see the device files created above and the `p └─vg--docker-metadata 253:1 0 4G 0 lvm └─docker-202:1-1032-pool 253:2 0 10G 0 dm -The diagram below shows the image from prior examples updated with the detail from the `lsblk` command above. +The diagram below shows the image from prior examples updated with the detail +from the `lsblk` command above. ![](http://farm1.staticflickr.com/703/22116692899_0471e5e160_b.jpg) -In the diagram, the pool is named `Docker-202:1-1032-pool` and spans the `data` and `metadata` devices created earlier. The `devicemapper` constructs the pool name as follows: +In the diagram, the pool is named `Docker-202:1-1032-pool` and spans the `data` + and `metadata` devices created earlier. The `devicemapper` constructs the pool + name as follows: ``` Docker-MAJ:MIN-INO-pool @@ -268,41 +335,74 @@ Docker-MAJ:MIN-INO-pool `MAJ`, `MIN` and `INO` refer to the major and minor device numbers and inode. Because Device Mapper operates at the block level it is more difficult to see -diffs between image layers and containers. However, there are two key -directories. The `/var/lib/docker/devicemapper/mnt` directory contains the mount -points for images and containers. The `/var/lib/docker/devicemapper/metadata` -directory contains one file for every image and container snapshot. The files -contain metadata about each snapshot in JSON format. +diffs between image layers and containers. 
Docker 1.10 and later no longer
+matches image layer IDs with directory names in `/var/lib/docker`. However,
+there are two key directories. The `/var/lib/docker/devicemapper/mnt` directory
+ contains the mount points for image and container layers. The
+`/var/lib/docker/devicemapper/metadata` directory contains one file for every
+image layer and container snapshot. The files contain metadata about each
+snapshot in JSON format.

## Device Mapper and Docker performance

-It is important to understand the impact that allocate-on-demand and copy-on-write operations can have on overall container performance.
+It is important to understand the impact that allocate-on-demand and
+copy-on-write operations can have on overall container performance.

### Allocate-on-demand performance impact

-The `devicemapper` storage driver allocates new blocks to a container via an allocate-on-demand operation. This means that each time an app writes to somewhere new inside a container, one or more empty blocks has to be located from the pool and mapped into the container.
+The `devicemapper` storage driver allocates new blocks to a container via an
+allocate-on-demand operation. This means that each time an app writes to
+somewhere new inside a container, one or more empty blocks have to be located
+from the pool and mapped into the container.

-All blocks are 64KB. A write that uses less than 64KB still results in a single 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB blocks. This can impact container performance, especially in containers that perform lots of small writes. However, once a block is allocated to a container subsequent reads and writes can operate directly on that block.
+All blocks are 64KB. A write that uses less than 64KB still results in a single
+ 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB
+blocks. This can impact container performance, especially in containers that
+perform lots of small writes. However, once a block is allocated to a container
+ subsequent reads and writes can operate directly on that block.

### Copy-on-write performance impact

-Each time a container updates existing data for the first time, the `devicemapper` storage driver has to perform a copy-on-write operation. This copies the data from the image snapshot to the container's snapshot. This process can have a noticeable impact on container performance.
+Each time a container updates existing data for the first time, the
+`devicemapper` storage driver has to perform a copy-on-write operation. This
+copies the data from the image snapshot to the container's snapshot. This
+process can have a noticeable impact on container performance.

-All copy-on-write operations have a 64KB granularity. As a results, updating 32KB of a 1GB file causes the driver to copy a single 64KB block into the container's snapshot. This has obvious performance advantages over file-level copy-on-write operations which would require copying the entire 1GB file into the container layer.
+All copy-on-write operations have a 64KB granularity. As a result, updating
+32KB of a 1GB file causes the driver to copy a single 64KB block into the
+container's snapshot. This has obvious performance advantages over file-level
+copy-on-write operations which would require copying the entire 1GB file into
+the container layer.

-In practice, however, containers that perform lots of small block writes (<64KB) can perform worse with `devicemapper` than with AUFS. 
+In practice, however, containers that perform lots of small block writes
+(<64KB) can perform worse with `devicemapper` than with AUFS.

### Other device mapper performance considerations

-There are several other things that impact the performance of the `devicemapper` storage driver..
+There are several other things that impact the performance of the
+`devicemapper` storage driver.

-- **The mode.** The default mode for Docker running the `devicemapper` storage driver is `loop-lvm`. This mode uses sparse files and suffers from poor performance. It is **not recommended for production**. The recommended mode for production environments is `direct-lvm` where the storage driver writes directly to raw block devices.
+- **The mode.** The default mode for Docker running the `devicemapper` storage
+driver is `loop-lvm`. This mode uses sparse files and suffers from poor
+performance. It is **not recommended for production**. The recommended mode for
+ production environments is `direct-lvm` where the storage driver writes
+directly to raw block devices.

-- **High speed storage.** For best performance you should place the `Data file` and `Metadata file` on high speed storage such as SSD. This can be direct attached storage or from a SAN or NAS array.
+- **High speed storage.** For best performance you should place the `Data file`
+ and `Metadata file` on high speed storage such as SSD. This can be direct
+attached storage or from a SAN or NAS array.

-- **Memory usage.** `devicemapper` is not the most memory efficient Docker storage driver. Launching *n* copies of the same container loads *n* copies of its files into memory. This can have a memory impact on your Docker host. As a result, the `devicemapper` storage driver may not be the best choice for PaaS and other high density use cases.
+- **Memory usage.** `devicemapper` is not the most memory efficient Docker
+storage driver. Launching *n* copies of the same container loads *n* copies of
+its files into memory. This can have a memory impact on your Docker host. As a
+result, the `devicemapper` storage driver may not be the best choice for PaaS
+and other high density use cases.

-One final point, data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you may want to place heavy write workloads on data volumes.
+One final point: data volumes provide the best and most predictable
+performance. This is because they bypass the storage driver and do not incur
+any of the potential overheads introduced by thin provisioning and
+copy-on-write. For this reason, you should place heavy write workloads on
+data volumes.
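+
+As a purely illustrative sketch (the host path and image name below are
+hypothetical), a write-heavy workload can be pointed at a data volume with the
+`-v` flag so that its writes bypass the `devicemapper` storage driver:
+
+    # Both the host path and the image name are hypothetical examples.
+    $ docker run -d -v /mnt/ssd/dbdata:/var/lib/data write-heavy-app
+
+Reads and writes under `/var/lib/data` then operate at native host speeds,
+with none of the allocate-on-demand or copy-on-write overhead described above.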
## Related Information diff --git a/docs/userguide/storagedriver/images/aufs_layers.jpg b/docs/userguide/storagedriver/images/aufs_layers.jpg index 933b0b3bdb..45e959f612 100644 Binary files a/docs/userguide/storagedriver/images/aufs_layers.jpg and b/docs/userguide/storagedriver/images/aufs_layers.jpg differ diff --git a/docs/userguide/storagedriver/images/btfs_constructs.jpg b/docs/userguide/storagedriver/images/btfs_constructs.jpg index 4ceeb6ad12..45852bceea 100644 Binary files a/docs/userguide/storagedriver/images/btfs_constructs.jpg and b/docs/userguide/storagedriver/images/btfs_constructs.jpg differ diff --git a/docs/userguide/storagedriver/images/btfs_container_layer.jpg b/docs/userguide/storagedriver/images/btfs_container_layer.jpg index 1905119e0b..752bf7b5ba 100644 Binary files a/docs/userguide/storagedriver/images/btfs_container_layer.jpg and b/docs/userguide/storagedriver/images/btfs_container_layer.jpg differ diff --git a/docs/userguide/storagedriver/images/container-layers-cas.jpg b/docs/userguide/storagedriver/images/container-layers-cas.jpg new file mode 100644 index 0000000000..63b159adc3 Binary files /dev/null and b/docs/userguide/storagedriver/images/container-layers-cas.jpg differ diff --git a/docs/userguide/storagedriver/images/driver-pros-cons.png b/docs/userguide/storagedriver/images/driver-pros-cons.png new file mode 100644 index 0000000000..6a13f3343c Binary files /dev/null and b/docs/userguide/storagedriver/images/driver-pros-cons.png differ diff --git a/docs/userguide/storagedriver/images/saving-space.jpg b/docs/userguide/storagedriver/images/saving-space.jpg index 721e90c301..125d56ccd8 100644 Binary files a/docs/userguide/storagedriver/images/saving-space.jpg and b/docs/userguide/storagedriver/images/saving-space.jpg differ diff --git a/docs/userguide/storagedriver/images/shared-uuid.jpg b/docs/userguide/storagedriver/images/shared-uuid.jpg index 2ac68bf536..1304afaa54 100644 Binary files a/docs/userguide/storagedriver/images/shared-uuid.jpg and b/docs/userguide/storagedriver/images/shared-uuid.jpg differ diff --git a/docs/userguide/storagedriver/imagesandcontainers.md b/docs/userguide/storagedriver/imagesandcontainers.md index eb3c20c7be..e4293370f4 100644 --- a/docs/userguide/storagedriver/imagesandcontainers.md +++ b/docs/userguide/storagedriver/imagesandcontainers.md @@ -4,7 +4,7 @@ title = "Understand images, containers, and storage drivers" description = "Learn the technologies that support storage drivers." keywords = ["container, storage, driver, AUFS, btfs, devicemapper,zvfs"] [menu.main] -parent = "mn_storage_docker" +parent = "engine_driver" weight = -2 +++ @@ -13,25 +13,159 @@ weight = -2 # Understand images, containers, and storage drivers To use storage drivers effectively, you must understand how Docker builds and -stores images. Then, you need an understanding of how these images are used in containers. Finally, you'll need a short introduction to the technologies that enable both images and container operations. +stores images. Then, you need an understanding of how these images are used by +containers. Finally, you'll need a short introduction to the technologies that +enable both images and container operations. -## Images and containers rely on layers +## Images and layers -Each Docker image references a list of read-only layers that represent filesystem differences. Layers are stacked on top of each other to form a base for a container's root filesystem. The diagram below shows the Ubuntu 15.04 image comprising 4 stacked image layers. 
+Each Docker image references a list of read-only layers that represent
+filesystem differences. Layers are stacked on top of each other to form a base
+for a container's root filesystem. The diagram below shows the Ubuntu 15.04
+image comprising 4 stacked image layers.

![](images/image-layers.jpg)

-When you make a change inside a container by, for example, adding a new file to a container created from Ubuntu 15.04 image, you add a new layer on top of the underlying stack. This change creates a new writable layer containing the newly added file on top of the image layers. Each image layer is stored by a cryptographic hash over its contents and multiple images can share the same layers. The diagram below shows a container running the Ubuntu 15.04 image.
+The Docker storage driver is responsible for stacking these layers and
+providing a single unified view.
+
+When you create a new container, you add a new, thin, writable layer on top of
+the underlying stack. This layer is often called the "container layer". All
+changes made to the running container - such as writing new files, modifying
+existing files, and deleting files - are written to this thin writable
+container layer. The diagram below shows a container based on the Ubuntu 15.04
+image.

![](images/container-layers.jpg)

-The major difference between a container and an image is this writable layer. All writes to the container that add new or modifying existing data are stored in this writable layer. When the container is deleted the writeable layer is also deleted. The image remains unchanged.

### Content addressable storage

-Because each container has its own thin writable container layer and all data is stored this container layer, this means that multiple containers can share access to the same underlying image and yet have their own data state. The diagram below shows multiple containers sharing the same Ubuntu 15.04 image.
+Docker 1.10 introduced a new content addressable storage model. This is a
+completely new way to address image and layer data on disk. Previously, image
+and layer data was referenced and stored using a randomly generated UUID. In
+the new model this is replaced by a secure *content hash*.
+
+The new model improves security, provides a built-in way to avoid ID
+collisions, and guarantees data integrity after pull, push, load, and save
+operations. It also allows many images to freely share their layers, even if
+they didn’t come from the same build.
+
+The diagram below shows an updated version of the previous diagram,
+highlighting the changes implemented by Docker 1.10.
+
+![](images/container-layers-cas.jpg)
+
+As can be seen, all image layer IDs are cryptographic hashes, whereas the
+container ID is still a randomly generated UUID.
+
+There are several things to note regarding the new model. These include:
+
+1. Migration of existing images
+2. Image and layer filesystem structures
+
+Existing images, those created and pulled by earlier versions of Docker, need
+to be migrated before they can be used with the new model. This migration
+involves calculating new secure checksums and is performed automatically the
+first time you start an updated Docker daemon. After the migration is complete,
+ all images and tags will have brand new secure IDs.
+
+Although the migration is automatic and transparent, it is computationally
+intensive. This means it can take time if you have lots of image data. 
+During this time your Docker daemon will not respond to other requests.
+
+A migration tool exists that allows you to migrate existing images to the new
+format before upgrading your Docker daemon. This means that upgraded Docker
+daemons do not need to perform the migration in-band, which avoids any
+associated downtime. It also provides a way to manually migrate existing images
+ so that they can be distributed to other Docker daemons in your environment
+that are already running the latest versions of Docker.
+
+The migration tool is provided by Docker, Inc., and runs as a container. You
+can download it from [https://github.com/docker/v1.10-migrator/releases](https://github.com/docker/v1.10-migrator/releases).
+
+While running the "migrator" image you need to expose your Docker host's data
+directory to the container. If you are using the default Docker data path, the
+command to run the container will look like this:
+
+    $ sudo docker run --rm -v /var/lib/docker:/var/lib/docker docker/v1.10-migrator
+
+If you use the `devicemapper` storage driver, you will need to include the
+`--privileged` option so that the container has access to your storage devices.
+
+#### Migration example
+
+The following example shows the migration tool in use on a Docker host running
+version 1.9.1 of the Docker daemon and the AUFS storage driver. The Docker host
+ is running on a **t2.micro** AWS EC2 instance with 1 vCPU, 1GB RAM, and a
+single 8GB general purpose SSD EBS volume. The Docker data directory
+(`/var/lib/docker`) was consuming 2GB of space.
+
+    $ docker images
+    REPOSITORY    TAG       IMAGE ID        CREATED         SIZE
+    jenkins       latest    285c9f0f9d3d    17 hours ago    708.5 MB
+    mysql         latest    d39c3fa09ced    8 days ago      360.3 MB
+    mongo         latest    a74137af4532    13 days ago     317.4 MB
+    postgres      latest    9aae83d4127f    13 days ago     270.7 MB
+    redis         latest    8bccd73928d9    2 weeks ago     151.3 MB
+    centos        latest    c8a648134623    4 weeks ago     196.6 MB
+    ubuntu        15.04     c8be1ac8145a    7 weeks ago     131.3 MB
+
+    $ du -hs /var/lib/docker
+    2.0G    /var/lib/docker
+
+    $ time docker run --rm -v /var/lib/docker:/var/lib/docker docker/v1.10-migrator
+    Unable to find image 'docker/v1.10-migrator:latest' locally
+    latest: Pulling from docker/v1.10-migrator
+    ed1f33c5883d: Pull complete
+    b3ca410aa2c1: Pull complete
+    2b9c6ed9099e: Pull complete
+    dce7e318b173: Pull complete
+    Digest: sha256:bd2b245d5d22dd94ec4a8417a9b81bb5e90b171031c6e216484db3fe300c2097
+    Status: Downloaded newer image for docker/v1.10-migrator:latest
+    time="2016-01-27T12:31:06Z" level=debug msg="Assembling tar data for 01e70da302a553ba13485ad020a0d77dbb47575a31c4f48221137bb08f45878d from /var/lib/docker/aufs/diff/01e70da302a553ba13485ad020a0d77dbb47575a31c4f48221137bb08f45878d"
+    time="2016-01-27T12:31:06Z" level=debug msg="Assembling tar data for 07ac220aeeef9febf1ac16a9d1a4eff7ef3c8cbf5ed0be6b6f4c35952ed7920d from /var/lib/docker/aufs/diff/07ac220aeeef9febf1ac16a9d1a4eff7ef3c8cbf5ed0be6b6f4c35952ed7920d"
+
+    time="2016-01-27T12:32:00Z" level=debug msg="layer dbacfa057b30b1feaf15937c28bd8ca0d6c634fc311ccc35bd8d56d017595d5b took 10.80 seconds"
+
+    real    0m59.583s
+    user    0m0.046s
+    sys     0m0.008s
+
+The Unix `time` command is prepended to the `docker run` command to produce
+timings for the operation. As can be seen, migrating 7 images comprising 2GB
+of disk space took approximately 1 minute. However, this
+included the time taken to pull the `docker/v1.10-migrator` image
+(approximately 3.5 seconds). 
The same operation on an m4.10xlarge EC2 instance
+with 40 vCPUs, 160GB RAM and an 8GB provisioned IOPS EBS volume resulted in the
+ following improved timings:
+
+    real    0m9.871s
+    user    0m0.094s
+    sys     0m0.021s
+
+This shows that the migration operation is affected by the hardware spec of the
+ machine performing the migration.
+
+## Containers and layers
+
+The major difference between a container and an image is the top writable
+layer. All writes to the container that add new or modify existing data are
+stored in this writable layer. When the container is deleted the writable layer
+ is also deleted. The underlying image remains unchanged.
+
+Because each container has its own thin writable container layer, and all
+changes are stored in this container layer, multiple containers can share
+access to the same underlying image and yet have their own data state. The
+diagram below shows multiple containers sharing the same Ubuntu 15.04 image.

![](images/sharing-layers.jpg)

-A storage driver is responsible for enabling and managing both the image layers and the writeable container layer. How a storage driver accomplishes these behaviors can vary. Two key technologies behind Docker image and container management are stackable image layers and copy-on-write (CoW).
+The Docker storage driver is responsible for enabling and managing both the
+image layers and the writable container layer. How a storage driver
+accomplishes these tasks can vary between drivers. Two key technologies behind
+Docker image and container management are stackable image layers and
+copy-on-write (CoW).

## The copy-on-write strategy

@@ -40,24 +174,29 @@ Sharing is a good way to optimize resources. People do this instinctively in
daily life. For example, twins Jane and Joseph taking an Algebra class at
different times from different teachers can share the same exercise book by
passing it between each other. Now, suppose Jane gets an assignment to complete
-the homework on page 11 in the book. At that point, Jane copies page 11, completes the homework, and hands in her copy. The original exercise book is unchanged and only Jane has a copy of the changed page 11.
+the homework on page 11 in the book. At that point, Jane copies page 11,
+completes the homework, and hands in her copy. The original exercise book is
+unchanged and only Jane has a copy of the changed page 11.

Copy-on-write is a similar strategy of sharing and copying. In this strategy,
system processes that need the same data share the same instance of that data
-rather than having their own copy. At some point, if one process needs to modify
-or write to the data, only then does the operating system make a copy of the
-data for that process to use. Only the process that needs to write has access to
-the data copy. All the other processes continue to use the original data.
+rather than having their own copy. At some point, if one process needs to
+modify or write to the data, only then does the operating system make a copy of
+ the data for that process to use. Only the process that needs to write has
+access to the data copy. All the other processes continue to use the original
+data.

-Docker uses a copy-on-write technology with both images and containers. 
This
+CoW strategy optimizes both image disk space usage and the performance of
+container start times. The next sections look at how copy-on-write is leveraged
+ with images and containers through sharing and copying.

### Sharing promotes smaller images

-This section looks at image layers and copy-on-write technology. All image and container layers exist inside the Docker host's *local storage area* and are managed by the storage driver. It is a location on the host's
-filesystem.
+This section looks at image layers and copy-on-write technology. All image and
+ container layers exist inside the Docker host's *local storage area* and are
+managed by the storage driver. On Linux-based Docker hosts this is usually
+located under `/var/lib/docker/`.

The Docker client reports on image layers when instructed to pull and push
images with `docker pull` and `docker push`. The command below pulls the
@@ -65,38 +204,85 @@ images with `docker pull` and `docker push`. The command below pulls the

    $ docker pull ubuntu:15.04
    15.04: Pulling from library/ubuntu
-    6e6a100fa147: Pull complete
-    13c0c663a321: Pull complete
-    2bd276ed39d5: Pull complete
-    013f3d01d247: Pull complete
-    Digest: sha256:c7ecf33cef00ae34b131605c31486c91f5fd9a76315d075db2afd39d1ccdf3ed
+    1ba8ac955b97: Pull complete
+    f157c4e5ede7: Pull complete
+    0b7e98f84c4c: Pull complete
+    a3ed95caeb02: Pull complete
+    Digest: sha256:5e279a9df07990286cce22e1b0f5b0490629ca6d187698746ae5e28e604a640e
    Status: Downloaded newer image for ubuntu:15.04

From the output, you'll see that the command actually pulls 4 image layers.
-Each of the above lines lists an image layer and its UUID. The combination of
-these four layers makes up the `ubuntu:15.04` Docker image.
+Each of the above lines lists an image layer and its UUID or cryptographic
+hash. The combination of these four layers makes up the `ubuntu:15.04` Docker
+image.

-The image layers are stored in the Docker host's local storage area. Typically,
-the local storage area is in the host's `/var/lib/docker` directory. Depending
-on which storage driver the local storage area may be in a different location. You can list the layers in the local storage area. The following example shows the storage as it appears under the AUFS storage driver:
+Each of these layers is stored in its own directory inside the Docker host's
+local storage area.

-    $ sudo ls /var/lib/docker/aufs/layers
-    013f3d01d24738964bb7101fa83a926181d600ebecca7206dced59669e6e6778 2bd276ed39d5fcfd3d00ce0a190beeea508332f5aec3c6a125cc619a3fdbade6
-    13c0c663a321cd83a97f4ce1ecbaf17c2ba166527c3b06daaefe30695c5fcb8c 6e6a100fa147e6db53b684c8516e3e2588b160fd4898b6265545d5d4edb6796d
+Versions of Docker prior to 1.10 stored each layer in a directory with the same
+ name as the image layer ID. However, this is not the case for images pulled
+with Docker version 1.10 and later. For example, the command below shows an
+image being pulled from Docker Hub, followed by a directory listing on a host
+running version 1.9.1 of the Docker Engine.

-If you `pull` another image that shares some of the same image layers as the `ubuntu:15.04` image, the Docker daemon recognize this, and only pull the layers it hasn't already stored. After the second pull, the two images will share any common image layers. 
+    $ docker pull ubuntu:15.04
+    15.04: Pulling from library/ubuntu
+    47984b517ca9: Pull complete
+    df6e891a3ea9: Pull complete
+    e65155041eed: Pull complete
+    c8be1ac8145a: Pull complete
+    Digest: sha256:5e279a9df07990286cce22e1b0f5b0490629ca6d187698746ae5e28e604a640e
+    Status: Downloaded newer image for ubuntu:15.04

+    $ ls /var/lib/docker/aufs/layers
+    47984b517ca9ca0312aced5c9698753ffa964c2015f2a5f18e5efa9848cf30e2
+    c8be1ac8145a6e59a55667f573883749ad66eaeef92b4df17e5ea1260e2d7356
+    df6e891a3ea9cdce2a388a2cf1b1711629557454fd120abd5be6d32329a0e0ac
+    e65155041eed7ec58dea78d90286048055ca75d41ea893c7246e794389ecf203

+Notice how the four directories match up with the layer IDs of the downloaded
+image. Now compare this with the same operations performed on a host running
+version 1.10 of the Docker Engine.
+
+    $ docker pull ubuntu:15.04
+    15.04: Pulling from library/ubuntu
+    1ba8ac955b97: Pull complete
+    f157c4e5ede7: Pull complete
+    0b7e98f84c4c: Pull complete
+    a3ed95caeb02: Pull complete
+    Digest: sha256:5e279a9df07990286cce22e1b0f5b0490629ca6d187698746ae5e28e604a640e
+    Status: Downloaded newer image for ubuntu:15.04
+
+    $ ls /var/lib/docker/aufs/layers/
+    1d6674ff835b10f76e354806e16b950f91a191d3b471236609ab13a930275e24
+    5dbb0cbe0148cf447b9464a358c1587be586058d9a4c9ce079320265e2bb94e7
+    bef7199f2ed8e86fa4ada1309cfad3089e0542fec8894690529e4c04a7ca2d73
+    ebf814eccfe98f2704660ca1d844e4348db3b5ccc637eb905d4818fbfb00a06a
+
+See how the four directories do not match up with the image layer IDs pulled in
+ the previous step.
+
+Despite the differences between image management before and after version 1.10,
+all versions of Docker still allow images to share layers. For example, if you
+`pull` an image that shares some of the same image layers as an image that has
+already been pulled, the Docker daemon recognizes this, and only pulls the
+layers it doesn't already have stored locally. After the second pull, the two
+images will share any common image layers.
+
+You can illustrate this now for yourself. Starting with the `ubuntu:15.04`
+image that you just pulled, make a change to it, and build a new image based on
+ the change. One way to do this is using a `Dockerfile` and the `docker build`
+command.
+
+1. In an empty directory, create a simple `Dockerfile` that starts with the
+   ubuntu:15.04 image.

        FROM ubuntu:15.04

-2. Add a new file called "newfile" in the image's `/tmp` directory with the text "Hello world" in it.
+2. Add a new file called "newfile" in the image's `/tmp` directory with the
+   text "Hello world" in it.

-    When you are done, the `Dockerfile` contains two lines:
+    When you are done, the `Dockerfile` contains two lines:

        FROM ubuntu:15.04

@@ -104,78 +290,125 @@ One way to do this is using a Dockerfile and the `docker build` command.

3. Save and close the file.

-2. From a terminal in the same folder as your Dockerfile, run the following command:
+4. From a terminal in the same folder as your `Dockerfile`, run the following
+   command:

        $ docker build -t changed-ubuntu . 
Sending build context to Docker daemon 2.048 kB
-    Step 0 : FROM ubuntu:15.04
-    ---> 013f3d01d247
-    Step 1 : RUN echo "Hello world" > /tmp/newfile
-    ---> Running in 2023460815df
-    ---> 03b964f68d06
-    Removing intermediate container 2023460815df
-    Successfully built 03b964f68d06
+    Step 1 : FROM ubuntu:15.04
+    ---> 3f7bcee56709
+    Step 2 : RUN echo "Hello world" > /tmp/newfile
+    ---> Running in d14acd6fad4e
+    ---> 94e6b7d2c720
+    Removing intermediate container d14acd6fad4e
+    Successfully built 94e6b7d2c720

-    > **Note:** The period (.) at the end of the above command is important. It tells the `docker build` command to use the current working directory as its build context.
+    > **Note:** The period (.) at the end of the above command is important. It
+    > tells the `docker build` command to use the current working directory as
+    > its build context.

-    The output above shows a new image with image ID `03b964f68d06`.
+    The output above shows a new image with image ID `94e6b7d2c720`.

-3. Run the `docker images` command to verify the new image is in the Docker host's local storage area.
+5. Run the `docker images` command to verify the new `changed-ubuntu` image is
+   in the Docker host's local storage area.

-    REPOSITORY      TAG      IMAGE ID      CREATED           VIRTUAL SIZE
-    changed-ubuntu  latest   03b964f68d06  33 seconds ago    131.4 MB
-    ubuntu
+    REPOSITORY       TAG      IMAGE ID       CREATED           SIZE
+    changed-ubuntu   latest   94e6b7d2c720   33 seconds ago    131.4 MB
+    ubuntu           15.04    3f7bcee56709   6 weeks ago       131.3 MB

-4. Run the `docker history` command to see which image layers were used to create the new `changed-ubuntu` image.
+6. Run the `docker history` command to see which image layers were used to
+   create the new `changed-ubuntu` image.

        $ docker history changed-ubuntu
-    IMAGE               CREATED              CREATED BY                                      SIZE        COMMENT
-    03b964f68d06        About a minute ago   /bin/sh -c echo "Hello world" > /tmp/newfile    12 B
-    013f3d01d247        6 weeks ago          /bin/sh -c #(nop) CMD ["/bin/bash"]             0 B
-                        6 weeks ago          /bin/sh -c sed -i 's/^#\s*\(deb.*universe\)$/   1.879 kB
-                        6 weeks ago          /bin/sh -c echo '#!/bin/sh' > /usr/sbin/polic   701 B
-                        6 weeks ago          /bin/sh -c #(nop) ADD file:49710b44e2ae0edef4   131.4 MB
+    IMAGE               CREATED             CREATED BY                                      SIZE        COMMENT
+    94e6b7d2c720        2 minutes ago       /bin/sh -c echo "Hello world" > /tmp/newfile    12 B
+    3f7bcee56709        6 weeks ago         /bin/sh -c #(nop) CMD ["/bin/bash"]             0 B
+                        6 weeks ago         /bin/sh -c sed -i 's/^#\s*\(deb.*universe\)$/   1.879 kB
+                        6 weeks ago         /bin/sh -c echo '#!/bin/sh' > /usr/sbin/polic   701 B
+                        6 weeks ago         /bin/sh -c #(nop) ADD file:8e4943cd86e9b2ca13   131.3 MB

-    The `docker history` output shows the new `03b964f68d06` image layer at the
-    top. You know that the `03b964f68d06` layer was added because it was created
-    by the `echo "Hello world" > /tmp/newfile` command in your `Dockerfile`.
-    The 4 image layers below it are the exact same image layers the make up the
-    ubuntu:15.04 image as their UUIDs match.
+    The `docker history` output shows the new `94e6b7d2c720` image layer at the
+    top. You know that this is the new image layer added because it was created
+    by the `echo "Hello world" > /tmp/newfile` command in your `Dockerfile`.
+    The 4 image layers below it are the exact same image layers
+    that make up the `ubuntu:15.04` image.

-Notice the new `changed-ubuntu` image does not have its own copies of every layer. As can be seen in the diagram below, the new image is sharing it's four underlying layers with the `ubuntu:15.04` image. 
+> **Note:** Under the content addressable storage model introduced with Docker
+> 1.10, image history data is no longer stored in a config file with each image
+> layer. It is now stored as a string of text in a single config file that
+> relates to the overall image. This can result in some image layers showing as
+> "missing" in the output of the `docker history` command. This is normal
+> behavior and can be ignored.
+>
+> You may hear images like these referred to as *flat images*.
+
+Notice the new `changed-ubuntu` image does not have its own copies of every
+layer. As can be seen in the diagram below, the new image is sharing its four
+underlying layers with the `ubuntu:15.04` image.

![](images/saving-space.jpg)

-The `docker history` command also shows the size of each image layer. The `03b964f68d06` is only consuming 13 Bytes of disk space. Because all of the layers below it already exist on the Docker host and are shared with the `ubuntu15:04` image, this means the entire `changed-ubuntu` image only consumes 13 Bytes of disk space.
+The `docker history` command also shows the size of each image layer. As you
+can see, the `94e6b7d2c720` layer is only consuming 12 Bytes of disk space.
+This means that the `changed-ubuntu` image we just created is only consuming an
+ additional 12 Bytes of disk space on the Docker host - all layers below the
+`94e6b7d2c720` layer already exist on the Docker host and are shared by other
+images.

-This sharing of image layers is what makes Docker images and containers so space
-efficient.
+This sharing of image layers is what makes Docker images and containers so
+space efficient.

### Copying makes containers efficient

-You learned earlier that a container a Docker image with a thin writable, container layer added. The diagram below shows the layers of a container based on the `ubuntu:15.04` image:
+You learned earlier that a container is a Docker image with a thin, writable
+container layer added. The diagram below shows the layers of a container based
+on the `ubuntu:15.04` image:

-![](images/container-layers.jpg)
+![](images/container-layers-cas.jpg)

-All writes made to a container are stored in the thin writable container layer. The other layers are read-only (RO) image layers and can't be changed. This means that multiple containers can safely share a single underlying image. The diagram below shows multiple containers sharing a single copy of the `ubuntu:15.04` image. Each container has its own thin RW layer, but they all share a single instance of the ubuntu:15.04 image:
+All writes made to a container are stored in the thin writable container layer.
+ The other layers are read-only (RO) image layers and can't be changed. This
+means that multiple containers can safely share a single underlying image. The
+diagram below shows multiple containers sharing a single copy of the
+`ubuntu:15.04` image. Each container has its own thin RW layer, but they all
+share a single instance of the ubuntu:15.04 image:

![](images/sharing-layers.jpg)

-When a write operation occurs in a container, Docker uses the storage driver to perform a copy-on-write operation. The type of operation depends on the storage driver. For AUFS and OverlayFS storage drivers the copy-on-write operation is pretty much as follows:
+When an existing file in a container is modified, Docker uses the storage
+driver to perform a copy-on-write operation. The specifics of the operation
+depend on the storage driver. 
For the AUFS and OverlayFS storage drivers, the +copy-on-write operation is pretty much as follows: -* Search through the layers for the file to update. The process starts at the top, newest layer and works down to the base layer one-at-a-time. -* Perform a "copy-up" operation on the first copy of the file that is found. A "copy up" copies the file up to the container's own thin writable layer. +* Search through the image layers for the file to update. The process starts +at the top, newest layer and works down to the base layer one layer at a +time. +* Perform a "copy-up" operation on the first copy of the file that is found. A + "copy up" copies the file up to the container's own thin writable layer. * Modify the *copy of the file* in container's thin writable layer. -BTFS, ZFS, and other drivers handle the copy-on-write differently. You can read more about the methods of these drivers later in their detailed descriptions. +Btrfs, ZFS, and other drivers handle the copy-on-write differently. You can +read more about the methods of these drivers later in their detailed +descriptions. -Containers that write a lot of data will consume more space than containers that do not. This is because most write operations consume new space in the containers thin writable top layer. If your container needs to write a lot of data, you can use a data volume. +Containers that write a lot of data will consume more space than containers +that do not. This is because most write operations consume new space in the +container's thin writable top layer. If your container needs to write a lot of +data, you should consider using a data volume. -A copy-up operation can incur a noticeable performance overhead. This overhead is different depending on which storage driver is in use. However, large files, lots of layers, and deep directory trees can make the impact more noticeable. Fortunately, the operation only occurs the first time any particular file is modified. Subsequent modifications to the same file do not cause a copy-up operation and can operate directly on the file's existing copy already present in container layer. +A copy-up operation can incur a noticeable performance overhead. This overhead +is different depending on which storage driver is in use. However, large files, + lots of layers, and deep directory trees can make the impact more noticeable. +Fortunately, the operation only occurs the first time any particular file is +modified. Subsequent modifications to the same file do not cause a copy-up +operation and can operate directly on the file's existing copy already present +in the container layer. -Let's see what happens if we spin up 5 containers based on our `changed-ubuntu` image we built earlier: +Let's see what happens if we spin up 5 containers based on our `changed-ubuntu` + image we built earlier: -1. From a terminal on your Docker host, run the following `docker run` command 5 times. +1. From a terminal on your Docker host, run the following `docker run` command +5 times. $ docker run -dit changed-ubuntu bash 75bab0d54f3cf193cfdc3a86483466363f442fba30859f7dcd1b816b6ede82d4 @@ -188,28 +421,38 @@ Let's see what happens if we spin up 5 containers based on our `changed-ubuntu` $ docker run -dit changed-ubuntu bash 0ad25d06bdf6fca0dedc38301b2aff7478b3e1ce3d1acd676573bba57cb1cfef - This launches 5 containers based on the `changed-ubuntu` image. As the container is created, Docker adds a writable layer and assigns it a UUID. This is the value returned from the `docker run` command. 
+ This launches 5 containers based on the `changed-ubuntu` image. As each +container is created, Docker adds a writable layer and assigns it a random +UUID. This is the value returned from the `docker run` command. 2. Run the `docker ps` command to verify the 5 containers are running. $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 0ad25d06bdf6 changed-ubuntu "bash" About a minute ago Up About a minute stoic_ptolemy - 8eb24b3b2d24 changed-ubuntu "bash" About a minute ago Up About a minute pensive_bartik - a651680bd6c2 changed-ubuntu "bash" 2 minutes ago Up 2 minutes hopeful_turing - 9280e777d109 changed-ubuntu "bash" 2 minutes ago Up 2 minutes backstabbing_mahavira - 75bab0d54f3c changed-ubuntu "bash" 2 minutes ago Up 2 minutes boring_pasteur + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 0ad25d06bdf6 changed-ubuntu "bash" About a minute ago Up About a minute stoic_ptolemy + 8eb24b3b2d24 changed-ubuntu "bash" About a minute ago Up About a minute pensive_bartik + a651680bd6c2 changed-ubuntu "bash" 2 minutes ago Up 2 minutes hopeful_turing + 9280e777d109 changed-ubuntu "bash" 2 minutes ago Up 2 minutes backstabbing_mahavira + 75bab0d54f3c changed-ubuntu "bash" 2 minutes ago Up 2 minutes boring_pasteur - The output above shows 5 running containers, all sharing the `changed-ubuntu` image. Each `CONTAINER ID` is derived from the UUID when creating each container. + The output above shows 5 running containers, all sharing the +`changed-ubuntu` image. Each `CONTAINER ID` is derived from the UUID when +creating each container. 3. List the contents of the local storage area. - $ sudo ls containers - 0ad25d06bdf6fca0dedc38301b2aff7478b3e1ce3d1acd676573bba57cb1cfef 9280e777d109e2eb4b13ab211553516124a3d4d4280a0edfc7abf75c59024d47 - 75bab0d54f3cf193cfdc3a86483466363f442fba30859f7dcd1b816b6ede82d4 a651680bd6c2ef64902e154eeb8a064b85c9abf08ac46f922ad8dfc11bb5cd8a + $ sudo ls /var/lib/docker/containers + 0ad25d06bdf6fca0dedc38301b2aff7478b3e1ce3d1acd676573bba57cb1cfef + 9280e777d109e2eb4b13ab211553516124a3d4d4280a0edfc7abf75c59024d47 + 75bab0d54f3cf193cfdc3a86483466363f442fba30859f7dcd1b816b6ede82d4 + a651680bd6c2ef64902e154eeb8a064b85c9abf08ac46f922ad8dfc11bb5cd8a 8eb24b3b2d246f225b24f2fca39625aaad71689c392a7b552b78baf264647373 -Docker's copy-on-write strategy not only reduces the amount of space consumed by containers, it also reduces the time required to start a container. At start time, Docker only has to create the thin writable layer for each container. The diagram below shows these 5 containers sharing a single read-only (RO) copy of the `changed-ubuntu` image. +Docker's copy-on-write strategy not only reduces the amount of space consumed +by containers, it also reduces the time required to start a container. At start + time, Docker only has to create the thin writable layer for each container. +The diagram below shows these 5 containers sharing a single read-only (RO) +copy of the `changed-ubuntu` image. ![](images/shared-uuid.jpg) @@ -219,18 +462,30 @@ significantly increased. ## Data volumes and the storage driver -When a container is deleted, any data written to the container that is not stored in a *data volume* is deleted along with the container. A data volume is directory or file that is mounted directly into a container. +When a container is deleted, any data written to the container that is not +stored in a *data volume* is deleted along with the container. -Data volumes are not controlled by the storage driver. 
Reads and writes to data
-volumes bypass the storage driver and operate at native host speeds. You can mount any number of data volumes into a container. Multiple containers can also share one or more data volumes.
+A data volume is a directory or file in the Docker host's filesystem that is
+mounted directly into a container. Data volumes are not controlled by the
+storage driver. Reads and writes to data volumes bypass the storage driver and
+operate at native host speeds. You can mount any number of data volumes into a
+container. Multiple containers can also share one or more data volumes.

-The diagram below shows a single Docker host running two containers. Each container exists inside of its own address space within the Docker host's local storage area. There is also a single shared data volume located at `/data` on the Docker host. This is mounted directly into both containers.
+The diagram below shows a single Docker host running two containers. Each
+container exists inside of its own address space within the Docker host's local
+ storage area (`/var/lib/docker/...`). There is also a single shared data
+volume located at `/data` on the Docker host. This is mounted directly into
+both containers.

![](images/shared-volume.jpg)

-The data volume resides outside of the local storage area on the Docker host further reinforcing its independence from the storage driver's control. When a container is deleted, any data stored in shared data volumes persists on the Docker host.
+Data volumes reside outside of the local storage area on the Docker host,
+further reinforcing their independence from the storage driver's control. When
+a container is deleted, any data stored in data volumes persists on the Docker
+host.

-For detailed information about data volumes [Managing data in containers](https://docs.docker.com/userguide/dockervolumes/).
+For detailed information about data volumes, see
+[Managing data in containers](https://docs.docker.com/userguide/dockervolumes/).

## Related information

diff --git a/docs/userguide/storagedriver/index.md b/docs/userguide/storagedriver/index.md
index e389e6b1a0..76671c7196 100644
--- a/docs/userguide/storagedriver/index.md
+++ b/docs/userguide/storagedriver/index.md
@@ -4,8 +4,8 @@ title = "Docker storage drivers"
description = "Learn how select the proper storage driver for your container."
keywords = ["container, storage, driver, AUFS, btfs, devicemapper,zvfs"]
[menu.main]
-identifier = "mn_storage_docker"
-parent = "mn_use_docker"
+identifier = "engine_driver"
+parent = "engine_guide"
weight = 7
+++

diff --git a/docs/userguide/storagedriver/overlayfs-driver.md b/docs/userguide/storagedriver/overlayfs-driver.md
index b4a47d9a12..9abc1dbe65 100644
--- a/docs/userguide/storagedriver/overlayfs-driver.md
+++ b/docs/userguide/storagedriver/overlayfs-driver.md
@@ -4,53 +4,89 @@ title = "OverlayFS storage in practice"
description = "Learn how to optimize your use of OverlayFS driver."
keywords = ["container, storage, driver, OverlayFS "]
[menu.main]
-parent = "mn_storage_docker"
+parent = "engine_driver"
+++

# Docker and OverlayFS in practice

-OverlayFS is a modern *union filesystem* that is similar to AUFS. 
In comparison + to AUFS, OverlayFS: * has a simpler design * has been in the mainline Linux kernel since version 3.18 * is potentially faster -As a result, OverlayFS is rapidly gaining popularity in the Docker community and is seen by many as a natural successor to AUFS. As promising as OverlayFS is, it is still relatively young. Therefore caution should be taken before using it in production Docker environments. +As a result, OverlayFS is rapidly gaining popularity in the Docker community +and is seen by many as a natural successor to AUFS. As promising as OverlayFS +is, it is still relatively young. Therefore, caution should be taken before +using it in production Docker environments. -Docker's `overlay` storage driver leverages several OverlayFS features to build and manage the on-disk structures of images and containers. - ->**Note**: Since it was merged into the mainline kernel, the OverlayFS *kernel module* was renamed from "overlayfs" to "overlay". As a result you may see the two terms used interchangeably in some documentation. However, this document uses "OverlayFS" to refer to the overall filesystem, and `overlay` to refer to Docker's storage-driver. +Docker's `overlay` storage driver leverages several OverlayFS features to build + and manage the on-disk structures of images and containers. +>**Note**: Since it was merged into the mainline kernel, the OverlayFS *kernel +>module* was renamed from "overlayfs" to "overlay". As a result you may see the +> two terms used interchangeably in some documentation. However, this document +> uses "OverlayFS" to refer to the overall filesystem, and `overlay` to refer +> to Docker's storage-driver. ## Image layering and sharing with OverlayFS -OverlayFS takes two directories on a single Linux host, layers one on top of the other, and provides a single unified view. These directories are often referred to as *layers* and the technology used to layer them is known as a *union mount*. The OverlayFS terminology is "lowerdir" for the bottom layer and "upperdir" for the top layer. The unified view is exposed through its own directory called "merged". +OverlayFS takes two directories on a single Linux host, layers one on top of +the other, and provides a single unified view. These directories are often +referred to as *layers* and the technology used to layer them is known as a +*union mount*. The OverlayFS terminology is "lowerdir" for the bottom layer and + "upperdir" for the top layer. The unified view is exposed through its own +directory called "merged". -The diagram below shows how a Docker image and a Docker container are layered. The image layer is the "lowerdir" and the container layer is the "upperdir". The unified view is exposed through a directory called "merged" which is effectively the containers mount point. The diagram shows how Docker constructs map to OverlayFS constructs. +The diagram below shows how a Docker image and a Docker container are layered. +The image layer is the "lowerdir" and the container layer is the "upperdir". +The unified view is exposed through a directory called "merged" which is +effectively the container's mount point. The diagram shows how Docker constructs + map to OverlayFS constructs. ![](images/overlay_constructs.jpg) -Notice how the image layer and container layer can contain the same files. When this happens, the files in the container layer ("upperdir") are dominant and obscure the existence of the same files in the image layer ("lowerdir"). The container mount ("merged") presents the unified view.
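+Outside of Docker, you can see the same behavior with a hand-made union mount.
+The following is a minimal sketch, not something Docker does for you: the four
+directories are hypothetical, must already exist, and the `overlay` kernel
+module must be available:
+
+    $ echo "lower" | sudo tee /lower/file1
+    $ echo "upper" | sudo tee /upper/file1
+    $ sudo mount -t overlay overlay \
+        -o lowerdir=/lower,upperdir=/upper,workdir=/work /merged
+    $ cat /merged/file1
+    upper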
+Notice how the image layer and container layer can contain the same files. When + this happens, the files in the container layer ("upperdir") are dominant and +obscure the existence of the same files in the image layer ("lowerdir"). The +container mount ("merged") presents the unified view. -OverlayFS only works with two layers. This means that multi-layered images cannot be implemented as multiple OverlayFS layers. Instead, each image layer is implemented as its own directory under `/var/lib/docker/overlay`. Hard links are then used as a space-efficient way to reference data shared with lower layers. The diagram below shows a four-layer image and how it is represented in the Docker host's filesystem. +OverlayFS only works with two layers. This means that multi-layered images +cannot be implemented as multiple OverlayFS layers. Instead, each image layer +is implemented as its own directory under `/var/lib/docker/overlay`. +Hard links are then used as a space-efficient way to reference data shared with + lower layers. As of Docker 1.10, image layer IDs no longer correspond to +directory names in `/var/lib/docker/`. -![](images/overlay_constructs2.jpg) - -To create a container, the `overlay` driver combines the directory representing the image's top layer plus a new directory for the container. The image's top layer is the "lowerdir" in the overlay and read-only. The new directory for the container is the "upperdir" and is writable. +To create a container, the `overlay` driver combines the directory representing + the image's top layer with a new directory for the container. The image's top +layer is the "lowerdir" in the overlay and read-only. The new directory for the + container is the "upperdir" and is writable. ## Example: Image and container on-disk constructs -The following `docker images -a` command shows a Docker host with a single image. As can be seen, the image consists of four layers. +The following `docker pull` command shows a Docker host downloading a +Docker image comprising four layers. - $ docker images -a - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - ubuntu latest 1d073211c498 7 days ago 187.9 MB - 5a4526e952f0 7 days ago 187.9 MB - 99fcaefe76ef 7 days ago 187.9 MB - c63fb41c2213 7 days ago 187.7 MB + $ sudo docker pull ubuntu + Using default tag: latest + latest: Pulling from library/ubuntu + 8387d9ff0016: Pull complete + 3b52deaaf0ed: Pull complete + 4bd501fad6de: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:457b05828bdb5dcc044d93d042863fba3f2158ae249a6db5ae3934307c757c54 + Status: Downloaded newer image for ubuntu:latest -Below, the command's output illustrates that each of the four image layers has it's own directory under `/var/lib/docker/overlay/`. +Each image layer has its own directory under `/var/lib/docker/overlay/`. This +is where the contents of each image layer are stored. + +The output of the command below shows the four directories that store the +contents of each image layer just pulled. However, as can be seen, the image +layer IDs do not match the directory names in `/var/lib/docker/overlay`. This +is normal behavior in Docker 1.10 and later.
$ ls -l /var/lib/docker/overlay/ total 24 @@ -59,35 +95,42 @@ Below, the command's output illustrates that each of the four image layers has i drwx------ 5 root root 4096 Oct 28 11:06 99fcaefe76ef1aa4077b90a413af57fd17d19dce4e50d7964a273aae67055235 drwx------ 3 root root 4096 Oct 28 11:01 c63fb41c2213f511f12f294dd729b9903a64d88f098c20d2350905ac1fdbcbba -Each directory is named after the image layer IDs in the previous `docker images -a` command. The image layer directories contain the files unique to that layer as well as hard links to the data that is shared with lower layers. This allows for efficient use of disk space. +The image layer directories contain the files unique to that layer as well as +hard links to the data that is shared with lower layers. This allows for +efficient use of disk space. -The following `docker ps` command shows the same Docker host running a single container. The container ID is "73de7176c223". +Containers also exist on-disk in the Docker host's filesystem under +`/var/lib/docker/overlay/`. If you inspect the directory relating to a running +container using the `ls -l` command, you find the following file and +directories. - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 73de7176c223 ubuntu "bash" 2 days ago Up 2 days stupefied_nobel - -This container exists on-disk in the Docker host's filesystem under `/var/lib/docker/overlay/73de7176c223...`. If you inspect this directory using the `ls -l` command you find the following file and directories. - - $ ls -l /var/lib/docker/overlay/73de7176c223a6c82fd46c48c5f152f2c8a7e49ecb795a7197c3bb795c4d879e + $ ls -l /var/lib/docker/overlay/<directory-of-running-container> total 16 -rw-r--r-- 1 root root 64 Oct 28 11:06 lower-id drwxr-xr-x 1 root root 4096 Oct 28 11:06 merged drwxr-xr-x 4 root root 4096 Oct 28 11:06 upper drwx------ 3 root root 4096 Oct 28 11:06 work -These four filesystem objects are all artifacts of OverlayFS. The "lower-id" file contains the ID of the top layer of the image the container is based on. This is used by OverlayFS as the "lowerdir". +These four filesystem objects are all artifacts of OverlayFS. The "lower-id" +file contains the ID of the top layer of the image the container is based on. +This is used by OverlayFS as the "lowerdir". $ cat /var/lib/docker/overlay/73de7176c223a6c82fd46c48c5f152f2c8a7e49ecb795a7197c3bb795c4d879e/lower-id 1d073211c498fd5022699b46a936b4e4bdacb04f637ad64d3475f558783f5c3e -The "upper" directory is the containers read-write layer. Any changes made to the container are written to this directory. +The "upper" directory is the container's read-write layer. Any changes made to +the container are written to this directory. -The "merged" directory is effectively the containers mount point. This is where the unified view of the image ("lowerdir") and container ("upperdir") is exposed. Any changes written to the container are immediately reflected in this directory. +The "merged" directory is effectively the container's mount point. This is where + the unified view of the image ("lowerdir") and container ("upperdir") is +exposed. Any changes written to the container are immediately reflected in this + directory. -The "work" directory is required for OverlayFS to function. It is used for things such as *copy_up* operations. +The "work" directory is required for OverlayFS to function. It is used for +things such as *copy_up* operations. -You can verify all of these constructs from the output of the `mount` command.
(Ellipses and line breaks are used in the output below to enhance readability.) +You can verify all of these constructs from the output of the `mount` command. +(Ellipses and line breaks are used in the output below to enhance readability.) $ mount | grep overlay overlay on /var/lib/docker/overlay/73de7176c223.../merged @@ -95,39 +138,73 @@ You can verify all of these constructs from the output of the `mount` command. ( upperdir=/var/lib/docker/overlay/73de7176c223.../upper, workdir=/var/lib/docker/overlay/73de7176c223.../work) -The output reflects the overlay is mounted as read-write ("rw"). +The output reflects that the overlay is mounted as read-write ("rw"). ## Container reads and writes with overlay -Consider three scenarios where a container opens a file for read access with overlay. +Consider three scenarios where a container opens a file for read access with +overlay. -- **The file does not exist in the container layer**. If a container opens a file for read access and the file does not already exist in the container ("upperdir") it is read from the image ("lowerdir"). This should incur very little performance overhead. +- **The file does not exist in the container layer**. If a container opens a +file for read access and the file does not already exist in the container +("upperdir") it is read from the image ("lowerdir"). This should incur very +little performance overhead. -- **The file only exists in the container layer**. If a container opens a file for read access and the file exists in the container ("upperdir") and not in the image ("lowerdir"), it is read directly from the container. +- **The file only exists in the container layer**. If a container opens a file +for read access and the file exists in the container ("upperdir") and not in +the image ("lowerdir"), it is read directly from the container. -- **The file exists in the container layer and the image layer**. If a container opens a file for read access and the file exists in the image layer and the container layer, the file's version in the container layer is read. This is because files in the container layer ("upperdir") obscure files with the same name in the image layer ("lowerdir"). +- **The file exists in the container layer and the image layer**. If a +container opens a file for read access and the file exists in the image layer +and the container layer, the file's version in the container layer is read. +This is because files in the container layer ("upperdir") obscure files with +the same name in the image layer ("lowerdir"). Consider some scenarios where files in a container are modified. -- **Writing to a file for the first time**. The first time a container writes to an existing file, that file does not exist in the container ("upperdir"). The `overlay` driver performs a *copy_up* operation to copy the file from the image ("lowerdir") to the container ("upperdir"). The container then writes the changes to the new copy of the file in the container layer. +- **Writing to a file for the first time**. The first time a container writes +to an existing file, that file does not exist in the container ("upperdir"). +The `overlay` driver performs a *copy_up* operation to copy the file from the +image ("lowerdir") to the container ("upperdir"). The container then writes the + changes to the new copy of the file in the container layer. - However, OverlayFS works at the file level not the block level. 
This means that all OverlayFS copy-up operations copy entire files, even if the file is very large and only a small part of it is being modified. This can have a noticeable impact on container write performance. However, two things are worth noting: + However, OverlayFS works at the file level, not the block level. This means +that all OverlayFS copy-up operations copy entire files, even if the file is +very large and only a small part of it is being modified. This can have a +noticeable impact on container write performance. However, two things are +worth noting: - * The copy_up operation only occurs the first time any given file is written to. Subsequent writes to the same file will operate against the copy of the file already copied up to the container. + * The copy_up operation only occurs the first time any given file is +written to. Subsequent writes to the same file will operate against the copy of + the file already copied up to the container. - * OverlayFS only works with two layers. This means that performance should be better than AUFS which can suffer noticeable latencies when searching for files in images with many layers. + * OverlayFS only works with two layers. This means that performance should +be better than AUFS, which can suffer noticeable latencies when searching for +files in images with many layers. -- **Deleting files and directories**. When files are deleted within a container a *whiteout* file is created in the containers "upperdir". The version of the file in the image layer ("lowerdir") is not deleted. However, the whiteout file in the container obscures it. +- **Deleting files and directories**. When files are deleted within a container, + a *whiteout* file is created in the container's "upperdir". The version of the +file in the image layer ("lowerdir") is not deleted. However, the whiteout file + in the container obscures it. - Deleting a directory in a container results in *opaque directory* being created in the "upperdir". This has the same effect as a whiteout file and effectively masks the existence of the directory in the image's "lowerdir". + Deleting a directory in a container results in an *opaque directory* being +created in the "upperdir". This has the same effect as a whiteout file and +effectively masks the existence of the directory in the image's "lowerdir". ## Configure Docker with the overlay storage driver -To configure Docker to use the overlay storage driver your Docker host must be running version 3.18 of the Linux kernel (preferably newer) with the overlay kernel module loaded. OverlayFS can operate on top of most supported Linux filesystems. However, ext4 is currently recommended for use in production environments. +To configure Docker to use the overlay storage driver, your Docker host must be +running version 3.18 of the Linux kernel (preferably newer) with the overlay +kernel module loaded. OverlayFS can operate on top of most supported Linux +filesystems. However, ext4 is currently recommended for use in production +environments. -The following procedure shows you how to configure your Docker host to use OverlayFS. The procedure assumes that the Docker daemon is in a stopped state. +The following procedure shows you how to configure your Docker host to use +OverlayFS. The procedure assumes that the Docker daemon is in a stopped state. -> **Caution:** If you have already run the Docker daemon on your Docker host and have images you want to keep, `push` them Docker Hub or your private Docker Trusted Registry before attempting this procedure.
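+Before you begin, you can quickly confirm that the kernel on your Docker host
+offers OverlayFS support. This check is a sketch and assumes a modular kernel;
+on kernels with the driver built in, the `modprobe` step is unnecessary:
+
+    $ sudo modprobe overlay
+    $ grep overlay /proc/filesystems
+    nodev	overlay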
+> **Caution:** If you have already run the Docker daemon on your Docker host +> and have images you want to keep, `push` them to Docker Hub or your private +> Docker Trusted Registry before attempting this procedure. 1. If it is running, stop the Docker `daemon`. @@ -163,28 +240,60 @@ The following procedure shows you how to configure your Docker host to use Overl Backing Filesystem: extfs - Notice that the *Backing filesystem* in the output above is showing as `extfs`. Multiple backing filesystems are supported but `extfs` (ext4) is recommended for production use cases. + Notice that the *Backing filesystem* in the output above is showing as +`extfs`. Multiple backing filesystems are supported but `extfs` (ext4) is +recommended for production use cases. -Your Docker host is now using the `overlay` storage driver. If you run the `mount` command, you'll find Docker has automatically created the `overlay` mount with the required "lowerdir", "upperdir", "merged" and "workdir" constructs. +Your Docker host is now using the `overlay` storage driver. If you run the +`mount` command, you'll find Docker has automatically created the `overlay` +mount with the required "lowerdir", "upperdir", "merged" and "workdir" +constructs. ## OverlayFS and Docker Performance -As a general rule, the `overlay` driver should be fast. Almost certainly faster than `aufs` and `devicemapper`. In certain circumstances it may also be faster than `btrfs`. That said, there are a few things to be aware of relative to the performance of Docker using the `overlay` storage driver. +As a general rule, the `overlay` driver should be fast; almost certainly faster + than `aufs` and `devicemapper`. In certain circumstances it may also be faster + than `btrfs`. That said, there are a few things to be aware of relative to the + performance of Docker using the `overlay` storage driver. -- **Page Caching**. OverlayFS supports page cache sharing. This means multiple containers accessing the same file can share a single page cache entry (or entries). This makes the `overlay` driver efficient with memory and a good option for PaaS and other high density use cases. +- **Page Caching**. OverlayFS supports page cache sharing. This means multiple +containers accessing the same file can share a single page cache entry (or +entries). This makes the `overlay` driver efficient with memory and a good +option for PaaS and other high density use cases. -- **copy_up**. As with AUFS, OverlayFS has to perform copy-up operations any time a container writes to a file for the first time. This can insert latency into the write operation — especially if the file being copied up is large. However, once the file has been copied up, all subsequent writes to that file occur without the need for further copy-up operations. +- **copy_up**. As with AUFS, OverlayFS has to perform copy-up operations any +time a container writes to a file for the first time. This can insert latency +into the write operation — especially if the file being copied up is +large. However, once the file has been copied up, all subsequent writes to that + file occur without the need for further copy-up operations. - The OverlayFS copy_up operation should be faster than the same operation with AUFS. This is because AUFS supports more layers than OverlayFS and it is possible to incur far larger latencies if searching through many AUFS layers. + The OverlayFS copy_up operation should be faster than the same operation +with AUFS.
This is because AUFS supports more layers than OverlayFS and it is +possible to incur far larger latencies when searching through many AUFS layers. -- **RPMs and Yum**. OverlayFS only implements a subset of the POSIX standards. This can result in certain OverlayFS operations breaking POSIX standards. One such operation is the *copy-up* operation. Therefore, using `yum` inside of a container on a Docker host using the `overlay` storage driver is unlikely to work without implementing workarounds. +- **RPMs and Yum**. OverlayFS only implements a subset of the POSIX standards. +This can result in certain OverlayFS operations breaking POSIX standards. One +such operation is the *copy-up* operation. Therefore, using `yum` inside of a +container on a Docker host using the `overlay` storage driver is unlikely to +work without implementing workarounds. -- **Inode limits**. Use of the `overlay` storage driver can cause excessive inode consumption. This is especially so as the number of images and containers on the Docker host grows. A Docker host with a large number of images and lots of started and stopped containers can quickly run out of inodes. +- **Inode limits**. Use of the `overlay` storage driver can cause excessive +inode consumption. This is especially so as the number of images and containers + on the Docker host grows. A Docker host with a large number of images and lots + of started and stopped containers can quickly run out of inodes. - Unfortunately you can only specify the number of inodes in a filesystem at the time of creation. For this reason, you may wish to consider putting `/var/lib/docker` on a separate device with its own filesystem or manually specifying the number of inodes when creating the filesystem. +Unfortunately, you can only specify the number of inodes in a filesystem at the +time of creation. For this reason, you may wish to consider putting +`/var/lib/docker` on a separate device with its own filesystem, or manually +specifying the number of inodes when creating the filesystem. The following generic performance best practices also apply to OverlayFS. -- **Solid State Devices (SSD)**. For best performance it is always a good idea to use fast storage media such as solid state devices (SSD). +- **Solid State Devices (SSD)**. For best performance it is always a good idea +to use fast storage media such as solid state devices (SSD). -- **Use Data Volumes**. Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you may want to place heavy write workloads on data volumes. +- **Use Data Volumes**. Data volumes provide the best and most predictable +performance. This is because they bypass the storage driver and do not incur +any of the potential overheads introduced by thin provisioning and +copy-on-write. For this reason, you should place heavy write workloads on data +volumes. diff --git a/docs/userguide/storagedriver/selectadriver.md b/docs/userguide/storagedriver/selectadriver.md index a45b517c13..8cc9b355ba 100644 --- a/docs/userguide/storagedriver/selectadriver.md +++ b/docs/userguide/storagedriver/selectadriver.md @@ -4,7 +4,7 @@ title = "Select a storage driver" description = "Learn how select the proper storage driver for your container."
keywords = ["container, storage, driver, AUFS, btfs, devicemapper,zvfs"] [menu.main] -parent = "mn_storage_docker" +parent = "engine_driver" weight = -1 +++ @@ -12,15 +12,27 @@ weight = -1 # Select a storage driver This page describes Docker's storage driver feature. It lists the storage -driver's that Docker supports and the basic commands associated with managing them. Finally, this page provides guidance on choosing a storage driver. +driver's that Docker supports and the basic commands associated with managing +them. Finally, this page provides guidance on choosing a storage driver. -The material on this page is intended for readers who already have an [understanding of the storage driver technology](imagesandcontainers.md). +The material on this page is intended for readers who already have an +[understanding of the storage driver technology](imagesandcontainers.md). ## A pluggable storage driver architecture -The Docker has a pluggable storage driver architecture. This gives you the flexibility to "plug in" the storage driver is best for your environment and use-case. Each Docker storage driver is based on a Linux filesystem or volume manager. Further, each storage driver is free to implement the management of image layers and the container layer in it's own unique way. This means some storage drivers perform better than others in different circumstances. +Docker has a pluggable storage driver architecture. This gives you the +flexibility to "plug in" the storage driver that is best for your environment +and use-case. Each Docker storage driver is based on a Linux filesystem or +volume manager. Further, each storage driver is free to implement the +management of image layers and the container layer in its own unique way. This +means some storage drivers perform better than others in different +circumstances. -Once you decide which driver is best, you set this driver on the Docker daemon at start time. As a result, the Docker daemon can only run one storage driver, and all containers created by that daemon instance use the same storage driver. The table below shows the supported storage driver technologies and their driver names: +Once you decide which driver is best, you set this driver on the Docker daemon +at start time. As a result, the Docker daemon can only run one storage driver, +and all containers created by that daemon instance use the same storage driver. + The table below shows the supported storage driver technologies and their +driver names: |Technology |Storage driver name | |--------------|---------------------| @@ -31,7 +43,8 @@ Once you decide which driver is best, you set this driver on the Docker daemon a |VFS* |`vfs` | |ZFS |`zfs` | -To find out which storage driver is set on the daemon , you use the `docker info` command: +To find out which storage driver is set on the daemon , you use the +`docker info` command: $ docker info Containers: 0 @@ -44,9 +57,19 @@ To find out which storage driver is set on the daemon , you use the `docker info Operating System: Ubuntu 15.04 ... output truncated ... -The `info` subcommand reveals that the Docker daemon is using the `overlay` storage driver with a `Backing Filesystem` value of `extfs`. The `extfs` value means that the `overlay` storage driver is operating on top of an existing (ext) filesystem. The backing filesystem refers to the filesystem that was used to create the Docker host's local storage area under `/var/lib/docker`. 
+The `info` subcommand reveals that the Docker daemon is using the `overlay` +storage driver with a `Backing Filesystem` value of `extfs`. The `extfs` value +means that the `overlay` storage driver is operating on top of an existing +(ext) filesystem. The backing filesystem refers to the filesystem that was used + to create the Docker host's local storage area under `/var/lib/docker`. -Which storage driver you use, in part, depends on the backing filesystem you plan to use for your Docker host's local storage area. Some storage drivers can operate on top of different backing filesystems. However, other storage drivers require the backing filesystem to be the same as the storage driver. For example, the `btrfs` storage driver on a `btrfs` backing filesystem. The following table lists each storage driver and whether it must match the host's backing file system: +Which storage driver you use, in part, depends on the backing filesystem you +plan to use for your Docker host's local storage area. Some storage drivers can + operate on top of different backing filesystems. However, other storage +drivers require the backing filesystem to be the same as the storage driver. +For example, the `btrfs` storage driver on a Btrfs backing filesystem. The +following table lists each storage driver and whether it must match the host's +backing file system: |Storage driver |Must match backing filesystem | |---------------|------------------------------| @@ -58,9 +81,12 @@ Which storage driver you use, in part, depends on the backing filesystem you pla |zfs |Yes | -You can set the storage driver by passing the `--storage-driver=` option to the `docker daemon` command line or by setting the option on the `DOCKER_OPTS` line in `/etc/default/docker` file. +You can set the storage driver by passing the `--storage-driver=` option +to the `docker daemon` command line, or by setting the option on the +`DOCKER_OPTS` line in the `/etc/default/docker` file. -The following command shows how to start the Docker daemon with the `devicemapper` storage driver using the `docker daemon` command: +The following command shows how to start the Docker daemon with the +`devicemapper` storage driver using the `docker daemon` command: $ docker daemon --storage-driver=devicemapper & @@ -90,25 +116,82 @@ The following command shows how to start the Docker daemon with the `devicemappe Operating System: Ubuntu 15.04 -Your choice of storage driver can affect the performance of your containerized applications. So it's important to understand the different storage driver options available and select the right one for your application. Later, in this page you'll find some advice for choosing an appropriate driver. +Your choice of storage driver can affect the performance of your containerized +applications. So it's important to understand the different storage driver +options available and select the right one for your application. Later in this + page you'll find some advice for choosing an appropriate driver. ## Shared storage systems and the storage driver -Many enterprises consume storage from shared storage systems such as SAN and NAS arrays. These often provide increased performance and availability, as well as advanced features such as thin provisioning, deduplication and compression. +Many enterprises consume storage from shared storage systems such as SAN and +NAS arrays. These often provide increased performance and availability, as well + as advanced features such as thin provisioning, deduplication and compression.
-The Docker storage driver and data volumes can both operate on top of storage provided by shared storage systems. This allows Docker to leverage the increased performance and availability these systems provide. However, Docker does not integrate with these underlying systems. +The Docker storage driver and data volumes can both operate on top of storage +provided by shared storage systems. This allows Docker to leverage the +increased performance and availability these systems provide. However, Docker +does not integrate with these underlying systems. -Remember that each Docker storage driver is based on a Linux filesystem or volume manager. Be sure to follow existing best practices for operating your storage driver (filesystem or volume manager) on top of your shared storage system. For example, if using the ZFS storage driver on top of *XYZ* shared storage system, be sure to follow best practices for operating ZFS filesystems on top of XYZ shared storage system. +Remember that each Docker storage driver is based on a Linux filesystem or +volume manager. Be sure to follow existing best practices for operating your +storage driver (filesystem or volume manager) on top of your shared storage +system. For example, if using the ZFS storage driver on top of *XYZ* shared +storage system, be sure to follow best practices for operating ZFS filesystems +on top of XYZ shared storage system. ## Which storage driver should you choose? -As you might expect, the answer to this question is "it depends". While there are some clear cases where one particular storage driver outperforms other for certain workloads, you should factor all of the following into your decision: +Several factors influence the selection of a storage driver. However, these two + facts must be kept in mind: -Choose a storage driver that you and your team/organization are comfortable with. Consider how much experience you have with a particular storage driver. There is no substitute for experience and it is rarely a good idea to try something brand new in production. That's what labs and laptops are for! +1. No single driver is well suited to every use-case +2. Storage drivers are improving and evolving all of the time -If your Docker infrastructure is under support contracts, choose an option that will get you good support. You probably don't want to go with a solution that your support partners have little or no experience with. +With these factors in mind, the following points, coupled with the table below, + should provide some guidance. -Whichever driver you choose, make sure it has strong community support and momentum. This is important because storage driver development in the Docker project relies on the community as much as the Docker staff to thrive. +### Stability +For the most stable and hassle-free Docker experience, you should consider the +following: + +- **Use the default storage driver for your distribution**. When Docker +installs, it chooses a default storage driver based on the configuration of +your system. Stability is an important factor influencing which storage driver +is used by default. Straying from this default may increase your chances of +encountering bugs and nuances. +- **Follow the configuration specified on the CS Engine +[compatibility matrix](https://www.docker.com/compatibility-maintenance)**. The + CS Engine is the commercially supported version of the Docker Engine. Its +code-base is identical to the open source Engine, but it has a limited set of +supported configurations.
These *supported configurations* use the most stable +and mature storage drivers. Straying from these configurations may also +increase your chances of encountering bugs and nuances. + +### Experience and expertise + +Choose a storage driver that you and your team/organization have experience +with. For example, if you use RHEL or one of its downstream forks, you may +already have experience with LVM and Device Mapper. If so, you may wish to use +the `devicemapper` driver. + +If you do not feel you have expertise with any of the storage drivers supported + by Docker, and you want an easy-to-use, stable Docker experience, you should +consider using the default driver installed by your distribution's Docker +package. + +### Future-proofing + +Many people consider OverlayFS to be the future of the Docker storage driver. +However, it is less mature and potentially less stable than some of the more +mature drivers such as `aufs` and `devicemapper`. For this reason, you should +use the OverlayFS driver with caution and expect to encounter more bugs and +nuances than if you were using a more mature driver. + +The following table lists each storage driver and provides insight into some +of their pros and cons. When selecting which storage driver to use, consider +the guidance offered by the table along with the points mentioned above. + +![](images/driver-pros-cons.png) ## Related information diff --git a/docs/userguide/storagedriver/zfs-driver.md b/docs/userguide/storagedriver/zfs-driver.md index 33ac83194e..3ecbb300d0 100644 --- a/docs/userguide/storagedriver/zfs-driver.md +++ b/docs/userguide/storagedriver/zfs-driver.md @@ -4,19 +4,30 @@ title = "ZFS storage in practice" description = "Learn how to optimize your use of ZFS driver." keywords = ["container, storage, driver, ZFS "] [menu.main] -parent = "mn_storage_docker" +parent = "engine_driver" +++ # Docker and ZFS in practice -ZFS is a next generation filesystem that supports many advanced storage technologies such as volume management, snapshots, checksumming, compression and deduplication, replication and more. +ZFS is a next generation filesystem that supports many advanced storage +technologies such as volume management, snapshots, checksumming, compression +and deduplication, replication and more. -It was created by Sun Microsystems (now Oracle Corporation) and is open sourced under the CDDL license. Due to licensing incompatibilities between the CDDL and GPL, ZFS cannot be shipped as part of the mainline Linux kernel. However, the ZFS On Linux (ZoL) project provides an out-of-tree kernel module and userspace tools which can be installed separately. +It was created by Sun Microsystems (now Oracle Corporation) and is open sourced + under the CDDL license. Due to licensing incompatibilities between the CDDL +and GPL, ZFS cannot be shipped as part of the mainline Linux kernel. However, +the ZFS On Linux (ZoL) project provides an out-of-tree kernel module and +userspace tools which can be installed separately. -The ZFS on Linux (ZoL) port is healthy and maturing. However, at this point in time it is not recommended to use the `zfs` Docker storage driver for production use unless you have substantial experience with ZFS on Linux. +The ZFS on Linux (ZoL) port is healthy and maturing. However, at this point in +time it is not recommended to use the `zfs` Docker storage driver for +production use unless you have substantial experience with ZFS on Linux. -> **Note:** There is also a FUSE implementation of ZFS on the Linux platform.
This should work with Docker but is not recommended. The native ZFS driver (ZoL) is more tested, more performant, and is more widely used. The remainder of this document will relate to the native ZoL port. +> **Note:** There is also a FUSE implementation of ZFS on the Linux platform. +> This should work with Docker but is not recommended. The native ZFS driver +> (ZoL) is more tested, more performant, and is more widely used. The remainder +> of this document will relate to the native ZoL port. ## Image layering and sharing with ZFS @@ -27,53 +38,96 @@ The Docker `zfs` storage driver makes extensive use of three ZFS datasets: - snapshots - clones -ZFS filesystems are thinly provisioned and have space allocated to them from a ZFS pool (zpool) via allocate on demand operations. Snapshots and clones are space-efficient point-in-time copies of ZFS filesystems. Snapshots are read-only. Clones are read-write. Clones can only be created from snapshots. This simple relationship is shown in the diagram below. +ZFS filesystems are thinly provisioned and have space allocated to them from a +ZFS pool (zpool) via allocate on demand operations. Snapshots and clones are +space-efficient point-in-time copies of ZFS filesystems. Snapshots are +read-only. Clones are read-write. Clones can only be created from snapshots. +This simple relationship is shown in the diagram below. ![](images/zfs_clones.jpg) -The solid line in the diagram shows the process flow for creating a clone. Step 1 creates a snapshot of the filesystem, and step two creates the clone from the snapshot. The dashed line shows the relationship between the clone and the filesystem, via the snapshot. All three ZFS datasets draw space form the same underlying zpool. +The solid line in the diagram shows the process flow for creating a clone. Step + 1 creates a snapshot of the filesystem, and step 2 creates the clone from +the snapshot. The dashed line shows the relationship between the clone and the +filesystem, via the snapshot. All three ZFS datasets draw space from the same +underlying zpool. -On Docker hosts using the `zfs` storage driver, the base layer of an image is a ZFS filesystem. Each child layer is a ZFS clone based on a ZFS snapshot of the layer below it. A container is a ZFS clone based on a ZFS Snapshot of the top layer of the image it's created from. All ZFS datasets draw their space from a common zpool. The diagram below shows how this is put together with a running container based on a two-layer image. +On Docker hosts using the `zfs` storage driver, the base layer of an image is a + ZFS filesystem. Each child layer is a ZFS clone based on a ZFS snapshot of the + layer below it. A container is a ZFS clone based on a ZFS snapshot of the top +layer of the image it's created from. All ZFS datasets draw their space from a +common zpool. The diagram below shows how this is put together with a running +container based on a two-layer image. ![](images/zfs_zpool.jpg) -The following process explains how images are layered and containers created. The process is based on the diagram above. +The following process explains how images are layered and containers created. +The process is based on the diagram above. 1. The base layer of the image exists on the Docker host as a ZFS filesystem. - This filesystem consumes space from the zpool used to create the Docker host's local storage area at `/var/lib/docker`. + This filesystem consumes space from the zpool used to create the Docker +host's local storage area at `/var/lib/docker`. -2.
Additional image layers are clones of the dataset hosting the image layer directly below it. +2. Additional image layers are clones of the dataset hosting the image layer +directly below it. - In the diagram, "Layer 1" is added by making a ZFS snapshot of the base layer and then creating a clone from that snapshot. The clone is writable and consumes space on-demand from the zpool. The snapshot is read-only, maintaining the base layer as an immutable object. + In the diagram, "Layer 1" is added by making a ZFS snapshot of the base +layer and then creating a clone from that snapshot. The clone is writable and +consumes space on-demand from the zpool. The snapshot is read-only, maintaining + the base layer as an immutable object. 3. When the container is launched, a read-write layer is added above the image. - In the diagram above, the container's read-write layer is created by making a snapshot of the top layer of the image (Layer 1) and creating a clone from that snapshot. + In the diagram above, the container's read-write layer is created by making + a snapshot of the top layer of the image (Layer 1) and creating a clone from +that snapshot. - As changes are made to the container, space is allocated to it from the zpool via allocate-on-demand operations. By default, ZFS will allocate space in blocks of 128K. + As changes are made to the container, space is allocated to it from the +zpool via allocate-on-demand operations. By default, ZFS will allocate space in + blocks of 128K. -This process of creating child layers and containers from *read-only* snapshots allows images to be maintained as immutable objects. +This process of creating child layers and containers from *read-only* snapshots + allows images to be maintained as immutable objects. ## Container reads and writes with ZFS -Container reads with the `zfs` storage driver are very simple. A newly launched container is based on a ZFS clone. This clone initially shares all of its data with the dataset it was created from. This means that read operations with the `zfs` storage driver are fast – even if the data being read was copied into the container yet. This sharing of data blocks is shown in the diagram below. +Container reads with the `zfs` storage driver are very simple. A newly launched + container is based on a ZFS clone. This clone initially shares all of its data + with the dataset it was created from. This means that read operations with the + `zfs` storage driver are fast – even if the data being read has not been +copied into the container yet. This sharing of data blocks is shown in the +diagram below. ![](images/zpool_blocks.jpg) -Writing new data to a container is accomplished via an allocate-on-demand operation. Every time a new area of the container needs writing to, a new block is allocated from the zpool. This means that containers consume additional space as new data is written to them. New space is allocated to the container (ZFS Clone) from the underlying zpool. +Writing new data to a container is accomplished via an allocate-on-demand +operation. Every time a new area of the container needs writing to, a new block + is allocated from the zpool. This means that containers consume additional +space as new data is written to them. New space is allocated to the container +(ZFS Clone) from the underlying zpool. -Updating *existing data* in a container is accomplished by allocating new blocks to the containers clone and storing the changed data in those new blocks.
The original are unchanged, allowing the underlying image dataset to remain immutable. This is the same as writing to a normal ZFS filesystem and is an implementation of copy-on-write semantics. +Updating *existing data* in a container is accomplished by allocating new +blocks to the container's clone and storing the changed data in those new +blocks. The original blocks are unchanged, allowing the underlying image +dataset to remain immutable. This is the same as writing to a normal ZFS +filesystem and is an implementation of copy-on-write semantics. ## Configure Docker with the ZFS storage driver -The `zfs` storage driver is only supported on a Docker host where `/var/lib/docker` is mounted as a ZFS filesystem. This section shows you how to install and configure native ZFS on Linux (ZoL) on an Ubuntu 14.04 system. +The `zfs` storage driver is only supported on a Docker host where +`/var/lib/docker` is mounted as a ZFS filesystem. This section shows you how to + install and configure native ZFS on Linux (ZoL) on an Ubuntu 14.04 system. ### Prerequisites -If you have already used the Docker daemon on your Docker host and have images you want to keep, `push` them Docker Hub or your private Docker Trusted Registry before attempting this procedure. +If you have already used the Docker daemon on your Docker host and have images +you want to keep, `push` them to Docker Hub or your private Docker Trusted +Registry before attempting this procedure. -Stop the Docker daemon. Then, ensure that you have a spare block device at `/dev/xvdb`. The device identifier may be be different in your environment and you should substitute your own values throughout the procedure. +Stop the Docker daemon. Then, ensure that you have a spare block device at +`/dev/xvdb`. The device identifier may be different in your environment and +you should substitute your own values throughout the procedure. ### Install Zfs on Ubuntu 14.04 LTS @@ -98,7 +152,8 @@ Stop the Docker daemon. Then, ensure that you have a spare block device at `/dev gpg: imported: 1 (RSA: 1) OK -3. Get the latest package lists for all registered repositories and package archives. +3. Get the latest package lists for all registered repositories and package +archives. $ sudo apt-get update Ign http://us-west-2.ec2.archive.ubuntu.com trusty InRelease @@ -156,7 +211,8 @@ Once ZFS is installed and loaded, you're ready to configure ZFS for Docker. zpool-docker 93.5K 3.84G 19K /zpool-docker zpool-docker/docker 19K 3.84G 19K /var/lib/docker - Now that you have a ZFS filesystem mounted to `/var/lib/docker`, the daemon should automatically load with the `zfs` storage driver. + Now that you have a ZFS filesystem mounted to `/var/lib/docker`, the daemon + should automatically load with the `zfs` storage driver. 5. Start the Docker daemon. @@ -165,9 +221,9 @@ Once ZFS is installed and loaded, you're ready to configure ZFS for Docker. The procedure for starting the Docker daemon may differ depending on the Linux distribution you are using. It is possible to force the Docker daemon - to start with the `zfs` storage driver by passing the `--storage-driver=zfs` - flag to the `docker daemon` command, or to the `DOCKER_OPTS` line in the - Docker config file. + to start with the `zfs` storage driver by passing the + `--storage-driver=zfs` flag to the `docker daemon` command, or to the + `DOCKER_OPTS` line in the Docker config file. 6. Verify that the daemon is using the `zfs` storage driver.
@@ -186,33 +242,55 @@ Once ZFS is installed and loaded, you're ready to configure ZFS for Docker. [...] The output of the command above shows that the Docker daemon is using the - `zfs` storage driver and that the parent dataset is the `zpool-docker/docker` - filesystem created earlier. + `zfs` storage driver and that the parent dataset is the + `zpool-docker/docker` filesystem created earlier. Your Docker host is now using ZFS to store and manage its images and containers. ## ZFS and Docker performance -There are several factors that influence the performance of Docker using the `zfs` storage driver. +There are several factors that influence the performance of Docker using the +`zfs` storage driver. -- **Memory**. Memory has a major impact on ZFS performance. This goes back to the fact that ZFS was originally designed for use on big Sun Solaris servers with large amounts of memory. Keep this in mind when sizing your Docker hosts. +- **Memory**. Memory has a major impact on ZFS performance. This goes back to +the fact that ZFS was originally designed for use on big Sun Solaris servers +with large amounts of memory. Keep this in mind when sizing your Docker hosts. -- **ZFS Features**. Using ZFS features, such as deduplication, can significantly increase the amount -of memory ZFS uses. For memory consumption and performance reasons it is -recommended to turn off ZFS deduplication. However, deduplication at other -layers in the stack (such as SAN or NAS arrays) can still be used as these do -not impact ZFS memory usage and performance. If using SAN, NAS or other hardware -RAID technologies you should continue to follow existing best practices for -using them with ZFS. +- **ZFS Features**. Using ZFS features, such as deduplication, can +significantly increase the amount of memory ZFS uses. For memory consumption +and performance reasons it is recommended to turn off ZFS deduplication. +However, deduplication at other layers in the stack (such as SAN or NAS arrays) + can still be used as these do not impact ZFS memory usage and performance. If +using SAN, NAS or other hardware RAID technologies you should continue to +follow existing best practices for using them with ZFS. -* **ZFS Caching**. ZFS caches disk blocks in a memory structure called the adaptive replacement cache (ARC). The *Single Copy ARC* feature of ZFS allows a single cached copy of a block to be shared by multiple clones of a filesystem. This means that multiple running containers can share a single copy of cached block. This means that ZFS is a good option for PaaS and other high density use cases. +- **ZFS Caching**. ZFS caches disk blocks in a memory structure called the +adaptive replacement cache (ARC). The *Single Copy ARC* feature of ZFS allows a + single cached copy of a block to be shared by multiple clones of a filesystem. + This means that multiple running containers can share a single copy of a cached +block. This makes ZFS a good option for PaaS and other high density use + cases. -- **Fragmentation**. Fragmentation is a natural byproduct of copy-on-write filesystems like ZFS.
However, ZFS writes in 128K blocks and allocates *slabs* (multiple 128K blocks) to CoW operations in an attempt to reduce fragmentation. The ZFS intent log (ZIL) and the coalescing of writes (delayed writes) also help to reduce fragmentation. +However, ZFS writes in 128K blocks and allocates *slabs* +(multiple 128K blocks) to CoW operations in an attempt to reduce fragmentation. + The ZFS intent log (ZIL) and the coalescing of writes (delayed writes) also +help to reduce fragmentation. -- **Use the native ZFS driver for Linux**. Although the Docker `zfs` storage driver supports the ZFS FUSE implementation, it is not recommended when high performance is required. The native ZFS on Linux driver tends to perform better than the FUSE implementation. +- **Use the native ZFS driver for Linux**. Although the Docker `zfs` storage +driver supports the ZFS FUSE implementation, it is not recommended when high +performance is required. The native ZFS on Linux driver tends to perform better + than the FUSE implementation. The following generic performance best practices also apply to ZFS. -- **Use of SSD**. For best performance it is always a good idea to use fast storage media such as solid state devices (SSD). However, if you only have a limited amount of SSD storage available it is recommended to place the ZIL on SSD. +- **Use of SSD**. For best performance it is always a good idea to use fast +storage media such as solid state devices (SSD). However, if you only have a +limited amount of SSD storage available it is recommended to place the ZIL on +SSD. -- **Use Data Volumes**. Data volumes provide the best and most predictable performance. This is because they bypass the storage driver and do not incur any of the potential overheads introduced by thin provisioning and copy-on-write. For this reason, you may want to place heavy write workloads on data volumes. +- **Use Data Volumes**. Data volumes provide the best and most predictable +performance. This is because they bypass the storage driver and do not incur +any of the potential overheads introduced by thin provisioning and +copy-on-write. For this reason, you should place heavy write workloads on data +volumes. diff --git a/errors/daemon.go b/errors/daemon.go index 278e712275..78c4422ab1 100644 --- a/errors/daemon.go +++ b/errors/daemon.go @@ -733,6 +733,15 @@ var ( HTTPStatusCode: http.StatusConflict, }) + // ErrorCodeExecRestarting is generated when we try to start an exec + // but the container is restarting. + ErrorCodeExecRestarting = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "EXECRESTARTING", + Message: "Container %s is restarting, wait until the container is running", + Description: "An attempt to start an 'exec' was made, but the owning container is restarting", + HTTPStatusCode: http.StatusConflict, + }) + // ErrorCodeExecRunning is generated when we try to start an exec // but its already running. ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ @@ -742,6 +751,15 @@ var ( HTTPStatusCode: http.StatusInternalServerError, }) + // ErrorCodeExecExited is generated when we try to start an exec + // but it has already run. + ErrorCodeExecExited = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "EXECEXITED", + Message: "Error: Exec command %s has already run", + Description: "An attempt to start an 'exec' was made, but 'exec' was already run", + HTTPStatusCode: http.StatusConflict, + }) + // ErrorCodeExecCantRun is generated when we try to start an exec // but it failed for some reason.
ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{ @@ -957,4 +975,13 @@ var ( Description: "Engine's predefined networks cannot be deleted", HTTPStatusCode: http.StatusForbidden, }) + + // ErrorCodeMultipleNetworkConnect is generated when more than one network is passed + // when creating a container + ErrorCodeMultipleNetworkConnect = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "CANNOT_CONNECT_TO_MULTIPLE_NETWORKS", + Message: "Container cannot be connected to %s", + Description: "A container can only be connected to one network at a time", + HTTPStatusCode: http.StatusBadRequest, + }) ) diff --git a/hack/install.sh b/hack/install.sh index eae3b13ad0..79ac0add6c 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -105,6 +105,14 @@ rpm_import_repository_key() { rm -rf "$tmpdir" } +semverParse() { + major="${1%%.*}" + minor="${1#$major.}" + minor="${minor%%.*}" + patch="${1#$major.$minor.}" + patch="${patch%%[-.]*}" +} + do_install() { case "$(uname -m)" in *64) @@ -119,6 +127,21 @@ do_install() { esac if command_exists docker; then + version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')" + MAJOR_W=1 + MINOR_W=10 + + semverParse $version + + shouldWarn=0 + if [ $major -lt $MAJOR_W ]; then + shouldWarn=1 + fi + + if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then + shouldWarn=1 + fi + cat >&2 <<-'EOF' Warning: the "docker" command appears to already exist on this system. If you already have Docker installed, this script can cause trouble, which is why we're displaying this warning and provide the opportunity to cancel the installation. If you installed the current Docker package using this script and are using it + EOF + + if [ $shouldWarn -eq 1 ]; then + cat >&2 <<-'EOF' + again to update Docker, we urge you to migrate your image store before upgrading + to v1.10+. + + You can find instructions for this here: + https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration + EOF + else + cat >&2 <<-'EOF' again to update Docker, you can safely ignore this message. + EOF + fi + + cat >&2 <<-'EOF' You may press Ctrl+C now to abort this script.
EOF diff --git a/hack/make/.build-deb/docker-engine.install b/hack/make/.build-deb/docker-engine.install index 8e0739e217..dc6b25f04f 100644 --- a/hack/make/.build-deb/docker-engine.install +++ b/hack/make/.build-deb/docker-engine.install @@ -10,4 +10,3 @@ contrib/init/systemd/docker.socket lib/systemd/system/ contrib/mk* usr/share/docker-engine/contrib/ contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ -contrib/apparmor/docker-engine etc/apparmor.d/ diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec index 9fa8984e38..a6f3a74bec 100644 --- a/hack/make/.build-rpm/docker-engine.spec +++ b/hack/make/.build-rpm/docker-engine.spec @@ -17,7 +17,7 @@ Packager: Docker %global debug_package %{nil} # is_systemd conditional -%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1300 +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 %global is_systemd 1 %endif @@ -25,9 +25,12 @@ Packager: Docker # most are already in the container (see contrib/builder/rpm/generate.sh) # only require systemd on those systems %if 0%{?is_systemd} +%if 0%{?suse_version} >= 1210 +BuildRequires: systemd-rpm-macros +%{?systemd_requires} +%else BuildRequires: pkgconfig(systemd) Requires: systemd-units -%if !0%{?suse_version} BuildRequires: pkgconfig(libsystemd-journal) %endif %else @@ -40,16 +43,20 @@ Requires(preun): initscripts # required packages on install Requires: /bin/sh Requires: iptables +%if !0%{?suse_version} Requires: libcgroup +%else +Requires: libcgroup1 +%endif Requires: tar Requires: xz %if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 # Resolves: rhbz#1165615 Requires: device-mapper-libs >= 1.02.90-1 %endif -%if 0%{?oraclelinux} == 6 -# Require Oracle Unbreakable Enterprise Kernel R3 and newer device-mapper -Requires: kernel-uek >= 3.8 +%if 0%{?oraclelinux} >= 6 +# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper +Requires: kernel-uek >= 4.1 Requires: device-mapper >= 1.02.90-2 %endif @@ -105,7 +112,7 @@ for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. 
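As a quick illustration of the version check added to hack/install.sh above: semverParse splits a version string using only shell parameter expansion, and the installer warns when the detected client is older than 1.10. A minimal sketch of its behavior (the sample versions are illustrative, not part of the patch):

    $ semverParse "1.9.1"; echo "$major.$minor.$patch"
    1.9.1
    $ semverParse "1.10.0-rc3"; echo "$major.$minor.$patch"   # a "-rc3"-style suffix is stripped from patch
    1.10.0

With major=1 and minor=9 the migration warning fires (shouldWarn=1), so a 1.9.x client is pointed at the content-addressability migration instructions before upgrading.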
%prep -%if 0%{?centos} <= 6 +%if 0%{?centos} <= 6 || 0%{?oraclelinux} <= 6 %setup -n %{name} %else %autosetup -n %{name} diff --git a/hack/make/build-deb b/hack/make/build-deb index b8fd7b3585..d9e44d23f6 100644 --- a/hack/make/build-deb +++ b/hack/make/build-deb @@ -59,8 +59,6 @@ set -e echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" fi cat >> "$DEST/$version/Dockerfile.build" <<-EOF - RUN GOPATH=/go go build -o aagen contrib/apparmor/*.go \ - && ./aagen contrib/apparmor/docker-engine RUN ln -sfv hack/make/.build-deb debian RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog RUN dpkg-buildpackage -uc -us diff --git a/hack/make/build-rpm b/hack/make/build-rpm index 36abc6f29b..fed3827ed2 100644 --- a/hack/make/build-rpm +++ b/hack/make/build-rpm @@ -10,17 +10,20 @@ set -e # TODO consider using frozen images for the dockercore/builder-rpm tags rpmName=docker-engine - rpmVersion="${VERSION%%-*}" + rpmVersion="$VERSION" rpmRelease=1 # rpmRelease versioning is as follows # Docker 1.7.0: version=1.7.0, release=1 # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 + # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH # if we have a "-rc*" suffix, set appropriate release - if [[ "$VERSION" == *-rc* ]]; then - rcVersion=${VERSION#*-rc} + if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then + rcVersion=${rpmVersion#*-rc} + rpmVersion=${rpmVersion%-rc*} rpmRelease="0.${rcVersion}.rc${rcVersion}" fi @@ -30,15 +33,19 @@ set -e fi # if we have a "-dev" suffix or changes in Git, let's make this package version more complex so it works better - if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then gitUnix="$(git log -1 --pretty='%at')" gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" gitCommit="$(git log -1 --pretty='%h')" gitVersion="${gitDate}.git${gitCommit}" # gitVersion is now something like '20150128.112847.17e840a' + rpmVersion="${rpmVersion%-dev}" rpmRelease="0.0.$gitVersion" fi + # Replace any other dashes with periods + rpmVersion="${rpmVersion//-/.}" + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" rpmDate="$(date +'%a %b %d %Y')" @@ -87,6 +94,7 @@ set -e RUN tar -cz -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar.gz ${rpmName} RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ --define '_release $rpmRelease' \ --define '_version $rpmVersion' \ --define '_origversion $VERSION' \ diff --git a/hack/vendor.sh b/hack/vendor.sh index 091c9775ec..e2936bc337 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -22,12 +22,12 @@ clone git github.com/vdemeester/shakers 3c10293ce22b900c27acad7b28656196fcc2f73b clone git golang.org/x/net 47990a1ba55743e6ef1affd3a14e5bac8553615d https://github.com/golang/net.git clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 clone git github.com/docker/go-connections v0.1.2 -clone git github.com/docker/engine-api v0.2.2 +clone git github.com/docker/engine-api v0.2.3 clone git github.com/RackSec/srslog 6eb773f331e46fbba8eecb8e794e635e75fc04de clone git 
github.com/imdario/mergo 0.2.1 #get libnetwork packages -clone git github.com/docker/libnetwork v0.5.6 +clone git github.com/docker/libnetwork v0.6.0-rc7 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4 @@ -43,21 +43,21 @@ fix_rewritten_imports github.com/coreos/etcd clone git github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec clone git github.com/hashicorp/consul v0.5.2 clone git github.com/boltdb/bolt v1.1.0 -clone git github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02 +clone git github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 # get graph and distribution packages -clone git github.com/docker/distribution cb08de17d74bef86ce6c5abe8b240e282f5750be +clone git github.com/docker/distribution c301f8ab27f4913c968b8d73a38e5dda79b9d3d7 clone git github.com/vbatts/tar-split v0.9.11 # get desired notary commit, might also need to be updated in Dockerfile -clone git github.com/docker/notary docker-v1.10-3 +clone git github.com/docker/notary docker-v1.10-5 clone git google.golang.org/grpc 174192fc93efcb188fc8f46ca447f0da606b6885 https://github.com/grpc/grpc-go.git clone git github.com/miekg/pkcs11 80f102b5cac759de406949c47f0928b99bd64cdf -clone git github.com/jfrazelle/go v1.5.1-1 +clone git github.com/docker/go v1.5.1-1-1-gbaf439e clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c -clone git github.com/opencontainers/runc d97d5e8b007e4657316eed76ea30bc0f690230cf # libcontainer +clone git github.com/opencontainers/runc 3d8a20bb772defc28c355534d83486416d1719b4 # libcontainer clone git github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1 # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) clone git github.com/coreos/go-systemd v4 diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index ca5de07540..bdded2c5b8 100644 --- a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -20,6 +20,7 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/engine-api/types" containertypes "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" "github.com/go-check/check" ) @@ -604,6 +605,29 @@ func (s *DockerSuite) TestContainerApiCreateEmptyConfig(c *check.C) { c.Assert(string(b), checker.Equals, expected) } +func (s *DockerSuite) TestContainerApiCreateMultipleNetworksConfig(c *check.C) { + // Container creation must fail if client specified configurations for more than one network + config := map[string]interface{}{ + "Image": "busybox", + "NetworkingConfig": networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, + }, + }, + } + + status, b, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + // network name order in error message is not deterministic + c.Assert(string(b), checker.Contains, "Container cannot be connected to [") + c.Assert(string(b), checker.Contains, "net1") + c.Assert(string(b), checker.Contains, "net2") + c.Assert(string(b), checker.Contains, "net3") +} + func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) { testRequires(c, DaemonIsLinux) 
hostName := "test-host" diff --git a/integration-cli/docker_api_create_test.go b/integration-cli/docker_api_create_test.go index bfa11ff0ff..d29b35501b 100644 --- a/integration-cli/docker_api_create_test.go +++ b/integration-cli/docker_api_create_test.go @@ -32,4 +32,14 @@ func (s *DockerSuite) TestApiCreateWithNotExistImage(c *check.C) { expected = "No such image: test456:latest" c.Assert(strings.TrimSpace(string(resp)), checker.Equals, expected) + config3 := map[string]interface{}{ + "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + } + + status, resp, err = sockRequest("POST", "/containers/create?name="+name, config3) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" + c.Assert(strings.TrimSpace(string(resp)), checker.Equals, expected) + } diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go index de504751de..c6ed847e24 100644 --- a/integration-cli/docker_api_exec_test.go +++ b/integration-cli/docker_api_exec_test.go @@ -8,6 +8,7 @@ import ( "fmt" "net/http" "strings" + "time" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" @@ -66,33 +67,23 @@ func (s *DockerSuite) TestExecAPIStart(c *check.C) { testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvagable to Windows to Windows CI dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") - startExec := func(id string, code int) { - resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") - c.Assert(err, checker.IsNil) - - b, err := readBody(body) - comment := check.Commentf("response body: %s", b) - c.Assert(err, checker.IsNil, comment) - c.Assert(resp.StatusCode, checker.Equals, code, comment) - } - id := createExec(c, "test") - startExec(id, http.StatusOK) + startExec(c, id, http.StatusOK) id = createExec(c, "test") dockerCmd(c, "stop", "test") - startExec(id, http.StatusNotFound) + startExec(c, id, http.StatusNotFound) dockerCmd(c, "start", "test") - startExec(id, http.StatusNotFound) + startExec(c, id, http.StatusNotFound) // make sure exec is created before pausing id = createExec(c, "test") dockerCmd(c, "pause", "test") - startExec(id, http.StatusConflict) + startExec(c, id, http.StatusConflict) dockerCmd(c, "unpause", "test") - startExec(id, http.StatusOK) + startExec(c, id, http.StatusOK) } func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { @@ -108,6 +99,30 @@ func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) } +// #19362 +func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + execID := createExec(c, "test") + startExec(c, execID, http.StatusOK) + + timeout := time.After(10 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, execID, &execJSON) + if !execJSON.Running { + break + } + } + + startExec(c, execID, http.StatusConflict) +} + func createExec(c *check.C, name string) string { _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) c.Assert(err, checker.IsNil, check.Commentf(string(b))) @@ -118,3 +133,22 @@ func 
createExec(c *check.C, name string) string { c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) return createResp.ID } + +func startExec(c *check.C, id string, code int) { + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, code, comment) +} + +func inspectExec(c *check.C, id string, out interface{}) { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index b60b24dace..766c0b3ba7 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -6632,3 +6632,18 @@ func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { c.Assert(out, checker.Not(checker.Contains), "Using cache") } + +// #19375 +func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { + cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") + + cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") +} diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index 096074c27e..633313e67c 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -301,18 +301,19 @@ func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { } func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) + withTagName := fmt.Sprintf("%s:latest", repoName) // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + dockerCmd(c, "tag", "busybox", withTagName) + dockerCmd(c, "push", withTagName) + dockerCmd(c, "rmi", withTagName) // Try trusted create on untrusted tag - createCmd := exec.Command(dockerBinary, "create", repoName) + createCmd := exec.Command(dockerBinary, "create", withTagName) s.trustedCmd(createCmd) out, _, err := runCommandWithOutput(createCmd) c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, "trust data unavailable. 
Has a notary repository been initialized?", check.Commentf("Missing expected output on trusted create:\n%s", out)) + c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) } diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 9280d597a2..36175a23bc 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -212,3 +212,9 @@ func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { // /etc/hosts should be a regular file c.Assert(out, checker.Matches, "^-.+\n") } + +func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { + dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") + dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") + dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream") +} diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go index d09c2f2917..fc59e855cc 100644 --- a/integration-cli/docker_cli_network_unix_test.go +++ b/integration-cli/docker_cli_network_unix_test.go @@ -12,6 +12,7 @@ import ( "os" "sort" "strings" + "time" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/runconfig" @@ -56,6 +57,10 @@ func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { mux := http.NewServeMux() s.server = httptest.NewServer(mux) c.Assert(s.server, check.NotNil, check.Commentf("Failed to start a HTTP Server")) + setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIpamDriver) +} + +func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") @@ -199,12 +204,12 @@ func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { err := os.MkdirAll("/etc/docker/plugins", 0755) c.Assert(err, checker.IsNil) - fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", dummyNetworkDriver) - err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) c.Assert(err, checker.IsNil) - ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", dummyIpamDriver) - err = ioutil.WriteFile(ipamFileName, []byte(s.server.URL), 0644) + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) c.Assert(err, checker.IsNil) } @@ -277,6 +282,24 @@ func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { } } +func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be created again + out, _, err := dockerCmdWithError("network", "create", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be removed + out, _, err := dockerCmdWithError("network", "rm", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + func (s *DockerNetworkSuite) 
TestDockerNetworkLsFilter(c *check.C) { out, _ := dockerCmd(c, "network", "create", "dev") defer func() { @@ -812,6 +835,51 @@ func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { c.Assert(out, checker.Contains, unpPort2) } +func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dnd := "dnd" + did := "did" + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + s.d.StartWithBusybox() + _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) + + // Kill daemon and restart + if err = s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + + server.Close() + + startTime := time.Now().Unix() + if err = s.d.Restart(); err != nil { + c.Fatal(err) + } + lapse := time.Now().Unix() - startTime + if lapse > 60 { + // In normal scenarios, daemon restart takes ~1 second. + // Plugin retry mechanism can delay the daemon start. systemd may not like it. + // Avoid accessing plugins during daemon bootup + c.Logf("daemon restart took too long : %d seconds", lapse) + } + + // Restart the custom dummy plugin + mux = http.NewServeMux() + server = httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + // trying to reuse the same ip must succeed + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) +} + func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { // Verify endpoint MAC address is correctly populated in container's network settings nwn := "ov" @@ -1055,10 +1123,12 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { // run a container on first network specifying the ip addresses dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") // connect the container to the second network specifying the preferred ip addresses dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") // Stop and restart the container @@ -1066,7 +1136,9 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { dockerCmd(c, "start", "c0") // verify preferred addresses are applied + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") // Still it should fail to connect to the default network with a specified IP (whatever ip) @@ -1076,9 +1148,32 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { } +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) { + // create a container + dockerCmd(c, "create", "--name", "c0", "busybox", 
"top") + + // create a network + dockerCmd(c, "network", "create", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0") + assertNwIsAvailable(c, "n0") + + // connect the container to the network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // start the container, verify config has not changed and ip addresses are assigned + dockerCmd(c, "start", "c0") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // stop the container and check ip config has not changed + dockerCmd(c, "stop", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") +} + func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedPreferredIP(c *check.C) { // preferred IP is not supported on predefined networks - for _, mode := range []string{"none", "host", "bridge"} { + for _, mode := range []string{"none", "host", "bridge", "default"} { checkUnsupportedNetworkAndIP(c, mode) } @@ -1104,6 +1199,20 @@ func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) } +func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { + if ipv4 != "" { + out, err := inspectField(cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + } + + if ipv6 != "" { + out, err := inspectField(cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) + } +} + func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", nwname), cName) c.Assert(strings.TrimSpace(out), check.Equals, ipv4) @@ -1231,4 +1340,51 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { // ping to net2 scoped alias "bar" must still succeed _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") c.Assert(err, check.IsNil) + + // verify the alias option is rejected when running on predefined network + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + + // verify the alias option is rejected when connecting to predefined network + out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + c.Assert(waitRun("c1.net1"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + c.Assert(waitRun("c2.net1"), check.IsNil) + + // ping first container by its unqualified name + _, _, err := 
dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") + c.Assert(err, check.IsNil) + + // ping first container by its qualified name + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") + c.Assert(err, check.IsNil) + + // ping with first qualified name masked by an additional domain. should fail + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { + dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") + c.Assert(waitRun("bb"), check.IsNil) + + ns0, _ := dockerCmd(c, "inspect", "--format='{{ .NetworkSettings.Networks.bridge }}'", "bb") + + // A failing redundant network connect should not alter current container's endpoint settings + _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") + c.Assert(err, check.NotNil) + + ns1, _ := dockerCmd(c, "inspect", "--format='{{ .NetworkSettings.Networks.bridge }}'", "bb") + c.Assert(ns1, check.Equals, ns0) } diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go index 0d9306f4ad..a4361f2eaa 100644 --- a/integration-cli/docker_cli_port_test.go +++ b/integration-cli/docker_cli_port_test.go @@ -293,3 +293,24 @@ func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { // Port is still bound after the Container is removed c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) } + +func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") + dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", + "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") + c.Assert(waitRun("c1"), check.IsNil) + + _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.NotNil, + check.Commentf("Port mapping on internal network is expected to fail")) + + // Connect container to another normal bridge network + dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") + dockerCmd(c, "network", "connect", "foo-net", "c1") + + _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.IsNil, + check.Commentf("Port mapping on the new network is expected to succeed")) + +} diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index b7c234e028..65be5a7c44 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -638,3 +638,20 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { } } + +func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { + dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + out, _ := dockerCmd(c, "ps") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := "0.0.0.0:5000->5000/tcp" + fields := strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) + + dockerCmd(c, "kill", "foo") + dockerCmd(c, "wait", "foo") + out, _ = dockerCmd(c, "ps", "-l") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + fields = strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should 
not get %v", expected)) +} diff --git a/integration-cli/docker_cli_pull_local_test.go b/integration-cli/docker_cli_pull_local_test.go index 1037951d24..768bb8172e 100644 --- a/integration-cli/docker_cli_pull_local_test.go +++ b/integration-cli/docker_cli_pull_local_test.go @@ -279,18 +279,6 @@ func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { testPullIDStability(c) } -// TestPullFallbackOn404 tries to pull a nonexistent manifest and confirms that -// the pull falls back to the v1 protocol. -// -// Ref: docker/docker#18832 -func (s *DockerRegistrySuite) TestPullFallbackOn404(c *check.C) { - repoName := fmt.Sprintf("%v/does/not/exist", privateRegistryURL) - - out, _, _ := dockerCmdWithError("pull", repoName) - - c.Assert(out, checker.Contains, "v1 ping attempt") -} - func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { pushDigest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index 0c06499e29..9d36296091 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -62,6 +62,7 @@ func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { out, err := s.CmdWithError("pull", "-a", e.Alias) c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", out)) c.Assert(out, checker.Contains, fmt.Sprintf("Error: image %s not found", e.Repo), check.Commentf("expected image not found error messages")) + c.Assert(out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) } } diff --git a/integration-cli/docker_cli_pull_trusted_test.go b/integration-cli/docker_cli_pull_trusted_test.go index fbd50b5d96..86868276f1 100644 --- a/integration-cli/docker_cli_pull_trusted_test.go +++ b/integration-cli/docker_cli_pull_trusted_test.go @@ -47,7 +47,7 @@ func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { } func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) dockerCmd(c, "push", repoName) diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index c100772b00..0e6717644d 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/docker/distribution/digest" "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" @@ -157,6 +158,9 @@ func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { // ensure that none of the layers were mounted from another repository during push c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + digest1 := digest.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) // retag the image to upload the same layers to another repo in the same registry dockerCmd(c, "tag", "busybox", destRepoName) @@ -166,6 +170,10 @@ func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { // ensure that layers 
were mounted from the first repo during push c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) + digest2 := digest.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Equals, digest2) + // ensure that we can pull and run the cross-repo-pushed repository dockerCmd(c, "rmi", destRepoName) dockerCmd(c, "pull", destRepoName) @@ -183,6 +191,9 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c // ensure that none of the layers were mounted from another repository during push c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + digest1 := digest.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) // retag the image to upload the same layers to another repo in the same registry dockerCmd(c, "tag", "busybox", destRepoName) @@ -192,6 +203,10 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, false) + digest2 := digest.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Equals, digest2) + // ensure that we can pull and run the second pushed repository dockerCmd(c, "rmi", destRepoName) dockerCmd(c, "pull", destRepoName) @@ -200,7 +215,7 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c } func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) @@ -252,7 +267,7 @@ func (s *DockerTrustSuite) TestTrustedPushWithDeprecatedEnvPasswords(c *check.C) } func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) @@ -264,7 +279,7 @@ func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { } func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index f34c235d35..e4f8045fdc 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -22,6 +22,7 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/resolvconf" "github.com/go-check/check" ) @@ -1258,13 +1259,13 @@ func (s *DockerSuite) 
TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { c.Fatalf("/etc/resolv.conf does not exist") } - hostNamservers := resolvconf.GetNameservers(origResolvConf) + hostNamservers := resolvconf.GetNameservers(origResolvConf, netutils.IP) hostSearch := resolvconf.GetSearchDomains(origResolvConf) var out string out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") - if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" { + if actualNameservers := resolvconf.GetNameservers([]byte(out), netutils.IP); string(actualNameservers[0]) != "127.0.0.1" { c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) } @@ -1280,7 +1281,7 @@ func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") - actualNameservers := resolvconf.GetNameservers([]byte(out)) + actualNameservers := resolvconf.GetNameservers([]byte(out), netutils.IP) if len(actualNameservers) != len(hostNamservers) { c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers)) } @@ -1311,11 +1312,11 @@ func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { c.Fatalf("/etc/resolv.conf does not exist") } - hostNamservers = resolvconf.GetNameservers(resolvConf) + hostNamservers = resolvconf.GetNameservers(resolvConf, netutils.IP) hostSearch = resolvconf.GetSearchDomains(resolvConf) out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") - if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { + if actualNameservers = resolvconf.GetNameservers([]byte(out), netutils.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) } @@ -3178,7 +3179,7 @@ func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { // Windows does not support this functionality testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL) + repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) // tag the image and upload it to the private registry dockerCmd(c, "tag", "busybox", repoName) dockerCmd(c, "push", repoName) @@ -3465,6 +3466,84 @@ func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { } } +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "SHOULD_NOT_EXIST" + name := "cgroup-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. 
+ if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunAbsoluteInvalidCgroupParent checks that a specially-crafted absolute cgroup parent doesn't cause Docker to crash or start modifying /. +func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "/SHOULD_NOT_EXIST" + name := "cgroup-absolute-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. 
Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { // Not applicable on Windows as uses Unix specific functionality // --read-only + userns has remount issues @@ -4058,3 +4137,42 @@ func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") c.Assert(strings.TrimSpace(out), check.Equals, "hello") } + +func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = "c:" + } + + dockerCmd(c, "volume", "create", "--name", "test") + + dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") + + dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "rm", "-fv", "test") + dockerCmd(c, "volume", "inspect", "test") + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = "c:" + } + + dockerCmd(c, "volume", "create", "--name", "test") + dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + + // Remove the parent so there are no other references to the volumes + dockerCmd(c, "rm", "-f", "parent") + // now remove the child and ensure the named volume (and only the named volume) still exists + dockerCmd(c, "rm", "-fv", "child") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} diff --git a/integration-cli/docker_cli_start_volume_driver_unix_test.go b/integration-cli/docker_cli_start_volume_driver_unix_test.go index a8ad58f233..30c540428b 100644 --- a/integration-cli/docker_cli_start_volume_driver_unix_test.go +++ b/integration-cli/docker_cli_start_volume_driver_unix_test.go @@ -228,6 +228,9 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(out, checker.Contains, s.server.URL) + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + p := hostVolumePath("external-volume-test") _, err = os.Lstat(p) c.Assert(err, checker.NotNil) @@ -296,33 +299,6 @@ func hostVolumePath(name string) string { return fmt.Sprintf("/var/lib/docker/volumes/%s", name) } -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamedCheckBindLocalVolume(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - expected := s.server.URL - dockerfile := fmt.Sprintf(`FROM busybox:latest - RUN mkdir /nobindthenlocalvol - RUN echo %s > /nobindthenlocalvol/test - VOLUME ["/nobindthenlocalvol"]`, expected) - - img := "test-checkbindlocalvolume" - - _, err = buildImageWithOutInDamon(s.d.sock(), img, dockerfile, true) - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "--rm", "--name", "test-data-nobind", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", img, "cat", "/nobindthenlocalvol/test") - c.Assert(err, checker.IsNil) - - 
c.Assert(out, checker.Contains, expected) - - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 1) - c.Assert(s.ec.unmounts, checker.Equals, 1) -} - // Make sure a request to use a down driver doesn't block other requests func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { specPath := "/etc/docker/plugins/down-driver.spec" @@ -389,6 +365,9 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyE c.Fatal("volume creates fail when plugin not immediately available") } + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + c.Assert(s.ec.activations, checker.Equals, 1) c.Assert(s.ec.creations, checker.Equals, 1) c.Assert(s.ec.removals, checker.Equals, 1) @@ -412,7 +391,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c c.Assert(mounts[0].Driver, checker.Equals, "test-external-volume-driver") } -func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverList(c *check.C) { +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *check.C) { dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc") out, _ := dockerCmd(c, "volume", "ls") ls := strings.Split(strings.TrimSpace(out), "\n") @@ -426,7 +405,7 @@ func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverList(c *check.C c.Assert(s.ec.lists, check.Equals, 1) } -func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverGet(c *check.C) { +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { out, _, err := dockerCmdWithError("volume", "inspect", "dummy") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(s.ec.gets, check.Equals, 1) diff --git a/integration-cli/docker_cli_volume_driver_compat_unix_test.go b/integration-cli/docker_cli_volume_driver_compat_unix_test.go new file mode 100644 index 0000000000..b15baccb03 --- /dev/null +++ b/integration-cli/docker_cli_volume_driver_compat_unix_test.go @@ -0,0 +1,215 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + + "github.com/go-check/check" +) + +func init() { + check.Suite(&DockerExternalVolumeSuiteCompatV1_1{ + ds: &DockerSuite{}, + }) +} + +type DockerExternalVolumeSuiteCompatV1_1 struct { + server *httptest.Server + ds *DockerSuite + d *Daemon + ec *eventCounter +} + +func (s *DockerExternalVolumeSuiteCompatV1_1) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuiteCompatV1_1) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalVolumeSuiteCompatV1_1) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + type pluginRequest struct { + Name string + } + + type pluginResp struct { + Mountpoint string `json:",omitempty"` + Err string `json:",omitempty"` + } + + type vol struct { + Name string + Mountpoint string + } + var volList []vol + + read := func(b io.ReadCloser) (pluginRequest, error) { + defer b.Close() + var pr pluginRequest + if err := json.NewDecoder(b).Decode(&pr); err != nil { + return pr, err + } + return pr, nil + } + + send := func(w http.ResponseWriter, data interface{}) { + switch t := data.(type) { + case error: 
http.Error(w, t.Error(), 500) + case string: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, t) + default: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + json.NewEncoder(w).Encode(&data) + } + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec.activations++ + send(w, `{"Implements": ["VolumeDriver"]}`) + }) + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec.creations++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + volList = append(volList, vol{Name: pr.Name}) + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec.removals++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + if err := os.RemoveAll(hostVolumePath(pr.Name)); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + + for i, v := range volList { + if v.Name == pr.Name { + if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { + send(w, fmt.Sprintf(`{"Err": "%v"}`, err)) + return + } + volList = append(volList[:i], volList[i+1:]...) + break + } + } + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + s.ec.paths++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + p := hostVolumePath(pr.Name) + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + s.ec.mounts++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + p := hostVolumePath(pr.Name) + if err := os.MkdirAll(p, 0755); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil { + send(w, err) + return + } + + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + s.ec.unmounts++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, nil) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + err = ioutil.WriteFile("/etc/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerExternalVolumeSuiteCompatV1_1) TearDownSuite(c *check.C) { + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerExternalVolumeSuiteCompatV1_1) TestExternalVolumeDriverCompatV1_1(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name=test", "-v", "foo:/bar", "--volume-driver", "test-external-volume-driver", "busybox", "sh", "-c", "echo hello > /bar/hello") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "-v", "foo:/bar", "busybox", "cat", "/bar/hello") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + out, err = s.d.Cmd("rm", "test2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", 
"foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} diff --git a/integration-cli/docker_cli_volume_test.go b/integration-cli/docker_cli_volume_test.go index 7d1683a7ee..5a19ba539e 100644 --- a/integration-cli/docker_cli_volume_test.go +++ b/integration-cli/docker_cli_volume_test.go @@ -106,8 +106,8 @@ func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) { out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") - // Same as above, but explicitly disabling dangling - c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + // Explicitly disabling dangling + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) @@ -117,6 +117,30 @@ func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) { c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") + // dangling=0 is same as dangling=false case + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) +} + +func (s *DockerSuite) TestVolumeCliLsErrorWithInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCliLsWithIncorrectFilterValue(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") } func (s *DockerSuite) TestVolumeCliRm(c *check.C) { diff --git a/layer/filestore.go b/layer/filestore.go index 236c9ba5ad..a0044b3663 100644 --- a/layer/filestore.go +++ b/layer/filestore.go @@ -10,6 +10,7 @@ import ( "path/filepath" "regexp" "strconv" + "strings" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" @@ -154,7 +155,7 @@ func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { return "", err } - dgst, err := digest.ParseDigest(string(content)) + 
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) if err != nil { return "", err } @@ -168,7 +169,7 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { return "", err } - dgst, err := digest.ParseDigest(string(content)) + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) if err != nil { return "", err } @@ -177,16 +178,17 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { } func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) if err != nil { return "", err } + content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(string(content)) { + if !stringIDRegexp.MatchString(content) { return "", errors.New("invalid cache id value") } - return string(content), nil + return content, nil } func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { @@ -227,32 +229,34 @@ func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error } func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) if err != nil { return "", err } + content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(string(content)) { + if !stringIDRegexp.MatchString(content) { return "", errors.New("invalid mount id value") } - return string(content), nil + return content, nil } func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) if err != nil { if os.IsNotExist(err) { return "", nil } return "", err } + content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(string(content)) { + if !stringIDRegexp.MatchString(content) { return "", errors.New("invalid init id value") } - return string(content), nil + return content, nil } func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { @@ -264,7 +268,7 @@ func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { return "", err } - dgst, err := digest.ParseDigest(string(content)) + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) if err != nil { return "", err } diff --git a/layer/layer_store.go b/layer/layer_store.go index 495841f498..619c1a3020 100644 --- a/layer/layer_store.go +++ b/layer/layer_store.go @@ -86,6 +86,7 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (St l, err := ls.loadLayer(id) if err != nil { logrus.Debugf("Failed to load layer %s: %s", id, err) + continue } if l.parent != nil { l.parent.referenceCount++ @@ -109,22 +110,22 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { diff, err := ls.store.GetDiffID(layer) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) } size, err := ls.store.GetSize(layer) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) } cacheID, err := ls.store.GetCacheID(layer) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) } parent, err := 
ls.store.GetParent(layer) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) } cl = &roLayer{ diff --git a/man/docker-create.1.md b/man/docker-create.1.md index 08074ac431..a45eef7d63 100644 --- a/man/docker-create.1.md +++ b/man/docker-create.1.md @@ -319,10 +319,7 @@ unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - This command mounts a `tmpfs` at `/tmp` within the container. The mount copies -the underlying content of `my_image` into `/tmp`. For example if there was a -directory `/tmp/content` in the base image, docker will copy this directory and -all of its content on top of the tmpfs mounted on `/tmp`. The supported mount + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount options are the same as the Linux default `mount` flags. If you do not specify any options, the system uses the following options: `rw,noexec,nosuid,nodev,size=65536k`. @@ -411,6 +408,14 @@ will convert /foo into a `shared` mount point. Alternatively one can directly change propagation properties of source mount. Say `/` is source mount for `/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + **--volume-driver**="" Container's volume driver. This driver creates volumes specified either from a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. diff --git a/man/docker-images.1.md b/man/docker-images.1.md index 75355ac5c0..8410280a1d 100644 --- a/man/docker-images.1.md +++ b/man/docker-images.1.md @@ -70,7 +70,7 @@ To list the images in a local repository (not the registry) run: The list will contain the image repository name, a tag for the image, and an image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, -IMAGE ID, CREATED, and VIRTUAL SIZE. +IMAGE ID, CREATED, and SIZE. The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument that restricts the list to images that match the argument. If you specify diff --git a/man/docker-load.1.md b/man/docker-load.1.md index 508ab6bbb9..75ae84e6c5 100644 --- a/man/docker-load.1.md +++ b/man/docker-load.1.md @@ -25,11 +25,11 @@ Restores both images and tags. # EXAMPLES $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB $ docker load --input fedora.tar $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB fedora rawhide 0d20aec6529d 7 weeks ago 387 MB fedora 20 58394af37342 7 weeks ago 385.5 MB diff --git a/man/docker-network-connect.1.md b/man/docker-network-connect.1.md index 0fc4d4cf49..d6ee159391 100644 --- a/man/docker-network-connect.1.md +++ b/man/docker-network-connect.1.md @@ -28,12 +28,14 @@ $ docker run -itd --net=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::882 You can pause, restart, and stop containers that are connected to a network.
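
The `strings.TrimSpace` changes to `layer/filestore.go` above guard against metadata files that were written with a trailing newline. A minimal sketch of the failure mode they fix, assuming the vendored `docker/distribution` digest package (the file content below is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// Illustrative on-disk content of a layer metadata file that was
	// written with a trailing newline (the digest is sha256 of "").
	content := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"

	// Without trimming, the trailing newline makes parsing fail.
	if _, err := digest.ParseDigest(content); err != nil {
		fmt.Println("raw content fails to parse:", err)
	}

	// Trimming first, as the fixed GetParent/GetDiffID readers now do.
	if dgst, err := digest.ParseDigest(strings.TrimSpace(content)); err == nil {
		fmt.Println("trimmed content parses:", dgst)
	}
}
```
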
Paused containers remain connected and can be revealed by a `network inspect`. When the container is stopped, it does not appear on the network until you restart -it. If specified, the container's IP address(es) will be reapplied (if still available) -when a stopped container rejoins the network. One way to guarantee that the container -will be assigned the same IP addresses when it rejoins the network after a stop -or a disconnect, is to specify the `--ip-range` when creating the network, and choose -the static IP address(es) from outside the range. This will ensure that the IP address -will not be given to other dynamic containers while this container is not on the network. +it. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. ```bash $ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network diff --git a/man/docker-network-create.1.md b/man/docker-network-create.1.md index c560f7a5f3..e1fea9f367 100644 --- a/man/docker-network-create.1.md +++ b/man/docker-network-create.1.md @@ -150,10 +150,10 @@ If you want to create an externally isolated `overlay` network, you can specify IP Address Management Driver **--ipam-opt**=map[] - Set custom IPAM plugin options + Set custom IPAM driver options **-o**, **--opt**=map[] - Set custom network plugin options + Set custom driver options **--subnet**=[] Subnet in CIDR format that represents a network segment diff --git a/man/docker-ps.1.md b/man/docker-ps.1.md index 0d0cae5792..91d1b21733 100644 --- a/man/docker-ps.1.md +++ b/man/docker-ps.1.md @@ -29,7 +29,7 @@ the running containers. Filter output based on these conditions: - exited= an exit code of - label= or label== - - status=(created|restarting|running|paused|exited) + - status=(created|restarting|running|paused|exited|dead) - name= a container's name - id= a container's ID - before=(|) diff --git a/man/docker-pull.1.md b/man/docker-pull.1.md index 9e0e2ca818..338ee5d713 100644 --- a/man/docker-pull.1.md +++ b/man/docker-pull.1.md @@ -43,7 +43,7 @@ Note that if the image is previously downloaded then the status would be Status: Downloaded newer image for fedora $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB fedora 20 105182bb5e8b 5 days ago 372.7 MB fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB @@ -60,7 +60,7 @@ Note that if the image is previously downloaded then the status would be Status: Downloaded newer image for debian:latest $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE debian latest 4a5e6db8c069 5 days ago 125.1 MB @@ -77,7 +77,7 @@ Note that if the image is previously downloaded then the status would be Status: Downloaded newer image for registry.hub.docker.com/fedora:20 $ docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + REPOSITORY TAG IMAGE ID CREATED SIZE fedora 20 3f2fed40e4b0 4 days ago 372.7 MB diff --git a/man/docker-rm.1.md b/man/docker-rm.1.md index d99c1d836b..9ae3142a6c 100644 --- a/man/docker-rm.1.md +++ b/man/docker-rm.1.md @@ -48,6 +48,22 @@ command. 
Then use that name as follows: docker rm hopeful_morse +## Removing a container and all associated volumes + + $ docker rm -v redis + redis + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + $ docker rm -v hello + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. + # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/man/docker-run.1.md b/man/docker-run.1.md index ea9b0b8168..210343e3e4 100644 --- a/man/docker-run.1.md +++ b/man/docker-run.1.md @@ -490,10 +490,7 @@ standard input. $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - This command mounts a `tmpfs` at `/tmp` within the container. The mount copies -the underlying content of `my_image` into `/tmp`. For example if there was a -directory `/tmp/content` in the base image, docker will copy this directory and -all of its content on top of the tmpfs mounted on `/tmp`. The supported mount + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount options are the same as the Linux default `mount` flags. If you do not specify any options, the system uses the following options: `rw,noexec,nosuid,nodev,size=65536k`. @@ -582,6 +579,14 @@ will convert /foo into a `shared` mount point. Alternatively one can directly change propagation properties of source mount. Say `/` is source mount for `/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + **--volume-driver**="" Container's volume driver. This driver creates volumes specified either from a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. @@ -754,6 +759,12 @@ Create a 3rd container using the new --ipc=container:CONTAINERID option, now it ## Linking Containers +> **Note**: This section describes linking between containers on the +> default (bridge) network, also known as "legacy links". Using `--link` +> on user-defined networks uses the DNS-based discovery, which does not add +> entries to `/etc/hosts`, and does not set environment variables for +> discovery. + The link feature allows multiple containers to communicate with each other.
For example, a container whose Dockerfile has exposed port 80 can be run and named as follows: diff --git a/man/docker-stats.1.md b/man/docker-stats.1.md index c7fa7d504f..520466b5b8 100644 --- a/man/docker-stats.1.md +++ b/man/docker-stats.1.md @@ -31,9 +31,9 @@ Running `docker stats` on all running containers $ docker stats CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O - redis1 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB - redis2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B - nginx1 0.03% 4.583 MB / 64 MB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + 1285939c1fd3 0.07% 796 KB / 64 MB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MB / 64 MB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MB / 64 MB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B Running `docker stats` on multiple containers by name and id. diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index 2ad299accd..e7fabe0a9e 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -1223,11 +1223,27 @@ func (v mergeVal) IsBoolFlag() bool { return false } +// Name returns the name of a mergeVal. +// If the original value had a name, return the original name, +// otherwise, return the key assigned to this mergeVal. +func (v mergeVal) Name() string { + type namedValue interface { + Name() string + } + if nVal, ok := v.Value.(namedValue); ok { + return nVal.Name() + } + return v.key +} + // Merge is a helper function that merges n FlagSets into a single dest FlagSet // In case of name collision between the flagsets it will apply // the destination FlagSet's errorHandling behavior. func Merge(dest *FlagSet, flagsets ...*FlagSet) error { for _, fset := range flagsets { + if fset.formal == nil { + continue + } for k, f := range fset.formal { if _, ok := dest.formal[k]; ok { var err error @@ -1249,6 +1265,9 @@ func Merge(dest *FlagSet, flagsets ...*FlagSet) error { } newF := *f newF.Value = mergeVal{f.Value, k, fset} + if dest.formal == nil { + dest.formal = make(map[string]*Flag) + } dest.formal[k] = &newF } } diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go index c28deda896..138355546e 100644 --- a/pkg/mflag/flag_test.go +++ b/pkg/mflag/flag_test.go @@ -514,3 +514,14 @@ func TestSortFlags(t *testing.T) { t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) } } + +func TestMergeFlags(t *testing.T) { + base := NewFlagSet("base", ContinueOnError) + base.String([]string{"f"}, "", "") + + fs := NewFlagSet("test", ContinueOnError) + Merge(fs, base) + if len(fs.formal) != 1 { + t.Fatalf("FlagCount (%d) != number (1) of elements merged", len(fs.formal)) + } +} diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go index 934a829ee4..ba7772dfbb 100644 --- a/pkg/plugins/client.go +++ b/pkg/plugins/client.go @@ -3,7 +3,6 @@ package plugins import ( "bytes" "encoding/json" - "fmt" "io" "io/ioutil" "net/http" @@ -16,7 +15,7 @@ import ( ) const ( - versionMimetype = "application/vnd.docker.plugins.v1.1+json" + versionMimetype = "application/vnd.docker.plugins.v1.2+json" defaultTimeOut = 30 ) @@ -124,7 +123,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) if resp.StatusCode != http.StatusOK { b, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("%s: %s", serviceMethod, err) + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} } // Plugins' Response(s) should have an Err field indicating what went @@ -136,11 +135,11 @@ func (c *Client) callWithRetry(serviceMethod string,
data io.Reader, retry bool) remoteErr := responseErr{} if err := json.Unmarshal(b, &remoteErr); err == nil { if remoteErr.Err != "" { - return nil, fmt.Errorf("%s: %s", serviceMethod, remoteErr.Err) + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} } } // old way... - return nil, fmt.Errorf("%s: %s", serviceMethod, string(b)) + return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} } return resp.Body, nil } diff --git a/pkg/plugins/errors.go b/pkg/plugins/errors.go new file mode 100644 index 0000000000..a1826c8906 --- /dev/null +++ b/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go index 04870d1bf7..18f2d735f5 100644 --- a/pkg/term/term_windows.go +++ b/pkg/term/term_windows.go @@ -32,8 +32,8 @@ type Winsize struct { func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { switch { case os.Getenv("ConEmuANSI") == "ON": - // The ConEmu shell emulates ANSI well by default. - return os.Stdin, os.Stdout, os.Stderr + // The ConEmu terminal emulates ANSI on output streams well. + return windows.ConEmuStreams() case os.Getenv("MSYSTEM") != "": // MSYS (mingw) does not emulate ANSI well. return windows.ConsoleStreams() diff --git a/pkg/term/windows/console.go b/pkg/term/windows/console.go index 3711d9883c..3036a04605 100644 --- a/pkg/term/windows/console.go +++ b/pkg/term/windows/console.go @@ -8,8 +8,44 @@ import ( "syscall" "github.com/Azure/go-ansiterm/winterm" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" + "io/ioutil" ) +// ConEmuStreams returns prepared versions of console streams, +// for proper use in ConEmu terminal. +// The ConEmu terminal emulates ANSI on output streams well by default. +func ConEmuStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + if IsConsole(os.Stdin.Fd()) { + stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + stdOut = os.Stdout + stdErr = os.Stderr + + // WARNING (BEGIN): sourced from newAnsiWriter + + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + + // WARNING (END): sourced from newAnsiWriter + + return stdIn, stdOut, stdErr +} + // ConsoleStreams returns a wrapped version for each standard stream referencing a console, // that handles ANSI character sequences.
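
With `callWithRetry` above returning the new typed `*statusError`, callers can branch on the plugin's HTTP status through `plugins.IsNotFound` instead of matching error strings. A hedged sketch of the pattern; the `VolumeDriver.Get` endpoint and the request/response shapes are illustrative, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

// lookupVolume is a hypothetical caller: a 404 from the plugin is
// reported as "not found" rather than surfaced as an opaque error.
func lookupVolume(c *plugins.Client, name string) (bool, error) {
	var resp struct{ Err string }
	err := c.Call("VolumeDriver.Get", map[string]string{"Name": name}, &resp)
	if plugins.IsNotFound(err) {
		return false, nil // plugin answered http.StatusNotFound
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	fmt.Println("see lookupVolume for the error-handling pattern")
}
```
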
func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { diff --git a/registry/registry.go b/registry/registry.go index 643fa56e6a..d3f78108e7 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -31,7 +31,6 @@ var ( // ErrAlreadyExists is an error returned if an image being pushed // already exists on the remote side ErrAlreadyExists = errors.New("Image already exists") - errLoginRequired = errors.New("Authentication is required.") ) // dockerUserAgent is the User-Agent the Docker client uses to identify itself. @@ -109,7 +108,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { keyName := certName[:len(certName)-5] + ".key" logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for certificate %s", keyName, certName) + return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) if err != nil { @@ -122,7 +121,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { certName := keyName[:len(keyName)-4] + ".cert" logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, certName) { - return fmt.Errorf("Missing certificate %s for key %s", certName, keyName) + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) } } } diff --git a/registry/session.go b/registry/session.go index 57acbc0cf6..4b18d0d1a1 100644 --- a/registry/session.go +++ b/registry/session.go @@ -19,6 +19,7 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" @@ -213,7 +214,7 @@ func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } @@ -427,7 +428,7 @@ func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, erro } defer res.Body.Close() if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. 
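
The `Merge` fixes in `pkg/mflag` earlier make merging into a fresh `FlagSet` safe: before the lazy `make(map[string]*Flag)`, assigning into a nil `formal` map would panic at runtime. A small sketch of the now-supported call pattern, assuming `pkg/mflag` imported under the alias `flag`:

```go
package main

import (
	"fmt"

	flag "github.com/docker/docker/pkg/mflag"
)

func main() {
	base := flag.NewFlagSet("base", flag.ContinueOnError)
	base.String([]string{"f"}, "", "a sample flag")

	// dest has no flags registered yet, so its internal formal map is
	// still nil; Merge now allocates it instead of panicking.
	dest := flag.NewFlagSet("dest", flag.ContinueOnError)
	if err := flag.Merge(dest, base); err != nil {
		fmt.Println("merge failed:", err)
	}
}
```
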
@@ -661,7 +662,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, defer res.Body.Close() if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } var tokens, endpoints []string diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go index e536313185..28d209b694 100644 --- a/runconfig/hostconfig_unix.go +++ b/runconfig/hostconfig_unix.go @@ -19,7 +19,7 @@ func DefaultDaemonNetworkMode() container.NetworkMode { // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { n := container.NetworkMode(network) - return n.IsBridge() || n.IsHost() || n.IsNone() + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() } // ValidateNetMode ensures that the various combinations of requested diff --git a/runconfig/opts/parse.go b/runconfig/opts/parse.go index f716022620..41cb377ae9 100644 --- a/runconfig/opts/parse.go +++ b/runconfig/opts/parse.go @@ -442,7 +442,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig } - if hostConfig.NetworkMode.IsUserDefined() && flAliases.Len() > 0 { + if flAliases.Len() > 0 { epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] if epConfig == nil { epConfig = &networktypes.EndpointSettings{} diff --git a/vendor/src/github.com/docker/distribution/Dockerfile b/vendor/src/github.com/docker/distribution/Dockerfile index 1a5822229e..5329cee718 100644 --- a/vendor/src/github.com/docker/distribution/Dockerfile +++ b/vendor/src/github.com/docker/distribution/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.5.2 +FROM golang:1.5.3 RUN apt-get update && \ apt-get install -y librados-dev apache2-utils && \ diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go index 98a7d81705..160f9cd996 100644 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go +++ b/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go @@ -39,11 +39,11 @@ func init() { desc := distribution.Descriptor{ Digest: digest.FromBytes(sm.Canonical), Size: int64(len(sm.Canonical)), - MediaType: MediaTypeManifest, + MediaType: MediaTypeSignedManifest, } return sm, desc, err } - err := distribution.RegisterManifestSchema(MediaTypeManifest, schema1Func) + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } @@ -51,7 +51,7 @@ func init() { if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } - err = distribution.RegisterManifestSchema("application/json; charset=utf-8", schema1Func) + err = distribution.RegisterManifestSchema("application/json", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } @@ -167,7 +167,7 @@ func (sm *SignedManifest) MarshalJSON() ([]byte, error) { // Payload returns the signed content of the signed manifest. 
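
The `registry/session.go` hunks above replace the package-local `errLoginRequired` with a structured registry error, so callers can match on the error code rather than on message text. A minimal sketch of that, assuming the vendored `errcode` package behaves as shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// What the 401 paths above now return.
	var err error = errcode.ErrorCodeUnauthorized.WithArgs()

	// Callers can type-assert and compare codes instead of strings.
	if e, ok := err.(errcode.Error); ok && e.ErrorCode() == errcode.ErrorCodeUnauthorized {
		fmt.Println("authentication required:", e.Message)
	}
}
```
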
func (sm SignedManifest) Payload() (string, []byte, error) { - return MediaTypeManifest, sm.all, nil + return MediaTypeSignedManifest, sm.all, nil } // Signatures returns the signatures as provided by diff --git a/vendor/src/github.com/docker/distribution/manifests.go b/vendor/src/github.com/docker/distribution/manifests.go index 1f93812dd4..1acb0500d7 100644 --- a/vendor/src/github.com/docker/distribution/manifests.go +++ b/vendor/src/github.com/docker/distribution/manifests.go @@ -2,6 +2,7 @@ package distribution import ( "fmt" + "mime" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" @@ -68,7 +69,9 @@ type Describable interface { // ManifestMediaTypes returns the supported media types for manifests. func ManifestMediaTypes() (mediaTypes []string) { for t := range mappings { - mediaTypes = append(mediaTypes, t) + if t != "" { + mediaTypes = append(mediaTypes, t) + } } return } @@ -80,10 +83,24 @@ var mappings = make(map[string]UnmarshalFunc, 0) // UnmarshalManifest looks up manifest unmarshall functions based on // MediaType -func UnmarshalManifest(mediatype string, p []byte) (Manifest, Descriptor, error) { +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediatype string + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + unmarshalFunc, ok := mappings[mediatype] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype) + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } } return unmarshalFunc(p) diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go index 9a405d2164..6d9bb4b62a 100644 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go @@ -69,6 +69,15 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { return nil } +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { diff --git a/vendor/src/github.com/docker/distribution/registry/client/errors.go b/vendor/src/github.com/docker/distribution/registry/client/errors.go index 8e3cb10846..a528a86574 100644 --- a/vendor/src/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/src/github.com/docker/distribution/registry/client/errors.go @@ -31,13 +31,26 @@ func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } -func parseHTTPErrorResponse(r io.Reader) error { +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. 
+ var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + if statusCode == http.StatusUnauthorized { + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + } + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ ParseErr: err, @@ -53,14 +66,14 @@ func parseHTTPErrorResponse(r io.Reader) error { // range. func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.Body) + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.Body) + return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } diff --git a/vendor/src/github.com/docker/engine-api/types/types.go b/vendor/src/github.com/docker/engine-api/types/types.go index 9666ea4561..64c9981828 100644 --- a/vendor/src/github.com/docker/engine-api/types/types.go +++ b/vendor/src/github.com/docker/engine-api/types/types.go @@ -198,6 +198,7 @@ type Info struct { Images int Driver string DriverStatus [][2]string + SystemStatus [][2]string Plugins PluginsInfo MemoryLimit bool SwapLimit bool diff --git a/vendor/src/github.com/jfrazelle/go/LICENSE b/vendor/src/github.com/docker/go/LICENSE similarity index 100% rename from vendor/src/github.com/jfrazelle/go/LICENSE rename to vendor/src/github.com/docker/go/LICENSE diff --git a/vendor/src/github.com/jfrazelle/go/canonical/json/decode.go b/vendor/src/github.com/docker/go/canonical/json/decode.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/decode.go rename to vendor/src/github.com/docker/go/canonical/json/decode.go diff --git a/vendor/src/github.com/jfrazelle/go/canonical/json/encode.go b/vendor/src/github.com/docker/go/canonical/json/encode.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/encode.go rename to vendor/src/github.com/docker/go/canonical/json/encode.go diff --git a/vendor/src/github.com/jfrazelle/go/canonical/json/fold.go b/vendor/src/github.com/docker/go/canonical/json/fold.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/fold.go rename to vendor/src/github.com/docker/go/canonical/json/fold.go diff --git a/vendor/src/github.com/jfrazelle/go/canonical/json/indent.go b/vendor/src/github.com/docker/go/canonical/json/indent.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/indent.go rename to vendor/src/github.com/docker/go/canonical/json/indent.go diff --git a/vendor/src/github.com/jfrazelle/go/canonical/json/scanner.go b/vendor/src/github.com/docker/go/canonical/json/scanner.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/scanner.go rename to vendor/src/github.com/docker/go/canonical/json/scanner.go diff --git a/vendor/src/github.com/jfrazelle/go/canonical/json/stream.go b/vendor/src/github.com/docker/go/canonical/json/stream.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/stream.go rename to vendor/src/github.com/docker/go/canonical/json/stream.go diff --git 
a/vendor/src/github.com/jfrazelle/go/canonical/json/tags.go b/vendor/src/github.com/docker/go/canonical/json/tags.go similarity index 100% rename from vendor/src/github.com/jfrazelle/go/canonical/json/tags.go rename to vendor/src/github.com/docker/go/canonical/json/tags.go diff --git a/vendor/src/github.com/docker/libnetwork/CHANGELOG.md b/vendor/src/github.com/docker/libnetwork/CHANGELOG.md index ea136da5b4..a47d5fa707 100644 --- a/vendor/src/github.com/docker/libnetwork/CHANGELOG.md +++ b/vendor/src/github.com/docker/libnetwork/CHANGELOG.md @@ -1,5 +1,34 @@ # Changelog +## 0.6.0-rc7 (2016-02-01) +- Allow inter-network connections via exposed ports + +## 0.6.0-rc6 (2016-01-30) +- Properly fixes https://github.com/docker/docker/issues/18814 + +## 0.6.0-rc5 (2016-01-26) +- Cleanup stale overlay sandboxes + +## 0.6.0-rc4 (2016-01-25) +- Add Endpoints() API to Sandbox interface +- Fixed a race-condition in default gateway network creation + +## 0.6.0-rc3 (2016-01-25) +- Fixes docker/docker#19576 +- Fixed embedded DNS to listen in TCP as well +- Fixed a race-condition in IPAM to choose non-overlapping subnet for concurrent requests + +## 0.6.0-rc2 (2016-01-21) +- Fixes docker/docker#19376 +- Fixes docker/docker#15819 +- Fixes libnetwork/#885, Not filter v6 DNS servers from resolv.conf +- Fixes docker/docker #19448, also handles the . in service and network names correctly. + +## 0.6.0-rc1 (2016-01-14) +- Fixes docker/docker#19404 +- Fixes the ungraceful daemon restart issue in systemd with remote network plugin + (https://github.com/docker/libnetwork/issues/813) + ## 0.5.6 (2016-01-14) - Setup embedded DNS server correctly on container restart. Fixes docker/docker#19354 @@ -50,6 +79,6 @@ - Fixed a bunch of issues with osl namespace mgmt ## 0.3.0 (2015-05-27) - + - Introduce CNM (Container Networking Model) - Replace docker networking with CNM & Bridge driver diff --git a/vendor/src/github.com/docker/libnetwork/Makefile b/vendor/src/github.com/docker/libnetwork/Makefile index 0741af2f62..2031826c7d 100644 --- a/vendor/src/github.com/docker/libnetwork/Makefile +++ b/vendor/src/github.com/docker/libnetwork/Makefile @@ -63,7 +63,7 @@ run-tests: if ls $$dir/*.go &> /dev/null; then \ pushd . &> /dev/null ; \ cd $$dir ; \ - $(shell which godep) go test ${INSIDECONTAINER} -test.parallel 3 -test.v -covermode=count -coverprofile=./profile.tmp ; \ + $(shell which godep) go test ${INSIDECONTAINER} -test.parallel 5 -test.v -covermode=count -coverprofile=./profile.tmp ; \ ret=$$? ;\ if [ $$ret -ne 0 ]; then exit $$ret; fi ;\ popd &> /dev/null; \ diff --git a/vendor/src/github.com/docker/libnetwork/controller.go b/vendor/src/github.com/docker/libnetwork/controller.go index 7efc409356..ef214fd2ce 100644 --- a/vendor/src/github.com/docker/libnetwork/controller.go +++ b/vendor/src/github.com/docker/libnetwork/controller.go @@ -387,7 +387,7 @@ func (c *controller) NewNetwork(networkType, name string, options ...NetworkOpti // Make sure we have a driver available for this network type // before we allocate anything. 
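
The `UnmarshalManifest` change earlier keys the lookup on the parsed media type rather than the raw `Content-Type` header, which may carry parameters such as a charset. A short standard-library sketch of what `mime.ParseMediaType` does with such a header:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// A Content-Type header as an older registry might send it.
	ctHeader := "application/json; charset=utf-8"

	// ParseMediaType strips the parameters, leaving the bare media type
	// that the manifest unmarshal functions are registered under.
	mediatype, params, err := mime.ParseMediaType(ctHeader)
	if err != nil {
		panic(err)
	}
	fmt.Println(mediatype) // application/json
	fmt.Println(params)    // map[charset:utf-8]
}
```
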
- if _, err := network.driver(); err != nil { + if _, err := network.driver(true); err != nil { return nil, err } @@ -432,7 +432,7 @@ } func (c *controller) addNetwork(n *network) error { - d, err := n.driver() + d, err := n.driver(true) if err != nil { return err } diff --git a/vendor/src/github.com/docker/libnetwork/default_gateway.go b/vendor/src/github.com/docker/libnetwork/default_gateway.go index bfd7b725d3..2df047a348 100644 --- a/vendor/src/github.com/docker/libnetwork/default_gateway.go +++ b/vendor/src/github.com/docker/libnetwork/default_gateway.go @@ -12,6 +12,8 @@ const ( gwEPlen = 12 ) +var procGwNetwork = make(chan (bool), 1) + /* libnetwork creates a bridge network "docker_gw_bridge" for providing default gateway for the containers if none of the container's endpoints @@ -35,13 +37,11 @@ func (sb *sandbox) setupDefaultGW(srcEp *endpoint) error { return nil } + // Look for default gw network. In case of error (includes not found), + // retry and create it if needed in a serialized execution. n, err := c.NetworkByName(libnGWNetwork) if err != nil { - if _, ok := err.(types.NotFoundError); !ok { - return err - } - n, err = c.createGWNetwork() - if err != nil { + if n, err = c.defaultGwNetwork(); err != nil { return err } } @@ -84,7 +84,7 @@ func (sb *sandbox) clearDefaultGW() error { return nil } - if err := ep.sbLeave(sb); err != nil { + if err := ep.sbLeave(sb, false); err != nil { return fmt.Errorf("container %s: endpoint leaving GW Network failed: %v", sb.containerID, err) } if err := ep.Delete(false); err != nil { @@ -150,3 +150,18 @@ func (sb *sandbox) getEPwithoutGateway() *endpoint { } return nil } + +// Looks for the default gw network and creates it if not there. +// Parallel executions are serialized.
+func (c *controller) defaultGwNetwork() (Network, error) { + procGwNetwork <- true + defer func() { <-procGwNetwork }() + + n, err := c.NetworkByName(libnGWNetwork) + if err != nil { + if _, ok := err.(types.NotFoundError); ok { + n, err = c.createGWNetwork() + } + } + return n, err +} diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go index 1d523d6c2c..f5ceed2130 100644 --- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go +++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go @@ -138,6 +138,7 @@ func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairp address = addr.String() natRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", address, "!", "-o", bridgeIface, "-j", "MASQUERADE"}} hpNatRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-m", "addrtype", "--src-type", "LOCAL", "-o", bridgeIface, "-j", "MASQUERADE"}} + skipDNAT = iptRule{table: iptables.Nat, chain: DockerChain, preArgs: []string{"-t", "nat"}, args: []string{"-i", bridgeIface, "-j", "RETURN"}} outRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}} inRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}} ) @@ -147,6 +148,9 @@ if err := programChainRule(natRule, "NAT", enable); err != nil { return err } + if err := programChainRule(skipDNAT, "SKIP DNAT", enable); err != nil { + return err + } } // In hairpin mode, masquerade traffic from localhost diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go index a520453a63..d87c032dfc 100644 --- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go +++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go @@ -49,33 +49,33 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, sbox := n.sandbox() - name1, name2, err := createVethPair() + overlayIfName, containerIfName, err := createVethPair() if err != nil { return err } - ep.ifName = name2 + ep.ifName = overlayIfName // Set the container interface and its peer MTU to 1450 to allow // for 50 bytes vxlan encap (inner eth header(14) + outer IP(20) + // outer UDP(8) + vxlan header(8)) - veth, err := netlink.LinkByName(name1) + veth, err := netlink.LinkByName(overlayIfName) if err != nil { - return fmt.Errorf("cound not find link by name %s: %v", name1, err) + return fmt.Errorf("could not find link by name %s: %v", overlayIfName, err) } err = netlink.LinkSetMTU(veth, vxlanVethMTU) if err != nil { return err } - if err := sbox.AddInterface(name1, "veth", + if err := sbox.AddInterface(overlayIfName, "veth", sbox.InterfaceOptions().Master(s.brName)); err != nil { return fmt.Errorf("could not add veth pair inside the network sandbox: %v", err) } - veth, err = netlink.LinkByName(name2) + veth, err = netlink.LinkByName(containerIfName) if err != nil { - return fmt.Errorf("could not find link by name %s: %v", name2, err) + return fmt.Errorf("could not find link by name %s: %v", containerIfName, err) } err =
netlink.LinkSetMTU(veth, vxlanVethMTU) if err != nil { @@ -96,7 +96,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, } if iNames := jinfo.InterfaceName(); iNames != nil { - err = iNames.SetNames(name2, "eth") + err = iNames.SetNames(containerIfName, "eth") if err != nil { return err } @@ -136,14 +136,5 @@ func (d *driver) Leave(nid, eid string) error { n.leaveSandbox() - link, err := netlink.LinkByName(ep.ifName) - if err != nil { - log.Warnf("Failed to retrieve interface link for interface removal on endpoint leave: %v", err) - return nil - } - if err := netlink.LinkDel(link); err != nil { - log.Warnf("Failed to delete interface link on endpoint leave: %v", err) - } - return nil } diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go index c3fe10cf5a..2dd288fd26 100644 --- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go +++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go @@ -4,8 +4,10 @@ import ( "fmt" "net" + log "github.com/Sirupsen/logrus" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/netutils" + "github.com/vishvananda/netlink" ) type endpointTable map[string]*endpoint @@ -97,6 +99,20 @@ func (d *driver) DeleteEndpoint(nid, eid string) error { } n.deleteEndpoint(eid) + + if ep.ifName == "" { + return nil + } + + link, err := netlink.LinkByName(ep.ifName) + if err != nil { + log.Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) + return nil + } + if err := netlink.LinkDel(link); err != nil { + log.Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) + } + return nil } diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go index 0a891c0e5e..f0b9b2b1f5 100644 --- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go +++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go @@ -5,6 +5,8 @@ import ( "fmt" "net" "os" + "path/filepath" + "strings" "sync" "syscall" @@ -158,7 +160,9 @@ func (n *network) destroySandbox() { sbox := n.sandbox() if sbox != nil { for _, iface := range sbox.Info().Interfaces() { - iface.Remove() + if err := iface.Remove(); err != nil { + logrus.Debugf("Remove interface %s failed: %v", iface.SrcName(), err) + } } for _, s := range n.subnets { @@ -298,6 +302,26 @@ func (n *network) initSubnetSandbox(s *subnet) error { return nil } +func (n *network) cleanupStaleSandboxes() { + filepath.Walk(filepath.Dir(osl.GenerateKey("walk")), + func(path string, info os.FileInfo, err error) error { + _, fname := filepath.Split(path) + + pList := strings.Split(fname, "-") + if len(pList) <= 1 { + return nil + } + + pattern := pList[1] + if strings.Contains(n.id, pattern) { + syscall.Unmount(path, syscall.MNT_DETACH) + os.Remove(path) + } + + return nil + }) +} + func (n *network) initSandbox() error { n.Lock() n.initEpoch++ @@ -311,6 +335,10 @@ func (n *network) initSandbox() error { } } + // If there are any stale sandboxes related to this network + // from previous daemon life clean it up here + n.cleanupStaleSandboxes() + sbox, err := osl.NewSandbox( osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch)+n.id), !hostMode) if err != nil { diff --git a/vendor/src/github.com/docker/libnetwork/endpoint.go 
b/vendor/src/github.com/docker/libnetwork/endpoint.go index 88312e9c15..6455686a32 100644 --- a/vendor/src/github.com/docker/libnetwork/endpoint.go +++ b/vendor/src/github.com/docker/libnetwork/endpoint.go @@ -406,7 +406,7 @@ func (ep *endpoint) sbJoin(sbox Sandbox, options ...EndpointOption) error { ep.processOptions(options...) - driver, err := network.driver() + driver, err := network.driver(true) if err != nil { return fmt.Errorf("failed to join endpoint: %v", err) } @@ -465,7 +465,7 @@ func (ep *endpoint) sbJoin(sbox Sandbox, options ...EndpointOption) error { if sb.needDefaultGW() { return sb.setupDefaultGW(ep) } - return sb.clearDefaultGW() + return nil } func (ep *endpoint) rename(name string) error { @@ -533,10 +533,10 @@ func (ep *endpoint) Leave(sbox Sandbox, options ...EndpointOption) error { sb.joinLeaveStart() defer sb.joinLeaveEnd() - return ep.sbLeave(sbox, options...) + return ep.sbLeave(sbox, false, options...) } -func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { +func (ep *endpoint) sbLeave(sbox Sandbox, force bool, options ...EndpointOption) error { sb, ok := sbox.(*sandbox) if !ok { return types.BadRequestErrorf("not a valid Sandbox interface") @@ -565,7 +565,7 @@ func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { ep.processOptions(options...) - d, err := n.driver() + d, err := n.driver(!force) if err != nil { return fmt.Errorf("failed to leave endpoint: %v", err) } @@ -575,9 +575,11 @@ func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { ep.network = n ep.Unlock() - if err := d.Leave(n.id, ep.id); err != nil { - if _, ok := err.(types.MaskableError); !ok { - log.Warnf("driver error disconnecting container %s : %v", ep.name, err) + if d != nil { + if err := d.Leave(n.id, ep.id); err != nil { + if _, ok := err.(types.MaskableError); !ok { + log.Warnf("driver error disconnecting container %s : %v", ep.name, err) + } } } @@ -595,15 +597,7 @@ func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { } sb.deleteHostsEntries(n.getSvcRecords(ep)) - - if !sb.inDelete && sb.needDefaultGW() { - ep := sb.getEPwithoutGateway() - if ep == nil { - return fmt.Errorf("endpoint without GW expected, but not found") - } - return sb.setupDefaultGW(ep) - } - return sb.clearDefaultGW() + return nil } func (n *network) validateForceDelete(locator string) error { @@ -649,7 +643,7 @@ func (ep *endpoint) Delete(force bool) error { } if sb != nil { - if e := ep.sbLeave(sb); e != nil { + if e := ep.sbLeave(sb, force); e != nil { log.Warnf("failed to leave sandbox for endpoint %s : %v", name, e) } } @@ -681,7 +675,7 @@ func (ep *endpoint) Delete(force bool) error { // unwatch for service records n.getController().unWatchSvcRecord(ep) - if err = ep.deleteEndpoint(); err != nil && !force { + if err = ep.deleteEndpoint(force); err != nil && !force { return err } @@ -690,18 +684,22 @@ func (ep *endpoint) Delete(force bool) error { return nil } -func (ep *endpoint) deleteEndpoint() error { +func (ep *endpoint) deleteEndpoint(force bool) error { ep.Lock() n := ep.network name := ep.name epid := ep.id ep.Unlock() - driver, err := n.driver() + driver, err := n.driver(!force) if err != nil { return fmt.Errorf("failed to delete endpoint: %v", err) } + if driver == nil { + return nil + } + if err := driver.DeleteEndpoint(n.id, epid); err != nil { if _, ok := err.(types.ForbiddenError); ok { return err @@ -913,7 +911,7 @@ func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { } } if progAdd 
!= nil { - return types.BadRequestErrorf("Invalid preferred address %s: It does not belong to any of this network's subnets") + return types.BadRequestErrorf("Invalid preferred address %s: It does not belong to any of this network's subnets", prefAdd) } return fmt.Errorf("no available IPv%d addresses on this network's address pools: %s (%s)", ipVer, n.Name(), n.ID()) } @@ -956,7 +954,7 @@ func (c *controller) cleanupLocalEndpoints() { } for _, ep := range epl { - if err := ep.Delete(false); err != nil { + if err := ep.Delete(true); err != nil { log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) } } diff --git a/vendor/src/github.com/docker/libnetwork/endpoint_info.go b/vendor/src/github.com/docker/libnetwork/endpoint_info.go index 624bc533c5..4ba8e3d548 100644 --- a/vendor/src/github.com/docker/libnetwork/endpoint_info.go +++ b/vendor/src/github.com/docker/libnetwork/endpoint_info.go @@ -188,7 +188,7 @@ func (ep *endpoint) DriverInfo() (map[string]interface{}, error) { return nil, fmt.Errorf("could not find network in store for driver info: %v", err) } - driver, err := n.driver() + driver, err := n.driver(true) if err != nil { return nil, fmt.Errorf("failed to get driver info: %v", err) } diff --git a/vendor/src/github.com/docker/libnetwork/ipam/allocator.go b/vendor/src/github.com/docker/libnetwork/ipam/allocator.go index ce404e2ad2..4fa7e1e972 100644 --- a/vendor/src/github.com/docker/libnetwork/ipam/allocator.go +++ b/vendor/src/github.com/docker/libnetwork/ipam/allocator.go @@ -145,12 +145,12 @@ func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) { // RequestPool returns an address pool along with its unique id. func (a *Allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { log.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) - k, nw, ipr, err := a.parsePoolRequest(addressSpace, pool, subPool, v6) +retry: + k, nw, ipr, pdf, err := a.parsePoolRequest(addressSpace, pool, subPool, v6) if err != nil { return "", nil, nil, types.InternalErrorf("failed to parse pool request for address space %q pool %q subpool %q: %v", addressSpace, pool, subPool, err) } -retry: if err := a.refresh(addressSpace); err != nil { return "", nil, nil, err } @@ -160,8 +160,12 @@ retry: return "", nil, nil, err } - insert, err := aSpace.updatePoolDBOnAdd(*k, nw, ipr) + insert, err := aSpace.updatePoolDBOnAdd(*k, nw, ipr, pdf) if err != nil { + if _, ok := err.(types.MaskableError); ok { + log.Debugf("Retrying predefined pool search: %v", err) + goto retry + } return "", nil, nil, err } @@ -221,38 +225,39 @@ func (a *Allocator) getAddrSpace(as string) (*addrSpace, error) { return aSpace, nil } -func (a *Allocator) parsePoolRequest(addressSpace, pool, subPool string, v6 bool) (*SubnetKey, *net.IPNet, *AddressRange, error) { +func (a *Allocator) parsePoolRequest(addressSpace, pool, subPool string, v6 bool) (*SubnetKey, *net.IPNet, *AddressRange, bool, error) { var ( nw *net.IPNet ipr *AddressRange err error + pdf = false ) if addressSpace == "" { - return nil, nil, nil, ipamapi.ErrInvalidAddressSpace + return nil, nil, nil, false, ipamapi.ErrInvalidAddressSpace } if pool == "" && subPool != "" { - return nil, nil, nil, ipamapi.ErrInvalidSubPool + return nil, nil, nil, false, ipamapi.ErrInvalidSubPool } if pool != "" { if _, nw, err = net.ParseCIDR(pool); err != nil { - return nil, nil, nil, ipamapi.ErrInvalidPool + return nil, nil, nil, 
false, ipamapi.ErrInvalidPool } if subPool != "" { if ipr, err = getAddressRange(subPool, nw); err != nil { - return nil, nil, nil, err + return nil, nil, nil, false, err } } } else { if nw, err = a.getPredefinedPool(addressSpace, v6); err != nil { - return nil, nil, nil, err + return nil, nil, nil, false, err } - + pdf = true } - return &SubnetKey{AddressSpace: addressSpace, Subnet: nw.String(), ChildSubnet: subPool}, nw, ipr, nil + return &SubnetKey{AddressSpace: addressSpace, Subnet: nw.String(), ChildSubnet: subPool}, nw, ipr, pdf, nil } func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error { diff --git a/vendor/src/github.com/docker/libnetwork/ipam/structures.go b/vendor/src/github.com/docker/libnetwork/ipam/structures.go index cd0593ceff..601eda4fba 100644 --- a/vendor/src/github.com/docker/libnetwork/ipam/structures.go +++ b/vendor/src/github.com/docker/libnetwork/ipam/structures.go @@ -257,12 +257,15 @@ func (aSpace *addrSpace) New() datastore.KVObject { } } -func (aSpace *addrSpace) updatePoolDBOnAdd(k SubnetKey, nw *net.IPNet, ipr *AddressRange) (func() error, error) { +func (aSpace *addrSpace) updatePoolDBOnAdd(k SubnetKey, nw *net.IPNet, ipr *AddressRange, pdf bool) (func() error, error) { aSpace.Lock() defer aSpace.Unlock() // Check if already allocated if p, ok := aSpace.subnets[k]; ok { + if pdf { + return nil, types.InternalMaskableErrorf("predefined pool %s is already reserved", nw) + } aSpace.incRefCount(p, 1) return func() error { return nil }, nil } diff --git a/vendor/src/github.com/docker/libnetwork/netutils/utils.go b/vendor/src/github.com/docker/libnetwork/netutils/utils.go index 62287efcc9..482e4f038f 100644 --- a/vendor/src/github.com/docker/libnetwork/netutils/utils.go +++ b/vendor/src/github.com/docker/libnetwork/netutils/utils.go @@ -14,6 +14,13 @@ import ( "github.com/docker/libnetwork/types" ) +// constants for the IP address type +const ( + IP = iota // IPv4 and IPv6 + IPv4 + IPv6 +) + var ( // ErrNetworkOverlapsWithNameservers preformatted error ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go index 7449c90ac4..aa32cb8d68 100644 --- a/vendor/src/github.com/docker/libnetwork/network.go +++ b/vendor/src/github.com/docker/libnetwork/network.go @@ -149,6 +149,7 @@ type network struct { name string networkType string id string + scope string ipamType string ipamOptions map[string]string addrSpace string @@ -246,6 +247,7 @@ func (n *network) New() datastore.KVObject { return &network{ ctrlr: n.ctrlr, drvOnce: &sync.Once{}, + scope: n.scope, } } @@ -295,6 +297,7 @@ func (n *network) CopyTo(o datastore.KVObject) error { dstN.name = n.name dstN.id = n.id dstN.networkType = n.networkType + dstN.scope = n.scope dstN.ipamType = n.ipamType dstN.enableIPv6 = n.enableIPv6 dstN.persist = n.persist @@ -337,7 +340,7 @@ func (n *network) CopyTo(o datastore.KVObject) error { } func (n *network) DataScope() string { - return n.driverScope() + return n.Scope() } func (n *network) getEpCnt() *endpointCnt { @@ -353,6 +356,7 @@ func (n *network) MarshalJSON() ([]byte, error) { netMap["name"] = n.name netMap["id"] = n.id netMap["networkType"] = n.networkType + netMap["scope"] = n.scope netMap["ipamType"] = n.ipamType netMap["addrSpace"] = n.addrSpace netMap["enableIPv6"] = n.enableIPv6 @@ -456,6 +460,9 @@ func (n *network) UnmarshalJSON(b []byte) (err error) { if v, ok := netMap["internal"]; ok { n.internal 
= v.(bool) } + if s, ok := netMap["scope"]; ok { + n.scope = s.(string) + } return nil } @@ -566,7 +573,7 @@ func (n *network) driverScope() string { return dd.capability.DataScope } -func (n *network) driver() (driverapi.Driver, error) { +func (n *network) driver(load bool) (driverapi.Driver, error) { c := n.getController() c.Lock() @@ -574,14 +581,20 @@ dd, ok := c.drivers[n.networkType] c.Unlock() - if !ok { + if !ok && load { var err error dd, err = c.loadDriver(n.networkType) if err != nil { return nil, err } + } else if !ok { + // don't fail if driver loading is not required + return nil, nil } + n.Lock() + n.scope = dd.capability.DataScope + n.Unlock() return dd.driver, nil } @@ -631,7 +644,7 @@ func (n *network) Delete() error { } func (n *network) deleteNetwork() error { - d, err := n.driver() + d, err := n.driver(true) if err != nil { return fmt.Errorf("failed deleting network: %v", err) } @@ -651,7 +664,7 @@ func (n *network) deleteNetwork() error { } func (n *network) addEndpoint(ep *endpoint) error { - d, err := n.driver() + d, err := n.driver(true) if err != nil { return fmt.Errorf("failed to add endpoint: %v", err) } @@ -725,7 +738,7 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi } defer func() { if err != nil { - if e := ep.deleteEndpoint(); e != nil { + if e := ep.deleteEndpoint(false); e != nil { log.Warnf("cleaning up endpoint failed %s : %v", name, e) } } @@ -1169,7 +1182,9 @@ func (n *network) DriverOptions() map[string]string { } func (n *network) Scope() string { - return n.driverScope() + n.Lock() + defer n.Unlock() + return n.scope } func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) { diff --git a/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go b/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go index 7c569d63bb..de74ee4852 100644 --- a/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go +++ b/vendor/src/github.com/docker/libnetwork/osl/interface_linux.go @@ -8,6 +8,7 @@ import ( "sync" "syscall" + log "github.com/Sirupsen/logrus" "github.com/docker/libnetwork/types" "github.com/vishvananda/netlink" ) @@ -127,7 +128,7 @@ func (i *nwIface) Remove() error { err = netlink.LinkSetName(iface, i.SrcName()) if err != nil { - fmt.Println("LinkSetName failed: ", err) + log.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err) return err } @@ -139,7 +140,7 @@ } else if !isDefault { // Move the network interface to caller namespace.
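
The `procGwNetwork` channel used by `defaultGwNetwork` earlier is a common Go idiom: a one-slot buffered channel acting as a mutex, so concurrent lookups of a shared resource take turns and only one creator runs. A generic sketch of the pattern; the `Resource`, `lookup`, and `create` names are illustrative stand-ins:

```go
package main

import "fmt"

type Resource struct{ name string }

// gate is a 1-slot buffered channel: sending acquires, receiving releases.
var gate = make(chan bool, 1)

// getOrCreate serializes callers: only one goroutine at a time can look
// up, and if needed create, the shared resource.
func getOrCreate() (*Resource, error) {
	gate <- true              // acquire
	defer func() { <-gate }() // release
	if r, err := lookup(); err == nil {
		return r, nil
	}
	return create()
}

func lookup() (*Resource, error) { return nil, fmt.Errorf("not found") }
func create() (*Resource, error) { return &Resource{name: "docker_gw_bridge"}, nil }

func main() {
	r, _ := getOrCreate()
	fmt.Println(r.name)
}
```
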
if err := netlink.LinkSetNsFd(iface, callerFD); err != nil { - fmt.Println("LinkSetNsPid failed: ", err) + log.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err) return err } } diff --git a/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go index d5169ada35..507d9ef50d 100644 --- a/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go +++ b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go @@ -10,6 +10,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/resolvconf/dns" ) @@ -29,6 +30,8 @@ var ( localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) + nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`) searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) ) @@ -119,7 +122,7 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { } // if the resulting resolvConf has no more nameservers defined, add appropriate // default DNS servers for IPv4 and (optionally) IPv6 - if len(GetNameservers(cleanedResolvConf)) == 0 { + if len(GetNameservers(cleanedResolvConf, netutils.IP)) == 0 { logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers : %v", defaultIPv4Dns) dns := defaultIPv4Dns if ipv6Enabled { @@ -151,10 +154,17 @@ func getLines(input []byte, commentMarker []byte) [][]byte { } // GetNameservers returns nameservers (if any) listed in /etc/resolv.conf -func GetNameservers(resolvConf []byte) []string { +func GetNameservers(resolvConf []byte, kind int) []string { nameservers := []string{} for _, line := range getLines(resolvConf, []byte("#")) { - var ns = nsRegexp.FindSubmatch(line) + var ns [][]byte + if kind == netutils.IP { + ns = nsRegexp.FindSubmatch(line) + } else if kind == netutils.IPv4 { + ns = nsIPv4Regexpmatch.FindSubmatch(line) + } else if kind == netutils.IPv6 { + ns = nsIPv6Regexpmatch.FindSubmatch(line) + } if len(ns) > 0 { nameservers = append(nameservers, string(ns[1])) } @@ -167,7 +177,7 @@ func GetNameservers(resolvConf []byte) []string { // This function's output is intended for net.ParseCIDR func GetNameserversAsCIDR(resolvConf []byte) []string { nameservers := []string{} - for _, nameserver := range GetNameservers(resolvConf) { + for _, nameserver := range GetNameservers(resolvConf, netutils.IP) { nameservers = append(nameservers, nameserver+"/32") } return nameservers diff --git a/vendor/src/github.com/docker/libnetwork/resolver.go b/vendor/src/github.com/docker/libnetwork/resolver.go index d395ab46b3..a839298491 100644 --- a/vendor/src/github.com/docker/libnetwork/resolver.go +++ b/vendor/src/github.com/docker/libnetwork/resolver.go @@ -36,15 +36,18 @@ const ( ptrIPv4domain = ".in-addr.arpa." ptrIPv6domain = ".ip6.arpa." 
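The new `kind` parameter to `GetNameservers` selects a regexp by address family via the `iota` constants added to netutils. A runnable sketch of the same selection logic, with deliberately simplified patterns (the vendored code composes stricter `ipv4Address`/`ipv6Address` fragments):

```go
package main

import (
	"fmt"
	"regexp"
)

// Address-family selectors, mirroring the iota constants added to netutils.
const (
	IP = iota // IPv4 and IPv6
	IPv4
	IPv6
)

// Simplified patterns, loose stand-ins for the vendored regexps.
var (
	nsAny  = regexp.MustCompile(`^\s*nameserver\s+(\S+)\s*$`)
	nsIPv4 = regexp.MustCompile(`^\s*nameserver\s+(\d+\.\d+\.\d+\.\d+)\s*$`)
	nsIPv6 = regexp.MustCompile(`^\s*nameserver\s+([0-9A-Fa-f:]+)\s*$`)
)

// getNameservers filters nameserver lines by the requested address family,
// following the same shape as the patched resolvconf.GetNameservers.
func getNameservers(lines []string, kind int) []string {
	re := nsAny
	switch kind {
	case IPv4:
		re = nsIPv4
	case IPv6:
		re = nsIPv6
	}
	var out []string
	for _, l := range lines {
		if m := re.FindStringSubmatch(l); m != nil {
			out = append(out, m[1])
		}
	}
	return out
}

func main() {
	conf := []string{"nameserver 8.8.8.8", "nameserver 2001:4860:4860::8888"}
	fmt.Println(getNameservers(conf, IPv4)) // [8.8.8.8]
	fmt.Println(getNameservers(conf, IPv6)) // [2001:4860:4860::8888]
}
```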
respTTL = 1800 + maxExtDNS = 3 //max number of external servers to try ) // resolver implements the Resolver interface type resolver struct { - sb *sandbox - extDNS []string - server *dns.Server - conn *net.UDPConn - err error + sb *sandbox + extDNS []string + server *dns.Server + conn *net.UDPConn + tcpServer *dns.Server + tcpListen *net.TCPListener + err error } // NewResolver creates a new instance of the Resolver @@ -59,6 +62,7 @@ func (r *resolver) SetupFunc() func() { return (func() { var err error + // DNS operates primarily on UDP addr := &net.UDPAddr{ IP: net.ParseIP(resolverIP), } @@ -71,9 +75,23 @@ func (r *resolver) SetupFunc() func() { laddr := r.conn.LocalAddr() _, ipPort, _ := net.SplitHostPort(laddr.String()) + // Listen on a TCP as well + tcpaddr := &net.TCPAddr{ + IP: net.ParseIP(resolverIP), + } + + r.tcpListen, err = net.ListenTCP("tcp", tcpaddr) + if err != nil { + r.err = fmt.Errorf("error in opening name TCP server socket %v", err) + return + } + ltcpaddr := r.tcpListen.Addr() + _, tcpPort, _ := net.SplitHostPort(ltcpaddr.String()) rules := [][]string{ {"-t", "nat", "-A", "OUTPUT", "-d", resolverIP, "-p", "udp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", laddr.String()}, {"-t", "nat", "-A", "POSTROUTING", "-s", resolverIP, "-p", "udp", "--sport", ipPort, "-j", "SNAT", "--to-source", ":" + dnsPort}, + {"-t", "nat", "-A", "OUTPUT", "-d", resolverIP, "-p", "tcp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", ltcpaddr.String()}, + {"-t", "nat", "-A", "POSTROUTING", "-s", resolverIP, "-p", "tcp", "--sport", tcpPort, "-j", "SNAT", "--to-source", ":" + dnsPort}, } for _, rule := range rules { @@ -96,6 +114,12 @@ func (r *resolver) Start() error { go func() { s.ActivateAndServe() }() + + tcpServer := &dns.Server{Handler: r, Listener: r.tcpListen} + r.tcpServer = tcpServer + go func() { + tcpServer.ActivateAndServe() + }() return nil } @@ -103,7 +127,11 @@ func (r *resolver) Stop() { if r.server != nil { r.server.Shutdown() } + if r.tcpServer != nil { + r.tcpServer.Shutdown() + } r.conn = nil + r.tcpServer = nil r.err = fmt.Errorf("setup not done yet") } @@ -188,15 +216,24 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) { if len(r.extDNS) == 0 { return } - log.Debugf("Querying ext dns %s for %s[%d]", r.extDNS[0], name, query.Question[0].Qtype) - c := &dns.Client{Net: "udp"} - addr := fmt.Sprintf("%s:%d", r.extDNS[0], 53) + num := maxExtDNS + if len(r.extDNS) < maxExtDNS { + num = len(r.extDNS) + } + for i := 0; i < num; i++ { + log.Debugf("Querying ext dns %s:%s for %s[%d]", w.LocalAddr().Network(), r.extDNS[i], name, query.Question[0].Qtype) - // TODO: iterate over avilable servers in case of error - resp, _, err = c.Exchange(query, addr) - if err != nil { + c := &dns.Client{Net: w.LocalAddr().Network()} + addr := fmt.Sprintf("%s:%d", r.extDNS[i], 53) + + resp, _, err = c.Exchange(query, addr) + if err == nil { + break + } log.Errorf("external resolution failed, %s", err) + } + if resp == nil { return } } diff --git a/vendor/src/github.com/docker/libnetwork/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox.go index 9dbb100ef9..71c8ebb753 100644 --- a/vendor/src/github.com/docker/libnetwork/sandbox.go +++ b/vendor/src/github.com/docker/libnetwork/sandbox.go @@ -14,6 +14,7 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/libnetwork/etchosts" + "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/osl" "github.com/docker/libnetwork/resolvconf" "github.com/docker/libnetwork/types" @@ -46,6 +47,8 @@ 
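The `ServeDNS` change above retries up to `maxExtDNS` external servers and matches the upstream transport to the one the client's query arrived on. A hedged sketch of that retry loop using the `github.com/miekg/dns` client API the resolver already depends on (a real run needs network access):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// maxExtDNS mirrors the constant above: max number of external servers to try.
const maxExtDNS = 3

// tryExternalDNS forwards a query to up to maxExtDNS external servers in
// order and returns the first successful response. proto is "udp" or "tcp",
// matching whichever listener received the client's query.
func tryExternalDNS(query *dns.Msg, servers []string, proto string) (*dns.Msg, error) {
	n := len(servers)
	if n > maxExtDNS {
		n = maxExtDNS
	}
	c := &dns.Client{Net: proto}
	var lastErr error
	for i := 0; i < n; i++ {
		resp, _, err := c.Exchange(query, fmt.Sprintf("%s:%d", servers[i], 53))
		if err == nil {
			return resp, nil
		}
		lastErr = err
	}
	return nil, lastErr
}

func main() {
	m := new(dns.Msg)
	m.SetQuestion("docker.com.", dns.TypeA)
	if resp, err := tryExternalDNS(m, []string{"8.8.8.8", "8.8.4.4"}, "udp"); err == nil {
		fmt.Println(resp.Answer)
	}
}
```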
type Sandbox interface { // ResolveIP returns the service name for the passed in IP. IP is in reverse dotted // notation; the format used for DNS PTR records ResolveIP(name string) string + // Endpoints returns all the endpoints connected to the sandbox + Endpoints() []Endpoint } // SandboxOption is a option setter function type used to pass varios options to @@ -160,6 +163,10 @@ func (sb *sandbox) Statistics() (map[string]*types.InterfaceStatistics, error) { } func (sb *sandbox) Delete() error { + return sb.delete(false) +} + +func (sb *sandbox) delete(force bool) error { sb.Lock() if sb.inDelete { sb.Unlock() @@ -181,12 +188,6 @@ func (sb *sandbox) Delete() error { // Detach from all endpoints retain := false for _, ep := range sb.getConnectedEndpoints() { - // endpoint in the Gateway network will be cleaned up - // when when sandbox no longer needs external connectivity - if ep.endpointInGWNetwork() { - continue - } - // Retain the sanbdox if we can't obtain the network from store. if _, err := c.getNetworkFromStore(ep.getNetwork().ID()); err != nil { retain = true @@ -194,11 +195,13 @@ func (sb *sandbox) Delete() error { continue } - if err := ep.Leave(sb); err != nil { - log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) + if !force { + if err := ep.Leave(sb); err != nil { + log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) + } } - if err := ep.Delete(false); err != nil { + if err := ep.Delete(force); err != nil { log.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err) } } @@ -316,11 +319,15 @@ func (sb *sandbox) startResolver() { } }() - sb.rebuildDNS() + err = sb.rebuildDNS() + if err != nil { + log.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err) + return + } sb.resolver.SetExtServers(sb.extDNS) sb.osSbox.InvokeFunc(sb.resolver.SetupFunc()) - if err := sb.resolver.Start(); err != nil { + if err = sb.resolver.Start(); err != nil { log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err) } }) @@ -342,6 +349,17 @@ func (sb *sandbox) setupResolutionFiles() error { return nil } +func (sb *sandbox) Endpoints() []Endpoint { + sb.Lock() + defer sb.Unlock() + + endpoints := make([]Endpoint, len(sb.endpoints)) + for i, ep := range sb.endpoints { + endpoints[i] = ep + } + return endpoints +} + func (sb *sandbox) getConnectedEndpoints() []*endpoint { sb.Lock() defer sb.Unlock() @@ -421,23 +439,51 @@ func (sb *sandbox) ResolveIP(ip string) string { func (sb *sandbox) ResolveName(name string) net.IP { var ip net.IP - parts := strings.Split(name, ".") - log.Debugf("To resolve %v", parts) - reqName := parts[0] - networkName := "" - if len(parts) > 1 { - networkName = parts[1] + // Embedded server owns the docker network domain. Resolution should work + // for both container_name and container_name.network_name + // We allow '.' in service name and network name. 
For a name a.b.c.d the
+	// following have to be tried:
+	// {a.b.c.d in the networks container is connected to}
+	// {a.b.c in network d},
+	// {a.b in network c.d},
+	// {a in network b.c.d},
+
+	name = strings.TrimSuffix(name, ".")
+	reqName := []string{name}
+	networkName := []string{""}
+
+	if strings.Contains(name, ".") {
+		var i int
+		dup := name
+		for {
+			if i = strings.LastIndex(dup, "."); i == -1 {
+				break
+			}
+			networkName = append(networkName, name[i+1:])
+			reqName = append(reqName, name[:i])
+
+			dup = dup[:i]
+		}
 	}
+
 	epList := sb.getConnectedEndpoints()
-	// First check for local container alias
-	ip = sb.resolveName(reqName, networkName, epList, true)
-	if ip != nil {
-		return ip
-	}
+	for i := 0; i < len(reqName); i++ {
+		log.Debugf("To resolve: %v in %v", reqName[i], networkName[i])
 
-	// Resolve the actual container name
-	return sb.resolveName(reqName, networkName, epList, false)
+		// First check for local container alias
+		ip = sb.resolveName(reqName[i], networkName[i], epList, true)
+		if ip != nil {
+			return ip
+		}
+
+		// Resolve the actual container name
+		ip = sb.resolveName(reqName[i], networkName[i], epList, false)
+		if ip != nil {
+			return ip
+		}
+	}
+	return nil
 }
 
 func (sb *sandbox) resolveName(req string, networkName string, epList []*endpoint, alias bool) net.IP {
@@ -543,7 +589,7 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *endpoint) {
 		// Only remove the interfaces owned by this endpoint from the sandbox.
 		if ep.hasInterface(i.SrcName()) {
 			if err := i.Remove(); err != nil {
-				log.Debugf("Remove interface failed: %v", err)
+				log.Debugf("Remove interface %s failed: %v", i.SrcName(), err)
 			}
 		}
 	}
@@ -817,7 +863,7 @@ func (sb *sandbox) setupDNS() error {
 	if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 {
 		var (
 			err            error
-			dnsList        = resolvconf.GetNameservers(currRC.Content)
+			dnsList        = resolvconf.GetNameservers(currRC.Content, netutils.IP)
 			dnsSearchList  = resolvconf.GetSearchDomains(currRC.Content)
 			dnsOptionsList = resolvconf.GetOptions(currRC.Content)
 		)
@@ -859,6 +905,11 @@ func (sb *sandbox) updateDNS(ipv6Enabled bool) error {
 		hashFile = sb.config.resolvConfHashFile
 	)
 
+	// This is for the host mode networking
+	if sb.config.originResolvConfPath != "" {
+		return nil
+	}
+
 	if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 {
 		return nil
 	}
@@ -891,36 +942,21 @@
 	if err != nil {
 		return err
 	}
+	err = ioutil.WriteFile(sb.config.resolvConfPath, newRC.Content, 0644)
+	if err != nil {
+		return err
+	}
 
-	// for atomic updates to these files, use temporary files with os.Rename:
+	// write the new hash in a temp file and rename it to make the update atomic
 	dir := path.Dir(sb.config.resolvConfPath)
 	tmpHashFile, err := ioutil.TempFile(dir, "hash")
 	if err != nil {
 		return err
 	}
-	tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
-	if err != nil {
-		return err
-	}
-
-	// Change the perms to filePerm (0644) since ioutil.TempFile creates it by default as 0600
-	if err := os.Chmod(tmpResolvFile.Name(), filePerm); err != nil {
-		return err
-	}
-
-	// write the updates to the temp files
 	if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newRC.Hash), filePerm); err != nil {
 		return err
 	}
-	if err = ioutil.WriteFile(tmpResolvFile.Name(), newRC.Content, filePerm); err != nil {
-		return err
-	}
-
-	// rename the temp files for atomic replace
-	if err = os.Rename(tmpHashFile.Name(), hashFile); err != nil {
-		return err
-	}
-
-	
return os.Rename(tmpResolvFile.Name(), sb.config.resolvConfPath) + return os.Rename(tmpHashFile.Name(), hashFile) } // Embedded DNS server has to be enabled for this sandbox. Rebuild the container's @@ -935,7 +971,8 @@ func (sb *sandbox) rebuildDNS() error { } // localhost entries have already been filtered out from the list - sb.extDNS = resolvconf.GetNameservers(currRC.Content) + // retain only the v4 servers in sb for forwarding the DNS queries + sb.extDNS = resolvconf.GetNameservers(currRC.Content, netutils.IPv4) var ( dnsList = []string{sb.resolver.NameServer()} @@ -943,26 +980,14 @@ func (sb *sandbox) rebuildDNS() error { dnsSearchList = resolvconf.GetSearchDomains(currRC.Content) ) + // external v6 DNS servers has to be listed in resolv.conf + dnsList = append(dnsList, resolvconf.GetNameservers(currRC.Content, netutils.IPv6)...) + // Resolver returns the options in the format resolv.conf expects dnsOptionsList = append(dnsOptionsList, sb.resolver.ResolverOptions()...) - dir := path.Dir(sb.config.resolvConfPath) - tmpResolvFile, err := ioutil.TempFile(dir, "resolv") - if err != nil { - return err - } - - // Change the perms to filePerm (0644) since ioutil.TempFile creates it by default as 0600 - if err := os.Chmod(tmpResolvFile.Name(), filePerm); err != nil { - return err - } - - _, err = resolvconf.Build(tmpResolvFile.Name(), dnsList, dnsSearchList, dnsOptionsList) - if err != nil { - return err - } - - return os.Rename(tmpResolvFile.Name(), sb.config.resolvConfPath) + _, err = resolvconf.Build(sb.config.resolvConfPath, dnsList, dnsSearchList, dnsOptionsList) + return err } // joinLeaveStart waits to ensure there are no joins or leaves in progress and diff --git a/vendor/src/github.com/docker/libnetwork/sandbox_store.go b/vendor/src/github.com/docker/libnetwork/sandbox_store.go index 61eda408e4..d3f327193e 100644 --- a/vendor/src/github.com/docker/libnetwork/sandbox_store.go +++ b/vendor/src/github.com/docker/libnetwork/sandbox_store.go @@ -226,7 +226,7 @@ func (c *controller) sandboxCleanup() { heap.Push(&sb.endpoints, ep) } - if err := sb.Delete(); err != nil { + if err := sb.delete(true); err != nil { logrus.Errorf("failed to delete sandbox %s while trying to cleanup: %v", sb.id, err) } } diff --git a/vendor/src/github.com/docker/libnetwork/store.go b/vendor/src/github.com/docker/libnetwork/store.go index be3e8ae638..89248800c9 100644 --- a/vendor/src/github.com/docker/libnetwork/store.go +++ b/vendor/src/github.com/docker/libnetwork/store.go @@ -75,6 +75,7 @@ func (c *controller) getNetworkFromStore(nid string) (*network, error) { } n.epCnt = ec + n.scope = store.Scope() return n, nil } @@ -107,6 +108,7 @@ func (c *controller) getNetworksForScope(scope string) ([]*network, error) { } n.epCnt = ec + n.scope = scope nl = append(nl, n) } @@ -140,6 +142,7 @@ func (c *controller) getNetworksFromStore() ([]*network, error) { } n.epCnt = ec + n.scope = store.Scope() nl = append(nl, n) } } @@ -148,17 +151,21 @@ func (c *controller) getNetworksFromStore() ([]*network, error) { } func (n *network) getEndpointFromStore(eid string) (*endpoint, error) { - store := n.ctrlr.getStore(n.Scope()) - if store == nil { - return nil, fmt.Errorf("could not find endpoint %s: datastore not found for scope %s", eid, n.Scope()) + var errors []string + for _, store := range n.ctrlr.getStores() { + ep := &endpoint{id: eid, network: n} + err := store.GetObject(datastore.Key(ep.Key()...), ep) + // Continue searching in the next store if the key is not found in this store + if err != nil { + if err != 
datastore.ErrKeyNotFound { + errors = append(errors, fmt.Sprintf("{%s:%v}, ", store.Scope(), err)) + log.Debugf("could not find endpoint %s in %s: %v", eid, store.Scope(), err) + } + continue + } + return ep, nil } - - ep := &endpoint{id: eid, network: n} - err := store.GetObject(datastore.Key(ep.Key()...), ep) - if err != nil { - return nil, fmt.Errorf("could not find endpoint %s: %v", eid, err) - } - return ep, nil + return nil, fmt.Errorf("could not find endpoint %s: %v", eid, errors) } func (n *network) getEndpointsFromStore() ([]*endpoint, error) { diff --git a/vendor/src/github.com/docker/notary/Makefile b/vendor/src/github.com/docker/notary/Makefile index c632423ffa..949d79864a 100644 --- a/vendor/src/github.com/docker/notary/Makefile +++ b/vendor/src/github.com/docker/notary/Makefile @@ -34,7 +34,7 @@ _space := $(empty) $(empty) COVERDIR=.cover COVERPROFILE?=$(COVERDIR)/cover.out COVERMODE=count -PKGS = $(shell go list ./... | tr '\n' ' ') +PKGS ?= $(shell go list ./... | tr '\n' ' ') GO_VERSION = $(shell go version | awk '{print $$3}') @@ -118,12 +118,13 @@ protos: # be run first define gocover -$(GO_EXC) test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).cover" "$(1)" || exit 1; +$(GO_EXC) test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).coverage.txt" "$(1)" || exit 1; endef gen-cover: go_version @mkdir -p "$(COVERDIR)" $(foreach PKG,$(PKGS),$(call gocover,$(PKG))) + rm -f "$(COVERDIR)"/*testutils*.coverage.txt # Generates the cover binaries and runs them all in serial, so this can be used # run all tests with a yubikey without any problems @@ -139,6 +140,9 @@ ci: OPTS = -tags "${NOTARY_BUILDTAGS}" -race -coverpkg "$(shell ./coverpkg.sh $( # Codecov knows how to merge multiple coverage files, so covmerge is not needed ci: gen-cover +yubikey-tests: override PKGS = github.com/docker/notary/cmd/notary github.com/docker/notary/trustmanager/yubikey +yubikey-tests: ci + covmerge: @gocovmerge $(shell ls -1 $(COVERDIR)/* | tr "\n" " ") > $(COVERPROFILE) @go tool cover -func="$(COVERPROFILE)" diff --git a/vendor/src/github.com/docker/notary/certs/certmanager.go b/vendor/src/github.com/docker/notary/certs/certs.go similarity index 79% rename from vendor/src/github.com/docker/notary/certs/certmanager.go rename to vendor/src/github.com/docker/notary/certs/certs.go index e73af79ddd..d8ba0d9255 100644 --- a/vendor/src/github.com/docker/notary/certs/certmanager.go +++ b/vendor/src/github.com/docker/notary/certs/certs.go @@ -4,7 +4,6 @@ import ( "crypto/x509" "errors" "fmt" - "path/filepath" "time" "github.com/Sirupsen/logrus" @@ -13,14 +12,6 @@ import ( "github.com/docker/notary/tuf/signed" ) -// Manager is an abstraction around trusted root CA stores -type Manager struct { - trustedCAStore trustmanager.X509Store - trustedCertificateStore trustmanager.X509Store -} - -const trustDir = "trusted_certificates" - // ErrValidationFail is returned when there is no valid trusted certificates // being served inside of the roots.json type ErrValidationFail struct { @@ -45,63 +36,6 @@ func (err ErrRootRotationFail) Error() string { return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason) } -// NewManager returns an initialized Manager, or an error -// if it fails to load certificates -func NewManager(baseDir string) (*Manager, error) { - trustPath := filepath.Join(baseDir, trustDir) - - // Load all CAs that 
aren't expired and don't use SHA1 - trustedCAStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool { - return cert.IsCA && cert.BasicConstraintsValid && cert.SubjectKeyId != nil && - time.Now().Before(cert.NotAfter) && - cert.SignatureAlgorithm != x509.SHA1WithRSA && - cert.SignatureAlgorithm != x509.DSAWithSHA1 && - cert.SignatureAlgorithm != x509.ECDSAWithSHA1 - }) - if err != nil { - return nil, err - } - - // Load all individual (non-CA) certificates that aren't expired and don't use SHA1 - trustedCertificateStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool { - return !cert.IsCA && - time.Now().Before(cert.NotAfter) && - cert.SignatureAlgorithm != x509.SHA1WithRSA && - cert.SignatureAlgorithm != x509.DSAWithSHA1 && - cert.SignatureAlgorithm != x509.ECDSAWithSHA1 - }) - if err != nil { - return nil, err - } - - return &Manager{ - trustedCAStore: trustedCAStore, - trustedCertificateStore: trustedCertificateStore, - }, nil -} - -// TrustedCertificateStore returns the trusted certificate store being managed -// by this Manager -func (m *Manager) TrustedCertificateStore() trustmanager.X509Store { - return m.trustedCertificateStore -} - -// TrustedCAStore returns the CA store being managed by this Manager -func (m *Manager) TrustedCAStore() trustmanager.X509Store { - return m.trustedCAStore -} - -// AddTrustedCert adds a cert to the trusted certificate store (not the CA -// store) -func (m *Manager) AddTrustedCert(cert *x509.Certificate) { - m.trustedCertificateStore.AddCert(cert) -} - -// AddTrustedCACert adds a cert to the trusted CA certificate store -func (m *Manager) AddTrustedCACert(cert *x509.Certificate) { - m.trustedCAStore.AddCert(cert) -} - /* ValidateRoot receives a new root, validates its correctness and attempts to do root key rotation if needed. @@ -111,7 +45,7 @@ that list is non-empty means that we've already seen this repository before, and have a list of trusted certificates for it. In this case, we use this list of certificates to attempt to validate this root file. -If the previous validation suceeds, or in the case where we found no trusted +If the previous validation succeeds, or in the case where we found no trusted certificates for this particular GUN, we check the integrity of the root by making sure that it is validated by itself. This means that we will attempt to validate the root data with the certificates that are included in the root keys @@ -129,7 +63,7 @@ we are using the current public PKI to validate the first download of the certif adding an extra layer of security over the normal (SSH style) trust model. We shall call this: TOFUS. 
*/ -func (m *Manager) ValidateRoot(root *data.Signed, gun string) error { +func ValidateRoot(certStore trustmanager.X509Store, root *data.Signed, gun string) error { logrus.Debugf("entered ValidateRoot with dns: %s", gun) signedRoot, err := data.RootFromSigned(root) if err != nil { @@ -144,7 +78,7 @@ func (m *Manager) ValidateRoot(root *data.Signed, gun string) error { } // Retrieve all the trusted certificates that match this gun - certsForCN, err := m.trustedCertificateStore.GetCertificatesByCN(gun) + certsForCN, err := certStore.GetCertificatesByCN(gun) if err != nil { // If the error that we get back is different than ErrNoCertificatesFound // we couldn't check if there are any certificates with this CN already @@ -183,7 +117,7 @@ func (m *Manager) ValidateRoot(root *data.Signed, gun string) error { // Do root certificate rotation: we trust only the certs present in the new root // First we add all the new certificates (even if they already exist) for _, cert := range allValidCerts { - err := m.trustedCertificateStore.AddCert(cert) + err := certStore.AddCert(cert) if err != nil { // If the error is already exists we don't fail the rotation if _, ok := err.(*trustmanager.ErrCertExists); ok { @@ -197,7 +131,7 @@ func (m *Manager) ValidateRoot(root *data.Signed, gun string) error { // Now we delete old certificates that aren't present in the new root for certID, cert := range certsToRemove(certsForCN, allValidCerts) { logrus.Debugf("removing certificate with certID: %s", certID) - err = m.trustedCertificateStore.RemoveCert(cert) + err = certStore.RemoveCert(cert) if err != nil { logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err) return &ErrRootRotationFail{Reason: "failed to rotate root keys"} @@ -208,7 +142,7 @@ func (m *Manager) ValidateRoot(root *data.Signed, gun string) error { return nil } -// validRootLeafCerts returns a list of non-exipired, non-sha1 certificates whoose +// validRootLeafCerts returns a list of non-exipired, non-sha1 certificates whose // Common-Names match the provided GUN func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) { // Get a list of all of the leaf certificates present in root @@ -219,7 +153,8 @@ func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, for _, cert := range allLeafCerts { // Validate that this leaf certificate has a CN that matches the exact gun if cert.Subject.CommonName != gun { - logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName) + logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", + cert.Subject.CommonName, gun) continue } // Make sure the certificate is not expired diff --git a/vendor/src/github.com/docker/notary/circle.yml b/vendor/src/github.com/docker/notary/circle.yml index 10a28305d3..163d610f24 100644 --- a/vendor/src/github.com/docker/notary/circle.yml +++ b/vendor/src/github.com/docker/notary/circle.yml @@ -40,8 +40,7 @@ dependencies: # For the stable go version, additionally install linting tools - > gvm use stable && - go get github.com/golang/lint/golint github.com/wadey/gocovmerge && - go install github.com/wadey/gocovmerge + go get github.com/golang/lint/golint test: pre: # Output the go versions we are going to test @@ -72,11 +71,6 @@ test: pwd: $BASE_STABLE post: - - gvm use stable && make covmerge: - timeout: 600 - parallel: true - pwd: $BASE_STABLE - # Report to codecov.io - bash <(curl -s https://codecov.io/bash): parallel: true diff --git 
a/vendor/src/github.com/docker/notary/client/client.go b/vendor/src/github.com/docker/notary/client/client.go index a3c7b7fe29..b383c94dca 100644 --- a/vendor/src/github.com/docker/notary/client/client.go +++ b/vendor/src/github.com/docker/notary/client/client.go @@ -9,10 +9,10 @@ import ( "net/url" "os" "path/filepath" - "strings" "time" "github.com/Sirupsen/logrus" + "github.com/docker/notary" "github.com/docker/notary/certs" "github.com/docker/notary/client/changelist" "github.com/docker/notary/cryptoservice" @@ -53,9 +53,9 @@ type ErrInvalidRemoteRole struct { Role string } -func (e ErrInvalidRemoteRole) Error() string { +func (err ErrInvalidRemoteRole) Error() string { return fmt.Sprintf( - "notary does not support the server managing the %s key", e.Role) + "notary does not support the server managing the %s key", err.Role) } // ErrRepositoryNotExist is returned when an action is taken on a remote @@ -84,7 +84,7 @@ type NotaryRepository struct { CryptoService signed.CryptoService tufRepo *tuf.Repo roundTrip http.RoundTripper - CertManager *certs.Manager + CertStore trustmanager.X509Store } // repositoryFromKeystores is a helper function for NewNotaryRepository that @@ -93,7 +93,11 @@ type NotaryRepository struct { func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper, keyStores []trustmanager.KeyStore) (*NotaryRepository, error) { - certManager, err := certs.NewManager(baseDir) + certPath := filepath.Join(baseDir, notary.TrustedCertsDir) + certStore, err := trustmanager.NewX509FilteredFileStore( + certPath, + trustmanager.FilterCertsExpiredSha1, + ) if err != nil { return nil, err } @@ -107,7 +111,7 @@ func repositoryFromKeystores(baseDir, gun, baseURL string, rt http.RoundTripper, tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)), CryptoService: cryptoService, roundTrip: rt, - CertManager: certManager, + CertStore: certStore, } fileStore, err := store.NewFilesystemStore( @@ -165,7 +169,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st // currently we only support server managing timestamps and snapshots, and // nothing else - timestamps are always managed by the server, and implicit // (do not have to be passed in as part of `serverManagedRoles`, so that - // the API of Initialize doens't change). + // the API of Initialize doesn't change). var serverManagesSnapshot bool locallyManagedKeys := []string{ data.CanonicalTargetsRole, @@ -197,7 +201,7 @@ func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...st if err != nil { return err } - r.CertManager.AddTrustedCert(rootCert) + r.CertStore.AddCert(rootCert) // The root key gets stored in the TUF metadata X509 encoded, linking // the tuf root.json to our X509 PKI. @@ -275,8 +279,6 @@ func addChange(cl *changelist.FileChangelist, c changelist.Change, roles ...stri var changes []changelist.Change for _, role := range roles { - role = strings.ToLower(role) - // Ensure we can only add targets to the CanonicalTargetsRole, // or a Delegation role (which is /something else) if role != data.CanonicalTargetsRole && !data.IsDelegation(role) { @@ -347,7 +349,7 @@ func (r *NotaryRepository) AddDelegation(name string, threshold int, // the repository when the changelist gets applied at publish time. // This does not validate that the delegation exists, since one might exist // after applying all changes. 
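With `certs.Manager` gone, callers now build the filtered certificate store themselves and hand it to the free-standing `certs.ValidateRoot`. A sketch of that wiring, using only identifiers introduced in this diff; the base directory, GUN, and nil root are placeholders for illustration (a real root comes from downloaded root.json metadata):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/docker/notary"
	"github.com/docker/notary/certs"
	"github.com/docker/notary/trustmanager"
	"github.com/docker/notary/tuf/data"
)

// validateRootForGUN builds the filtered X509 file store (replacing the old
// certs.Manager) and delegates to the free-standing certs.ValidateRoot.
func validateRootForGUN(baseDir, gun string, root *data.Signed) error {
	certPath := filepath.Join(baseDir, notary.TrustedCertsDir)
	certStore, err := trustmanager.NewX509FilteredFileStore(
		certPath,
		trustmanager.FilterCertsExpiredSha1,
	)
	if err != nil {
		return err
	}
	return certs.ValidateRoot(certStore, root, gun)
}

func main() {
	// Illustrative only: validation fails here because no real root is given.
	if err := validateRootForGUN("/tmp/notary", "docker.com/library/alpine", nil); err != nil {
		log.Println("validation failed:", err)
	}
}
```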
-func (r *NotaryRepository) RemoveDelegation(name string) error { +func (r *NotaryRepository) RemoveDelegation(name string, keyIDs, paths []string, removeAll bool) error { if !data.IsDelegation(name) { return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"} @@ -360,20 +362,41 @@ func (r *NotaryRepository) RemoveDelegation(name string) error { defer cl.Close() logrus.Debugf(`Removing delegation "%s"\n`, name) + var template *changelist.TufChange - template := changelist.NewTufChange( - changelist.ActionDelete, - name, - changelist.TypeTargetsDelegation, - "", // no path - nil, - ) + // We use the Delete action only for force removal, Update is used for removing individual keys and paths + if removeAll { + template = changelist.NewTufChange( + changelist.ActionDelete, + name, + changelist.TypeTargetsDelegation, + "", // no path + nil, // deleting role, no data needed + ) + + } else { + tdJSON, err := json.Marshal(&changelist.TufDelegation{ + RemoveKeys: keyIDs, + RemovePaths: paths, + }) + if err != nil { + return err + } + + template = changelist.NewTufChange( + changelist.ActionUpdate, + name, + changelist.TypeTargetsDelegation, + "", // no path + tdJSON, + ) + } return addChange(cl, template, name) } // AddTarget creates new changelist entries to add a target to the given roles -// in the repository when the changelist gets appied at publish time. +// in the repository when the changelist gets applied at publish time. // If roles are unspecified, the default role is "targets". func (r *NotaryRepository) AddTarget(target *Target, roles ...string) error { @@ -431,7 +454,7 @@ func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, erro for _, role := range roles { // we don't need to do anything special with removing role from // roles because listSubtree always processes role and only excludes - // descendent delegations that appear in roles. + // descendant delegations that appear in roles. r.listSubtree(targets, role, roles...) } @@ -509,6 +532,92 @@ func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) { return cl, nil } +// GetDelegationRoles returns the keys and roles of the repository's delegations +func (r *NotaryRepository) GetDelegationRoles() ([]*data.Role, error) { + // Update state of the repo to latest + if _, err := r.Update(false); err != nil { + return nil, err + } + + // All top level delegations (ex: targets/level1) are stored exclusively in targets.json + targets, ok := r.tufRepo.Targets[data.CanonicalTargetsRole] + if !ok { + return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole} + } + + allDelegations := targets.Signed.Delegations.Roles + + // make a copy for traversing nested delegations + delegationsList := make([]*data.Role, len(allDelegations)) + copy(delegationsList, allDelegations) + + // Now traverse to lower level delegations (ex: targets/level1/level2) + for len(delegationsList) > 0 { + // Pop off first delegation to traverse + delegation := delegationsList[0] + delegationsList = delegationsList[1:] + + // Get metadata + delegationMeta, ok := r.tufRepo.Targets[delegation.Name] + // If we get an error, don't try to traverse further into this subtree because it doesn't exist or is malformed + if !ok { + continue + } + + // Add nested delegations to return list and exploration list + allDelegations = append(allDelegations, delegationMeta.Signed.Delegations.Roles...) + delegationsList = append(delegationsList, delegationMeta.Signed.Delegations.Roles...) 
+ } + return allDelegations, nil +} + +// RoleWithSignatures is a Role with its associated signatures +type RoleWithSignatures struct { + Signatures []data.Signature + data.Role +} + +// ListRoles returns a list of RoleWithSignatures objects for this repo +// This represents the latest metadata for each role in this repo +func (r *NotaryRepository) ListRoles() ([]RoleWithSignatures, error) { + // Update to latest repo state + _, err := r.Update(false) + if err != nil { + return nil, err + } + + // Get all role info from our updated keysDB, can be empty + roles := r.tufRepo.GetAllLoadedRoles() + + var roleWithSigs []RoleWithSignatures + + // Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata + for _, role := range roles { + roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil} + switch role.Name { + case data.CanonicalRootRole: + roleWithSig.Signatures = r.tufRepo.Root.Signatures + case data.CanonicalTargetsRole: + roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures + case data.CanonicalSnapshotRole: + roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures + case data.CanonicalTimestampRole: + roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures + default: + // If the role isn't a delegation, we should error -- this is only possible if we have invalid keyDB state + if !data.IsDelegation(role.Name) { + return nil, data.ErrInvalidRole{Role: role.Name, Reason: "invalid role name"} + } + if _, ok := r.tufRepo.Targets[role.Name]; ok { + // We'll only find a signature if we've published any targets with this delegation + roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures + } + } + roleWithSigs = append(roleWithSigs, roleWithSig) + } + return roleWithSigs, nil +} + // Publish pushes the local changes in signed material to the remote notary-server // Conceptually it performs an operation similar to a `git rebase` func (r *NotaryRepository) Publish() error { @@ -837,7 +946,7 @@ func (r *NotaryRepository) validateRoot(rootJSON []byte) (*data.SignedRoot, erro return nil, err } - err = r.CertManager.ValidateRoot(root, r.gun) + err = certs.ValidateRoot(r.CertStore, root, r.gun) if err != nil { return nil, err } @@ -904,3 +1013,27 @@ func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.Publi } return nil } + +// DeleteTrustData removes the trust data stored for this repo in the TUF cache and certificate store on the client side +func (r *NotaryRepository) DeleteTrustData() error { + // Clear TUF files and cache + if err := r.fileStore.RemoveAll(); err != nil { + return fmt.Errorf("error clearing TUF repo data: %v", err) + } + r.tufRepo = tuf.NewRepo(nil, nil) + // Clear certificates + certificates, err := r.CertStore.GetCertificatesByCN(r.gun) + if err != nil { + // If there were no certificates to delete, we're done + if _, ok := err.(*trustmanager.ErrNoCertificatesFound); ok { + return nil + } + return fmt.Errorf("error retrieving certificates for %s: %v", r.gun, err) + } + for _, cert := range certificates { + if err := r.CertStore.RemoveCert(cert); err != nil { + return fmt.Errorf("error removing certificate: %v: %v", cert, err) + } + } + return nil +} diff --git a/vendor/src/github.com/docker/notary/client/helpers.go b/vendor/src/github.com/docker/notary/client/helpers.go index 304ac3d621..a9fd590a9f 100644 --- a/vendor/src/github.com/docker/notary/client/helpers.go +++ b/vendor/src/github.com/docker/notary/client/helpers.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "path" + "strings" "time" 
"github.com/Sirupsen/logrus" @@ -85,13 +86,13 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error { return err } if err == nil { - // role existed - return data.ErrInvalidRole{ - Role: c.Scope(), - Reason: "cannot create a role that already exists", + // role existed, attempt to merge paths and keys + if err := r.AddPaths(td.AddPaths); err != nil { + return err } + return repo.UpdateDelegations(r, td.AddKeys) } - // role doesn't exist, create brand new + // create brand new role r, err = td.ToNewRole(c.Scope()) if err != nil { return err @@ -107,7 +108,12 @@ func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error { if err != nil { return err } - // role exists, merge + // If we specify the only keys left delete the role, else just delete specified keys + if strings.Join(r.KeyIDs, ";") == strings.Join(td.RemoveKeys, ";") && len(td.AddKeys) == 0 { + r := data.Role{Name: c.Scope()} + return repo.DeleteDelegation(r) + } + // if we aren't deleting and the role exists, merge if err := r.AddPaths(td.AddPaths); err != nil { return err } diff --git a/vendor/src/github.com/docker/notary/const.go b/vendor/src/github.com/docker/notary/const.go index 3c22c05024..a1140c0dc0 100644 --- a/vendor/src/github.com/docker/notary/const.go +++ b/vendor/src/github.com/docker/notary/const.go @@ -2,6 +2,16 @@ package notary // application wide constants const ( + // MinRSABitSize is the minimum bit size for RSA keys allowed in notary + MinRSABitSize = 2048 + // MinThreshold requires a minimum of one threshold for roles; currently we do not support a higher threshold + MinThreshold = 1 + // PrivKeyPerms are the file permissions to use when writing private keys to disk PrivKeyPerms = 0700 + // PubCertPerms are the file permissions to use when writing public certificates to disk PubCertPerms = 0755 + // Sha256HexSize is how big a Sha256 hex is in number of characters + Sha256HexSize = 64 + // TrustedCertsDir is the directory, under the notary repo base directory, where trusted certs are stored + TrustedCertsDir = "trusted_certificates" ) diff --git a/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go b/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go index f5bfa073b0..5488c8feff 100644 --- a/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go +++ b/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go @@ -69,8 +69,8 @@ func (cs *CryptoService) Create(role, algorithm string) (data.PublicKey, error) if err != nil { return nil, fmt.Errorf("failed to add key to filestore: %v", err) } - return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons") + return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons") } // GetPrivateKey returns a private key and role if present by ID. 
diff --git a/vendor/src/github.com/docker/notary/trustmanager/x509utils.go b/vendor/src/github.com/docker/notary/trustmanager/x509utils.go index 6b0bc76258..f39ca8eb22 100644 --- a/vendor/src/github.com/docker/notary/trustmanager/x509utils.go +++ b/vendor/src/github.com/docker/notary/trustmanager/x509utils.go @@ -19,6 +19,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/agl/ed25519" + "github.com/docker/notary" "github.com/docker/notary/tuf/data" ) @@ -205,7 +206,8 @@ func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate { // GetIntermediateCerts parses a list of x509 Certificates and returns all of the // ones marked as a CA, to be used as intermediates -func GetIntermediateCerts(certs []*x509.Certificate) (intCerts []*x509.Certificate) { +func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate { + var intCerts []*x509.Certificate for _, cert := range certs { if cert.IsCA { intCerts = append(intCerts, cert) @@ -299,6 +301,54 @@ func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, er } } +// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate. +func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) { + pemBlock, _ := pem.Decode(pubKeyBytes) + if pemBlock == nil { + return nil, errors.New("no valid public key found") + } + + switch pemBlock.Type { + case "CERTIFICATE": + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("could not parse provided certificate: %v", err) + } + err = ValidateCertificate(cert) + if err != nil { + return nil, fmt.Errorf("invalid certificate: %v", err) + } + return CertToKey(cert), nil + default: + return nil, fmt.Errorf("unsupported PEM block type %q, expected certificate", pemBlock.Type) + } +} + +// ValidateCertificate returns an error if the certificate is not valid for notary +// Currently this is only a time expiry check, and ensuring the public key has a large enough modulus if RSA +func ValidateCertificate(c *x509.Certificate) error { + if (c.NotBefore).After(c.NotAfter) { + return fmt.Errorf("certificate validity window is invalid") + } + now := time.Now() + tomorrow := now.AddDate(0, 0, 1) + // Give one day leeway on creation "before" time, check "after" against today + if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) { + return fmt.Errorf("certificate is expired") + } + // If we have an RSA key, make sure it's long enough + if c.PublicKeyAlgorithm == x509.RSA { + rsaKey, ok := c.PublicKey.(*rsa.PublicKey) + if !ok { + return fmt.Errorf("unable to parse RSA public key") + } + if rsaKey.N.BitLen() < notary.MinRSABitSize { + return fmt.Errorf("RSA bit length is too short") + } + } + return nil +} + // GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) { rsaPrivKey, err := rsa.GenerateKey(random, bits) @@ -532,3 +582,14 @@ func X509PublicKeyID(certPubKey data.PublicKey) (string, error) { return key.ID(), nil } + +// FilterCertsExpiredSha1 can be used as the filter function to cert store +// initializers to filter out all expired or SHA-1 certificate that we +// shouldn't load. 
+func FilterCertsExpiredSha1(cert *x509.Certificate) bool { + return !cert.IsCA && + time.Now().Before(cert.NotAfter) && + cert.SignatureAlgorithm != x509.SHA1WithRSA && + cert.SignatureAlgorithm != x509.DSAWithSHA1 && + cert.SignatureAlgorithm != x509.ECDSAWithSHA1 +} diff --git a/vendor/src/github.com/docker/notary/tuf/client/client.go b/vendor/src/github.com/docker/notary/tuf/client/client.go index 263ee428b1..0eaa8c87e7 100644 --- a/vendor/src/github.com/docker/notary/tuf/client/client.go +++ b/vendor/src/github.com/docker/notary/tuf/client/client.go @@ -54,7 +54,7 @@ func (c *Client) Update() error { if err != nil { logrus.Debug("Error occurred. Root will be downloaded and another update attempted") if err := c.downloadRoot(); err != nil { - logrus.Error("client Update (Root):", err) + logrus.Debug("Client Update (Root):", err) return err } // If we error again, we now have the latest root and just want to fail @@ -68,12 +68,12 @@ func (c *Client) Update() error { func (c *Client) update() error { err := c.downloadTimestamp() if err != nil { - logrus.Errorf("Client Update (Timestamp): %s", err.Error()) + logrus.Debugf("Client Update (Timestamp): %s", err.Error()) return err } err = c.downloadSnapshot() if err != nil { - logrus.Errorf("Client Update (Snapshot): %s", err.Error()) + logrus.Debugf("Client Update (Snapshot): %s", err.Error()) return err } err = c.checkRoot() @@ -86,7 +86,7 @@ func (c *Client) update() error { // will always need top level targets at a minimum err = c.downloadTargets("targets") if err != nil { - logrus.Errorf("Client Update (Targets): %s", err.Error()) + logrus.Debugf("Client Update (Targets): %s", err.Error()) return err } return nil @@ -247,28 +247,27 @@ func (c *Client) downloadTimestamp() error { // We may not have a cached timestamp if this is the first time // we're interacting with the repo. This will result in the // version being 0 - var download bool - old := &data.Signed{} - version := 0 + var ( + saveToCache bool + old *data.Signed + version = 0 + ) cachedTS, err := c.cache.GetMeta(role, maxSize) if err == nil { - err := json.Unmarshal(cachedTS, old) + cached := &data.Signed{} + err := json.Unmarshal(cachedTS, cached) if err == nil { - ts, err := data.TimestampFromSigned(old) + ts, err := data.TimestampFromSigned(cached) if err == nil { version = ts.Signed.Version } - } else { - old = nil + old = cached } } // unlike root, targets and snapshot, always try and download timestamps // from remote, only using the cache one if we couldn't reach remote. 
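The reworked `downloadTimestamp` always tries the remote first, falls back to a cached copy only when the remote is unreachable, and writes to the cache only after a fresh download. A stand-alone sketch of that control flow with stub fetchers, not the real TUF store API:

```go
package main

import (
	"errors"
	"log"
)

// fetchTimestamp mirrors the fallback flow: try remote first; serve the cache
// only when remote fails and a valid cached copy exists; persist only freshly
// downloaded data (the saveToCache case above).
func fetchTimestamp(remote, cache func() ([]byte, error), save func([]byte)) ([]byte, error) {
	raw, err := remote()
	if err != nil || len(raw) == 0 {
		cached, cerr := cache()
		if cerr != nil {
			if err == nil {
				// remote returned nothing and no valid cache exists
				return nil, errors.New("no timestamp available")
			}
			return nil, err
		}
		log.Print("using cached timestamp - this might not be the latest version available remotely")
		return cached, nil
	}
	save(raw)
	return raw, nil
}

func main() {
	remote := func() ([]byte, error) { return nil, errors.New("network unreachable") }
	cache := func() ([]byte, error) { return []byte(`{"signed":{}}`), nil }
	save := func([]byte) {}

	if ts, err := fetchTimestamp(remote, cache, save); err == nil {
		log.Printf("got timestamp: %s", ts)
	}
}
```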
raw, s, err := c.downloadSigned(role, maxSize, nil) if err != nil || len(raw) == 0 { - if err, ok := err.(store.ErrMetaNotFound); ok { - return err - } if old == nil { if err == nil { // couldn't retrieve data from server and don't have valid @@ -277,17 +276,18 @@ func (c *Client) downloadTimestamp() error { } return err } - logrus.Debug("using cached timestamp") + logrus.Debug(err.Error()) + logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely") s = old } else { - download = true + saveToCache = true } err = signed.Verify(s, role, version, c.keysDB) if err != nil { return err } logrus.Debug("successfully verified timestamp") - if download { + if saveToCache { c.cache.SetMeta(role, raw) } ts, err := data.TimestampFromSigned(s) @@ -327,7 +327,7 @@ func (c *Client) downloadSnapshot() error { } err := json.Unmarshal(raw, old) if err == nil { - snap, err := data.TimestampFromSigned(old) + snap, err := data.SnapshotFromSigned(old) if err == nil { version = snap.Signed.Version } else { diff --git a/vendor/src/github.com/docker/notary/tuf/data/keys.go b/vendor/src/github.com/docker/notary/tuf/data/keys.go index 1c2c60e92a..9f94d5552f 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/keys.go +++ b/vendor/src/github.com/docker/notary/tuf/data/keys.go @@ -14,7 +14,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/agl/ed25519" - "github.com/jfrazelle/go/canonical/json" + "github.com/docker/go/canonical/json" ) // PublicKey is the necessary interface for public keys diff --git a/vendor/src/github.com/docker/notary/tuf/data/roles.go b/vendor/src/github.com/docker/notary/tuf/data/roles.go index 25e9ba4572..a505c92304 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/roles.go +++ b/vendor/src/github.com/docker/notary/tuf/data/roles.go @@ -2,6 +2,7 @@ package data import ( "fmt" + "github.com/Sirupsen/logrus" "path" "regexp" "strings" @@ -109,10 +110,7 @@ func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []strin } if IsDelegation(name) { if len(paths) == 0 && len(pathHashPrefixes) == 0 { - return nil, ErrInvalidRole{ - Role: name, - Reason: "roles with no Paths and no PathHashPrefixes will never be able to publish content", - } + logrus.Debugf("role %s with no Paths and no PathHashPrefixes will never be able to publish content until one or more are added", name) } } if threshold < 1 { diff --git a/vendor/src/github.com/docker/notary/tuf/data/root.go b/vendor/src/github.com/docker/notary/tuf/data/root.go index e555cbd2f5..bd479206fd 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/root.go +++ b/vendor/src/github.com/docker/notary/tuf/data/root.go @@ -3,7 +3,7 @@ package data import ( "time" - "github.com/jfrazelle/go/canonical/json" + "github.com/docker/go/canonical/json" ) // SignedRoot is a fully unpacked root.json diff --git a/vendor/src/github.com/docker/notary/tuf/data/serializer.go b/vendor/src/github.com/docker/notary/tuf/data/serializer.go index 91fa1bc93e..5c33d129b4 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/serializer.go +++ b/vendor/src/github.com/docker/notary/tuf/data/serializer.go @@ -1,6 +1,6 @@ package data -import "github.com/jfrazelle/go/canonical/json" +import "github.com/docker/go/canonical/json" // Serializer is an interface that can marshal and unmarshal TUF data. 
This // is expected to be a canonical JSON marshaller diff --git a/vendor/src/github.com/docker/notary/tuf/data/snapshot.go b/vendor/src/github.com/docker/notary/tuf/data/snapshot.go index ca23d20ff9..f13951ca83 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/snapshot.go +++ b/vendor/src/github.com/docker/notary/tuf/data/snapshot.go @@ -5,7 +5,7 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/jfrazelle/go/canonical/json" + "github.com/docker/go/canonical/json" ) // SignedSnapshot is a fully unpacked snapshot.json diff --git a/vendor/src/github.com/docker/notary/tuf/data/targets.go b/vendor/src/github.com/docker/notary/tuf/data/targets.go index 61265ca054..a538d6afa5 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/targets.go +++ b/vendor/src/github.com/docker/notary/tuf/data/targets.go @@ -5,7 +5,7 @@ import ( "encoding/hex" "errors" - "github.com/jfrazelle/go/canonical/json" + "github.com/docker/go/canonical/json" ) // SignedTargets is a fully unpacked targets.json, or target delegation diff --git a/vendor/src/github.com/docker/notary/tuf/data/timestamp.go b/vendor/src/github.com/docker/notary/tuf/data/timestamp.go index da5367ab47..f68252ca5b 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/timestamp.go +++ b/vendor/src/github.com/docker/notary/tuf/data/timestamp.go @@ -4,7 +4,7 @@ import ( "bytes" "time" - "github.com/jfrazelle/go/canonical/json" + "github.com/docker/go/canonical/json" ) // SignedTimestamp is a fully unpacked timestamp.json diff --git a/vendor/src/github.com/docker/notary/tuf/data/types.go b/vendor/src/github.com/docker/notary/tuf/data/types.go index 61a311664f..6459b8e664 100644 --- a/vendor/src/github.com/docker/notary/tuf/data/types.go +++ b/vendor/src/github.com/docker/notary/tuf/data/types.go @@ -11,7 +11,7 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/jfrazelle/go/canonical/json" + "github.com/docker/go/canonical/json" ) // SigAlgorithm for types of signatures diff --git a/vendor/src/github.com/docker/notary/tuf/keys/db.go b/vendor/src/github.com/docker/notary/tuf/keys/db.go index 2e27d052cd..92d2ef863e 100644 --- a/vendor/src/github.com/docker/notary/tuf/keys/db.go +++ b/vendor/src/github.com/docker/notary/tuf/keys/db.go @@ -58,6 +58,15 @@ func (db *KeyDB) AddRole(r *data.Role) error { return nil } +// GetAllRoles gets all roles from the database +func (db *KeyDB) GetAllRoles() []*data.Role { + roles := []*data.Role{} + for _, role := range db.roles { + roles = append(roles, role) + } + return roles +} + // GetKey pulls a key out of the database by its ID func (db *KeyDB) GetKey(id string) data.PublicKey { return db.keys[id] diff --git a/vendor/src/github.com/docker/notary/tuf/signed/verifiers.go b/vendor/src/github.com/docker/notary/tuf/signed/verifiers.go index 4570bce3df..792186366d 100644 --- a/vendor/src/github.com/docker/notary/tuf/signed/verifiers.go +++ b/vendor/src/github.com/docker/notary/tuf/signed/verifiers.go @@ -60,7 +60,7 @@ func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) erro } var sigBytes [ed25519.SignatureSize]byte if len(sig) != ed25519.SignatureSize { - logrus.Infof("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig)) + logrus.Debugf("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig)) return ErrInvalid } copy(sigBytes[:], sig) @@ -78,7 +78,7 @@ func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) erro } if !ed25519.Verify(&keyBytes, msg, &sigBytes) { - 
logrus.Infof("failed ed25519 verification") + logrus.Debugf("failed ed25519 verification") return ErrInvalid } return nil @@ -87,23 +87,23 @@ func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) erro func verifyPSS(key interface{}, digest, sig []byte) error { rsaPub, ok := key.(*rsa.PublicKey) if !ok { - logrus.Infof("value was not an RSA public key") + logrus.Debugf("value was not an RSA public key") return ErrInvalid } if rsaPub.N.BitLen() < minRSAKeySizeBit { - logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) + logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)} } if len(sig) < minRSAKeySizeByte { - logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) + logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) return ErrInvalid } opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256} if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil { - logrus.Infof("failed RSAPSS verification: %s", err) + logrus.Debugf("failed RSAPSS verification: %s", err) return ErrInvalid } return nil @@ -117,12 +117,12 @@ func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) { case data.RSAx509Key: pemCert, _ := pem.Decode([]byte(key.Public())) if pemCert == nil { - logrus.Infof("failed to decode PEM-encoded x509 certificate") + logrus.Debugf("failed to decode PEM-encoded x509 certificate") return nil, ErrInvalid } cert, err := x509.ParseCertificate(pemCert.Bytes) if err != nil { - logrus.Infof("failed to parse x509 certificate: %s\n", err) + logrus.Debugf("failed to parse x509 certificate: %s\n", err) return nil, ErrInvalid } pubKey = cert.PublicKey @@ -130,12 +130,12 @@ func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) { var err error pubKey, err = x509.ParsePKIXPublicKey(key.Public()) if err != nil { - logrus.Infof("failed to parse public key: %s\n", err) + logrus.Debugf("failed to parse public key: %s\n", err) return nil, ErrInvalid } default: // only accept RSA keys - logrus.Infof("invalid key type for RSAPSS verifier: %s", algorithm) + logrus.Debugf("invalid key type for RSAPSS verifier: %s", algorithm) return nil, ErrInvalidKeyType{} } @@ -172,17 +172,17 @@ func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) rsaPub, ok := pubKey.(*rsa.PublicKey) if !ok { - logrus.Infof("value was not an RSA public key") + logrus.Debugf("value was not an RSA public key") return ErrInvalid } if rsaPub.N.BitLen() < minRSAKeySizeBit { - logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) + logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)} } if len(sig) < minRSAKeySizeByte { - logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) + logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) return ErrInvalid } @@ -207,13 +207,13 @@ func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) k, _ := pem.Decode([]byte(key.Public())) if k == nil { 
- logrus.Infof("failed to decode PEM-encoded x509 certificate") + logrus.Debugf("failed to decode PEM-encoded x509 certificate") return ErrInvalid } pub, err := x509.ParsePKIXPublicKey(k.Bytes) if err != nil { - logrus.Infof("failed to parse public key: %s\n", err) + logrus.Debugf("failed to parse public key: %s\n", err) return ErrInvalid } @@ -232,13 +232,13 @@ func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error case data.ECDSAx509Key: pemCert, _ := pem.Decode([]byte(key.Public())) if pemCert == nil { - logrus.Infof("failed to decode PEM-encoded x509 certificate for keyID: %s", key.ID()) + logrus.Debugf("failed to decode PEM-encoded x509 certificate for keyID: %s", key.ID()) logrus.Debugf("certificate bytes: %s", string(key.Public())) return ErrInvalid } cert, err := x509.ParseCertificate(pemCert.Bytes) if err != nil { - logrus.Infof("failed to parse x509 certificate: %s\n", err) + logrus.Debugf("failed to parse x509 certificate: %s\n", err) return ErrInvalid } pubKey = cert.PublicKey @@ -246,25 +246,25 @@ func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error var err error pubKey, err = x509.ParsePKIXPublicKey(key.Public()) if err != nil { - logrus.Infof("Failed to parse private key for keyID: %s, %s\n", key.ID(), err) + logrus.Debugf("Failed to parse private key for keyID: %s, %s\n", key.ID(), err) return ErrInvalid } default: // only accept ECDSA keys. - logrus.Infof("invalid key type for ECDSA verifier: %s", algorithm) + logrus.Debugf("invalid key type for ECDSA verifier: %s", algorithm) return ErrInvalidKeyType{} } ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey) if !ok { - logrus.Infof("value isn't an ECDSA public key") + logrus.Debugf("value isn't an ECDSA public key") return ErrInvalid } sigLength := len(sig) expectedOctetLength := 2 * ((ecdsaPubKey.Params().BitSize + 7) >> 3) if sigLength != expectedOctetLength { - logrus.Infof("signature had an unexpected length") + logrus.Debugf("signature had an unexpected length") return ErrInvalid } @@ -275,7 +275,7 @@ func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error digest := sha256.Sum256(msg) if !ecdsa.Verify(ecdsaPubKey, digest[:], r, s) { - logrus.Infof("failed ECDSA signature validation") + logrus.Debugf("failed ECDSA signature validation") return ErrInvalid } diff --git a/vendor/src/github.com/docker/notary/tuf/signed/verify.go b/vendor/src/github.com/docker/notary/tuf/signed/verify.go index 3c1646f81f..9548e4e53d 100644 --- a/vendor/src/github.com/docker/notary/tuf/signed/verify.go +++ b/vendor/src/github.com/docker/notary/tuf/signed/verify.go @@ -6,9 +6,9 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/go/canonical/json" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" - "github.com/jfrazelle/go/canonical/json" ) // Various basic signing errors diff --git a/vendor/src/github.com/docker/notary/tuf/store/filestore.go b/vendor/src/github.com/docker/notary/tuf/store/filestore.go index 6ce39ed3ec..52e7c8f289 100644 --- a/vendor/src/github.com/docker/notary/tuf/store/filestore.go +++ b/vendor/src/github.com/docker/notary/tuf/store/filestore.go @@ -39,11 +39,14 @@ type FilesystemStore struct { targetsDir string } +func (f *FilesystemStore) getPath(name string) string { + fileName := fmt.Sprintf("%s.%s", name, f.metaExtension) + return filepath.Join(f.metaDir, fileName) +} + // GetMeta returns the meta for the given name (a role) func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) { - fileName := 
fmt.Sprintf("%s.%s", name, f.metaExtension) - path := filepath.Join(f.metaDir, fileName) - meta, err := ioutil.ReadFile(path) + meta, err := ioutil.ReadFile(f.getPath(name)) if err != nil { if os.IsNotExist(err) { err = ErrMetaNotFound{Resource: name} @@ -66,21 +69,31 @@ func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error { // SetMeta sets the meta for a single role func (f *FilesystemStore) SetMeta(name string, meta []byte) error { - fileName := fmt.Sprintf("%s.%s", name, f.metaExtension) - path := filepath.Join(f.metaDir, fileName) + fp := f.getPath(name) // Ensures the parent directories of the file we are about to write exist - err := os.MkdirAll(filepath.Dir(path), 0700) + err := os.MkdirAll(filepath.Dir(fp), 0700) if err != nil { return err } // if something already exists, just delete it and re-write it - os.RemoveAll(path) + os.RemoveAll(fp) // Write the file to disk - if err = ioutil.WriteFile(path, meta, 0600); err != nil { + if err = ioutil.WriteFile(fp, meta, 0600); err != nil { return err } return nil } + +// RemoveAll clears the existing filestore by removing its base directory +func (f *FilesystemStore) RemoveAll() error { + return os.RemoveAll(f.baseDir) +} + +// RemoveMeta removes the metadata for a single role - if the metadata doesn't +// exist, no error is returned +func (f *FilesystemStore) RemoveMeta(name string) error { + return os.RemoveAll(f.getPath(name)) // RemoveAll succeeds if path doesn't exist +} diff --git a/vendor/src/github.com/docker/notary/tuf/store/httpstore.go b/vendor/src/github.com/docker/notary/tuf/store/httpstore.go index ef69a611df..7444a311b9 100644 --- a/vendor/src/github.com/docker/notary/tuf/store/httpstore.go +++ b/vendor/src/github.com/docker/notary/tuf/store/httpstore.go @@ -85,6 +85,9 @@ func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtensio if !base.IsAbs() { return nil, errors.New("HTTPStore requires an absolute baseURL") } + if roundTrip == nil { + return &OfflineStore{}, nil + } return &HTTPStore{ baseURL: *base, metaPrefix: metaPrefix, @@ -182,6 +185,12 @@ func (s HTTPStore) SetMeta(name string, blob []byte) error { return translateStatusToError(resp, "POST "+name) } +// RemoveMeta always fails, because we should never be able to delete metadata +// remotely +func (s HTTPStore) RemoveMeta(name string) error { + return ErrInvalidOperation{msg: "cannot delete metadata"} +} + // NewMultiPartMetaRequest builds a request with the provided metadata updates // in multipart form func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) { @@ -227,6 +236,11 @@ func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error { return translateStatusToError(resp, "POST metadata endpoint") } +// RemoveAll in the interface is not supported, admins should use the DeleteHandler endpoint directly to delete remote data for a GUN +func (s HTTPStore) RemoveAll() error { + return errors.New("remove all functionality not supported for HTTPStore") +} + func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) { var filename string if name != "" { diff --git a/vendor/src/github.com/docker/notary/tuf/store/interfaces.go b/vendor/src/github.com/docker/notary/tuf/store/interfaces.go index 13bffccbdb..6d73da8a96 100644 --- a/vendor/src/github.com/docker/notary/tuf/store/interfaces.go +++ b/vendor/src/github.com/docker/notary/tuf/store/interfaces.go @@ -14,6 +14,8 @@ type MetadataStore interface { GetMeta(name string, size int64) ([]byte, error) SetMeta(name string, blob []byte) error 
SetMultiMeta(map[string][]byte) error
+ RemoveAll() error
+ RemoveMeta(name string) error
}
// PublicKeyStore must be implemented by a key service
diff --git a/vendor/src/github.com/docker/notary/tuf/store/memorystore.go b/vendor/src/github.com/docker/notary/tuf/store/memorystore.go
index 5d3e44beb3..6072a8c446 100644
--- a/vendor/src/github.com/docker/notary/tuf/store/memorystore.go
+++ b/vendor/src/github.com/docker/notary/tuf/store/memorystore.go
@@ -54,6 +54,13 @@ func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error {
return nil
}
+// RemoveMeta removes the metadata for a single role - if the metadata doesn't
+// exist, no error is returned
+func (m *memoryStore) RemoveMeta(name string) error {
+ delete(m.meta, name)
+ return nil
+}
+
func (m *memoryStore) GetTarget(path string) (io.ReadCloser, error) {
return &utils.NoopCloser{Reader: bytes.NewReader(m.files[path])}, nil
}
@@ -95,3 +102,11 @@ func (m *memoryStore) Commit(map[string][]byte, bool, map[string]data.Hashes) er
func (m *memoryStore) GetKey(role string) ([]byte, error) {
return nil, fmt.Errorf("GetKey is not implemented for the memoryStore")
}
+
+// RemoveAll clears the existing memory store by resetting it to new, empty maps
+func (m *memoryStore) RemoveAll() error {
+ m.meta = make(map[string][]byte)
+ m.files = make(map[string][]byte)
+ m.keys = make(map[string][]data.PrivateKey)
+ return nil
+}
diff --git a/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go b/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
index d32e113c0a..b0f057b2b8 100644
--- a/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
+++ b/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
@@ -14,30 +14,40 @@ func (e ErrOffline) Error() string {
var err = ErrOffline{}
// OfflineStore is to be used as a placeholder for a nil store. It simply
-// return ErrOffline
+// returns ErrOffline for every operation
type OfflineStore struct{}
-// GetMeta return ErrOffline
+// GetMeta returns ErrOffline
func (es OfflineStore) GetMeta(name string, size int64) ([]byte, error) {
return nil, err
}
-// SetMeta return ErrOffline
+// SetMeta returns ErrOffline
func (es OfflineStore) SetMeta(name string, blob []byte) error {
return err
}
-// SetMultiMeta return ErrOffline
+// SetMultiMeta returns ErrOffline
func (es OfflineStore) SetMultiMeta(map[string][]byte) error {
return err
}
-// GetKey return ErrOffline
+// RemoveMeta returns ErrOffline
+func (es OfflineStore) RemoveMeta(name string) error {
+ return err
+}
+
+// GetKey returns ErrOffline
func (es OfflineStore) GetKey(role string) ([]byte, error) {
return nil, err
}
-// GetTarget return ErrOffline
+// GetTarget returns ErrOffline
func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) {
return nil, err
}
+
+// RemoveAll returns ErrOffline
+func (es OfflineStore) RemoveAll() error {
+ return err
+}
diff --git a/vendor/src/github.com/docker/notary/tuf/tuf.go b/vendor/src/github.com/docker/notary/tuf/tuf.go
index 83e49467fb..96ab7da1d6 100644
--- a/vendor/src/github.com/docker/notary/tuf/tuf.go
+++ b/vendor/src/github.com/docker/notary/tuf/tuf.go
@@ -173,6 +173,11 @@ func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error {
return nil
}
+// GetAllLoadedRoles returns a list of all role entries loaded in this TUF repo; the list may be empty
+func (tr *Repo) GetAllLoadedRoles() []*data.Role {
+ return tr.keysDB.GetAllRoles()
+}
+
// GetDelegation finds the role entry representing the provided
// role name or ErrInvalidRole
func (tr *Repo) GetDelegation(role string) (*data.Role, error) {
diff --git a/vendor/src/github.com/miekg/dns/server.go b/vendor/src/github.com/miekg/dns/server.go
index b2888f38d3..bb0d074a75 100644
--- a/vendor/src/github.com/miekg/dns/server.go
+++ b/vendor/src/github.com/miekg/dns/server.go
@@ -535,6 +535,9 @@ Redo:
h.ServeDNS(w, req) // Writes back to the client
Exit:
+ if w.tcp == nil {
+ return
+ }
// TODO(miek): make this number configurable?
if q > maxTCPQueries { // close socket after this many queries
w.Close()
diff --git a/vendor/src/github.com/miekg/dns/udp_linux.go b/vendor/src/github.com/miekg/dns/udp_linux.go
index 7a107857e1..c62d21881b 100644
--- a/vendor/src/github.com/miekg/dns/udp_linux.go
+++ b/vendor/src/github.com/miekg/dns/udp_linux.go
@@ -24,6 +24,12 @@ func setUDPSocketOptions4(conn *net.UDPConn) error {
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
return err
}
+ // Calling File() above results in the connection becoming blocking; we must fix that.
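+ // (The fd returned by File() is a dup(2) of the socket's descriptor and
+ // shares its file status flags, so setting O_NONBLOCK on it below should
+ // also restore non-blocking mode on the original connection.)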
+ // See https://github.com/miekg/dns/issues/279
+ err = syscall.SetNonblock(int(file.Fd()), true)
+ if err != nil {
+ return err
+ }
return nil
}
@@ -36,6 +42,10 @@ func setUDPSocketOptions6(conn *net.UDPConn) error {
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
return err
}
+ err = syscall.SetNonblock(int(file.Fd()), true)
+ if err != nil {
+ return err
+ }
return nil
}
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
index 0c4d207ee7..7d2da2dc07 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go
@@ -230,12 +230,39 @@ func (m *Manager) GetPids() ([]int, error) {
return cgroups.GetPids(dir)
}
+// pathClean makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always lexically resolve to
+// a subdirectory of the prefixed path. This is all done lexically, so paths
+// that include symlinks won't be safe as a result of using pathClean.
+func pathClean(path string) string {
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../..//some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
root, err := getCgroupRoot()
if err != nil {
return nil, err
}
+ // Clean the parent slice path.
+ c.Parent = pathClean(c.Parent)
+
return &cgroupData{
root: root,
parent: c.Parent,
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
index b7632108f8..8daacfc609 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go
@@ -4,6 +4,7 @@ package fs
import (
"bytes"
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -95,6 +96,10 @@ func (s *CpusetGroup) ensureParent(current, root string) error {
if filepath.Clean(parent) == root {
return nil
}
+ // Avoid infinite recursion.
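+ // filepath.Dir is a fixpoint at the top of the tree ("/" and "." both map
+ // to themselves), so a parent that never reaches the cgroup root would
+ // otherwise recurse forever; parent == current detects exactly that case.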
+ if parent == current {
+ return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
+ }
if err := s.ensureParent(parent, root); err != nil {
return err
}
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
index ef781324fb..b9a06de26a 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
@@ -20,7 +20,7 @@ type Cgroup struct {
ScopePrefix string `json:"scope_prefix"`
// Resources contains various cgroups settings to apply
- Resources *Resources `json:"resources"`
+ *Resources
}
type Resources struct {
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/src/github.com/opencontainers/runc/libcontainer/container_linux.go
index de98e97ca2..916511ebf5 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/container_linux.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/container_linux.go
@@ -21,6 +21,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/criurpc"
+ "github.com/opencontainers/runc/libcontainer/utils"
"github.com/vishvananda/netlink/nl"
)
@@ -968,7 +969,7 @@ func (c *linuxContainer) updateState(process parentProcess) error {
}
defer f.Close()
os.Remove(filepath.Join(c.root, "checkpoint"))
- return json.NewEncoder(f).Encode(state)
+ return utils.WriteJSON(f, state)
}
func (c *linuxContainer) currentStatus() (Status, error) {
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/src/github.com/opencontainers/runc/libcontainer/factory_linux.go
index 70513f7b67..d03ce8642e 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/factory_linux.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/factory_linux.go
@@ -5,7 +5,6 @@ package libcontainer
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -19,6 +18,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/configs/validate"
+ "github.com/opencontainers/runc/libcontainer/utils"
)
const (
@@ -226,10 +226,7 @@ func (l *LinuxFactory) StartInitialization() (err error) {
// if we have an error during the initialization of the container's init then send it back to the
// parent process in the form of an initError.
if err != nil {
- // ensure that any data sent from the parent is consumed so it doesn't
- // receive ECONNRESET when the child writes to the pipe.
- ioutil.ReadAll(pipe)
- if err := json.NewEncoder(pipe).Encode(newSystemError(err)); err != nil {
+ if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil {
panic(err)
}
}
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/src/github.com/opencontainers/runc/libcontainer/process_linux.go
index 114c71b3b5..ee647369d3 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/process_linux.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/process_linux.go
@@ -15,6 +15,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/utils"
)
type parentProcess interface {
@@ -83,7 +84,7 @@ func (p *setnsProcess) start() (err error) {
return newSystemError(err)
}
}
- if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil {
+ if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
return newSystemError(err)
}
if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
@@ -270,7 +271,7 @@ func (p *initProcess) startTime() (string, error) {
func (p *initProcess) sendConfig() error {
// send the state to the container's init process then shutdown writes for the parent
- if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil {
+ if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
return err
}
// shutdown writes for the parent side of the pipe
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
index aff1b63a53..623e227748 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
@@ -3,8 +3,11 @@ package seccomp
import (
+ "bufio"
"fmt"
"log"
+ "os"
+ "strings"
"syscall"
"github.com/opencontainers/runc/libcontainer/configs"
@@ -17,6 +20,9 @@ var (
actKill = libseccomp.ActKill
actTrace = libseccomp.ActTrace.SetReturnCode(int16(syscall.EPERM))
actErrno = libseccomp.ActErrno.SetReturnCode(int16(syscall.EPERM))
+
+ // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER.
+ SeccompModeFilter = uintptr(2)
)
// Filters given syscalls in a container, preventing them from being used
@@ -73,6 +79,24 @@ func InitSeccomp(config *configs.Seccomp) error {
return nil
}
+// IsEnabled returns whether the kernel has been configured to support seccomp.
+func IsEnabled() bool {
+ // Try to read from /proc/self/status for kernels > 3.8
+ s, err := parseStatusFile("/proc/self/status")
+ if err != nil {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
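+ // Probing with an invalid (zero) filter pointer: kernels built with
+ // CONFIG_SECCOMP_FILTER should reject it with EFAULT, while kernels
+ // without filter support return EINVAL, so any error other than EINVAL
+ // means filter mode is available.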
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
+ return true
+ }
+ }
+ return false
+ }
+ _, ok := s["Seccomp"]
+ return ok
+}
+
// Convert Libcontainer Action to Libseccomp ScmpAction
func getAction(act configs.Action) (libseccomp.ScmpAction, error) {
switch act {
@@ -178,3 +202,30 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
return nil
}
+
+func parseStatusFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ status := make(map[string]string)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ parts := strings.Split(text, ":")
+
+ if len(parts) <= 1 {
+ continue
+ }
+
+ status[parts[0]] = parts[1]
+ }
+ return status, nil
+}
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go b/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
index 87d3abbc64..888483e768 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
@@ -17,3 +17,8 @@ func InitSeccomp(config *configs.Seccomp) error {
}
return nil
}
+
+// IsEnabled returns false, because it is not supported.
+func IsEnabled() bool {
+ return false
+}
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/src/github.com/opencontainers/runc/libcontainer/utils/utils.go
index 86cf1d65e7..1378006b0a 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/utils/utils.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -3,6 +3,7 @@ package utils
import (
"crypto/rand"
"encoding/hex"
+ "encoding/json"
"io"
"path/filepath"
"syscall"
@@ -36,10 +37,20 @@ func ResolveRootfs(uncleanRootfs string) (string, error) {
}
// ExitStatus returns the correct exit status for a process based on if it
-// was signaled or exited cleanly.
+// was signaled or exited cleanly
func ExitStatus(status syscall.WaitStatus) int {
if status.Signaled() {
return exitSignalOffset + int(status.Signal())
}
return status.ExitStatus()
}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
diff --git a/volume/drivers/adapter.go b/volume/drivers/adapter.go
index f29cf75748..98e4c11ddf 100644
--- a/volume/drivers/adapter.go
+++ b/volume/drivers/adapter.go
@@ -1,6 +1,9 @@ package volumedrivers
-import "github.com/docker/docker/volume"
+import (
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/docker/volume"
+)
type volumeDriverAdapter struct {
name string
@@ -47,7 +50,11 @@ func (a *volumeDriverAdapter) List() ([]volume.Volume, error) {
func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) {
v, err := a.proxy.Get(name)
if err != nil {
- return nil, err
+ // TODO: remove this hack. Allows back compat with volume drivers that don't support this call
+ if !plugins.IsNotFound(err) {
+ return nil, err
+ }
+ return a.Create(name, nil)
}
return &volumeAdapter{
diff --git a/volume/drivers/extpoint.go b/volume/drivers/extpoint.go
index 6f894e0ae6..dd45d1365a 100644
--- a/volume/drivers/extpoint.go
+++ b/volume/drivers/extpoint.go
@@ -6,6 +6,7 @@ import (
"fmt"
"sync"
+ "github.com/docker/docker/pkg/locker"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/docker/volume"
)
@@ -13,7 +14,7 @@ import (
// currently created by hand. generation tool would generate this like:
// $ extpoint-gen Driver > volume/extpoint.go
-var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}
+var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver), driverLock: &locker.Locker{}}
const extName = "VolumeDriver"
@@ -49,16 +50,19 @@ type volumeDriver interface {
type driverExtpoint struct {
extensions map[string]volume.Driver
sync.Mutex
+ driverLock *locker.Locker
}
// Register associates the given driver to the given name, checking if
// the name is already associated
func Register(extension volume.Driver, name string) bool {
- drivers.Lock()
- defer drivers.Unlock()
if name == "" {
return false
}
+
+ drivers.Lock()
+ defer drivers.Unlock()
+
_, exists := drivers.extensions[name]
if exists {
return false
@@ -71,6 +75,7 @@ func Unregister(name string) bool {
drivers.Lock()
defer drivers.Unlock()
+
_, exists := drivers.extensions[name]
if !exists {
return false
@@ -83,12 +88,16 @@ func Unregister(name string) bool {
// driver with the given name has not been registered it checks if
// there is a VolumeDriver plugin available with the given name.
func Lookup(name string) (volume.Driver, error) {
+ drivers.driverLock.Lock(name)
+ defer drivers.driverLock.Unlock(name)
+
drivers.Lock()
ext, ok := drivers.extensions[name]
drivers.Unlock()
if ok {
return ext, nil
}
+
pl, err := plugins.Get(name, extName)
if err != nil {
return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
}
@@ -118,9 +127,11 @@ func GetDriver(name string) (volume.Driver, error) {
// If no driver is registered, empty string list will be returned.
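+// The list is built under drivers.Lock so that a concurrent Register or
+// Unregister cannot race with the map iteration below.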
func GetDriverList() []string {
var driverList []string
+ drivers.Lock()
for driverName := range drivers.extensions {
driverList = append(driverList, driverName)
}
+ drivers.Unlock()
return driverList
}
@@ -144,6 +155,7 @@ func GetAllDrivers() ([]volume.Driver, error) {
if ok {
continue
}
+
ext = NewVolumeDriver(p.Name, p.Client)
drivers.extensions[p.Name] = ext
ds = append(ds, ext)
diff --git a/volume/store/store.go b/volume/store/store.go
index 9811fd904a..b871902fd8 100644
--- a/volume/store/store.go
+++ b/volume/store/store.go
@@ -14,23 +14,23 @@ import (
func New() *VolumeStore {
return &VolumeStore{
locks: &locker.Locker{},
- names: make(map[string]string),
+ names: make(map[string]volume.Volume),
refs: make(map[string][]string),
}
}
-func (s *VolumeStore) getNamed(name string) (string, bool) {
+func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) {
s.globalLock.Lock()
- driverName, exists := s.names[name]
+ v, exists := s.names[name]
s.globalLock.Unlock()
- return driverName, exists
+ return v, exists
}
-func (s *VolumeStore) setNamed(name, driver, ref string) {
+func (s *VolumeStore) setNamed(v volume.Volume, ref string) {
s.globalLock.Lock()
- s.names[name] = driver
+ s.names[v.Name()] = v
if len(ref) > 0 {
- s.refs[name] = append(s.refs[name], ref)
+ s.refs[v.Name()] = append(s.refs[v.Name()], ref)
}
s.globalLock.Unlock()
}
@@ -48,7 +48,7 @@ type VolumeStore struct {
globalLock sync.Mutex
// names stores the volume name -> driver name relationship.
// This is used for making lookups faster so we don't have to probe all drivers
- names map[string]string
+ names map[string]volume.Volume
// refs stores the volume name and the list of things referencing it
refs map[string][]string
}
@@ -67,12 +67,12 @@ func (s *VolumeStore) List() ([]volume.Volume, []string, error) {
name := normaliseVolumeName(v.Name())
s.locks.Lock(name)
- driverName, exists := s.getNamed(name)
+ storedV, exists := s.getNamed(name)
if !exists {
- s.setNamed(name, v.DriverName(), "")
+ s.setNamed(v, "")
}
- if exists && driverName != v.DriverName() {
- logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), driverName, v.DriverName())
+ if exists && storedV.DriverName() != v.DriverName() {
+ logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName())
s.locks.Unlock(v.Name())
continue
}
@@ -95,8 +95,9 @@ func (s *VolumeStore) list() ([]volume.Volume, []string, error) {
)
type vols struct {
- vols []volume.Volume
- err error
+ vols []volume.Volume
+ err error
+ driverName string
}
chVols := make(chan vols, len(drivers))
@@ -104,23 +105,32 @@ func (s *VolumeStore) list() ([]volume.Volume, []string, error) {
go func(d volume.Driver) {
vs, err := d.List()
if err != nil {
- chVols <- vols{err: &OpErr{Err: err, Name: d.Name(), Op: "list"}}
+ chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}}
return
}
chVols <- vols{vols: vs}
}(vd)
}
+ badDrivers := make(map[string]struct{})
for i := 0; i < len(drivers); i++ {
vs := <-chVols
if vs.err != nil {
warnings = append(warnings, vs.err.Error())
+ badDrivers[vs.driverName] = struct{}{}
logrus.Warn(vs.err)
- continue
}
ls = append(ls, vs.vols...)
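+ // When a driver failed to list, vs.vols is nil and this append is a
+ // no-op; the entries cached in s.names for that driver are re-added
+ // after the loop so known volumes don't silently drop out of the listing.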
}
+
+ if len(badDrivers) > 0 {
+ for _, v := range s.names {
+ if _, exists := badDrivers[v.DriverName()]; exists {
+ ls = append(ls, v)
+ }
+ }
+ }
return ls, warnings, nil
}
@@ -137,7 +147,7 @@ func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts map[strin
return nil, &OpErr{Err: err, Name: name, Op: "create"}
}
- s.setNamed(name, v.DriverName(), ref)
+ s.setNamed(v, ref)
return v, nil
}
@@ -151,7 +161,7 @@
if err != nil {
return nil, &OpErr{Err: err, Name: name, Op: "create"}
}
- s.setNamed(name, v.DriverName(), "")
+ s.setNamed(v, "")
return v, nil
}
@@ -169,12 +179,11 @@ func (s *VolumeStore) create(name, driverName string, opts map[string]string) (v
return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"}
}
- vdName, exists := s.getNamed(name)
- if exists {
- if vdName != driverName && driverName != "" && driverName != volume.DefaultDriverName {
+ if v, exists := s.getNamed(name); exists {
+ if v.DriverName() != driverName && driverName != "" && driverName != volume.DefaultDriverName {
return nil, errNameConflict
}
- driverName = vdName
+ return v, nil
}
logrus.Debugf("Registering new volume reference: driver %s, name %s", driverName, name)
@@ -207,7 +216,7 @@ func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, e
return nil, &OpErr{Err: err, Name: name, Op: "get"}
}
- s.setNamed(name, v.DriverName(), ref)
+ s.setNamed(v, ref)
return v, nil
}
@@ -221,6 +230,7 @@
if err != nil {
return nil, &OpErr{Err: err, Name: name, Op: "get"}
}
+ s.setNamed(v, "")
return v, nil
}
@@ -229,8 +239,8 @@ func (s *VolumeStore) Get(name string) (volume.Volume, error) {
// it is expected that callers of this function hold any necessary locks
func (s *VolumeStore) getVolume(name string) (volume.Volume, error) {
logrus.Debugf("Getting volume reference for name: %s", name)
- if vdName, exists := s.names[name]; exists {
- vd, err := volumedrivers.GetDriver(vdName)
+ if v, exists := s.names[name]; exists {
+ vd, err := volumedrivers.GetDriver(v.DriverName())
if err != nil {
return nil, err
}
@@ -283,6 +293,7 @@ func (s *VolumeStore) Dereference(v volume.Volume, ref string) {
defer s.locks.Unlock(v.Name())
s.globalLock.Lock()
+ defer s.globalLock.Unlock()
refs, exists := s.refs[v.Name()]
if !exists {
return
@@ -293,7 +304,6 @@ func (s *VolumeStore) Dereference(v volume.Volume, ref string) {
s.refs[v.Name()] = append(s.refs[v.Name()][:i], s.refs[v.Name()][i+1:]...)
}
}
- s.globalLock.Unlock()
}
// Refs gets the current list of refs for the given volume
@@ -326,12 +336,18 @@ func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) {
return ls, nil
}
-// FilterByUsed returns the available volumes filtered by if they are not in use
-func (s *VolumeStore) FilterByUsed(vols []volume.Volume) []volume.Volume {
+// FilterByUsed returns the available volumes filtered by whether they are in use.
+// `used=true` returns only volumes that are being used, while `used=false` returns
+// only volumes that are not being used.
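+// For example, a dangling-volume listing (see TestFilterByUsed below) is
+// simply FilterByUsed(vols, false); FilterByUsed(vols, true) keeps only
+// volumes with at least one entry in s.refs.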
+func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume {
return s.filter(vols, func(v volume.Volume) bool {
s.locks.Lock(v.Name())
- defer s.locks.Unlock(v.Name())
- return len(s.refs[v.Name()]) == 0
+ l := len(s.refs[v.Name()])
+ s.locks.Unlock(v.Name())
+ if (used && l > 0) || (!used && l == 0) {
+ return true
+ }
+ return false
})
}
diff --git a/volume/store/store_test.go b/volume/store/store_test.go
index 652feaa594..83d49821be 100644
--- a/volume/store/store_test.go
+++ b/volume/store/store_test.go
@@ -123,3 +123,37 @@ func TestFilterByDriver(t *testing.T) {
t.Fatalf("Expected 1 volume, got %v, %v", len(l), l)
}
}
+
+func TestFilterByUsed(t *testing.T) {
+ volumedrivers.Register(vt.NewFakeDriver("fake"), "fake")
+ volumedrivers.Register(vt.NewFakeDriver("noop"), "noop")
+
+ s := New()
+ if _, err := s.CreateWithRef("fake1", "fake", "volReference", nil); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := s.Create("fake2", "fake", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ vols, _, err := s.List()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dangling := s.FilterByUsed(vols, false)
+ if len(dangling) != 1 {
+ t.Fatalf("expected 1 dangling volume, got %v", len(dangling))
+ }
+ if dangling[0].Name() != "fake2" {
+ t.Fatalf("expected dangling volume fake2, got %s", dangling[0].Name())
+ }
+
+ used := s.FilterByUsed(vols, true)
+ if len(used) != 1 {
+ t.Fatalf("expected 1 used volume, got %v", len(used))
+ }
+ if used[0].Name() != "fake1" {
+ t.Fatalf("expected used volume fake1, got %s", used[0].Name())
+ }
+}
diff --git a/volume/volume.go b/volume/volume.go
index 0044430a64..d270b156f8 100644
--- a/volume/volume.go
+++ b/volume/volume.go
@@ -59,6 +59,7 @@ type MountPoint struct {
// Note Propagation is not used on Windows
Propagation string // Mount propagation string
+ Named bool // whether the mountpoint was specified by name
}
// Setup sets up a mount point by either mounting the volume if it is