
Merge pull request #29229 from vieux/1.13.0-rc4-cherrypicks

1.13.0-rc4 cherry-picks: part1
Victor Vieux 8 years ago
Parent
Commit
1b521547f0
100 changed files with 1081 additions and 520 deletions
  1. CHANGELOG.md (+20 -3)
  2. Dockerfile.windows (+14 -9)
  3. api/server/router/build/build_routes.go (+15 -1)
  4. api/server/router/container/backend.go (+2 -1)
  5. api/server/router/container/container_routes.go (+3 -7)
  6. api/server/router/container/exec.go (+8 -2)
  7. api/server/router/image/backend.go (+1 -1)
  8. api/server/router/image/image_routes.go (+3 -7)
  9. api/server/router/network/backend.go (+2 -1)
  10. api/server/router/network/network_routes.go (+1 -10)
  11. api/server/router/plugin/backend.go (+2 -1)
  12. api/server/router/plugin/plugin.go (+2 -1)
  13. api/server/router/plugin/plugin_routes.go (+35 -22)
  14. api/server/router/volume/backend.go (+2 -1)
  15. api/server/router/volume/volume_routes.go (+2 -11)
  16. api/swagger.yaml (+120 -24)
  17. api/types/client.go (+7 -4)
  18. api/types/plugin.go (+4 -0)
  19. api/types/swarm/common.go (+6 -0)
  20. api/types/swarm/network.go (+0 -6)
  21. api/types/types.go (+0 -21)
  22. builder/dockerfile/builder.go (+1 -1)
  23. builder/dockerfile/dispatchers.go (+10 -7)
  24. builder/dockerfile/dispatchers_test.go (+2 -2)
  25. builder/dockerfile/evaluator.go (+1 -1)
  26. cli/command/checkpoint/create.go (+3 -0)
  27. cli/command/cli.go (+45 -4)
  28. cli/command/container/prune.go (+2 -2)
  29. cli/command/image/build.go (+5 -5)
  30. cli/command/image/prune.go (+5 -4)
  31. cli/command/network/prune.go (+2 -2)
  32. cli/command/registry.go (+2 -2)
  33. cli/command/registry/login.go (+1 -1)
  34. cli/command/registry/logout.go (+1 -1)
  35. cli/command/secret/utils.go (+46 -18)
  36. cli/command/stack/deploy.go (+9 -0)
  37. cli/command/task/print.go (+23 -10)
  38. cli/command/utils.go (+6 -0)
  39. cli/command/volume/prune.go (+2 -2)
  40. cliconfig/config_test.go (+83 -11)
  41. cliconfig/configfile/file.go (+3 -1)
  42. cliconfig/credentials/native_store.go (+2 -2)
  43. client/container_copy_test.go (+4 -4)
  44. client/container_prune.go (+8 -2)
  45. client/image_build_test.go (+7 -4)
  46. client/image_prune.go (+8 -2)
  47. client/image_search_test.go (+4 -4)
  48. client/interface.go (+4 -4)
  49. client/network_prune.go (+12 -2)
  50. client/plugin_inspect.go (+1 -1)
  51. client/plugin_install.go (+22 -11)
  52. client/plugin_push_test.go (+1 -1)
  53. client/utils.go (+19 -1)
  54. client/volume_prune.go (+8 -2)
  55. contrib/check-config.sh (+2 -1)
  56. contrib/completion/bash/docker (+135 -4)
  57. contrib/completion/zsh/_docker (+38 -19)
  58. daemon/apparmor_default.go (+17 -11)
  59. daemon/apparmor_default_unsupported.go (+2 -1)
  60. daemon/cluster/executor/container/container.go (+2 -1)
  61. daemon/container.go (+12 -2)
  62. daemon/container_operations.go (+5 -3)
  63. daemon/create.go (+1 -1)
  64. daemon/daemon.go (+4 -1)
  65. daemon/graphdriver/plugin.go (+3 -3)
  66. daemon/graphdriver/proxy.go (+15 -0)
  67. daemon/oci_linux.go (+17 -2)
  68. daemon/prune.go (+21 -10)
  69. daemon/start.go (+3 -2)
  70. distribution/errors.go (+8 -4)
  71. distribution/pull.go (+2 -2)
  72. distribution/pull_v2.go (+1 -2)
  73. distribution/registry.go (+12 -7)
  74. docs/api/v1.18.md (+2 -1)
  75. docs/api/v1.19.md (+2 -1)
  76. docs/api/v1.20.md (+2 -1)
  77. docs/api/v1.21.md (+2 -1)
  78. docs/api/v1.22.md (+2 -1)
  79. docs/api/v1.23.md (+2 -1)
  80. docs/api/v1.24.md (+9 -1)
  81. docs/deprecated.md (+32 -12)
  82. docs/extend/config.md (+8 -4)
  83. docs/extend/plugins_volume.md (+4 -0)
  84. docs/reference/commandline/build.md (+2 -3)
  85. docs/reference/commandline/network_ls.md (+1 -1)
  86. docs/reference/commandline/service_create.md (+4 -2)
  87. docs/reference/commandline/service_ps.md (+55 -33)
  88. docs/reference/commandline/stack_deploy.md (+0 -1)
  89. docs/reference/commandline/stack_ls.md (+1 -2)
  90. docs/reference/commandline/stack_ps.md (+1 -2)
  91. docs/reference/commandline/stack_rm.md (+2 -3)
  92. docs/reference/commandline/stack_services.md (+2 -3)
  93. docs/reference/commandline/swarm_join.md (+4 -4)
  94. docs/reference/commandline/swarm_update.md (+3 -3)
  95. docs/reference/run.md (+5 -5)
  96. hack/install.sh (+0 -95)
  97. hack/make.ps1 (+19 -12)
  98. hack/make.sh (+1 -5)
  99. integration-cli/daemon.go (+5 -0)
  100. integration-cli/docker_api_exec_test.go (+10 -0)

+ 20 - 3
CHANGELOG.md

@@ -7,6 +7,22 @@ be found.
 
 ## 1.13.0 (2017-01-10)
 
+**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental
+version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12
+_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command.
+
+If you have already upgraded to Docker 1.13 without uninstalling
+previously-installed plugins, you may see this message when the Docker daemon
+starts:
+
+    Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv
+
+To manually remove all plugins and resolve this problem, take the following steps:
+
+1. Remove plugins.json from: `/var/lib/docker/plugins/`.
+2. Restart Docker. Verify that the Docker daemon starts with no errors.
+3. Reinstall your plugins.
+
 ### Builder
 + Add capability to specify images used as a cache source on build. These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839)
 + (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641)
@@ -185,7 +201,7 @@ be found.
 
 ### Volume
 
-+ Add support for labels on volumes [#25628](https://github.com/docker/docker/pull/21567)
++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270)
 + Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628)
 * Add a `--force` flag in `docker volume rm` to forcefully purge the data of the volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436)
 * Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671)
@@ -207,6 +223,7 @@ be found.
 - Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466)
 - Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872)
 - Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533)
+- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437)
 
 ## 1.12.3 (2016-10-26)
 
@@ -595,7 +612,7 @@ installing docker, please make sure to update them accordingly.
 
 
 ### DEPRECATION
-* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed  
+* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed
   to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574)
 * Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log option in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620)
 * Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570)
@@ -766,7 +783,7 @@ installing docker, please make sure to update them accordingly.
 - Fix a panic that could occur when cleanup after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716))
 - Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692))
 - Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677))
-- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in ([#21666](https://github.com/docker/docker/pull/21666))  
+- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in ([#21666](https://github.com/docker/docker/pull/21666))
   Docker 1.9, but was decided to be too much of a backward-incompatible change, so it was decided to keep the feature.
 + It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383))
 + `docker inspect <image-id>` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370))

+ 14 - 9
Dockerfile.windows

@@ -62,14 +62,17 @@
 #    >>   cd C:\go\src\github.com\docker\docker 
 #
 #
-# 3. Build a docker image with the components required to build the docker binaries from source:
+# 3. Build a docker image with the components required to build the docker binaries from source
+#    by running one of the following:
 #
-#    >>   docker build -t nativebuildimage -f Dockerfile.windows .
+#    >>   docker build -t nativebuildimage -f Dockerfile.windows .          
+#    >>   docker build -t nativebuildimage -f Dockerfile.windows -m 2GB .    (if using Hyper-V containers)
 #
 #
-# 4. Build the docker executable binaries:
+# 4. Build the docker executable binaries by running one of the following:
 #
 #    >>   docker run --name binaries nativebuildimage hack\make.ps1 -Binary
+#    >>   docker run --name binaries -m 2GB nativebuildimage hack\make.ps1 -Binary    (if using Hyper-V containers)
 #
 #
 # 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination 
@@ -96,10 +99,11 @@
 
 
 #  The validation tests can either run in a container, or directly on the host. To run in a
-#  container, ensure you have created the nativebuildimage above. Then run the following
-#  from an (elevated) Windows PowerShell prompt:
+#  container, ensure you have created the nativebuildimage above. Then run one of the
+#  following from an (elevated) Windows PowerShell prompt:
 #
 #    >>   docker run --rm nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat
+#    >>   docker run --rm -m 2GB nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat    (if using Hyper-V containers)
 
 # To run the validation tests on the host, from the root of the repository, run the
 # following from a Windows PowerShell prompt (elevation is not required): (Note Go
@@ -110,20 +114,21 @@
 # -----------------------------------------------------------------------------------------
 
 
-#  To run unit tests, ensure you have created the nativebuildimage above. Then run the
-#  following from an (elevated) Windows PowerShell prompt:
+#  To run unit tests, ensure you have created the nativebuildimage above. Then run one of
+#  the following from an (elevated) Windows PowerShell prompt:
 #
 #    >>   docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+#    >>   docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit    (if using Hyper-V containers)
 
 
 # -----------------------------------------------------------------------------------------
 
 
 #  To run all tests and binary build, ensure you have created the nativebuildimage above. Then 
-# run the following from an (elevated) Windows PowerShell prompt:
+#  run one of the following from an (elevated) Windows PowerShell prompt:
 #
 #    >>   docker run nativebuildimage hack\make.ps1 -All
-
+#    >>   docker run -m 2GB nativebuildimage hack\make.ps1 -All    (if using Hyper-V containers)
 
 # -----------------------------------------------------------------------------------------
 

+ 15 - 1
api/server/router/build/build_routes.go

@@ -84,14 +84,28 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 		options.Ulimits = buildUlimits
 	}
 
-	var buildArgs = map[string]string{}
+	var buildArgs = map[string]*string{}
 	buildArgsJSON := r.FormValue("buildargs")
+
+	// Note that there are two ways a --build-arg might appear in the
+	// json of the query param:
+	//     "foo":"bar"
+	// and "foo":nil
+	// The first is the normal case, ie. --build-arg foo=bar
+	// or  --build-arg foo
+	// where foo's value was picked up from an env var.
+	// The second ("foo":nil) is where they put --build-arg foo
+	// but "foo" isn't set as an env var. In that case we can't just drop
+	// the fact they mentioned it, we need to pass that along to the builder
+	// so that it can print a warning about "foo" being unused if there is
+	// no "ARG foo" in the Dockerfile.
 	if buildArgsJSON != "" {
 		if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
 			return nil, err
 		}
 		options.BuildArgs = buildArgs
 	}
+
 	var labels = map[string]string{}
 	labelsJSON := r.FormValue("labels")
 	if labelsJSON != "" {

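A note on the `map[string]*string` switch above: a `nil` value records that the client mentioned `--build-arg foo` without being able to supply a value, so the builder can still warn about it. A minimal standalone sketch of that decoding (the sample JSON is invented, not taken from the PR):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical value of the "buildargs" query parameter: one arg with a
	// value, one arg the client mentioned but could not resolve (null).
	buildArgsJSON := `{"HTTP_PROXY":"http://proxy:3128","MISSING":null}`

	buildArgs := map[string]*string{}
	if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
		panic(err)
	}

	for name, value := range buildArgs {
		if value == nil {
			// The builder can warn that MISSING was passed but never used.
			fmt.Printf("%s: mentioned by the client, no value supplied\n", name)
			continue
		}
		fmt.Printf("%s=%s\n", name, *value)
	}
}
```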
+ 2 - 1
api/server/router/container/backend.go

@@ -9,6 +9,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 )
 
@@ -64,7 +65,7 @@ type attachBackend interface {
 
 // systemBackend includes functions to implement to provide system wide containers functionality
 type systemBackend interface {
-	ContainersPrune(config *types.ContainersPruneConfig) (*types.ContainersPruneReport, error)
+	ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error)
 }
 
 // Backend is all the methods that need to be implemented to provide container specific functionality.

+ 3 - 7
api/server/router/container/container_routes.go

@@ -541,16 +541,12 @@ func (s *containerRouter) postContainersPrune(ctx context.Context, w http.Respon
 		return err
 	}
 
-	if err := httputils.CheckForJSON(r); err != nil {
-		return err
-	}
-
-	var cfg types.ContainersPruneConfig
-	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	if err != nil {
 		return err
 	}
 
-	pruneReport, err := s.backend.ContainersPrune(&cfg)
+	pruneReport, err := s.backend.ContainersPrune(pruneFilters)
 	if err != nil {
 		return err
 	}

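The prune handlers now read their filters from a `filters` query parameter via `filters.FromParam` instead of a JSON body. A rough round-trip sketch with the `filters` package of this era; the filter key used here is only an example, and treat the helper names as assumptions:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Client side: build a filter set and encode it for the query string.
	args := filters.NewArgs()
	args.Add("label", "env=test") // illustrative key/value only

	encoded, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	fmt.Println("filters query value:", encoded) // JSON map[string][]string

	// Server side: roughly what postContainersPrune does with
	// r.Form.Get("filters").
	decoded, err := filters.FromParam(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println("label filter present:", decoded.Include("label"))
}
```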
+ 8 - 2
api/server/router/container/exec.go

@@ -92,11 +92,17 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
 		defer httputils.CloseStreams(inStream, outStream)
 
 		if _, ok := r.Header["Upgrade"]; ok {
-			fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
+			fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
 		} else {
-			fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+			fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
 		}
 
+		// copy headers that were removed as part of hijack
+		if err := w.Header().WriteSubset(outStream, nil); err != nil {
+			return err
+		}
+		fmt.Fprint(outStream, "\r\n")
+
 		stdin = inStream
 		stdout = outStream
 		if !execStartCheck.Tty {

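`Header.WriteSubset`, used in the hunk above, is a standard `net/http` method that writes headers in wire format; it lets the handler re-emit headers after the connection has been hijacked. A tiny illustration outside the daemon (the header values here are made up):

```go
package main

import (
	"net/http"
	"os"
)

func main() {
	// Headers that would normally be written by the HTTP layer, but must be
	// emitted by hand once the connection has been hijacked.
	h := http.Header{}
	h.Set("Content-Type", "application/vnd.docker.raw-stream")
	h.Set("Api-Version", "1.25") // example value only

	// Write every header (nil means "exclude nothing"), then the blank line
	// that terminates the header block.
	if err := h.WriteSubset(os.Stdout, nil); err != nil {
		panic(err)
	}
	os.Stdout.WriteString("\r\n")
}
```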
+ 1 - 1
api/server/router/image/backend.go

@@ -29,7 +29,7 @@ type imageBackend interface {
 	Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error)
 	LookupImage(name string) (*types.ImageInspect, error)
 	TagImage(imageName, repository, tag string) error
-	ImagesPrune(config *types.ImagesPruneConfig) (*types.ImagesPruneReport, error)
+	ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error)
 }
 
 type importExportBackend interface {

+ 3 - 7
api/server/router/image/image_routes.go

@@ -331,16 +331,12 @@ func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter
 		return err
 	}
 
-	if err := httputils.CheckForJSON(r); err != nil {
-		return err
-	}
-
-	var cfg types.ImagesPruneConfig
-	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	if err != nil {
 		return err
 	}
 
-	pruneReport, err := s.backend.ImagesPrune(&cfg)
+	pruneReport, err := s.backend.ImagesPrune(pruneFilters)
 	if err != nil {
 		return err
 	}

+ 2 - 1
api/server/router/network/backend.go

@@ -2,6 +2,7 @@ package network
 
 import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/libnetwork"
 	"github.com/docker/libnetwork"
 )
@@ -17,5 +18,5 @@ type Backend interface {
 	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
 	DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
 	DeleteNetwork(name string) error
-	NetworksPrune(config *types.NetworksPruneConfig) (*types.NetworksPruneReport, error)
+	NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error)
 }

+ 1 - 10
api/server/router/network/network_routes.go

@@ -300,16 +300,7 @@ func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWr
 		return err
 	}
 
-	if err := httputils.CheckForJSON(r); err != nil {
-		return err
-	}
-
-	var cfg types.NetworksPruneConfig
-	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
-		return err
-	}
-
-	pruneReport, err := n.backend.NetworksPrune(&cfg)
+	pruneReport, err := n.backend.NetworksPrune(filters.Args{})
 	if err != nil {
 		return err
 	}

+ 2 - 1
api/server/router/plugin/backend.go

@@ -16,7 +16,8 @@ type Backend interface {
 	Inspect(name string) (enginetypes.Plugin, error)
 	Remove(name string, config *enginetypes.PluginRmConfig) error
 	Set(name string, args []string) error
-	Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
+	Privileges(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
+	Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges) error
 	Push(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) error
 	CreateFromContext(ctx context.Context, tarCtx io.Reader, options *enginetypes.PluginCreateOptions) error
 }

+ 2 - 1
api/server/router/plugin/plugin.go

@@ -25,7 +25,8 @@ func (r *pluginRouter) Routes() []router.Route {
 func (r *pluginRouter) initRoutes() {
 	r.routes = []router.Route{
 		router.NewGetRoute("/plugins", r.listPlugins),
-		router.NewGetRoute("/plugins/{name:.*}", r.inspectPlugin),
+		router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
+		router.NewGetRoute("/plugins/privileges", r.getPrivileges),
 		router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
 		router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
 		router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),

+ 35 - 22
api/server/router/plugin/plugin_routes.go

@@ -12,20 +12,17 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 
-func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := httputils.ParseForm(r); err != nil {
-		return err
-	}
+func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
 
 	metaHeaders := map[string][]string{}
-	for k, v := range r.Header {
+	for k, v := range headers {
 		if strings.HasPrefix(k, "X-Meta-") {
 			metaHeaders[k] = v
 		}
 	}
 
 	// Get X-Registry-Auth
-	authEncoded := r.Header.Get("X-Registry-Auth")
+	authEncoded := headers.Get("X-Registry-Auth")
 	authConfig := &types.AuthConfig{}
 	if authEncoded != "" {
 		authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
@@ -34,13 +31,42 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
 		}
 	}
 
-	privileges, err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig)
+	return metaHeaders, authConfig
+}
+
+func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	metaHeaders, authConfig := parseHeaders(r.Header)
+
+	privileges, err := pr.backend.Privileges(r.FormValue("name"), metaHeaders, authConfig)
 	if err != nil {
 		return err
 	}
 	return httputils.WriteJSON(w, http.StatusOK, privileges)
 }
 
+func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	var privileges types.PluginPrivileges
+	if err := json.NewDecoder(r.Body).Decode(&privileges); err != nil {
+		return err
+	}
+
+	metaHeaders, authConfig := parseHeaders(r.Header)
+
+	if err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig, privileges); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusCreated)
+	return nil
+}
+
 func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
 		return err
@@ -52,6 +78,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
 	if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
 		return err
 	}
+	//TODO: send progress bar
 	w.WriteHeader(http.StatusNoContent)
 	return nil
 }
@@ -92,22 +119,8 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
 		return err
 	}
 
-	metaHeaders := map[string][]string{}
-	for k, v := range r.Header {
-		if strings.HasPrefix(k, "X-Meta-") {
-			metaHeaders[k] = v
-		}
-	}
+	metaHeaders, authConfig := parseHeaders(r.Header)
 
-	// Get X-Registry-Auth
-	authEncoded := r.Header.Get("X-Registry-Auth")
-	authConfig := &types.AuthConfig{}
-	if authEncoded != "" {
-		authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
-		if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
-			authConfig = &types.AuthConfig{}
-		}
-	}
 	return pr.backend.Push(vars["name"], metaHeaders, authConfig)
 }
 

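For reference, the extracted `parseHeaders` helper boils down to collecting `X-Meta-*` headers and base64/JSON-decoding `X-Registry-Auth`. A self-contained approximation of that logic; the AuthConfig struct is trimmed to a few fields for the sketch:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// AuthConfig mirrors only the fields this sketch needs; the real type lives
// in github.com/docker/docker/api/types.
type AuthConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	ServerAddress string `json:"serveraddress,omitempty"`
}

func parseHeaders(headers http.Header) (map[string][]string, *AuthConfig) {
	metaHeaders := map[string][]string{}
	for k, v := range headers {
		if strings.HasPrefix(k, "X-Meta-") {
			metaHeaders[k] = v
		}
	}

	authConfig := &AuthConfig{}
	if encoded := headers.Get("X-Registry-Auth"); encoded != "" {
		dec := base64.NewDecoder(base64.URLEncoding, strings.NewReader(encoded))
		if err := json.NewDecoder(dec).Decode(authConfig); err != nil {
			// Invalid auth header: fall back to empty credentials, as the
			// router does, rather than failing the request.
			authConfig = &AuthConfig{}
		}
	}
	return metaHeaders, authConfig
}

func main() {
	h := http.Header{}
	h.Set("X-Meta-Example", "demo")
	payload := base64.URLEncoding.EncodeToString([]byte(`{"username":"alice"}`))
	h.Set("X-Registry-Auth", payload)

	meta, auth := parseHeaders(h)
	fmt.Println(meta, auth.Username)
}
```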
+ 2 - 1
api/server/router/volume/backend.go

@@ -3,6 +3,7 @@ package volume
 import (
 	// TODO return types need to be refactored into pkg
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 )
 
 // Backend is the methods that need to be implemented to provide
@@ -12,5 +13,5 @@ type Backend interface {
 	VolumeInspect(name string) (*types.Volume, error)
 	VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
 	VolumeRm(name string, force bool) error
-	VolumesPrune(config *types.VolumesPruneConfig) (*types.VolumesPruneReport, error)
+	VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error)
 }

+ 2 - 11
api/server/router/volume/volume_routes.go

@@ -5,7 +5,7 @@ import (
 	"net/http"
 	"net/http"
 
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/server/httputils"
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	volumetypes "github.com/docker/docker/api/types/volume"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
@@ -72,16 +72,7 @@ func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWrit
 		return err
 	}
 
-	if err := httputils.CheckForJSON(r); err != nil {
-		return err
-	}
-
-	var cfg types.VolumesPruneConfig
-	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
-		return err
-	}
-
-	pruneReport, err := v.backend.VolumesPrune(&cfg)
+	pruneReport, err := v.backend.VolumesPrune(filters.Args{})
 	if err != nil {
 		return err
 	}

+ 120 - 24
api/swagger.yaml

@@ -97,37 +97,49 @@ info:
 tags:
   # Primary objects
   - name: "Container"
+    x-displayName: "Containers"
     description: |
       Create and manage containers.
   - name: "Image"
+    x-displayName: "Images"
   - name: "Network"
+    x-displayName: "Networks"
     description: |
       Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.
   - name: "Volume"
+    x-displayName: "Volumes"
     description: |
       Create and manage persistent storage that can be attached to containers.
   - name: "Exec"
+    x-displayName: "Exec"
     description: |
       Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.
 
       To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.
   - name: "Secret"
+    x-displayName: "Secrets"
   # Swarm things
   - name: "Swarm"
+    x-displayName: "Swarm"
     description: |
       Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information.
   - name: "Node"
+    x-displayName: "Nodes"
     description: |
       Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work.
   - name: "Service"
+    x-displayName: "Services"
     description: |
       Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work.
   - name: "Task"
+    x-displayName: "Tasks"
     description: |
       A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work.
   # System things
   - name: "Plugin"
+    x-displayName: "Plugins"
   - name: "System"
+    x-displayName: "System"
 
 definitions:
   Port:
@@ -1382,6 +1394,7 @@ definitions:
           - Workdir
           - Network
           - Linux
+          - PropagatedMount
           - Mounts
           - Env
           - Args
@@ -1446,6 +1459,9 @@ definitions:
                 type: "array"
                 items:
                   $ref: "#/definitions/PluginDevice"
+          PropagatedMount:
+            type: "string"
+            x-nullable: false
           Mounts:
             type: "array"
             items:
@@ -4186,11 +4202,17 @@ paths:
   /containers/prune:
     post:
       summary: "Delete stopped containers"
-      consumes:
-        - "application/json"
       produces:
         - "application/json"
       operationId: "ContainerPrune"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+            Available filters:
+          type: "string"
       responses:
         200:
           description: "No error"
@@ -4442,6 +4464,10 @@ paths:
       responses:
         200:
           description: "no error"
+        404:
+          description: "repository does not exist or no read access"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
         500:
           description: "server error"
           schema:
@@ -4848,21 +4874,20 @@ paths:
   /images/prune:
     post:
       summary: "Delete unused images"
-      consumes:
-        - "application/json"
       produces:
         - "application/json"
       operationId: "ImagePrune"
       parameters:
-        - name: "body"
-          in: "body"
-          schema:
-            type: "object"
-            properties:
-              DanglingOnly:
-                description: "Only delete unused *and* untagged images"
-                type: "boolean"
-                default: false
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+            Available filters:
+            - `dangling=<boolean>` When set to `true` (or `1`), prune only
+               unused *and* untagged images. When set to `false`
+               (or `0`), all unused images are pruned.
+          type: "string"
       responses:
         200:
           description: "No error"
@@ -5944,11 +5969,17 @@ paths:
   /volumes/prune:
     post:
       summary: "Delete unused volumes"
-      consumes:
-        - "application/json"
       produces:
         - "application/json"
       operationId: "VolumePrune"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+            Available filters:
+          type: "string"
       responses:
         200:
           description: "No error"
@@ -6118,6 +6149,10 @@ paths:
             example:
               Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
               Warning: ""
+        403:
+          description: "operation not supported for pre-defined networks"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
         404:
           description: "plugin not found"
           schema:
@@ -6286,6 +6321,14 @@ paths:
       produces:
         - "application/json"
       operationId: "NetworkPrune"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+            Available filters:
+          type: "string"
       responses:
         200:
           description: "No error"
@@ -6396,14 +6439,10 @@ paths:
             $ref: "#/definitions/ErrorResponse"
       tags: ["Plugin"]
 
-  /plugins/pull:
-    post:
-      summary: "Install a plugin"
-      operationId: "PluginPull"
-      description: |
-        Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PluginEnable).
-      produces:
-        - "application/json"
+  /plugins/privileges:
+    get:
+      summary: "Get plugin privileges"
+      operationId: "GetPluginPrivileges"
       responses:
         200:
           description: "no error"
@@ -6438,6 +6477,30 @@ paths:
           description: "server error"
           schema:
             $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "query"
+          description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+          required: true
+          type: "string"
+      tags:
+        - "Plugin"
+
+  /plugins/pull:
+    post:
+      summary: "Install a plugin"
+      operationId: "PluginPull"
+      description: |
+        Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
+      produces:
+        - "application/json"
+      responses:
+        204:
+          description: "no error"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
       parameters:
       parameters:
         - name: "name"
         - name: "name"
           in: "query"
           in: "query"
@@ -6451,8 +6514,37 @@ paths:
           in: "header"
           description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
           type: "string"
+        - name: "body"
+          in: "body"
+          schema:
+            type: "array"
+            items:
+              description: "Describes a permission accepted by the user upon installing the plugin."
+              type: "object"
+              properties:
+                Name:
+                  type: "string"
+                Description:
+                  type: "string"
+                Value:
+                  type: "array"
+                  items:
+                    type: "string"
+            example:
+              - Name: "network"
+                Description: ""
+                Value:
+                  - "host"
+              - Name: "mount"
+                Description: ""
+                Value:
+                  - "/data"
+              - Name: "device"
+                Description: ""
+                Value:
+                  - "/dev/cpu_dma_latency"
       tags: ["Plugin"]
       tags: ["Plugin"]
-  /plugins/{name}:
+  /plugins/{name}/json:
     get:
       summary: "Inspect a plugin"
       operationId: "PluginInspect"
@@ -7058,6 +7150,10 @@ paths:
             example:
               ID: "ak7w3gjqoa3kuz8xcpnyy0pvl"
               Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+        403:
+          description: "network is not eligible for services"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
         409:
           description: "name conflicts with an existing service"
           schema:

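The swagger changes above move prune filters into a JSON-encoded `filters` query parameter. A rough sketch of issuing such a request against a local daemon over its Unix socket; the socket path and the `/v1.25` prefix are assumptions, not taken from this PR:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	// Filters are a map[string][]string encoded as JSON, e.g. the "dangling"
	// filter documented for /images/prune.
	filters := map[string][]string{"dangling": {"true"}}
	encoded, _ := json.Marshal(filters)

	query := url.Values{}
	query.Set("filters", string(encoded))

	// Talk to the daemon over its Unix socket; the host in the URL is ignored
	// because the dialer always connects to the socket.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	resp, err := client.Post("http://docker/v1.25/images/prune?"+query.Encode(), "application/json", nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```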
+ 7 - 4
api/types/client.go

@@ -160,10 +160,13 @@ type ImageBuildOptions struct {
 	ShmSize        int64
 	Dockerfile     string
 	Ulimits        []*units.Ulimit
-	BuildArgs      map[string]string
-	AuthConfigs    map[string]AuthConfig
-	Context        io.Reader
-	Labels         map[string]string
+	// See the parsing of buildArgs in api/server/router/build/build_routes.go
+	// for an explaination of why BuildArgs needs to use *string instead of
+	// just a string
+	BuildArgs   map[string]*string
+	AuthConfigs map[string]AuthConfig
+	Context     io.Reader
+	Labels      map[string]string
 	// squash the resulting image's layers to the parent
 	// preserves the original image and creates a new one from the parent with all
 	// the changes applied to a single layer

+ 4 - 0
api/types/plugin.go

@@ -71,6 +71,10 @@ type PluginConfig struct {
 	// Required: true
 	Network PluginConfigNetwork `json:"Network"`
 
+	// propagated mount
+	// Required: true
+	PropagatedMount string `json:"PropagatedMount"`
+
 	// user
 	User PluginConfigUser `json:"User,omitempty"`
 

+ 6 - 0
api/types/swarm/common.go

@@ -19,3 +19,9 @@ type Annotations struct {
 	Name   string            `json:",omitempty"`
 	Labels map[string]string `json:",omitempty"`
 }
+
+// Driver represents a driver (network, logging).
+type Driver struct {
+	Name    string            `json:",omitempty"`
+	Options map[string]string `json:",omitempty"`
+}

+ 0 - 6
api/types/swarm/network.go

@@ -109,9 +109,3 @@ type IPAMConfig struct {
 	Range   string `json:",omitempty"`
 	Gateway string `json:",omitempty"`
 }
-
-// Driver represents a network driver.
-type Driver struct {
-	Name    string            `json:",omitempty"`
-	Options map[string]string `json:",omitempty"`
-}

+ 0 - 21
api/types/types.go

@@ -509,27 +509,6 @@ type DiskUsage struct {
 	Volumes    []*Volume
 }
 
-// ImagesPruneConfig contains the configuration for Engine API:
-// POST "/images/prune"
-type ImagesPruneConfig struct {
-	DanglingOnly bool
-}
-
-// ContainersPruneConfig contains the configuration for Engine API:
-// POST "/images/prune"
-type ContainersPruneConfig struct {
-}
-
-// VolumesPruneConfig contains the configuration for Engine API:
-// POST "/images/prune"
-type VolumesPruneConfig struct {
-}
-
-// NetworksPruneConfig contains the configuration for Engine API:
-// POST "/networks/prune"
-type NetworksPruneConfig struct {
-}
-
 // ContainersPruneReport contains the response for Engine API:
 // POST "/containers/prune"
 type ContainersPruneReport struct {

+ 1 - 1
builder/dockerfile/builder.go

@@ -125,7 +125,7 @@ func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, back
 		config = new(types.ImageBuildOptions)
 	}
 	if config.BuildArgs == nil {
-		config.BuildArgs = make(map[string]string)
+		config.BuildArgs = make(map[string]*string)
 	}
 	ctx, cancel := context.WithCancel(clientCtx)
 	b = &Builder{

+ 10 - 7
builder/dockerfile/dispatchers.go

@@ -384,8 +384,8 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 			// the entire file (see 'leftoverArgs' processing in evaluator.go )
 			continue
 		}
-		if _, ok := configEnv[key]; !ok {
-			cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val))
+		if _, ok := configEnv[key]; !ok && val != nil {
+			cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, *val))
 		}
 	}
 
@@ -728,7 +728,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string)
 
 	var (
 		name       string
-		value      string
+		newValue   string
 		hasDefault bool
 	)
 
@@ -745,7 +745,7 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string)
 		}
 
 		name = parts[0]
-		value = parts[1]
+		newValue = parts[1]
 		hasDefault = true
 	} else {
 		name = arg
@@ -756,9 +756,12 @@ func arg(b *Builder, args []string, attributes map[string]bool, original string)
 
 	// If there is a default value associated with this arg then add it to the
 	// b.buildArgs if one is not already passed to the builder. The args passed
-	// to builder override the default value of 'arg'.
-	if _, ok := b.options.BuildArgs[name]; !ok && hasDefault {
-		b.options.BuildArgs[name] = value
+	// to builder override the default value of 'arg'. Note that a 'nil' for
+	// a value means that the user specified "--build-arg FOO" and "FOO" wasn't
+	// defined as an env var - and in that case we DO want to use the default
+	// value specified in the ARG cmd.
+	if baValue, ok := b.options.BuildArgs[name]; (!ok || baValue == nil) && hasDefault {
+		b.options.BuildArgs[name] = &newValue
 	}
 
 	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg))

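The updated `arg` dispatcher keeps the Dockerfile default when the corresponding build arg was passed without a resolvable value (stored as `nil`). A compact sketch of just that decision, outside the builder, with illustrative names:

```go
package main

import "fmt"

// applyArgDefault mimics the decision in the ARG dispatcher: the Dockerfile
// default wins only when the build arg was not supplied at all, or was
// supplied without a value (stored as nil).
func applyArgDefault(buildArgs map[string]*string, name, def string, hasDefault bool) {
	if value, ok := buildArgs[name]; (!ok || value == nil) && hasDefault {
		buildArgs[name] = &def
	}
}

func main() {
	val := "from-cli"
	buildArgs := map[string]*string{
		"WITH_VALUE": &val, // --build-arg WITH_VALUE=from-cli
		"NO_VALUE":   nil,  // --build-arg NO_VALUE, env var unset
	}

	applyArgDefault(buildArgs, "WITH_VALUE", "default", true) // kept: from-cli
	applyArgDefault(buildArgs, "NO_VALUE", "default", true)   // replaced: default
	applyArgDefault(buildArgs, "UNSET", "default", true)      // added: default

	for k, v := range buildArgs {
		fmt.Printf("%s=%s\n", k, *v)
	}
}
```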
+ 2 - 2
builder/dockerfile/dispatchers_test.go

@@ -460,7 +460,7 @@ func TestStopSignal(t *testing.T) {
 }
 
 func TestArg(t *testing.T) {
-	buildOptions := &types.ImageBuildOptions{BuildArgs: make(map[string]string)}
+	buildOptions := &types.ImageBuildOptions{BuildArgs: make(map[string]*string)}
 
 	b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true, allowedBuildArgs: make(map[string]bool), options: buildOptions}
 
@@ -488,7 +488,7 @@ func TestArg(t *testing.T) {
 		t.Fatalf("%s argument should be a build arg", argName)
 	}
 
-	if val != "bar" {
+	if *val != "bar" {
 		t.Fatalf("%s argument should have default value 'bar', got %s", argName, val)
 	}
 }

+ 1 - 1
builder/dockerfile/evaluator.go

@@ -158,7 +158,7 @@ func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error {
 			// the entire file (see 'leftoverArgs' processing in evaluator.go )
 			continue
 		}
-		envs = append(envs, fmt.Sprintf("%s=%s", key, val))
+		envs = append(envs, fmt.Sprintf("%s=%s", key, *val))
 	}
 	for ast.Next != nil {
 		ast = ast.Next

+ 3 - 0
cli/command/checkpoint/create.go

@@ -1,6 +1,8 @@
 package checkpoint
 
 import (
+	"fmt"
+
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
@@ -51,5 +53,6 @@ func runCreate(dockerCli *command.DockerCli, opts createOptions) error {
 		return err
 	}
 
+	fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint)
 	return nil
 }

+ 45 - 4
cli/command/cli.go

@@ -10,6 +10,7 @@ import (
 	"runtime"
 	"runtime"
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/api/types/versions"
 	cliflags "github.com/docker/docker/cli/flags"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/cliconfig"
@@ -86,15 +87,55 @@ func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
 	return cli.configFile
 }
 
+// GetAllCredentials returns all of the credentials stored in all of the
+// configured credential stores.
+func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) {
+	auths := make(map[string]types.AuthConfig)
+	for registry := range cli.configFile.CredentialHelpers {
+		helper := cli.CredentialsStore(registry)
+		newAuths, err := helper.GetAll()
+		if err != nil {
+			return nil, err
+		}
+		addAll(auths, newAuths)
+	}
+	defaultStore := cli.CredentialsStore("")
+	newAuths, err := defaultStore.GetAll()
+	if err != nil {
+		return nil, err
+	}
+	addAll(auths, newAuths)
+	return auths, nil
+}
+
+func addAll(to, from map[string]types.AuthConfig) {
+	for reg, ac := range from {
+		to[reg] = ac
+	}
+}
+
 // CredentialsStore returns a new credentials store based
-// on the settings provided in the configuration file.
-func (cli *DockerCli) CredentialsStore() credentials.Store {
-	if cli.configFile.CredentialsStore != "" {
-		return credentials.NewNativeStore(cli.configFile)
+// on the settings provided in the configuration file. Empty string returns
+// the default credential store.
+func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store {
+	if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" {
+		return credentials.NewNativeStore(cli.configFile, helper)
 	}
 	return credentials.NewFileStore(cli.configFile)
 }
 
+// getConfiguredCredentialStore returns the credential helper configured for the
+// given registry, the default credsStore, or the empty string if neither are
+// configured.
+func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string {
+	if c.CredentialHelpers != nil && serverAddress != "" {
+		if helper, exists := c.CredentialHelpers[serverAddress]; exists {
+			return helper
+		}
+	}
+	return c.CredentialsStore
+}
+
 // Initialize the dockerCli runs initialization that must happen after command
 // line flags are parsed.
 func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {

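`CredentialsStore` now resolves a per-registry credential helper (`credHelpers` in `~/.docker/config.json`) before falling back to the global `credsStore`. A standalone sketch of that lookup order, with invented registry and helper names:

```go
package main

import "fmt"

// configFile holds only the two fields the lookup cares about; the real type
// is cliconfig/configfile.ConfigFile.
type configFile struct {
	CredentialsStore  string
	CredentialHelpers map[string]string
}

// configuredCredentialStore returns the helper configured for serverAddress,
// falling back to the default credsStore, or "" when neither is set.
func configuredCredentialStore(c *configFile, serverAddress string) string {
	if c.CredentialHelpers != nil && serverAddress != "" {
		if helper, exists := c.CredentialHelpers[serverAddress]; exists {
			return helper
		}
	}
	return c.CredentialsStore
}

func main() {
	cfg := &configFile{
		CredentialsStore: "osxkeychain",
		CredentialHelpers: map[string]string{
			"registry.example.com": "ecr-login",
		},
	}

	fmt.Println(configuredCredentialStore(cfg, "registry.example.com")) // ecr-login
	fmt.Println(configuredCredentialStore(cfg, "other.example.com"))    // osxkeychain
	fmt.Println(configuredCredentialStore(cfg, ""))                     // osxkeychain
}
```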
+ 2 - 2
cli/command/container/prune.go

@@ -5,7 +5,7 @@ import (
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command"
 	units "github.com/docker/go-units"
@@ -52,7 +52,7 @@ func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed u
 		return
 	}
 
-	report, err := dockerCli.Client().ContainersPrune(context.Background(), types.ContainersPruneConfig{})
+	report, err := dockerCli.Client().ContainersPrune(context.Background(), filters.Args{})
 	if err != nil {
 		return
 	}

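Client-side, the prune call now takes a `filters.Args` instead of a dedicated config struct. A hedged usage sketch with the Go client of this release; an empty `filters.Args{}` means no filtering, and the report fields are assumed to match `api/types`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// An empty filters.Args prunes every stopped container, mirroring what
	// the CLI command above sends after its confirmation prompt.
	report, err := cli.ContainersPrune(context.Background(), filters.Args{})
	if err != nil {
		panic(err)
	}

	fmt.Println("deleted:", report.ContainersDeleted)
	fmt.Println("space reclaimed (bytes):", report.SpaceReclaimed)
}
```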
+ 5 - 5
cli/command/image/build.go

@@ -67,7 +67,7 @@ func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
 	ulimits := make(map[string]*units.Ulimit)
 	ulimits := make(map[string]*units.Ulimit)
 	options := buildOptions{
 	options := buildOptions{
 		tags:      opts.NewListOpts(validateTag),
 		tags:      opts.NewListOpts(validateTag),
-		buildArgs: opts.NewListOpts(runconfigopts.ValidateArg),
+		buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv),
 		ulimits:   runconfigopts.NewUlimitOpt(&ulimits),
 		ulimits:   runconfigopts.NewUlimitOpt(&ulimits),
 		labels:    opts.NewListOpts(runconfigopts.ValidateEnv),
 		labels:    opts.NewListOpts(runconfigopts.ValidateEnv),
 	}
 	}
@@ -107,7 +107,7 @@ func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
 	flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
 	flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
 	flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
 	flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
 	flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
 	flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
-	flags.StringVar(&options.networkMode, "network", "default", "Connect a container to a network")
+	flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
 
 
 	command.AddTrustedFlags(flags, true)
 	command.AddTrustedFlags(flags, true)
 
 
@@ -280,7 +280,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
 		}
 		}
 	}
 	}
 
 
-	authConfig, _ := dockerCli.CredentialsStore().GetAll()
+	authConfigs, _ := dockerCli.GetAllCredentials()
 	buildOptions := types.ImageBuildOptions{
 	buildOptions := types.ImageBuildOptions{
 		Memory:         memory,
 		Memory:         memory,
 		MemorySwap:     memorySwap,
 		MemorySwap:     memorySwap,
@@ -300,8 +300,8 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
 		Dockerfile:     relDockerfile,
 		Dockerfile:     relDockerfile,
 		ShmSize:        shmSize,
 		ShmSize:        shmSize,
 		Ulimits:        options.ulimits.GetList(),
 		Ulimits:        options.ulimits.GetList(),
-		BuildArgs:      runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
-		AuthConfigs:    authConfig,
+		BuildArgs:      runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()),
+		AuthConfigs:    authConfigs,
 		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
 		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
 		CacheFrom:      options.cacheFrom,
 		CacheFrom:      options.cacheFrom,
 		SecurityOpt:    options.securityOpt,
 		SecurityOpt:    options.securityOpt,
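
Switching validation to `ValidateEnv` lets a bare `--build-arg FOO` pick up a value from the client environment when one is set, and `ConvertKVStringsToMapWithNil` keeps genuinely valueless keys as nil entries, which serialize as JSON `null` (see the client/image_build_test.go change later in this diff). A self-contained sketch of that conversion (a local reimplementation for illustration, not the runconfig/opts code itself):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// convertKVStringsToMapWithNil mimics the behaviour relied on here:
// "KEY=value" becomes a concrete value, a bare "KEY" becomes a nil entry.
func convertKVStringsToMapWithNil(values []string) map[string]*string {
	result := make(map[string]*string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = nil
		} else {
			v := kv[1]
			result[kv[0]] = &v
		}
	}
	return result
}

func main() {
	args := convertKVStringsToMapWithNil([]string{"HTTP_PROXY=http://proxy:3128", "BUILD_DATE"})
	out, _ := json.Marshal(args)
	fmt.Println(string(out)) // e.g. {"BUILD_DATE":null,"HTTP_PROXY":"http://proxy:3128"}
}
```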

+ 5 - 4
cli/command/image/prune.go

@@ -5,7 +5,7 @@ import (
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 
 
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command"
 	units "github.com/docker/go-units"
 	units "github.com/docker/go-units"
@@ -54,6 +54,9 @@ Are you sure you want to continue?`
 )
 )
 
 
 func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) {
 func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) {
+	pruneFilters := filters.NewArgs()
+	pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all))
+
 	warning := danglingWarning
 	warning := danglingWarning
 	if opts.all {
 	if opts.all {
 		warning = allImageWarning
 		warning = allImageWarning
@@ -62,9 +65,7 @@ func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed u
 		return
 		return
 	}
 	}
 
 
-	report, err := dockerCli.Client().ImagesPrune(context.Background(), types.ImagesPruneConfig{
-		DanglingOnly: !opts.all,
-	})
+	report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters)
 	if err != nil {
 	if err != nil {
 		return
 		return
 	}
 	}
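
`--all` is now expressed as a `dangling` filter on the request rather than a `DanglingOnly` field: `dangling=true` limits the prune to untagged images, while `dangling=false` (what the CLI sends for `--all`) lets the daemon remove any unused image. A hedged sketch of the same call made directly against the client from this tree:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// dangling=true: prune only untagged images, mirroring the default
	// behaviour of `docker image prune` without --all.
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("dangling", "true")

	report, err := cli.ImagesPrune(context.Background(), pruneFilters)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("reclaimed %d bytes\n", report.SpaceReclaimed)
}
```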

+ 2 - 2
cli/command/network/prune.go

@@ -5,7 +5,7 @@ import (
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 
 
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command"
 	"github.com/spf13/cobra"
 	"github.com/spf13/cobra"
@@ -50,7 +50,7 @@ func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (output string, e
 		return
 		return
 	}
 	}
 
 
-	report, err := dockerCli.Client().NetworksPrune(context.Background(), types.NetworksPruneConfig{})
+	report, err := dockerCli.Client().NetworksPrune(context.Background(), filters.Args{})
 	if err != nil {
 	if err != nil {
 		return
 		return
 	}
 	}

+ 2 - 2
cli/command/registry.go

@@ -67,7 +67,7 @@ func ResolveAuthConfig(ctx context.Context, cli *DockerCli, index *registrytypes
 		configKey = ElectAuthServer(ctx, cli)
 		configKey = ElectAuthServer(ctx, cli)
 	}
 	}
 
 
-	a, _ := cli.CredentialsStore().Get(configKey)
+	a, _ := cli.CredentialsStore(configKey).Get(configKey)
 	return a
 	return a
 }
 }
 
 
@@ -82,7 +82,7 @@ func ConfigureAuth(cli *DockerCli, flUser, flPassword, serverAddress string, isD
 		serverAddress = registry.ConvertToHostname(serverAddress)
 		serverAddress = registry.ConvertToHostname(serverAddress)
 	}
 	}
 
 
-	authconfig, err := cli.CredentialsStore().Get(serverAddress)
+	authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress)
 	if err != nil {
 	if err != nil {
 		return authconfig, err
 		return authconfig, err
 	}
 	}

+ 1 - 1
cli/command/registry/login.go

@@ -74,7 +74,7 @@ func runLogin(dockerCli *command.DockerCli, opts loginOptions) error {
 		authConfig.Password = ""
 		authConfig.Password = ""
 		authConfig.IdentityToken = response.IdentityToken
 		authConfig.IdentityToken = response.IdentityToken
 	}
 	}
-	if err := dockerCli.CredentialsStore().Store(authConfig); err != nil {
+	if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil {
 		return fmt.Errorf("Error saving credentials: %v", err)
 		return fmt.Errorf("Error saving credentials: %v", err)
 	}
 	}
 
 

+ 1 - 1
cli/command/registry/logout.go

@@ -68,7 +68,7 @@ func runLogout(dockerCli *command.DockerCli, serverAddress string) error {
 
 
 	fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress)
 	fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress)
 	for _, r := range regsToLogout {
 	for _, r := range regsToLogout {
-		if err := dockerCli.CredentialsStore().Erase(r); err != nil {
+		if err := dockerCli.CredentialsStore(r).Erase(r); err != nil {
 			fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err)
 			fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err)
 		}
 		}
 	}
 	}

+ 46 - 18
cli/command/secret/utils.go

@@ -1,6 +1,9 @@
 package secret
 package secret
 
 
 import (
 import (
+	"fmt"
+	"strings"
+
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/swarm"
@@ -8,10 +11,11 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
-func getSecretsByName(ctx context.Context, client client.APIClient, names []string) ([]swarm.Secret, error) {
+func getSecretsByNameOrIDPrefixes(ctx context.Context, client client.APIClient, terms []string) ([]swarm.Secret, error) {
 	args := filters.NewArgs()
 	args := filters.NewArgs()
-	for _, n := range names {
+	for _, n := range terms {
 		args.Add("names", n)
 		args.Add("names", n)
+		args.Add("id", n)
 	}
 	}
 
 
 	return client.SecretList(ctx, types.SecretListOptions{
 	return client.SecretList(ctx, types.SecretListOptions{
@@ -19,29 +23,53 @@ func getSecretsByName(ctx context.Context, client client.APIClient, names []stri
 	})
 	})
 }
 }
 
 
-func getCliRequestedSecretIDs(ctx context.Context, client client.APIClient, names []string) ([]string, error) {
-	ids := names
-
-	// attempt to lookup secret by name
-	secrets, err := getSecretsByName(ctx, client, ids)
+func getCliRequestedSecretIDs(ctx context.Context, client client.APIClient, terms []string) ([]string, error) {
+	secrets, err := getSecretsByNameOrIDPrefixes(ctx, client, terms)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	lookup := make(map[string]struct{})
-	for _, id := range ids {
-		lookup[id] = struct{}{}
-	}
-
 	if len(secrets) > 0 {
 	if len(secrets) > 0 {
-		ids = []string{}
-
-		for _, s := range secrets {
-			if _, ok := lookup[s.Spec.Annotations.Name]; ok {
-				ids = append(ids, s.ID)
+		found := make(map[string]struct{})
+	next:
+		for _, term := range terms {
+			// attempt to lookup secret by full ID
+			for _, s := range secrets {
+				if s.ID == term {
+					found[s.ID] = struct{}{}
+					continue next
+				}
+			}
+			// attempt to lookup secret by full name
+			for _, s := range secrets {
+				if s.Spec.Annotations.Name == term {
+					found[s.ID] = struct{}{}
+					continue next
+				}
+			}
+			// attempt to lookup secret by partial ID (prefix)
+			// return error if more than one matches found (ambiguous)
+			n := 0
+			for _, s := range secrets {
+				if strings.HasPrefix(s.ID, term) {
+					found[s.ID] = struct{}{}
+					n++
+				}
+			}
+			if n > 1 {
+				return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", term, n)
 			}
 			}
 		}
 		}
+
+		// We already collected all the IDs found.
+		// Now we will remove duplicates by converting the map to slice
+		ids := []string{}
+		for id := range found {
+			ids = append(ids, id)
+		}
+
+		return ids, nil
 	}
 	}
 
 
-	return ids, nil
+	return terms, nil
 }
 }
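
Resolution now works on names, full IDs, and unambiguous ID prefixes. A self-contained, per-term sketch of the matching order (the real helper batches all terms in one `SecretList` call and passes unmatched terms through for the daemon to reject; the stub `secret` type is for illustration only):

```go
package main

import (
	"fmt"
	"strings"
)

type secret struct{ ID, Name string }

// resolve mirrors the matching order used above: exact ID, then exact name,
// then ID prefix, failing if a prefix matches more than one secret.
func resolve(secrets []secret, term string) (string, error) {
	for _, s := range secrets {
		if s.ID == term {
			return s.ID, nil
		}
	}
	for _, s := range secrets {
		if s.Name == term {
			return s.ID, nil
		}
	}
	var matches []string
	for _, s := range secrets {
		if strings.HasPrefix(s.ID, term) {
			matches = append(matches, s.ID)
		}
	}
	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return "", fmt.Errorf("secret %s not found", term)
	default:
		return "", fmt.Errorf("secret %s is ambiguous (%d matches found)", term, len(matches))
	}
}

func main() {
	secrets := []secret{
		{ID: "a1b2c3d4", Name: "db_password"},
		{ID: "a1f0e9d8", Name: "api_token"},
	}
	fmt.Println(resolve(secrets, "db_password")) // a1b2c3d4 <nil>
	fmt.Println(resolve(secrets, "a1f"))         // a1f0e9d8 <nil>
	fmt.Println(resolve(secrets, "a1"))          // "" ambiguous (2 matches found)
}
```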

+ 9 - 0
cli/command/stack/deploy.go

@@ -567,6 +567,14 @@ func convertService(
 		return swarm.ServiceSpec{}, err
 		return swarm.ServiceSpec{}, err
 	}
 	}
 
 
+	var logDriver *swarm.Driver
+	if service.Logging != nil {
+		logDriver = &swarm.Driver{
+			Name:    service.Logging.Driver,
+			Options: service.Logging.Options,
+		}
+	}
+
 	serviceSpec := swarm.ServiceSpec{
 	serviceSpec := swarm.ServiceSpec{
 		Annotations: swarm.Annotations{
 		Annotations: swarm.Annotations{
 			Name:   name,
 			Name:   name,
@@ -589,6 +597,7 @@ func convertService(
 				TTY:             service.Tty,
 				TTY:             service.Tty,
 				OpenStdin:       service.StdinOpen,
 				OpenStdin:       service.StdinOpen,
 			},
 			},
+			LogDriver:     logDriver,
 			Resources:     resources,
 			Resources:     resources,
 			RestartPolicy: restartPolicy,
 			RestartPolicy: restartPolicy,
 			Placement: &swarm.Placement{
 			Placement: &swarm.Placement{
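
`docker stack deploy` now carries the compose file's `logging` section into the service spec. A short sketch of the mapping, assuming the `swarm.Driver` type used above (the `logging` struct stands in for the compose-file type):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

// logging stands in for the compose-file service.Logging block.
type logging struct {
	Driver  string
	Options map[string]string
}

// logDriverFor mirrors the hunk above: no logging section means the
// task template's LogDriver stays nil and the daemon default applies.
func logDriverFor(l *logging) *swarm.Driver {
	if l == nil {
		return nil
	}
	return &swarm.Driver{Name: l.Driver, Options: l.Options}
}

func main() {
	d := logDriverFor(&logging{Driver: "json-file", Options: map[string]string{"max-size": "10m"}})
	out, _ := json.Marshal(d)
	fmt.Println(string(out))
	fmt.Println(logDriverFor(nil)) // <nil>
}
```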

+ 23 - 10
cli/command/task/print.go

@@ -14,11 +14,12 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command/idresolver"
 	"github.com/docker/docker/cli/command/idresolver"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/go-units"
 	"github.com/docker/go-units"
 )
 )
 
 
 const (
 const (
-	psTaskItemFmt = "%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n"
+	psTaskItemFmt = "%s\t%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n"
 	maxErrLength  = 30
 	maxErrLength  = 30
 )
 )
 
 
@@ -67,7 +68,7 @@ func Print(dockerCli *command.DockerCli, ctx context.Context, tasks []swarm.Task
 
 
 	// Ignore flushing errors
 	// Ignore flushing errors
 	defer writer.Flush()
 	defer writer.Flush()
-	fmt.Fprintln(writer, strings.Join([]string{"NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t"))
+	fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t"))
 
 
 	if err := print(writer, ctx, tasks, resolver, noTrunc); err != nil {
 	if err := print(writer, ctx, tasks, resolver, noTrunc); err != nil {
 		return err
 		return err
@@ -90,25 +91,36 @@ func PrintQuiet(dockerCli *command.DockerCli, tasks []swarm.Task) error {
 }
 }
 
 
 func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error {
 func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error {
-	prevService := ""
-	prevSlot := 0
+	prevName := ""
 	for _, task := range tasks {
 	for _, task := range tasks {
-		name, err := resolver.Resolve(ctx, task, task.ID)
+		id := task.ID
+		if !noTrunc {
+			id = stringid.TruncateID(id)
+		}
+
+		serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID)
+		if err != nil {
+			return err
+		}
 
 
 		nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID)
 		nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
 
 
+		name := ""
+		if task.Slot != 0 {
+			name = fmt.Sprintf("%v.%v", serviceName, task.Slot)
+		} else {
+			name = fmt.Sprintf("%v.%v", serviceName, task.NodeID)
+		}
+
 		// Indent the name if necessary
 		// Indent the name if necessary
 		indentedName := name
 		indentedName := name
-		// Since the new format of the task name is <ServiceName>.<Slot>.<taskID>, we should only compare
-		// <ServiceName> and <Slot> here.
-		if prevService == task.ServiceID && prevSlot == task.Slot {
+		if name == prevName {
 			indentedName = fmt.Sprintf(" \\_ %s", indentedName)
 			indentedName = fmt.Sprintf(" \\_ %s", indentedName)
 		}
 		}
-		prevService = task.ServiceID
-		prevSlot = task.Slot
+		prevName = name
 
 
 		// Trim and quote the error message.
 		// Trim and quote the error message.
 		taskErr := task.Status.Err
 		taskErr := task.Status.Err
@@ -134,6 +146,7 @@ func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idr
 		fmt.Fprintf(
 		fmt.Fprintf(
 			out,
 			out,
 			psTaskItemFmt,
 			psTaskItemFmt,
+			id,
 			indentedName,
 			indentedName,
 			image,
 			image,
 			nodeValue,
 			nodeValue,
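
`docker service ps` output gains a truncated ID column, and NAME is now derived as `<service>.<slot>` for replicated tasks or `<service>.<node-id>` for global ones, with repeated names indented as `\_`. A self-contained sketch of the naming rule (ID truncation reimplemented locally instead of calling `pkg/stringid`):

```go
package main

import "fmt"

type task struct {
	ID      string
	Service string
	Slot    int
	NodeID  string
}

// taskName mirrors the rule above: replicated tasks get a slot suffix,
// global tasks (Slot == 0) are keyed by the node they run on.
func taskName(t task) string {
	if t.Slot != 0 {
		return fmt.Sprintf("%s.%d", t.Service, t.Slot)
	}
	return fmt.Sprintf("%s.%s", t.Service, t.NodeID)
}

// truncateID stands in for stringid.TruncateID (first 12 characters).
func truncateID(id string) string {
	if len(id) > 12 {
		return id[:12]
	}
	return id
}

func main() {
	t := task{ID: "8eaxrb2fqpbnv9wxyz123456", Service: "web", Slot: 3, NodeID: "node-1"}
	fmt.Println(truncateID(t.ID), taskName(t)) // 8eaxrb2fqpbn web.3

	g := task{ID: "1a2b3c4d5e6f7890", Service: "monitor", Slot: 0, NodeID: "node-2"}
	fmt.Println(truncateID(g.ID), taskName(g)) // 1a2b3c4d5e6f monitor.node-2
}
```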

+ 6 - 0
cli/command/utils.go

@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"strings"
 )
 )
 
 
@@ -71,6 +72,11 @@ func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool
 
 
 	fmt.Fprintf(outs, message)
 	fmt.Fprintf(outs, message)
 
 
+	// On Windows, force the use of the regular OS stdin stream.
+	if runtime.GOOS == "windows" {
+		ins = NewInStream(os.Stdin)
+	}
+
 	answer := ""
 	answer := ""
 	n, _ := fmt.Fscan(ins, &answer)
 	n, _ := fmt.Fscan(ins, &answer)
 	if n != 1 || (answer != "y" && answer != "Y") {
 	if n != 1 || (answer != "y" && answer != "Y") {

+ 2 - 2
cli/command/volume/prune.go

@@ -5,7 +5,7 @@ import (
 
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 
 
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/command"
 	units "github.com/docker/go-units"
 	units "github.com/docker/go-units"
@@ -52,7 +52,7 @@ func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed u
 		return
 		return
 	}
 	}
 
 
-	report, err := dockerCli.Client().VolumesPrune(context.Background(), types.VolumesPruneConfig{})
+	report, err := dockerCli.Client().VolumesPrune(context.Background(), filters.Args{})
 	if err != nil {
 	if err != nil {
 		return
 		return
 	}
 	}

+ 83 - 11
cliconfig/config_test.go

@@ -86,7 +86,7 @@ func TestEmptyFile(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestEmptyJson(t *testing.T) {
+func TestEmptyJSON(t *testing.T) {
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -193,7 +193,7 @@ func TestOldValidAuth(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestOldJsonInvalid(t *testing.T) {
+func TestOldJSONInvalid(t *testing.T) {
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -219,7 +219,7 @@ func TestOldJsonInvalid(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestOldJson(t *testing.T) {
+func TestOldJSON(t *testing.T) {
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -265,7 +265,7 @@ func TestOldJson(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestNewJson(t *testing.T) {
+func TestNewJSON(t *testing.T) {
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -304,7 +304,7 @@ func TestNewJson(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestNewJsonNoEmail(t *testing.T) {
+func TestNewJSONNoEmail(t *testing.T) {
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -343,7 +343,7 @@ func TestNewJsonNoEmail(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestJsonWithPsFormat(t *testing.T) {
+func TestJSONWithPsFormat(t *testing.T) {
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	tmpHome, err := ioutil.TempDir("", "config-test")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -376,6 +376,78 @@ func TestJsonWithPsFormat(t *testing.T) {
 	}
 	}
 }
 }
 
 
+func TestJSONWithCredentialStore(t *testing.T) {
+	tmpHome, err := ioutil.TempDir("", "config-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpHome)
+
+	fn := filepath.Join(tmpHome, ConfigFileName)
+	js := `{
+		"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } },
+		"credsStore": "crazy-secure-storage"
+}`
+	if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil {
+		t.Fatal(err)
+	}
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on empty json file: %q", err)
+	}
+
+	if config.CredentialsStore != "crazy-secure-storage" {
+		t.Fatalf("Unknown credential store: %s\n", config.CredentialsStore)
+	}
+
+	// Now save it and make sure it shows up in new form
+	configStr := saveConfigAndValidateNewFormat(t, config, tmpHome)
+	if !strings.Contains(configStr, `"credsStore":`) ||
+		!strings.Contains(configStr, "crazy-secure-storage") {
+		t.Fatalf("Should have save in new form: %s", configStr)
+	}
+}
+
+func TestJSONWithCredentialHelpers(t *testing.T) {
+	tmpHome, err := ioutil.TempDir("", "config-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpHome)
+
+	fn := filepath.Join(tmpHome, ConfigFileName)
+	js := `{
+		"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } },
+		"credHelpers": { "images.io": "images-io", "containers.com": "crazy-secure-storage" }
+}`
+	if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil {
+		t.Fatal(err)
+	}
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on empty json file: %q", err)
+	}
+
+	if config.CredentialHelpers == nil {
+		t.Fatal("config.CredentialHelpers was nil")
+	} else if config.CredentialHelpers["images.io"] != "images-io" ||
+		config.CredentialHelpers["containers.com"] != "crazy-secure-storage" {
+		t.Fatalf("Credential helpers not deserialized properly: %v\n", config.CredentialHelpers)
+	}
+
+	// Now save it and make sure it shows up in new form
+	configStr := saveConfigAndValidateNewFormat(t, config, tmpHome)
+	if !strings.Contains(configStr, `"credHelpers":`) ||
+		!strings.Contains(configStr, "images.io") ||
+		!strings.Contains(configStr, "images-io") ||
+		!strings.Contains(configStr, "containers.com") ||
+		!strings.Contains(configStr, "crazy-secure-storage") {
+		t.Fatalf("Should have save in new form: %s", configStr)
+	}
+}
+
 // Save it and make sure it shows up in new form
 // Save it and make sure it shows up in new form
 func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string {
 func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string {
 	if err := config.Save(); err != nil {
 	if err := config.Save(); err != nil {
@@ -420,7 +492,7 @@ func TestConfigFile(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestJsonReaderNoFile(t *testing.T) {
+func TestJSONReaderNoFile(t *testing.T) {
 	js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }`
 	js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }`
 
 
 	config, err := LoadFromReader(strings.NewReader(js))
 	config, err := LoadFromReader(strings.NewReader(js))
@@ -435,7 +507,7 @@ func TestJsonReaderNoFile(t *testing.T) {
 
 
 }
 }
 
 
-func TestOldJsonReaderNoFile(t *testing.T) {
+func TestOldJSONReaderNoFile(t *testing.T) {
 	js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
 	js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
 
 
 	config, err := LegacyLoadFromReader(strings.NewReader(js))
 	config, err := LegacyLoadFromReader(strings.NewReader(js))
@@ -449,7 +521,7 @@ func TestOldJsonReaderNoFile(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestJsonWithPsFormatNoFile(t *testing.T) {
+func TestJSONWithPsFormatNoFile(t *testing.T) {
 	js := `{
 	js := `{
 		"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } },
 		"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } },
 		"psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}"
 		"psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}"
@@ -465,7 +537,7 @@ func TestJsonWithPsFormatNoFile(t *testing.T) {
 
 
 }
 }
 
 
-func TestJsonSaveWithNoFile(t *testing.T) {
+func TestJSONSaveWithNoFile(t *testing.T) {
 	js := `{
 	js := `{
 		"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } },
 		"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } },
 		"psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}"
 		"psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}"
@@ -507,7 +579,7 @@ func TestJsonSaveWithNoFile(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestLegacyJsonSaveWithNoFile(t *testing.T) {
+func TestLegacyJSONSaveWithNoFile(t *testing.T) {
 
 
 	js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
 	js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
 	config, err := LegacyLoadFromReader(strings.NewReader(js))
 	config, err := LegacyLoadFromReader(strings.NewReader(js))

+ 3 - 1
cliconfig/configfile/file.go

@@ -31,6 +31,7 @@ type ConfigFile struct {
 	StatsFormat          string                      `json:"statsFormat,omitempty"`
 	StatsFormat          string                      `json:"statsFormat,omitempty"`
 	DetachKeys           string                      `json:"detachKeys,omitempty"`
 	DetachKeys           string                      `json:"detachKeys,omitempty"`
 	CredentialsStore     string                      `json:"credsStore,omitempty"`
 	CredentialsStore     string                      `json:"credsStore,omitempty"`
+	CredentialHelpers    map[string]string           `json:"credHelpers,omitempty"`
 	Filename             string                      `json:"-"` // Note: for internal use only
 	Filename             string                      `json:"-"` // Note: for internal use only
 	ServiceInspectFormat string                      `json:"serviceInspectFormat,omitempty"`
 	ServiceInspectFormat string                      `json:"serviceInspectFormat,omitempty"`
 }
 }
@@ -96,7 +97,8 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
 // in this file or not.
 // in this file or not.
 func (configFile *ConfigFile) ContainsAuth() bool {
 func (configFile *ConfigFile) ContainsAuth() bool {
 	return configFile.CredentialsStore != "" ||
 	return configFile.CredentialsStore != "" ||
-		(configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0)
+		len(configFile.CredentialHelpers) > 0 ||
+		len(configFile.AuthConfigs) > 0
 }
 }
 
 
 // SaveToWriter encodes and writes out all the authorization information to
 // SaveToWriter encodes and writes out all the authorization information to
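
`ContainsAuth` now also counts a non-empty `credHelpers` map as configured credentials, not just inline `auths` or a global `credsStore`. A hedged sketch, assuming the `cliconfig/configfile` package at this commit:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/cliconfig/configfile"
)

func main() {
	// Only a per-registry credential helper is configured: no credsStore and
	// no inline auths. After this patch ContainsAuth reports true for it.
	cfg := &configfile.ConfigFile{
		CredentialHelpers: map[string]string{
			"registry.example.com": "example-helper",
		},
	}
	fmt.Println(cfg.ContainsAuth()) // true

	fmt.Println((&configfile.ConfigFile{}).ContainsAuth()) // false
}
```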

+ 2 - 2
cliconfig/credentials/native_store.go

@@ -22,8 +22,8 @@ type nativeStore struct {
 
 
 // NewNativeStore creates a new native store that
 // NewNativeStore creates a new native store that
 // uses a remote helper program to manage credentials.
 // uses a remote helper program to manage credentials.
-func NewNativeStore(file *configfile.ConfigFile) Store {
-	name := remoteCredentialsPrefix + file.CredentialsStore
+func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store {
+	name := remoteCredentialsPrefix + helperSuffix
 	return &nativeStore{
 	return &nativeStore{
 		programFunc: client.NewShellProgramFunc(name),
 		programFunc: client.NewShellProgramFunc(name),
 		fileStore:   NewFileStore(file),
 		fileStore:   NewFileStore(file),
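
`NewNativeStore` now takes the helper name explicitly, so one constructor serves both the global `credsStore` and per-registry `credHelpers` entries; the external program invoked is `docker-credential-<suffix>`. A hedged usage sketch (it assumes the cliconfig packages from this tree, a `docker-credential-secretservice` binary on PATH, and that `Load("")` falls back to the default config directory):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/cliconfig/credentials"
)

func main() {
	configFile, err := cliconfig.Load("")
	if err != nil {
		log.Fatal(err)
	}

	// The suffix selects the external program: here docker-credential-secretservice.
	store := credentials.NewNativeStore(configFile, "secretservice")

	auth, err := store.Get("https://index.docker.io/v1/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("username:", auth.Username)
}
```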

+ 4 - 4
client/container_copy_test.go

@@ -78,10 +78,10 @@ func TestContainerStatPath(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if stat.Name != "name" {
 	if stat.Name != "name" {
-		t.Fatalf("expected container path stat name to be 'name', was '%s'", stat.Name)
+		t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name)
 	}
 	}
 	if stat.Mode != 0700 {
 	if stat.Mode != 0700 {
-		t.Fatalf("expected container path stat mode to be 0700, was '%v'", stat.Mode)
+		t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode)
 	}
 	}
 }
 }
 
 
@@ -226,10 +226,10 @@ func TestCopyFromContainer(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if stat.Name != "name" {
 	if stat.Name != "name" {
-		t.Fatalf("expected container path stat name to be 'name', was '%s'", stat.Name)
+		t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name)
 	}
 	}
 	if stat.Mode != 0700 {
 	if stat.Mode != 0700 {
-		t.Fatalf("expected container path stat mode to be 0700, was '%v'", stat.Mode)
+		t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode)
 	}
 	}
 	content, err := ioutil.ReadAll(r)
 	content, err := ioutil.ReadAll(r)
 	if err != nil {
 	if err != nil {

+ 8 - 2
client/container_prune.go

@@ -5,18 +5,24 @@ import (
 	"fmt"
 	"fmt"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
 // ContainersPrune requests the daemon to delete unused data
 // ContainersPrune requests the daemon to delete unused data
-func (cli *Client) ContainersPrune(ctx context.Context, cfg types.ContainersPruneConfig) (types.ContainersPruneReport, error) {
+func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
 	var report types.ContainersPruneReport
 	var report types.ContainersPruneReport
 
 
 	if err := cli.NewVersionError("1.25", "container prune"); err != nil {
 	if err := cli.NewVersionError("1.25", "container prune"); err != nil {
 		return report, err
 		return report, err
 	}
 	}
 
 
-	serverResp, err := cli.post(ctx, "/containers/prune", nil, cfg, nil)
+	query, err := getFiltersQuery(pruneFilters)
+	if err != nil {
+		return report, err
+	}
+
+	serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
 	if err != nil {
 	if err != nil {
 		return report, err
 		return report, err
 	}
 	}

+ 7 - 4
client/image_build_test.go

@@ -27,6 +27,8 @@ func TestImageBuildError(t *testing.T) {
 }
 }
 
 
 func TestImageBuild(t *testing.T) {
 func TestImageBuild(t *testing.T) {
+	v1 := "value1"
+	v2 := "value2"
 	emptyRegistryConfig := "bnVsbA=="
 	emptyRegistryConfig := "bnVsbA=="
 	buildCases := []struct {
 	buildCases := []struct {
 		buildOptions           types.ImageBuildOptions
 		buildOptions           types.ImageBuildOptions
@@ -105,13 +107,14 @@ func TestImageBuild(t *testing.T) {
 		},
 		},
 		{
 		{
 			buildOptions: types.ImageBuildOptions{
 			buildOptions: types.ImageBuildOptions{
-				BuildArgs: map[string]string{
-					"ARG1": "value1",
-					"ARG2": "value2",
+				BuildArgs: map[string]*string{
+					"ARG1": &v1,
+					"ARG2": &v2,
+					"ARG3": nil,
 				},
 				},
 			},
 			},
 			expectedQueryParams: map[string]string{
 			expectedQueryParams: map[string]string{
-				"buildargs": `{"ARG1":"value1","ARG2":"value2"}`,
+				"buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`,
 				"rm":        "0",
 				"rm":        "0",
 			},
 			},
 			expectedTags:           []string{},
 			expectedTags:           []string{},

+ 8 - 2
client/image_prune.go

@@ -5,18 +5,24 @@ import (
 	"fmt"
 	"fmt"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
 // ImagesPrune requests the daemon to delete unused data
 // ImagesPrune requests the daemon to delete unused data
-func (cli *Client) ImagesPrune(ctx context.Context, cfg types.ImagesPruneConfig) (types.ImagesPruneReport, error) {
+func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) {
 	var report types.ImagesPruneReport
 	var report types.ImagesPruneReport
 
 
 	if err := cli.NewVersionError("1.25", "image prune"); err != nil {
 	if err := cli.NewVersionError("1.25", "image prune"); err != nil {
 		return report, err
 		return report, err
 	}
 	}
 
 
-	serverResp, err := cli.post(ctx, "/images/prune", nil, cfg, nil)
+	query, err := getFiltersQuery(pruneFilters)
+	if err != nil {
+		return report, err
+	}
+
+	serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil)
 	if err != nil {
 	if err != nil {
 		return report, err
 		return report, err
 	}
 	}

+ 4 - 4
client/image_search_test.go

@@ -81,12 +81,12 @@ func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) {
 				}, nil
 				}, nil
 			}
 			}
 			if auth != "IAmValid" {
 			if auth != "IAmValid" {
-				return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth)
+				return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth)
 			}
 			}
 			query := req.URL.Query()
 			query := req.URL.Query()
 			term := query.Get("term")
 			term := query.Get("term")
 			if term != "some-image" {
 			if term != "some-image" {
-				return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "some-image", term)
+				return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term)
 			}
 			}
 			content, err := json.Marshal([]registry.SearchResult{
 			content, err := json.Marshal([]registry.SearchResult{
 				{
 				{
@@ -113,7 +113,7 @@ func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if len(results) != 1 {
 	if len(results) != 1 {
-		t.Fatalf("expected a result, got %v", results)
+		t.Fatalf("expected 1 result, got %v", results)
 	}
 	}
 }
 }
 
 
@@ -133,7 +133,7 @@ func TestImageSearchWithoutErrors(t *testing.T) {
 			query := req.URL.Query()
 			query := req.URL.Query()
 			term := query.Get("term")
 			term := query.Get("term")
 			if term != "some-image" {
 			if term != "some-image" {
-				return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "some-image", term)
+				return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term)
 			}
 			}
 			filters := query.Get("filters")
 			filters := query.Get("filters")
 			if filters != expectedFilters {
 			if filters != expectedFilters {

+ 4 - 4
client/interface.go

@@ -64,7 +64,7 @@ type ContainerAPIClient interface {
 	ContainerWait(ctx context.Context, container string) (int64, error)
 	ContainerWait(ctx context.Context, container string) (int64, error)
 	CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
 	CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
 	CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
 	CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
-	ContainersPrune(ctx context.Context, cfg types.ContainersPruneConfig) (types.ContainersPruneReport, error)
+	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
 }
 }
 
 
 // ImageAPIClient defines API client methods for the images
 // ImageAPIClient defines API client methods for the images
@@ -82,7 +82,7 @@ type ImageAPIClient interface {
 	ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
 	ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
 	ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
 	ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
 	ImageTag(ctx context.Context, image, ref string) error
 	ImageTag(ctx context.Context, image, ref string) error
-	ImagesPrune(ctx context.Context, cfg types.ImagesPruneConfig) (types.ImagesPruneReport, error)
+	ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
 }
 }
 
 
 // NetworkAPIClient defines API client methods for the networks
 // NetworkAPIClient defines API client methods for the networks
@@ -94,7 +94,7 @@ type NetworkAPIClient interface {
 	NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error)
 	NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error)
 	NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
 	NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
 	NetworkRemove(ctx context.Context, networkID string) error
 	NetworkRemove(ctx context.Context, networkID string) error
-	NetworksPrune(ctx context.Context, cfg types.NetworksPruneConfig) (types.NetworksPruneReport, error)
+	NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
 }
 }
 
 
 // NodeAPIClient defines API client methods for the nodes
 // NodeAPIClient defines API client methods for the nodes
@@ -157,7 +157,7 @@ type VolumeAPIClient interface {
 	VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
 	VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
 	VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error)
 	VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error)
 	VolumeRemove(ctx context.Context, volumeID string, force bool) error
 	VolumeRemove(ctx context.Context, volumeID string, force bool) error
-	VolumesPrune(ctx context.Context, cfg types.VolumesPruneConfig) (types.VolumesPruneReport, error)
+	VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
 }
 }
 
 
 // SecretAPIClient defines API client methods for secrets
 // SecretAPIClient defines API client methods for secrets

+ 12 - 2
client/network_prune.go

@@ -5,14 +5,24 @@ import (
 	"fmt"
 	"fmt"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
 // NetworksPrune requests the daemon to delete unused networks
 // NetworksPrune requests the daemon to delete unused networks
-func (cli *Client) NetworksPrune(ctx context.Context, cfg types.NetworksPruneConfig) (types.NetworksPruneReport, error) {
+func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
 	var report types.NetworksPruneReport
 	var report types.NetworksPruneReport
 
 
-	serverResp, err := cli.post(ctx, "/networks/prune", nil, cfg, nil)
+	if err := cli.NewVersionError("1.25", "network prune"); err != nil {
+		return report, err
+	}
+
+	query, err := getFiltersQuery(pruneFilters)
+	if err != nil {
+		return report, err
+	}
+
+	serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
 	if err != nil {
 	if err != nil {
 		return report, err
 		return report, err
 	}
 	}

+ 1 - 1
client/plugin_inspect.go

@@ -12,7 +12,7 @@ import (
 
 
 // PluginInspectWithRaw inspects an existing plugin
 // PluginInspectWithRaw inspects an existing plugin
 func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
 func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
-	resp, err := cli.get(ctx, "/plugins/"+name, nil, nil)
+	resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
 	if err != nil {
 	if err != nil {
 		if resp.statusCode == http.StatusNotFound {
 		if resp.statusCode == http.StatusNotFound {
 			return nil, nil, pluginNotFoundError{name}
 			return nil, nil, pluginNotFoundError{name}

+ 22 - 11
client/plugin_install.go

@@ -14,27 +14,21 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types
 	// FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
 	// FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
 	query := url.Values{}
 	query := url.Values{}
 	query.Set("name", name)
 	query.Set("name", name)
-	resp, err := cli.tryPluginPull(ctx, query, options.RegistryAuth)
+	resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
 	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
 	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
 		newAuthHeader, privilegeErr := options.PrivilegeFunc()
 		newAuthHeader, privilegeErr := options.PrivilegeFunc()
 		if privilegeErr != nil {
 		if privilegeErr != nil {
 			ensureReaderClosed(resp)
 			ensureReaderClosed(resp)
 			return privilegeErr
 			return privilegeErr
 		}
 		}
-		resp, err = cli.tryPluginPull(ctx, query, newAuthHeader)
+		options.RegistryAuth = newAuthHeader
+		resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
 	}
 	}
 	if err != nil {
 	if err != nil {
 		ensureReaderClosed(resp)
 		ensureReaderClosed(resp)
 		return err
 		return err
 	}
 	}
 
 
-	defer func() {
-		if err != nil {
-			delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
-			ensureReaderClosed(delResp)
-		}
-	}()
-
 	var privileges types.PluginPrivileges
 	var privileges types.PluginPrivileges
 	if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
 	if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
 		ensureReaderClosed(resp)
 		ensureReaderClosed(resp)
@@ -52,6 +46,18 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types
 		}
 		}
 	}
 	}
 
 
+	_, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err != nil {
+			delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+			ensureReaderClosed(delResp)
+		}
+	}()
+
 	if len(options.Args) > 0 {
 	if len(options.Args) > 0 {
 		if err := cli.PluginSet(ctx, name, options.Args); err != nil {
 		if err := cli.PluginSet(ctx, name, options.Args); err != nil {
 			return err
 			return err
@@ -65,7 +71,12 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types
 	return cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
 	return cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
 }
 }
 
 
-func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.get(ctx, "/plugins/privileges", query, headers)
+}
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
 	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
 	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.post(ctx, "/plugins/pull", query, nil, headers)
+	return cli.post(ctx, "/plugins/pull", query, privileges, headers)
 }
 }
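
Plugin install is now two round-trips: fetch the privilege list from `/plugins/privileges` (retrying through `PrivilegeFunc` on a 401), then POST the accepted privileges to `/plugins/pull`. Callers of the client see the same surface; a hedged sketch limited to the option fields visible in this diff (`types.PluginInstallOptions` and the plugin name are assumptions for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	err = cli.PluginInstall(context.Background(), "vieux/sshfs", types.PluginInstallOptions{
		RegistryAuth: "", // base64url-encoded auth, if the registry needs it
		// PrivilegeFunc is called when the privileges endpoint answers 401;
		// the returned string is retried as the X-Registry-Auth header.
		PrivilegeFunc: func() (string, error) {
			fmt.Println("re-authenticating for plugin privileges")
			return "", nil
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```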

+ 1 - 1
client/plugin_push_test.go

@@ -35,7 +35,7 @@ func TestPluginPush(t *testing.T) {
 			}
 			}
 			auth := req.Header.Get("X-Registry-Auth")
 			auth := req.Header.Get("X-Registry-Auth")
 			if auth != "authtoken" {
 			if auth != "authtoken" {
-				return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "authtoken", auth)
+				return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth)
 			}
 			}
 			return &http.Response{
 			return &http.Response{
 				StatusCode: http.StatusOK,
 				StatusCode: http.StatusOK,

+ 19 - 1
client/utils.go

@@ -1,6 +1,10 @@
 package client
 package client
 
 
-import "regexp"
+import (
+	"github.com/docker/docker/api/types/filters"
+	"net/url"
+	"regexp"
+)
 
 
 var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
 var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
 
 
@@ -13,3 +17,17 @@ func getDockerOS(serverHeader string) string {
 	}
 	}
 	return osType
 	return osType
 }
 }
+
+// getFiltersQuery returns a url query with "filters" query term, based on the
+// filters provided.
+func getFiltersQuery(f filters.Args) (url.Values, error) {
+	query := url.Values{}
+	if f.Len() > 0 {
+		filterJSON, err := filters.ToParam(f)
+		if err != nil {
+			return query, err
+		}
+		query.Set("filters", filterJSON)
+	}
+	return query, nil
+}
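
`getFiltersQuery` serializes a `filters.Args` into the same `filters` query parameter the list endpoints already use, and adds nothing when the `Args` is empty. Since the helper is unexported, the sketch below repeats its steps inline to show roughly what ends up on the wire:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	f := filters.NewArgs()
	f.Add("dangling", "true")
	f.Add("label", "env=prod")

	// Same steps getFiltersQuery performs inside the client package.
	query := url.Values{}
	if f.Len() > 0 {
		filterJSON, err := filters.ToParam(f)
		if err != nil {
			log.Fatal(err)
		}
		query.Set("filters", filterJSON)
	}
	// Roughly: filters=%7B%22dangling%22%3A%7B%22true%22%3Atrue%7D%2C...%7D
	fmt.Println(query.Encode())
}
```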

+ 8 - 2
client/volume_prune.go

@@ -5,18 +5,24 @@ import (
 	"fmt"
 	"fmt"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
 // VolumesPrune requests the daemon to delete unused data
 // VolumesPrune requests the daemon to delete unused data
-func (cli *Client) VolumesPrune(ctx context.Context, cfg types.VolumesPruneConfig) (types.VolumesPruneReport, error) {
+func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
 	var report types.VolumesPruneReport
 	var report types.VolumesPruneReport
 
 
 	if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
 	if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
 		return report, err
 		return report, err
 	}
 	}
 
 
-	serverResp, err := cli.post(ctx, "/volumes/prune", nil, cfg, nil)
+	query, err := getFiltersQuery(pruneFilters)
+	if err != nil {
+		return report, err
+	}
+
+	serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
 	if err != nil {
 	if err != nil {
 		return report, err
 		return report, err
 	}
 	}

+ 2 - 1
contrib/check-config.sh

@@ -224,7 +224,8 @@ echo 'Optional Features:'
 }
 }
 {
 {
 	if is_set LEGACY_VSYSCALL_NATIVE; then
 	if is_set LEGACY_VSYSCALL_NATIVE; then
-		echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
+		echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
+		echo "    $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
 	elif is_set LEGACY_VSYSCALL_EMULATE; then
 	elif is_set LEGACY_VSYSCALL_EMULATE; then
 		echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
 		echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
 	elif is_set LEGACY_VSYSCALL_NONE; then
 	elif is_set LEGACY_VSYSCALL_NONE; then

+ 135 - 4
contrib/completion/bash/docker

@@ -23,6 +23,7 @@
 # DOCKER_COMPLETION_SHOW_CONTAINER_IDS
 # DOCKER_COMPLETION_SHOW_CONTAINER_IDS
 # DOCKER_COMPLETION_SHOW_NETWORK_IDS
 # DOCKER_COMPLETION_SHOW_NETWORK_IDS
 # DOCKER_COMPLETION_SHOW_NODE_IDS
 # DOCKER_COMPLETION_SHOW_NODE_IDS
+# DOCKER_COMPLETION_SHOW_SECRET_IDS
 # DOCKER_COMPLETION_SHOW_SERVICE_IDS
 # DOCKER_COMPLETION_SHOW_SERVICE_IDS
 #   "no"  - Show names only (default)
 #   "no"  - Show names only (default)
 #   "yes" - Show names and ids
 #   "yes" - Show names and ids
@@ -311,6 +312,22 @@ __docker_complete_runtimes() {
 	COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") )
 	COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") )
 }
 }
 
 
+# __docker_secrets returns a list of all secrets.
+# By default, only names of secrets are returned.
+# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs of secrets.
+__docker_secrets() {
+	local fields='$2'  # default: name only
+	[ "${DOCKER_COMPLETION_SHOW_SECRET_IDS}" = yes ] && fields='$1,$2' # ID and name
+
+	__docker_q secret ls | awk "NR>1 {print $fields}"
+}
+
+# __docker_complete_secrets applies completion of secrets based on the current value
+# of `$cur`.
+__docker_complete_secrets() {
+	COMPREPLY=( $(compgen -W "$(__docker_secrets)" -- "$cur") )
+}
+
 # __docker_stacks returns a list of all stacks.
 # __docker_stacks returns a list of all stacks.
 __docker_stacks() {
 __docker_stacks() {
 	__docker_q stack ls | awk 'NR>1 {print $1}'
 	__docker_q stack ls | awk 'NR>1 {print $1}'
@@ -2276,7 +2293,7 @@ _docker_inspect() {
 			;;
 			;;
 		--type)
 		--type)
 			if [ -z "$preselected_type" ] ; then
 			if [ -z "$preselected_type" ] ; then
-				COMPREPLY=( $( compgen -W "container image network node service volume" -- "$cur" ) )
+				COMPREPLY=( $( compgen -W "container image network node plugin service volume" -- "$cur" ) )
 				return
 				return
 			fi
 			fi
 			;;
 			;;
@@ -2298,6 +2315,7 @@ _docker_inspect() {
 						$(__docker_images)
 						$(__docker_images)
 						$(__docker_networks)
 						$(__docker_networks)
 						$(__docker_nodes)
 						$(__docker_nodes)
+						$(__docker_plugins_installed)
 						$(__docker_services)
 						$(__docker_services)
 						$(__docker_volumes)
 						$(__docker_volumes)
 					" -- "$cur" ) )
 					" -- "$cur" ) )
@@ -2314,6 +2332,9 @@ _docker_inspect() {
 				node)
 				node)
 					__docker_complete_nodes
 					__docker_complete_nodes
 					;;
 					;;
+				plugin)
+					__docker_complete_plugins_installed
+					;;
 				service)
 				service)
 					__docker_complete_services
 					__docker_complete_services
 					;;
 					;;
@@ -2425,7 +2446,7 @@ _docker_network_create() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
@@ -2736,6 +2757,7 @@ _docker_service_update() {
 			--mode
 			--mode
 			--name
 			--name
 			--port
 			--port
+			--secret
 		"
 		"
 
 
 		case "$prev" in
 		case "$prev" in
@@ -2755,6 +2777,10 @@ _docker_service_update() {
 				COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) )
 				COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) )
 				return
 				return
 				;;
 				;;
+			--secret)
+				__docker_complete_secrets
+				return
+				;;
 			--group)
 			--group)
 			COMPREPLY=( $(compgen -g -- "$cur") )
 			COMPREPLY=( $(compgen -g -- "$cur") )
 			return
 			return
@@ -2779,6 +2805,8 @@ _docker_service_update() {
 			--image
 			--image
 			--port-add
 			--port-add
 			--port-rm
 			--port-rm
+			--secret-add
+			--secret-rm
 		"
 		"
 
 
 		case "$prev" in
 		case "$prev" in
@@ -2802,6 +2830,10 @@ _docker_service_update() {
 				__docker_complete_image_repos_and_tags
 				__docker_complete_image_repos_and_tags
 				return
 				return
 				;;
 				;;
+			--secret-add|--secret-rm)
+				__docker_complete_secrets
+				return
+				;;
 		esac
 		esac
 	fi
 	fi
 
 
@@ -2846,9 +2878,17 @@ _docker_service_update() {
 			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
 			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
+			local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) )
 			if [ "$subcommand" = "update" ] ; then
 			if [ "$subcommand" = "update" ] ; then
-				__docker_complete_services
+				if [ $cword -eq $counter ]; then
+					__docker_complete_services
+				fi
+			else
+				if [ $cword -eq $counter ]; then
+					__docker_complete_images
+				fi
 			fi
 			fi
+			;;
 	esac
 	esac
 }
 }
 
 
@@ -3329,6 +3369,90 @@ _docker_save() {
 	_docker_image_save
 	_docker_image_save
 }
 }
 
 
+
+_docker_secret() {
+	local subcommands="
+		create
+		inspect
+		ls
+		rm
+	"
+	local aliases="
+		list
+		remove
+	"
+	__docker_subcommands "$subcommands $aliases" && return
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
+			;;
+	esac
+}
+
+_docker_secret_create() {
+	case "$prev" in
+		--label|-l)
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) )
+			;;
+	esac
+}
+
+_docker_secret_inspect() {
+	case "$prev" in
+		--format|-f)
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) )
+			;;
+		*)
+			__docker_complete_secrets
+			;;
+	esac
+}
+
+_docker_secret_list() {
+	_docker_secret_ls
+}
+
+_docker_secret_ls() {
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) )
+			;;
+	esac
+}
+
+_docker_secret_remove() {
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			__docker_complete_secrets
+			;;
+	esac
+}
+
+_docker_secret_rm() {
+	_docker_secret_remove
+}
+
+
+
 _docker_search() {
 _docker_search() {
 	local key=$(__docker_map_key_of_current_option '--filter|-f')
 	local key=$(__docker_map_key_of_current_option '--filter|-f')
 	case "$key" in
 	case "$key" in
@@ -3690,9 +3814,15 @@ _docker_top() {
 }
 }
 
 
 _docker_version() {
 _docker_version() {
+	case "$prev" in
+		--format|-f)
+			return
+			;;
+	esac
+
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
@@ -3852,6 +3982,7 @@ _docker() {
 		run
 		run
 		save
 		save
 		search
 		search
+		secret
 		service
 		service
 		stack
 		stack
 		start
 		start

+ 38 - 19
contrib/completion/zsh/_docker

@@ -348,14 +348,14 @@ __docker_complete_ps_filters() {
                 __docker_complete_containers_names && ret=0
                 __docker_complete_containers_names && ret=0
                 ;;
                 ;;
             (network)
             (network)
-                __docker_networks && ret=0
+                __docker_complete_networks && ret=0
                 ;;
                 ;;
             (status)
             (status)
                 status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing')
                 status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing')
                 _describe -t status-filter-opts "status filter options" status_opts && ret=0
                 _describe -t status-filter-opts "status filter options" status_opts && ret=0
                 ;;
                 ;;
             (volume)
             (volume)
-                __docker_volumes && ret=0
+                __docker_complete_volumes && ret=0
                 ;;
                 ;;
             *)
             *)
                 _message 'value' && ret=0
                 _message 'value' && ret=0
@@ -453,7 +453,7 @@ __docker_complete_events_filter() {
                 __docker_complete_images && ret=0
                 __docker_complete_images && ret=0
                 ;;
                 ;;
             (network)
             (network)
-                __docker_networks && ret=0
+                __docker_complete_networks && ret=0
                 ;;
                 ;;
             (type)
                 local -a type_opts
@@ -461,7 +461,7 @@ __docker_complete_events_filter() {
                 _describe -t type-filter-opts "type filter options" type_opts && ret=0
                 ;;
             (volume)
-                __docker_volumes && ret=0
+                __docker_complete_volumes && ret=0
                 ;;
             *)
                 _message 'value' && ret=0
@@ -1033,10 +1033,10 @@ __docker_network_complete_ls_filters() {
                 __docker_complete_info_plugins Network && ret=0
                 ;;
             (id)
-                __docker_networks_ids && ret=0
+                __docker_complete_networks_ids && ret=0
                 ;;
             (name)
-                __docker_networks_names && ret=0
+                __docker_complete_networks_names && ret=0
                 ;;
             (type)
                 type_opts=('builtin' 'custom')
@@ -1082,6 +1082,7 @@ __docker_get_networks() {
         for line in $lines; do
             s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}"
             s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}"
+            s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}"
             networks=($networks $s)
         done
     fi
@@ -1091,6 +1092,7 @@ __docker_get_networks() {
         for line in $lines; do
             s="${line[${begin[NAME]},${end[NAME]}]%% ##}"
             s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}"
+            s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}"
             networks=($networks $s)
         done
     fi
@@ -1099,17 +1101,17 @@ __docker_get_networks() {
     return ret
 }
 
-__docker_networks() {
+__docker_complete_networks() {
     [[ $PREFIX = -* ]] && return 1
     __docker_get_networks all "$@"
 }
 
-__docker_networks_ids() {
+__docker_complete_networks_ids() {
     [[ $PREFIX = -* ]] && return 1
     __docker_get_networks ids "$@"
 }
 
-__docker_networks_names() {
+__docker_complete_networks_names() {
     [[ $PREFIX = -* ]] && return 1
     __docker_get_networks names "$@"
 }
@@ -1144,7 +1146,7 @@ __docker_network_subcommand() {
                 "($help)--ip6=[Container IPv6 address]:IPv6: " \
                 "($help)*--link=[Add a link to another container]:link:->link" \
                 "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \
-                "($help -)1:network:__docker_networks" \
+                "($help -)1:network:__docker_complete_networks" \
                 "($help -)2:containers:__docker_complete_containers" && ret=0
 
             case $state in
@@ -1160,6 +1162,7 @@ __docker_network_subcommand() {
         (create)
             _arguments $(__docker_arguments) -A '-*' \
                 $opts_help \
+                "($help)--attachable[Enable manual container attachment]" \
                 "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \
                 "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \
                 "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \
@@ -1176,14 +1179,14 @@ __docker_network_subcommand() {
         (disconnect)
             _arguments $(__docker_arguments) \
                 $opts_help \
-                "($help -)1:network:__docker_networks" \
+                "($help -)1:network:__docker_complete_networks" \
                 "($help -)2:containers:__docker_complete_containers" && ret=0
             ;;
         (inspect)
             _arguments $(__docker_arguments) \
                 $opts_help \
                 "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
-                "($help -)*:network:__docker_networks" && ret=0
+                "($help -)*:network:__docker_complete_networks" && ret=0
             ;;
         (ls)
             _arguments $(__docker_arguments) \
@@ -1206,7 +1209,7 @@ __docker_network_subcommand() {
         (rm)
             _arguments $(__docker_arguments) \
                 $opts_help \
-                "($help -)*:network:__docker_networks" && ret=0
+                "($help -)*:network:__docker_complete_networks" && ret=0
             ;;
         (help)
             _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0
@@ -2167,7 +2170,7 @@ __docker_volume_complete_ls_filters() {
                 __docker_complete_info_plugins Volume && ret=0
                 ;;
             (name)
-                __docker_volumes && ret=0
+                __docker_complete_volumes && ret=0
                 ;;
             *)
                 _message 'value' && ret=0
@@ -2181,7 +2184,7 @@ __docker_volume_complete_ls_filters() {
     return ret
 }
 
-__docker_volumes() {
+__docker_complete_volumes() {
     [[ $PREFIX = -* ]] && return 1
     integer ret=1
     declare -a lines volumes
@@ -2245,7 +2248,7 @@ __docker_volume_subcommand() {
             _arguments $(__docker_arguments) \
                 $opts_help \
                 "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
-                "($help -)1:volume:__docker_volumes" && ret=0
+                "($help -)1:volume:__docker_complete_volumes" && ret=0
             ;;
         (ls)
             _arguments $(__docker_arguments) \
@@ -2268,7 +2271,7 @@ __docker_volume_subcommand() {
             _arguments $(__docker_arguments) \
                 $opts_help \
                 "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \
-                "($help -):volume:__docker_volumes" && ret=0
+                "($help -):volume:__docker_complete_volumes" && ret=0
             ;;
         (help)
             _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0
@@ -2457,7 +2460,7 @@ __docker_subcommand() {
                 $opts_help \
                 "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \
                 "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \
-                "($help)--type=[Return JSON for specified type]:type:(image container)" \
+                "($help)--type=[Return JSON for specified type]:type:(container image network node plugin service volume)" \
                 "($help -)*: :->values" && ret=0

            case $state in
@@ -2466,8 +2469,24 @@ __docker_subcommand() {
                        __docker_complete_containers && ret=0
                    elif [[ ${words[(r)--type=image]} == --type=image ]]; then
                        __docker_complete_images && ret=0
+                    elif [[ ${words[(r)--type=network]} == --type=network ]]; then
+                        __docker_complete_networks && ret=0
+                    elif [[ ${words[(r)--type=node]} == --type=node ]]; then
+                        __docker_complete_nodes && ret=0
+                    elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then
+                        __docker_complete_plugins && ret=0
+                    elif [[ ${words[(r)--type=service]} == --type=service ]]; then
+                        __docker_complete_services && ret=0
+                    elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then
+                        __docker_complete_volumes && ret=0
                    else
-                        __docker_complete_images && __docker_complete_containers && ret=0
+                        __docker_complete_containers
+                        __docker_complete_images
+                        __docker_complete_networks
+                        __docker_complete_nodes
+                        __docker_complete_plugins
+                        __docker_complete_services
+                        __docker_complete_volumes && ret=0
                    fi
                    ;;
            esac

+ 17 - 11
daemon/apparmor_default.go

@@ -3,7 +3,8 @@
 package daemon
 
 import (
-	"github.com/Sirupsen/logrus"
+	"fmt"
+
 	aaprofile "github.com/docker/docker/profiles/apparmor"
 	"github.com/opencontainers/runc/libcontainer/apparmor"
 )
@@ -13,18 +14,23 @@ const (
 	defaultApparmorProfile = "docker-default"
 )
 
-func installDefaultAppArmorProfile() {
+func ensureDefaultAppArmorProfile() error {
 	if apparmor.IsEnabled() {
+		loaded, err := aaprofile.IsLoaded(defaultApparmorProfile)
+		if err != nil {
+			return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err)
+		}
+
+		// Nothing to do.
+		if loaded {
+			return nil
+		}
+
+		// Load the profile.
 		if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil {
-			apparmorProfiles := []string{defaultApparmorProfile}
-
-			// Allow daemon to run if loading failed, but are active
-			// (possibly through another run, manually, or via system startup)
-			for _, policy := range apparmorProfiles {
-				if err := aaprofile.IsLoaded(policy); err != nil {
-					logrus.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy)
-				}
-			}
+			return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile)
 		}
 	}
+
+	return nil
 }

+ 2 - 1
daemon/apparmor_default_unsupported.go

@@ -2,5 +2,6 @@
 
 package daemon
 
-func installDefaultAppArmorProfile() {
+func ensureDefaultAppArmorProfile() error {
+	return nil
 }

+ 2 - 1
daemon/cluster/executor/container/container.go

@@ -566,7 +566,8 @@ func (c *containerConfig) networkCreateRequest(name string) (clustertypes.Networ
 		// ID:     na.Network.ID,
 		Driver: na.Network.DriverState.Name,
 		IPAM: &network.IPAM{
-			Driver: na.Network.IPAM.Driver.Name,
+			Driver:  na.Network.IPAM.Driver.Name,
+			Options: na.Network.IPAM.Driver.Options,
 		},
 		Options:        na.Network.DriverState.Options,
 		Labels:         na.Network.Spec.Annotations.Labels,

+ 12 - 2
daemon/container.go

@@ -2,6 +2,7 @@ package daemon
 
 import (
 	"fmt"
+	"os"
 	"path/filepath"
 	"time"
 
@@ -101,7 +102,7 @@ func (daemon *Daemon) Register(c *container.Container) error {
 	return nil
 }
 
-func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID, managed bool) (*container.Container, error) {
+func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {
 	var (
 		id             string
 		err            error
@@ -112,7 +113,16 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, i
 		return nil, err
 	}
 
-	daemon.generateHostname(id, config)
+	if hostConfig.NetworkMode.IsHost() {
+		if config.Hostname == "" {
+			config.Hostname, err = os.Hostname()
+			if err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		daemon.generateHostname(id, config)
+	}
 	entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
 
 	base := daemon.newBaseContainer(id)

+ 5 - 3
daemon/container_operations.go

@@ -851,9 +851,11 @@ func (daemon *Daemon) initializeNetworking(container *container.Container) error
 	}
 
 	if container.HostConfig.NetworkMode.IsHost() {
-		container.Config.Hostname, err = os.Hostname()
-		if err != nil {
-			return err
+		if container.Config.Hostname == "" {
+			container.Config.Hostname, err = os.Hostname()
+			if err != nil {
+				return err
+			}
 		}
 	}
 
 

+ 1 - 1
daemon/create.go

@@ -96,7 +96,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
 		return nil, err
 	}
 
-	if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil {
+	if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil {
 		return nil, err
 	}
 	defer func() {

+ 4 - 1
daemon/daemon.go

@@ -524,7 +524,10 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot
 		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
 	}
 
-	installDefaultAppArmorProfile()
+	if err := ensureDefaultAppArmorProfile(); err != nil {
+		logrus.Errorf(err.Error())
+	}
+
 	daemonRepo := filepath.Join(config.Root, "containers")
 	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
 		return nil, err

+ 3 - 3
daemon/graphdriver/plugin.go

@@ -22,10 +22,10 @@ func lookupPlugin(name, home string, opts []string, pg plugingetter.PluginGetter
 	if err != nil {
 		return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
 	}
-	return newPluginDriver(name, home, opts, pl.Client())
+	return newPluginDriver(name, home, opts, pl)
 }
 
-func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
-	proxy := &graphDriverProxy{name, c}
+func newPluginDriver(name, home string, opts []string, pl plugingetter.CompatPlugin) (Driver, error) {
+	proxy := &graphDriverProxy{name, pl.Client(), pl}
 	return proxy, proxy.Init(filepath.Join(home, name), opts)
 }

+ 15 - 0
daemon/graphdriver/proxy.go

@@ -6,11 +6,13 @@ import (
 	"io"
 
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/plugingetter"
 )
 
 type graphDriverProxy struct {
 	name   string
 	client pluginClient
+	p      plugingetter.CompatPlugin
 }
 
 type graphDriverRequest struct {
@@ -35,6 +37,12 @@ type graphDriverInitRequest struct {
 }
 
 func (d *graphDriverProxy) Init(home string, opts []string) error {
+	if !d.p.IsV1() {
+		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
+			// always acquire here, it will be cleaned up on daemon shutdown
+			cp.Acquire()
+		}
+	}
 	args := &graphDriverInitRequest{
 		Home: home,
 		Opts: opts,
@@ -167,6 +175,13 @@ func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
 }
 
 func (d *graphDriverProxy) Cleanup() error {
+	if !d.p.IsV1() {
+		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
+			// always release
+			defer cp.Release()
+		}
+	}
+
 	args := &graphDriverRequest{}
 	var ret graphDriverResponse
 	if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {

+ 17 - 2
daemon/oci_linux.go

@@ -733,12 +733,27 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
 	}
 
 	if apparmor.IsEnabled() {
-		appArmorProfile := "docker-default"
-		if len(c.AppArmorProfile) > 0 {
+		var appArmorProfile string
+		if c.AppArmorProfile != "" {
 			appArmorProfile = c.AppArmorProfile
 		} else if c.HostConfig.Privileged {
 			appArmorProfile = "unconfined"
+		} else {
+			appArmorProfile = "docker-default"
+		}
+
+		if appArmorProfile == "docker-default" {
+			// Unattended upgrades and other fun services can unload AppArmor
+			// profiles inadvertently. Since we cannot store our profile in
+			// /etc/apparmor.d, nor can we practically add other ways of
+			// telling the system to keep our profile loaded, in order to make
+			// sure that we keep the default profile enabled we dynamically
+			// reload it if necessary.
+			if err := ensureDefaultAppArmorProfile(); err != nil {
+				return nil, err
+			}
 		}
+
 		s.Process.ApparmorProfile = appArmorProfile
 	}
 	s.Process.SelinuxLabel = c.GetProcessLabel()

+ 21 - 10
daemon/prune.go

@@ -1,11 +1,13 @@
 package daemon
 
 import (
+	"fmt"
 	"regexp"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/directory"
@@ -16,7 +18,7 @@ import (
 )
 
 // ContainersPrune removes unused containers
-func (daemon *Daemon) ContainersPrune(config *types.ContainersPruneConfig) (*types.ContainersPruneReport, error) {
+func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) {
 	rep := &types.ContainersPruneReport{}
 
 	allContainers := daemon.List()
@@ -40,7 +42,7 @@ func (daemon *Daemon) ContainersPrune(config *types.ContainersPruneConfig) (*typ
 }
 
 // VolumesPrune removes unused local volumes
-func (daemon *Daemon) VolumesPrune(config *types.VolumesPruneConfig) (*types.VolumesPruneReport, error) {
+func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) {
 	rep := &types.VolumesPruneReport{}
 
 	pruneVols := func(v volume.Volume) error {
@@ -70,11 +72,20 @@ func (daemon *Daemon) VolumesPrune(config *types.VolumesPruneConfig) (*types.Vol
 }
 
 // ImagesPrune removes unused images
-func (daemon *Daemon) ImagesPrune(config *types.ImagesPruneConfig) (*types.ImagesPruneReport, error) {
+func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
 	rep := &types.ImagesPruneReport{}
 
+	danglingOnly := true
+	if pruneFilters.Include("dangling") {
+		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
+			danglingOnly = false
+		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
+			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling"))
+		}
+	}
+
 	var allImages map[image.ID]*image.Image
-	if config.DanglingOnly {
+	if danglingOnly {
 		allImages = daemon.imageStore.Heads()
 	} else {
 		allImages = daemon.imageStore.Map()
@@ -106,7 +117,7 @@ func (daemon *Daemon) ImagesPrune(config *types.ImagesPruneConfig) (*types.Image
 		deletedImages := []types.ImageDelete{}
 		refs := daemon.referenceStore.References(dgst)
 		if len(refs) > 0 {
-			if config.DanglingOnly {
+			if danglingOnly {
 				// Not a dangling image
 				continue
 			}
@@ -156,7 +167,7 @@ func (daemon *Daemon) ImagesPrune(config *types.ImagesPruneConfig) (*types.Image
 }
 
 // localNetworksPrune removes unused local networks
-func (daemon *Daemon) localNetworksPrune(config *types.NetworksPruneConfig) (*types.NetworksPruneReport, error) {
+func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) {
 	rep := &types.NetworksPruneReport{}
 	var err error
 	// When the function returns true, the walk will stop.
@@ -177,7 +188,7 @@ func (daemon *Daemon) localNetworksPrune(config *types.NetworksPruneConfig) (*ty
 }
 
 // clusterNetworksPrune removes unused cluster networks
-func (daemon *Daemon) clusterNetworksPrune(config *types.NetworksPruneConfig) (*types.NetworksPruneReport, error) {
+func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) {
 	rep := &types.NetworksPruneReport{}
 	cluster := daemon.GetCluster()
 	networks, err := cluster.GetNetworks()
@@ -207,15 +218,15 @@ func (daemon *Daemon) clusterNetworksPrune(config *types.NetworksPruneConfig) (*
 }
 
 // NetworksPrune removes unused networks
-func (daemon *Daemon) NetworksPrune(config *types.NetworksPruneConfig) (*types.NetworksPruneReport, error) {
+func (daemon *Daemon) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) {
 	rep := &types.NetworksPruneReport{}
-	clusterRep, err := daemon.clusterNetworksPrune(config)
+	clusterRep, err := daemon.clusterNetworksPrune(pruneFilters)
 	if err != nil {
 		logrus.Warnf("could not remove cluster networks: %v", err)
 	} else {
 		rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...)
 	}
-	localRep, err := daemon.localNetworksPrune(config)
+	localRep, err := daemon.localNetworksPrune(pruneFilters)
 	if err != nil {
 		logrus.Warnf("could not remove local networks: %v", err)
 	} else {
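
For illustration only, a minimal client-side sketch of driving the new filter-based prune API. It assumes the Go client and `filters` packages vendored in this release; the `dangling` filter mirrors the daemon-side parsing above.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Ask the daemon to prune only dangling (untagged) images,
	// matching the default the daemon applies when no filter is given.
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("dangling", "true")

	report, err := cli.ImagesPrune(context.Background(), pruneFilters)
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d images, reclaimed %d bytes\n",
		len(report.ImagesDeleted), report.SpaceReclaimed)
}
```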

+ 3 - 2
daemon/start.go

@@ -119,6 +119,9 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
 				container.SetExitCode(128)
 			}
 			container.ToDisk()
+
+			container.Reset(false)
+
 			daemon.Cleanup(container)
 			// if containers AutoRemove flag is set, remove it after clean up
 			if container.HostConfig.AutoRemove {
@@ -187,8 +190,6 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
 			container.SetExitCode(127)
 		}
 
-		container.Reset(false)
-
 		return fmt.Errorf("%s", errDesc)
 	}
 
 

+ 8 - 4
distribution/errors.go

@@ -60,21 +60,25 @@ func shouldV2Fallback(err errcode.Error) bool {
 	return false
 }
 
-func translatePullError(err error, ref reference.Named) error {
+// TranslatePullError is used to convert an error from a registry pull
+// operation to an error representing the entire pull operation. Any error
+// information which is not used by the returned error gets output to
+// log at info level.
+func TranslatePullError(err error, ref reference.Named) error {
 	switch v := err.(type) {
 	case errcode.Errors:
 		if len(v) != 0 {
 			for _, extra := range v[1:] {
 				logrus.Infof("Ignoring extra error returned from registry: %v", extra)
 			}
-			return translatePullError(v[0], ref)
+			return TranslatePullError(v[0], ref)
 		}
 	case errcode.Error:
 		var newErr error
 		switch v.Code {
 		case errcode.ErrorCodeDenied:
 			// ErrorCodeDenied is used when access to the repository was denied
-			newErr = errors.Errorf("repository %s not found: does not exist or no read access", ref.Name())
+			newErr = errors.Errorf("repository %s not found: does not exist or no pull access", ref.Name())
 		case v2.ErrorCodeManifestUnknown:
 			newErr = errors.Errorf("manifest for %s not found", ref.String())
 		case v2.ErrorCodeNameUnknown:
@@ -85,7 +89,7 @@ func translatePullError(err error, ref reference.Named) error {
 			return newErr
 		}
 	case xfer.DoNotRetry:
-		return translatePullError(v.Err, ref)
+		return TranslatePullError(v.Err, ref)
 	}
 
 	return err

+ 2 - 2
distribution/pull.go

@@ -168,7 +168,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
 				continue
 			}
 			logrus.Errorf("Not continuing with pull after error: %v", err)
-			return translatePullError(err, ref)
+			return TranslatePullError(err, ref)
 		}
 
 		imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull")
@@ -179,7 +179,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
 		lastErr = fmt.Errorf("no endpoints found for %s", ref.String())
 	}
 
-	return translatePullError(lastErr, ref)
+	return TranslatePullError(lastErr, ref)
 }
 
 // writeStatus writes a status message to out. If layersDownloaded is true, the

+ 1 - 2
distribution/pull_v2.go

@@ -355,8 +355,7 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
 	}
 
 	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
-		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig ||
-			m.Manifest.Config.MediaType == "application/vnd.docker.plugin.image.v0+json" { //TODO: remove this v0 before 1.13 GA
+		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig {
 			return false, errMediaTypePlugin
 		}
 	}

+ 12 - 7
distribution/registry.go

@@ -70,17 +70,22 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end
 		passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
 		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
 	} else {
+		scope := auth.RepositoryScope{
+			Repository: repoName,
+			Actions:    actions,
+		}
+
+		// Keep image repositories blank for scope compatibility
+		if repoInfo.Class != "image" {
+			scope.Class = repoInfo.Class
+		}
+
 		creds := registry.NewStaticCredentialStore(authConfig)
 		tokenHandlerOptions := auth.TokenHandlerOptions{
 			Transport:   authTransport,
 			Credentials: creds,
-			Scopes: []auth.Scope{
-				auth.RepositoryScope{
-					Repository: repoName,
-					Actions:    actions,
-				},
-			},
-			ClientID: registry.AuthClientID,
+			Scopes:      []auth.Scope{scope},
+			ClientID:    registry.AuthClientID,
 		}
 		tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
 		basicHandler := auth.NewBasicHandler(creds)
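
A hedged illustration of what setting `Class` changes in the requested token scope, assuming the vendored `docker/distribution` auth package renders the class in parentheses when it is set; the repository names are only examples.

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	// Image repositories keep the legacy scope string (Class left blank)...
	imageScope := auth.RepositoryScope{Repository: "library/redis", Actions: []string{"pull"}}
	// ...while non-image repositories, such as plugins, carry their class explicitly.
	pluginScope := auth.RepositoryScope{Repository: "vieux/sshfs", Class: "plugin", Actions: []string{"pull"}}

	fmt.Println(imageScope.String())  // e.g. repository:library/redis:pull
	fmt.Println(pluginScope.String()) // e.g. repository(plugin):vieux/sshfs:pull
}
```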

+ 2 - 1
docs/api/v1.18.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST, but for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `STDOUT`,
   `STDIN` and `STDERR`.
@@ -1294,6 +1294,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error
 
 
 
 

+ 2 - 1
docs/api/v1.19.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin` and `stderr`.
@@ -1340,6 +1340,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error
 
 
 
 

+ 2 - 1
docs/api/v1.20.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin` and `stderr`.
@@ -1494,6 +1494,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error
 
 
 
 

+ 2 - 1
docs/api/v1.21.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin` and `stderr`.
@@ -1587,6 +1587,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error
 
 
 
 

+ 2 - 1
docs/api/v1.22.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin` and `stderr`.
@@ -1785,6 +1785,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error
 
 
 
 

+ 2 - 1
docs/api/v1.23.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin` and `stderr`.
@@ -1821,6 +1821,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error
 
 
 
 

+ 9 - 1
docs/api/v1.24.md

@@ -19,7 +19,7 @@ redirect_from:
 ## 1. Brief introduction
 
  - The daemon listens on `unix:///var/run/docker.sock` but you can
-   [Bind Docker to another host/port or a Unix socket](../commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
 - The API tends to be REST. However, for some complex commands, like `attach`
   or `pull`, the HTTP connection is hijacked to transport `stdout`,
   `stdin` and `stderr`.
@@ -1818,6 +1818,7 @@ a base64-encoded AuthConfig object.
 **Status codes**:
 
 -   **200** – no error
+-   **404** - repository does not exist or no read access
 -   **500** – server error


@@ -3319,6 +3320,7 @@ Content-Type: application/json
 **Status codes**:
 
 - **201** - no error
+- **403** - operation not supported for pre-defined networks
 - **404** - plugin not found
 - **500** - server error
 
@@ -4561,6 +4563,11 @@ image](#create-an-image) section for more details.
           ],
           "User": "33"
        },
+        "Networks": [
+            {
+              "Target": "overlay1"
+            }
+        ],
        "LogDriver": {
          "Name": "json-file",
          "Options": {
@@ -4618,6 +4625,7 @@ image](#create-an-image) section for more details.
 **Status codes**:
 
 -   **201** – no error
+-   **403** - network is not eligible for services
 -   **406** – node is not part of a swarm
 -   **409** – name conflicts with an existing object
 -   **500** - server error
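
As a rough sketch of the newly documented `Networks` target on service creation, here is how it might be set through the Go client and swarm types of this release; the service and network names are purely illustrative.

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	spec := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{Image: "redis:3.0.6"},
			// Attach the service's tasks to an existing overlay network,
			// mirroring the "Networks": [{"Target": "overlay1"}] example above.
			Networks: []swarm.NetworkAttachmentConfig{{Target: "overlay1"}},
		},
	}
	spec.Name = "redis"

	if _, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{}); err != nil {
		panic(err)
	}
}
```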

+ 32 - 12
docs/deprecated.md

@@ -20,29 +20,49 @@ The following list of features are deprecated in Engine.
 To learn more about Docker Engine's deprecation policy,
 see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
 
+
+### Top-level network properties in NetworkSettings
+
+**Deprecated In Release: v1.13.0**
+
+**Target For Removal In Release: v1.16**
+
+When inspecting a container, `NetworkSettings` contains top-level information
+about the default ("bridge") network;
+
+`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`,
+`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`.
+
+These properties are deprecated in favor of per-network properties in
+`NetworkSettings.Networks`. These properties were already "deprecated" in
+docker 1.9, but kept around for backward compatibility.
+
+Refer to [#17538](https://github.com/docker/docker/pull/17538) for further
+information.
+
 ## `filter` param for `/images/json` endpoint
-**Deprecated In Release: [v1.13](https://github.com/docker/docker/releases/tag/v1.13.0)**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 **Target For Removal In Release: v1.16**
 
 The `filter` param to filter the list of image by reference (name or name:tag) is now implemented as a regular filter, named `reference`.
 
 ### `repository:shortid` image references
-**Deprecated In Release: [v1.13](https://github.com/docker/docker/releases/tag/v1.13.0)**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 **Target For Removal In Release: v1.16**
 
 `repository:shortid` syntax for referencing images is very little used, collides with tag references can be confused with digest references.
 
 ### `docker daemon` subcommand
-**Deprecated In Release: [v1.13](https://github.com/docker/docker/releases/tag/v1.13.0)**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 **Target For Removal In Release: v1.16**
 
 The daemon is moved to a separate binary (`dockerd`), and should be used instead.
 
 ### Duplicate keys with conflicting values in engine labels
-**Deprecated In Release: [v1.13](https://github.com/docker/docker/releases/tag/v1.13.0)**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 **Target For Removal In Release: v1.16**
 
@@ -50,12 +70,12 @@ Duplicate keys with conflicting values have been deprecated. A warning is displa
 in the output, and an error will be returned in the future.
 
 ### `MAINTAINER` in Dockerfile
-**Deprecated In Release: v1.13.0**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 `MAINTAINER` was an early very limited form of `LABEL` which should be used instead.
 
 ### API calls without a version
-**Deprecated In Release: [v1.13](https://github.com/docker/docker/releases/)**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 **Target For Removal In Release: v1.16**
 
@@ -64,7 +84,7 @@ future Engine versions. Instead of just requesting, for example, the URL
 `/containers/json`, you must now request `/v1.25/containers/json`.
 
 ### Backing filesystem without `d_type` support for overlay/overlay2
-**Deprecated In Release: v1.13.0**
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 **Target For Removal In Release: v1.16**
 
@@ -75,12 +95,12 @@ if it is formatted with the `ftype=0` option.
 Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for
 further information.
 
-### Three argument form in `docker import`
+### Three arguments form in `docker import`
 **Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**
 
 **Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
 
-The `docker import` command format 'file|URL|- [REPOSITORY [TAG]]' is deprecated since November 2013. It's no more supported.
+The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no more supported.
 
 ### `-h` shorthand for `--help`
 
@@ -136,7 +156,7 @@ To make tagging consistent across the various `docker` commands, the `-f` flag o
 Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of
 defining it at container creation (`POST /containers/create`).
 
-### Docker ps 'before' and 'since' options
+### `--before` and `--since` flags on `docker ps`
 
 **Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
 
@@ -145,7 +165,7 @@ defining it at container creation (`POST /containers/create`).
 The `docker ps --before` and `docker ps --since` options are deprecated.
 Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead.
 
-### Docker search 'automated' and 'stars' options
+### `--automated` and `--stars` flags on `docker search`
 
 **Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
 
@@ -234,7 +254,7 @@ The single-dash (`-help`) was removed, in favor of the double-dash `--help`
 
 **Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)**
 
-**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/)**
+**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
 
 The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor
 of the `--changes` flag that allows to pass `Dockerfile` commands.
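
For the `NetworkSettings` deprecation above, a small sketch of reading the per-network values instead of the deprecated top-level fields. It assumes the Go client of this release; the container name is illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Prefer NetworkSettings.Networks["bridge"].IPAddress over the
	// deprecated top-level NetworkSettings.IPAddress.
	inspect, err := cli.ContainerInspect(context.Background(), "my-container")
	if err != nil {
		panic(err)
	}
	if bridge, ok := inspect.NetworkSettings.Networks["bridge"]; ok {
		fmt.Println(bridge.IPAddress)
	}
}
```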

+ 8 - 4
docs/extend/config.md

@@ -14,7 +14,7 @@ keywords: "API, Usage, plugins, documentation, developer"
 -->
 
 
-# Plugin Config Version 0 of Plugin V2
+# Plugin Config Version 1 of Plugin V2
 
 This document outlines the format of the V0 plugin configuration. The plugin
 config described herein was introduced in the Docker daemon in the [v1.12.0
@@ -25,7 +25,7 @@ configs can be serialized to JSON format with the following media types:
 
 Config Type  | Media Type
 ------------- | -------------
-config  | "application/vnd.docker.plugin.v0+json"
+config  | "application/vnd.docker.plugin.v1+json"
 
 
 ## *Config* Field Descriptions
@@ -111,6 +111,10 @@ Config provides the base accessible fields for working with V0 plugin format
 
 	  options of the mount.
 
+- **`propagatedMount`** *string*
+
+   path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins.
+
 - **`env`** *PluginEnv array*
 
    env of the plugin, struct consisting of the following fields
@@ -133,11 +137,11 @@ Config provides the base accessible fields for working with V0 plugin format
 
     - **`name`** *string*
 
-	  name of the env.
+	  name of the args.
 
     - **`description`** *string*
 
-      description of the env.
+      description of the args.
 
     - **`value`** *string array*

+ 4 - 0
docs/extend/plugins_volume.md

@@ -22,6 +22,10 @@ beyond the lifetime of a single Engine host. See the
 
 ## Changelog
 
+### 1.13.0
+
+- If used as part of the v2 plugin architecture, mountpoints that are part of paths returned by plugin have to be mounted under the directory specified by PropagatedMount in the plugin configuration [#26398](https://github.com/docker/docker/pull/26398)
+
 ### 1.12.0
 
 - Add `Status` field to `VolumeDriver.Get` response ([#21006](https://github.com/docker/docker/pull/21006#))

+ 2 - 3
docs/reference/commandline/build.md

@@ -38,8 +38,7 @@ Options:
       --label value             Set metadata for an image (default [])
   -m, --memory string           Memory limit
       --memory-swap string      Swap limit equal to memory plus swap: '-1' to enable unlimited swap
-      --network string          Set the networking mode for the run commands
-                                during build.
+      --network string          Set the networking mode for the RUN instructions during build
                                 'bridge': use default Docker bridge
                                 'none': no networking
                                 'container:<name|id>': reuse another container's network stack
@@ -54,7 +53,7 @@ Options:
                                 The format is `<number><unit>`. `number` must be greater than `0`.
                                 Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
                                 or `g` (gigabytes). If you omit the unit, the system uses bytes.
-  --squash                      Squash newly built layers into a single new layer (**Experimental Only**) 
+      --squash                  Squash newly built layers into a single new layer (**Experimental Only**)
   -t, --tag value               Name and optionally a tag in the 'name:tag' format (default [])
       --ulimit value            Ulimit options (default [])
 ```

+ 1 - 1
docs/reference/commandline/network_ls.md

@@ -40,7 +40,7 @@ NETWORK ID          NAME                DRIVER          SCOPE
 7fca4eb8c647        bridge              bridge          local
 9f904ee27bf5        none                null            local
 cf03ee007fb4        host                host            local
-78b03ee04fc4        multi-host          overlay         local
+78b03ee04fc4        multi-host          overlay         swarm
 ```
 
 Use the `--no-trunc` option to display the full network id:

+ 4 - 2
docs/reference/commandline/service_create.md

@@ -534,9 +534,11 @@ service's name and the node's ID where it sits.
 ```bash
 $ docker service create --name hosttempl --hostname={% raw %}"{{.Node.ID}}-{{.Service.Name}}"{% endraw %} busybox top
 va8ew30grofhjoychbr6iot8c
+
 $ docker service ps va8ew30grofhjoychbr6iot8c
-NAME                      IMAGE                                                                                   NODE          DESIRED STATE  CURRENT STATE               ERROR  PORTS
-hosttempl.1.wo41w8hg8qan  busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912  2e7a8a9c4da2  Running        Running about a minute ago
+ID            NAME         IMAGE                                                                                   NODE          DESIRED STATE  CURRENT STATE               ERROR  PORTS
+wo41w8hg8qan  hosttempl.1  busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912  2e7a8a9c4da2  Running        Running about a minute ago
+
 $ docker inspect --format={% raw %}"{{.Config.Hostname}}"{% endraw %} hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj
 x3ti0erg11rjpg64m75kej2mz-hosttempl
 ```

+ 55 - 33
docs/reference/commandline/service_ps.md

@@ -40,36 +40,57 @@ The following command shows all the tasks that are part of the `redis` service:
 
 
 ```bash
 ```bash
 $ docker service ps redis
 $ docker service ps redis
-NAME                                IMAGE        NODE      DESIRED STATE  CURRENT STATE
-redis.1.0qihejybwf1x5vqi8lgzlgnpq   redis:3.0.6  manager1  Running        Running 8 seconds
-redis.2.bk658fpbex0d57cqcwoe3jthu   redis:3.0.6  worker2   Running        Running 9 seconds
-redis.3.5ls5s5fldaqg37s9pwayjecrf   redis:3.0.6  worker1   Running        Running 9 seconds
-redis.4.8ryt076polmclyihzx67zsssj   redis:3.0.6  worker1   Running        Running 9 seconds
-redis.5.1x0v8yomsncd6sbvfn0ph6ogc   redis:3.0.6  manager1  Running        Running 8 seconds
-redis.6.71v7je3el7rrw0osfywzs0lko   redis:3.0.6  worker2   Running        Running 9 seconds
-redis.7.4l3zm9b7tfr7cedaik8roxq6r   redis:3.0.6  worker2   Running        Running 9 seconds
-redis.8.9tfpyixiy2i74ad9uqmzp1q6o   redis:3.0.6  worker1   Running        Running 9 seconds
-redis.9.3w1wu13yuplna8ri3fx47iwad   redis:3.0.6  manager1  Running        Running 8 seconds
-redis.10.8eaxrb2fqpbnv9x30vr06i6vt  redis:3.0.6  manager1  Running        Running 8 seconds
+
+ID             NAME      IMAGE        NODE      DESIRED STATE  CURRENT STATE          ERROR  PORTS
+0qihejybwf1x   redis.1   redis:3.0.5  manager1  Running        Running 8 seconds
+bk658fpbex0d   redis.2   redis:3.0.5  worker2   Running        Running 9 seconds
+5ls5s5fldaqg   redis.3   redis:3.0.5  worker1   Running        Running 9 seconds
+8ryt076polmc   redis.4   redis:3.0.5  worker1   Running        Running 9 seconds
+1x0v8yomsncd   redis.5   redis:3.0.5  manager1  Running        Running 8 seconds
+71v7je3el7rr   redis.6   redis:3.0.5  worker2   Running        Running 9 seconds
+4l3zm9b7tfr7   redis.7   redis:3.0.5  worker2   Running        Running 9 seconds
+9tfpyixiy2i7   redis.8   redis:3.0.5  worker1   Running        Running 9 seconds
+3w1wu13yupln   redis.9   redis:3.0.5  manager1  Running        Running 8 seconds
+8eaxrb2fqpbn   redis.10  redis:3.0.5  manager1  Running        Running 8 seconds
+```
+
+In addition to _running_ tasks, the output also shows the task history. For
+example, after updating the service to use the `redis:3.0.6` image, the output
+may look like this:
+
+```bash
+$ docker service ps redis
+
+ID            NAME         IMAGE        NODE      DESIRED STATE  CURRENT STATE                   ERROR  PORTS
+50qe8lfnxaxk  redis.1      redis:3.0.6  manager1  Running        Running 6 seconds ago
+ky2re9oz86r9   \_ redis.1  redis:3.0.5  manager1  Shutdown       Shutdown 8 seconds ago
+3s46te2nzl4i  redis.2      redis:3.0.6  worker2   Running        Running less than a second ago
+nvjljf7rmor4   \_ redis.2  redis:3.0.6  worker2   Shutdown       Rejected 23 seconds ago        "No such image: redis@sha256:6…"
+vtiuz2fpc0yb   \_ redis.2  redis:3.0.5  worker2   Shutdown       Shutdown 1 second ago
+jnarweeha8x4  redis.3      redis:3.0.6  worker1   Running        Running 3 seconds ago
+vs448yca2nz4   \_ redis.3  redis:3.0.5  worker1   Shutdown       Shutdown 4 seconds ago
+jf1i992619ir  redis.4      redis:3.0.6  worker1   Running        Running 10 seconds ago
+blkttv7zs8ee   \_ redis.4  redis:3.0.5  worker1   Shutdown       Shutdown 11 seconds ago
 ```

+The number of items in the task history is determined by the
+`--task-history-limit` option that was set when initializing the swarm. You can
+change the task history retention limit using the
+[`docker swarm update`](swarm_update.md) command.
+
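For illustration, the retention limit is a swarm-wide setting rather than a per-service one; lowering it to an arbitrary value of 3 would look like this:

```bash
$ docker swarm update --task-history-limit 3
```
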
 When deploying a service, docker resolves the digest for the service's
 image, and pins the service to that digest. The digest is not shown by
-default, but is printed if `--no-trunc` is used;
+default, but is printed if `--no-trunc` is used. The `--no-trunc` option
+also shows the non-truncated task ID, and error-messages, as can be seen below;

 ```bash
 $ docker service ps --no-trunc redis
-NAME                                IMAGE                                                                                NODE      DESIRED STATE  CURRENT STATE
-redis.1.0qihejybwf1x5vqi8lgzlgnpq   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  manager1  Running        Running 28 seconds
-redis.2.bk658fpbex0d57cqcwoe3jthu   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Running        Running 29 seconds
-redis.3.5ls5s5fldaqg37s9pwayjecrf   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker1   Running        Running 29 seconds
-redis.4.8ryt076polmclyihzx67zsssj   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker1   Running        Running 29 seconds
-redis.5.1x0v8yomsncd6sbvfn0ph6ogc   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  manager1  Running        Running 28 seconds
-redis.6.71v7je3el7rrw0osfywzs0lko   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Running        Running 29 seconds
-redis.7.4l3zm9b7tfr7cedaik8roxq6r   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Running        Running 29 seconds
-redis.8.9tfpyixiy2i74ad9uqmzp1q6o   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker1   Running        Running 29 seconds
-redis.9.3w1wu13yuplna8ri3fx47iwad   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  manager1  Running        Running 28 seconds
-redis.10.8eaxrb2fqpbnv9x30vr06i6vt  redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  manager1  Running        Running 28 seconds
+
+ID                          NAME         IMAGE                                                                                NODE      DESIRED STATE  CURRENT STATE            ERROR                                                                                           PORTS
+50qe8lfnxaxksi9w2a704wkp7   redis.1      redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  manager1  Running        Running 5 minutes ago
+ky2re9oz86r9556i2szb8a8af   \_ redis.1   redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e  worker2   Shutdown       Shutdown 5 minutes ago
+bk658fpbex0d57cqcwoe3jthu   redis.2      redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Running        Running 5 seconds
+nvjljf7rmor4htv7l8rwcx7i7   \_ redis.2   redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842  worker2   Shutdown       Rejected 5 minutes ago   "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842"
 ```

 ## Filtering
@@ -93,9 +114,10 @@ The `id` filter matches on all or a prefix of a task's ID.

 ```bash
 $ docker service ps -f "id=8" redis
-NAME                                IMAGE        NODE      DESIRED STATE  CURRENT STATE
-redis.4.8ryt076polmclyihzx67zsssj   redis:3.0.6  worker1   Running        Running 9 seconds
-redis.10.8eaxrb2fqpbnv9x30vr06i6vt  redis:3.0.6  manager1  Running        Running 8 seconds
+
+ID             NAME      IMAGE        NODE      DESIRED STATE  CURRENT STATE      ERROR  PORTS
+8ryt076polmc   redis.4   redis:3.0.6  worker1   Running        Running 9 seconds
+8eaxrb2fqpbn   redis.10  redis:3.0.6  manager1  Running        Running 8 seconds
 ```

 #### Name
@@ -104,8 +126,8 @@ The `name` filter matches on task names.

 ```bash
 $ docker service ps -f "name=redis.1" redis
-NAME                                IMAGE        NODE      DESIRED STATE  CURRENT STATE
-redis.1.0qihejybwf1x5vqi8lgzlgnpq   redis:3.0.6  manager1  Running        Running 8 seconds
+ID            NAME     IMAGE        NODE      DESIRED STATE  CURRENT STATE      ERROR  PORTS
+qihejybwf1x5  redis.1  redis:3.0.6  manager1  Running        Running 8 seconds
 ```


@@ -115,11 +137,11 @@ The `node` filter matches on a node name or a node ID.

 ```bash
 $ docker service ps -f "node=manager1" redis
-NAME                                IMAGE        NODE      DESIRED STATE  CURRENT STATE
-redis.1.0qihejybwf1x5vqi8lgzlgnpq   redis:3.0.6  manager1  Running        Running 8 seconds
-redis.5.1x0v8yomsncd6sbvfn0ph6ogc   redis:3.0.6  manager1  Running        Running 8 seconds
-redis.9.3w1wu13yuplna8ri3fx47iwad   redis:3.0.6  manager1  Running        Running 8 seconds
-redis.10.8eaxrb2fqpbnv9x30vr06i6vt  redis:3.0.6  manager1  Running        Running 8 seconds
+ID            NAME      IMAGE        NODE      DESIRED STATE  CURRENT STATE      ERROR  PORTS
+0qihejybwf1x  redis.1   redis:3.0.6  manager1  Running        Running 8 seconds
+1x0v8yomsncd  redis.5   redis:3.0.6  manager1  Running        Running 8 seconds
+3w1wu13yupln  redis.9   redis:3.0.6  manager1  Running        Running 8 seconds
+8eaxrb2fqpbn  redis.10  redis:3.0.6  manager1  Running        Running 8 seconds
 ```



+ 0 - 1
docs/reference/commandline/stack_deploy.md

@@ -92,7 +92,6 @@ axqh55ipl40h  vossibility_vossibility-collector  replicated  1/1       icecrime/

 ## Related information

-* [stack config](stack_config.md)
 * [stack ls](stack_ls.md)
 * [stack ps](stack_ps.md)
 * [stack rm](stack_rm.md)

+ 1 - 2
docs/reference/commandline/stack_ls.md

@@ -41,8 +41,7 @@ myapp              2

 ## Related information

-* [stack config](stack_config.md)
 * [stack deploy](stack_deploy.md)
-* [stack rm](stack_rm.md)
 * [stack ps](stack_ps.md)
+* [stack rm](stack_rm.md)
 * [stack services](stack_services.md)

+ 1 - 2
docs/reference/commandline/stack_ps.md

@@ -45,8 +45,7 @@ The currently supported filters are:

 ## Related information

-* [stack config](stack_config.md)
 * [stack deploy](stack_deploy.md)
+* [stack ls](stack_ls.md)
 * [stack rm](stack_rm.md)
 * [stack services](stack_services.md)
-* [stack ls](stack_ls.md)

+ 2 - 3
docs/reference/commandline/stack_rm.md

@@ -32,8 +32,7 @@ a manager node.

 ## Related information

-* [stack config](stack_config.md)
 * [stack deploy](stack_deploy.md)
-* [stack services](stack_services.md)
-* [stack ps](stack_ps.md)
 * [stack ls](stack_ls.md)
+* [stack ps](stack_ps.md)
+* [stack services](stack_services.md)

+ 2 - 3
docs/reference/commandline/stack_services.md

@@ -64,8 +64,7 @@ The currently supported filters are:

 ## Related information

-* [stack config](stack_config.md)
 * [stack deploy](stack_deploy.md)
-* [stack rm](stack_rm.md)
-* [stack ps](stack_ps.md)
 * [stack ls](stack_ls.md)
+* [stack ps](stack_ps.md)
+* [stack rm](stack_rm.md)

+ 4 - 4
docs/reference/commandline/swarm_join.md

@@ -21,10 +21,10 @@ Usage:  docker swarm join [OPTIONS] HOST:PORT
 Join a swarm as a node and/or manager

 Options:
-      --advertise-addr value   Advertised address (format: <ip|interface>[:port])
-      --help                   Print usage
-      --listen-addr value      Listen address (format: <ip|interface>[:port)
-      --token string           Token for entry into the swarm
+      --advertise-addr string   Advertised address (format: <ip|interface>[:port])
+      --help                    Print usage
+      --listen-addr node-addr   Listen address (format: <ip|interface>[:port]) (default 0.0.0.0:2377)
+      --token string            Token for entry into the swarm
 ```

 Join a node to a swarm. The node joins as a manager node or worker node based upon the token you

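For illustration, a typical join invocation built from these options looks roughly like the following; the token and address are placeholders, and the confirmation line assumes a worker token:

```bash
$ docker swarm join --token <worker-or-manager-token> 192.168.99.100:2377
This node joined a swarm as a worker.
```
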
+ 3 - 3
docs/reference/commandline/swarm_update.md

@@ -24,10 +24,10 @@ Options:
       --autolock                        Change manager autolocking setting (true|false)
       --cert-expiry duration            Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s)
       --dispatcher-heartbeat duration   Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s)
-      --external-ca value               Specifications of one or more certificate signing endpoints
+      --external-ca external-ca         Specifications of one or more certificate signing endpoints
       --help                            Print usage
-      --max-snapshots int               Number of additional Raft snapshots to retain
-      --snapshot-interval int           Number of log entries between Raft snapshots
+      --max-snapshots uint              Number of additional Raft snapshots to retain
+      --snapshot-interval uint          Number of log entries between Raft snapshots (default 10000)
       --task-history-limit int          Task history retention limit (default 5)
 ```


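For illustration, the duration-typed options above accept the `(ns|us|ms|s|m|h)` suffixes shown in the listing, so an invocation with arbitrary values might look like:

```bash
$ docker swarm update --cert-expiry 720h --dispatcher-heartbeat 10s
```
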
+ 5 - 5
docs/reference/run.md

@@ -63,15 +63,15 @@ Only the operator (the person executing `docker run`) can set the
 following options.

  - [Detached vs foreground](#detached-vs-foreground)
-     - [Detached (-d)](#detached-d)
+     - [Detached (-d)](#detached--d)
      - [Foreground](#foreground)
  - [Container identification](#container-identification)
-     - [Name (--name)](#name-name)
+     - [Name (--name)](#name---name)
      - [PID equivalent](#pid-equivalent)
- - [IPC settings (--ipc)](#ipc-settings-ipc)
+ - [IPC settings (--ipc)](#ipc-settings---ipc)
  - [Network settings](#network-settings)
- - [Restart policies (--restart)](#restart-policies-restart)
- - [Clean up (--rm)](#clean-up-rm)
+ - [Restart policies (--restart)](#restart-policies---restart)
+ - [Clean up (--rm)](#clean-up---rm)
  - [Runtime constraints on resources](#runtime-constraints-on-resources)
  - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities)


+ 0 - 95
hack/install.sh

@@ -348,74 +348,6 @@ do_install() {

 	# Run setup for each distro accordingly
 	case "$lsb_dist" in
-		amzn)
-			(
-			set -x
-			$sh_c 'sleep 3; yum -y -q install docker'
-			)
-			echo_docker_as_nonroot
-			exit 0
-			;;
-
-		'opensuse project'|opensuse)
-			echo 'Going to perform the following operations:'
-			if [ "$repo" != 'main' ]; then
-				echo '  * add repository obs://Virtualization:containers'
-			fi
-			echo '  * install Docker'
-			$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
-
-			if [ "$repo" != 'main' ]; then
-				# install experimental packages from OBS://Virtualization:containers
-				(
-					set -x
-					zypper -n ar -f obs://Virtualization:containers Virtualization:containers
-					rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
-				)
-			fi
-			(
-				set -x
-				zypper -n install docker
-			)
-			echo_docker_as_nonroot
-			exit 0
-			;;
-		'suse linux'|sle[sd])
-			echo 'Going to perform the following operations:'
-			if [ "$repo" != 'main' ]; then
-				echo '  * add repository obs://Virtualization:containers'
-				echo '  * install experimental Docker using packages NOT supported by SUSE'
-			else
-				echo '  * add the "Containers" module'
-				echo '  * install Docker using packages supported by SUSE'
-			fi
-			$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
-
-			if [ "$repo" != 'main' ]; then
-				# install experimental packages from OBS://Virtualization:containers
-				echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE'
-				(
-					set -x
-					zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers
-					rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
-				)
-			else
-				# Add the containers module
-				# Note well-1: the SLE machine must already be registered against SUSE Customer Center
-				# Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect
-				(
-					set -x
-					SUSEConnect -p sle-module-containers/12/x86_64 -r ""
-				)
-			fi
-			(
-				set -x
-				zypper -n install docker
-			)
-			echo_docker_as_nonroot
-			exit 0
-			;;
-
 		ubuntu|debian|raspbian)
 			export DEBIAN_FRONTEND=noninteractive

@@ -522,33 +454,6 @@ do_install() {
 			echo_docker_as_nonroot
 			exit 0
 			;;
-		gentoo)
-			if [ "$url" = "https://test.docker.com/" ]; then
-				# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
-				cat >&2 <<-'EOF'
-
-				  You appear to be trying to install the latest nightly build in Gentoo.'
-				  The portage tree should contain the latest stable release of Docker, but'
-				  if you want something more recent, you can always use the live ebuild'
-				  provided in the "docker" overlay available via layman.  For more'
-				  instructions, please see the following URL:'
-
-				    https://github.com/tianon/docker-overlay#using-this-overlay'
-
-				  After adding the "docker" overlay, you should be able to:'
-
-				    emerge -av =app-emulation/docker-9999'
-
-				EOF
-				exit 1
-			fi
-
-			(
-				set -x
-				$sh_c 'sleep 3; emerge app-emulation/docker'
-			)
-			exit 0
-			;;
 	esac

 	# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output

+ 19 - 12
hack/make.ps1

@@ -6,17 +6,17 @@
              by hack\make.sh, but uses native Windows PowerShell semantics. It does
              not support the full set of options provided by the Linux counterpart.
              For example:
-             
+
              - You can't cross-build Linux docker binaries on Windows
              - Hashes aren't generated on binaries
              - 'Releasing' isn't supported.
              - Integration tests. This is because they currently cannot run inside a container,
-               and require significant external setup. 
-             
-             It does however provided the minimum necessary to support parts of local Windows 
+               and require significant external setup.
+
+             It does however provided the minimum necessary to support parts of local Windows
              development and Windows to Windows CI.

-             Usage Examples (run from repo root): 
+             Usage Examples (run from repo root):
                "hack\make.ps1 -Binary" to build the binaries
                "hack\make.ps1 -Client" to build just the client 64-bit binary
                "hack\make.ps1 -TestUnit" to run unit tests
@@ -176,6 +176,7 @@ Function Execute-Build($type, $additionalBuildTags, $directory) {
     Pop-Location; $global:pushed=$False
 }

+
 # Validates the DCO marker is present on each commit
 Function Validate-DCO($headCommit, $upstreamCommit) {
     Write-Host "INFO: Validating Developer Certificate of Origin..."
@@ -189,7 +190,11 @@ Function Validate-DCO($headCommit, $upstreamCommit) {
     if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" }

     # Counts of adds and deletes after removing multiple white spaces. AWK anyone? :(
-    $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{ $a=$_.Split(" "); $adds+=[int]$a[0]; $dels+=[int]$a[1] }
+    $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{ 
+        $a=$_.Split(" "); 
+        if ($a[0] -ne "-") { $adds+=[int]$a[0] }
+        if ($a[1] -ne "-") { $dels+=[int]$a[1] }
+    }
     if (($adds -eq 0) -and ($dels -eq 0)) {
         Write-Warning "DCO validation - nothing to validate!"
         return
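
The added `-ne "-"` guards in this hunk account for `git diff --numstat` printing `-` instead of numeric add/delete counts for binary files, as in this illustrative output (file names are examples only):

```bash
$ git diff --numstat HEAD~1
19      12      hack/make.ps1
-       -       docs/static_files/contributors.png
```
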
@@ -199,7 +204,7 @@ Function Validate-DCO($headCommit, $upstreamCommit) {
     if ($LASTEXITCODE -ne 0) { Throw "Failed git log --format" }
     $commits = $($commits -split '\s+' -match '\S')
     $badCommits=@()
-    $commits | %{ 
+    $commits | %{
         # Skip commits with no content such as merge commits etc
         if ($(git log -1 --format=format: --name-status $_).Length -gt 0) {
             # Ignore exit code on next call - always process regardless
@@ -230,7 +235,7 @@ Function Validate-PkgImports($headCommit, $upstreamCommit) {
         # For the current changed file, get its list of dependencies, sorted and uniqued.
         $imports = Invoke-Expression "go list -e -f `'{{ .Deps }}`' $file"
         if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" }
-        $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique 
+        $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique
         # Filter out what we are looking for
         $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" `
                             -NotMatch "^github.com/docker/docker/vendor" `
@@ -255,11 +260,11 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) {
     if ($(Get-Command gofmt -ErrorAction SilentlyContinue) -eq $nil) { Throw "gofmt does not appear to be installed" }

     # Get a list of all go source-code files which have changed.  Ignore exit code on next call - always process regardless
-    $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" 
+    $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'"
     $files = $files | Select-String -NotMatch "^vendor/"
     $badFiles=@(); $files | %{
         # Deliberately ignore error on next line - treat as failed
-        $content=Invoke-Expression "git show $headCommit`:$_" 
+        $content=Invoke-Expression "git show $headCommit`:$_"

         # Next set of hoops are to ensure we have LF not CRLF semantics as otherwise gofmt on Windows will not succeed.
         # Also note that gofmt on Windows does not appear to support stdin piping correctly. Hence go through a temporary file.
@@ -327,7 +332,7 @@ Try {
     # Get the version of docker (eg 1.14.0-dev)
     $dockerVersion=Get-DockerVersion

-    # Give a warning if we are not running in a container and are building binaries or running unit tests. 
+    # Give a warning if we are not running in a container and are building binaries or running unit tests.
     # Not relevant for validation tests as these are fine to run outside of a container.
     if ($Client -or $Daemon -or $TestUnit) { Check-InContainer }

@@ -341,7 +346,7 @@ Try {
         Catch [Exception] { Throw $_ }
     }

-    # DCO, Package import and Go formatting tests. 
+    # DCO, Package import and Go formatting tests.
     if ($DCO -or $PkgImports -or $GoFormat) {
         # We need the head and upstream commits for these
         $headCommit=Get-HeadCommit
@@ -394,6 +399,8 @@ Catch [Exception] {
     Write-Host -ForegroundColor Red  " \___  /  (____  /__`|____/\___  `>____ `| "
     Write-Host -ForegroundColor Red  "     \/        \/             \/     \/ "
     Write-Host
+
+    Throw $_
 }
 Finally {
     if ($global:pushed) { Pop-Location }

+ 1 - 5
hack/make.sh

@@ -69,6 +69,7 @@ DEFAULT_BUNDLES=(
 )

 VERSION=$(< ./VERSION)
+! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/')
 if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then
 	GITCOMMIT=$(git rev-parse --short HEAD)
 	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
@@ -82,11 +83,6 @@ if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; the
 		git status --porcelain --untracked-files=no
 		echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
 	fi
-	! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') &> /dev/null
-	if [ -z $BUILDTIME ]; then
-		# If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI
-		BUILDTIME=$(date -u)
-	fi
 elif [ "$DOCKER_GITCOMMIT" ]; then
 	GITCOMMIT="$DOCKER_GITCOMMIT"
 else

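The `BUILDTIME` assignment added above relies on GNU `date`: `--rfc-3339 ns` prints the date and time separated by a space, the `sed` expression turns that space into a `T`, and the leading `!` prevents a failing `date` (for example, one without `--rfc-3339` support) from aborting the script when `set -e` is in effect. Illustrative output:

```bash
$ date --rfc-3339 ns
2016-12-01 12:34:56.123456789+00:00
$ date --rfc-3339 ns | sed -e 's/ /T/'
2016-12-01T12:34:56.123456789+00:00
```
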
+ 5 - 0
integration-cli/daemon.go

@@ -88,6 +88,11 @@ func NewDaemon(c *check.C) *Daemon {
 	}
 }

+// RootDir returns the root directory of the daemon.
+func (d *Daemon) RootDir() string {
+	return d.root
+}
+
 func (d *Daemon) getClientConfig() (*clientConfig, error) {
 	var (
 		transport *http.Transport

+ 10 - 0
integration-cli/docker_api_exec_test.go

@@ -89,6 +89,16 @@ func (s *DockerSuite) TestExecAPIStart(c *check.C) {
 	startExec(c, id, http.StatusOK)
 }

+func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top")
+
+	id := createExec(c, "test")
+	resp, _, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json")
+	c.Assert(err, checker.IsNil)
+	c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "")
+}
+
 func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) {
 	testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
 	runSleepingContainer(c, "-d", "--name", "test")

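The new `TestExecAPIStartEnsureHeaders` test asserts that the daemon sets a `Server` header on the `POST /exec/<id>/start` response. A rough manual equivalent against a local daemon socket, with a placeholder exec ID and an illustrative header value, would be:

```bash
$ curl -i --unix-socket /var/run/docker.sock \
    -H "Content-Type: application/json" \
    -d '{"Detach": true}' \
    http://localhost/exec/<exec-id>/start
HTTP/1.1 200 OK
Server: Docker/1.13.0-rc4 (linux)
```
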
Some files were not shown because of the large number of changed files