Pārlūkot izejas kodu

Merge pull request #28408 from vieux/1.13.0-rc2-cherrypicks

1.13.0 rc2 cherrypicks
Victor Vieux 8 gadi atpakaļ
vecāks
revīzija
2dfa08bc50
47 mainītis faili ar 698 papildinājumiem un 328 dzēšanām
  1. 1 0
      .gitignore
  2. 6 7
      cli/command/container/list.go
  3. 26 0
      cli/command/formatter/container_test.go
  4. 2 2
      cli/command/plugin/push.go
  5. 9 5
      cli/command/service/create.go
  6. 8 4
      cmd/docker/docker.go
  7. 25 4
      container/container.go
  8. 1 1
      container/monitor.go
  9. 35 35
      container/stream/streams.go
  10. 19 2
      contrib/completion/bash/docker
  11. 134 1
      contrib/completion/zsh/_docker
  12. 7 0
      daemon/cluster/secrets.go
  13. 2 2
      daemon/container.go
  14. 2 2
      daemon/exec.go
  15. 25 20
      daemon/exec/exec.go
  16. 2 60
      daemon/info.go
  17. 81 0
      daemon/info_unix.go
  18. 9 0
      daemon/info_windows.go
  19. 2 2
      daemon/monitor.go
  20. 7 4
      dockerversion/version_lib.go
  21. 1 1
      docs/deprecated.md
  22. 3 3
      docs/reference/api/docker_remote_api_v1.24.md
  23. 8 7
      docs/reference/api/docker_remote_api_v1.25.md
  24. 4 2
      docs/reference/commandline/deploy.md
  25. 1 2
      docs/reference/commandline/dockerd.md
  26. 3 2
      docs/reference/commandline/plugin_create.md
  27. 2 1
      docs/reference/commandline/plugin_disable.md
  28. 2 1
      docs/reference/commandline/plugin_enable.md
  29. 2 1
      docs/reference/commandline/plugin_inspect.md
  30. 3 2
      docs/reference/commandline/plugin_install.md
  31. 2 1
      docs/reference/commandline/plugin_ls.md
  32. 50 0
      docs/reference/commandline/plugin_push.md
  33. 3 2
      docs/reference/commandline/plugin_rm.md
  34. 3 2
      docs/reference/commandline/plugin_set.md
  35. 3 2
      docs/reference/commandline/stack_deploy.md
  36. 2 2
      docs/reference/glossary.md
  37. 0 4
      docs/reference/run.md
  38. 14 0
      hack/make/.go-autogen
  39. 2 0
      integration-cli/docker_cli_authz_plugin_v2_test.go
  40. 3 3
      integration-cli/docker_cli_daemon_plugins_test.go
  41. 11 5
      integration-cli/docker_cli_logs_test.go
  42. 1 1
      integration-cli/docker_cli_network_unix_test.go
  43. 1 1
      integration-cli/docker_cli_plugins_test.go
  44. 146 112
      man/dockerd.8.md
  45. 1 1
      pkg/tailfile/tailfile.go
  46. 23 18
      plugin/manager_linux.go
  47. 1 1
      volume/local/local.go

+ 1 - 0
.gitignore

@@ -17,6 +17,7 @@ bundles/
 cmd/dockerd/dockerd
 cmd/dockerd/dockerd
 cmd/docker/docker
 cmd/docker/docker
 dockerversion/version_autogen.go
 dockerversion/version_autogen.go
+dockerversion/version_autogen_unix.go
 docs/AWS_S3_BUCKET
 docs/AWS_S3_BUCKET
 docs/GITCOMMIT
 docs/GITCOMMIT
 docs/GIT_BRANCH
 docs/GIT_BRANCH

+ 6 - 7
cli/command/container/list.go

@@ -62,6 +62,12 @@ func newListCommand(dockerCli *command.DockerCli) *cobra.Command {
 type preProcessor struct {
 type preProcessor struct {
 	types.Container
 	types.Container
 	opts *types.ContainerListOptions
 	opts *types.ContainerListOptions
+
+	// Fields that need to exist so the template doesn't error out
+	// These are needed since they are available on the final object but are not
+	// fields in types.Container
+	// TODO(cpuguy83): this seems rather broken
+	Networks, CreatedAt, RunningFor bool
 }
 }
 
 
 // Size sets the size option when called by a template execution.
 // Size sets the size option when called by a template execution.
@@ -70,13 +76,6 @@ func (p *preProcessor) Size() bool {
 	return true
 	return true
 }
 }
 
 
-// Networks does nothing but return true.
-// It is needed to avoid the template check to fail as this field
-// doesn't exist in `types.Container`
-func (p *preProcessor) Networks() bool {
-	return true
-}
-
 func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) {
 func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) {
 	options := &types.ContainerListOptions{
 	options := &types.ContainerListOptions{
 		All:     opts.all,
 		All:     opts.all,

+ 26 - 0
cli/command/formatter/container_test.go

@@ -370,3 +370,29 @@ func TestContainerContextWriteJSONField(t *testing.T) {
 		assert.Equal(t, s, containers[i].ID)
 		assert.Equal(t, s, containers[i].ID)
 	}
 	}
 }
 }
+
+func TestContainerBackCompat(t *testing.T) {
+	containers := []types.Container{types.Container{ID: "brewhaha"}}
+	cases := []string{
+		"ID",
+		"Names",
+		"Image",
+		"Command",
+		"CreatedAt",
+		"RunningFor",
+		"Ports",
+		"Status",
+		"Size",
+		"Labels",
+		"Mounts",
+	}
+	buf := bytes.NewBuffer(nil)
+	for _, c := range cases {
+		ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf}
+		if err := ContainerWrite(ctx, containers); err != nil {
+			t.Log("could not render template for field '%s': %v", c, err)
+			t.Fail()
+		}
+		buf.Reset()
+	}
+}

+ 2 - 2
cli/command/plugin/push.go

@@ -14,8 +14,8 @@ import (
 
 
 func newPushCommand(dockerCli *command.DockerCli) *cobra.Command {
 func newPushCommand(dockerCli *command.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
 	cmd := &cobra.Command{
-		Use:   "push PLUGIN",
-		Short: "Push a plugin",
+		Use:   "push NAME[:TAG]",
+		Short: "Push a plugin to a registry",
 		Args:  cli.ExactArgs(1),
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runPush(dockerCli, args[0])
 			return runPush(dockerCli, args[0])

+ 9 - 5
cli/command/service/create.go

@@ -62,12 +62,16 @@ func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error {
 		return err
 		return err
 	}
 	}
 
 
-	// parse and validate secrets
-	secrets, err := parseSecrets(apiClient, opts.secrets.Value())
-	if err != nil {
-		return err
+	specifiedSecrets := opts.secrets.Value()
+	if len(specifiedSecrets) > 0 {
+		// parse and validate secrets
+		secrets, err := parseSecrets(apiClient, specifiedSecrets)
+		if err != nil {
+			return err
+		}
+		service.TaskTemplate.ContainerSpec.Secrets = secrets
+
 	}
 	}
-	service.TaskTemplate.ContainerSpec.Secrets = secrets
 
 
 	ctx := context.Background()
 	ctx := context.Background()
 
 

+ 8 - 4
cmd/docker/docker.go

@@ -126,8 +126,10 @@ func dockerPreRun(opts *cliflags.ClientOptions) {
 func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) {
 func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) {
 	cmd.Flags().VisitAll(func(f *pflag.Flag) {
 	cmd.Flags().VisitAll(func(f *pflag.Flag) {
 		// hide experimental flags
 		// hide experimental flags
-		if _, ok := f.Annotations["experimental"]; ok {
-			f.Hidden = true
+		if !hasExperimental {
+			if _, ok := f.Annotations["experimental"]; ok {
+				f.Hidden = true
+			}
 		}
 		}
 
 
 		// hide flags not supported by the server
 		// hide flags not supported by the server
@@ -139,8 +141,10 @@ func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperi
 
 
 	for _, subcmd := range cmd.Commands() {
 	for _, subcmd := range cmd.Commands() {
 		// hide experimental subcommands
 		// hide experimental subcommands
-		if _, ok := subcmd.Tags["experimental"]; ok {
-			subcmd.Hidden = true
+		if !hasExperimental {
+			if _, ok := subcmd.Tags["experimental"]; ok {
+				subcmd.Hidden = true
+			}
 		}
 		}
 
 
 		// hide subcommands not supported by the server
 		// hide subcommands not supported by the server

+ 25 - 4
container/container.go

@@ -19,6 +19,7 @@ import (
 	containertypes "github.com/docker/docker/api/types/container"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	networktypes "github.com/docker/docker/api/types/network"
 	networktypes "github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/daemon/exec"
 	"github.com/docker/docker/daemon/exec"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/logger/jsonfilelog"
 	"github.com/docker/docker/daemon/logger/jsonfilelog"
@@ -65,7 +66,7 @@ func (DetachError) Error() string {
 // CommonContainer holds the fields for a container which are
 // CommonContainer holds the fields for a container which are
 // applicable across all platforms supported by the daemon.
 // applicable across all platforms supported by the daemon.
 type CommonContainer struct {
 type CommonContainer struct {
-	*runconfig.StreamConfig
+	StreamConfig *stream.Config
 	// embed for Container to support states directly.
 	// embed for Container to support states directly.
 	*State          `json:"State"` // Needed for remote api version <= 1.11
 	*State          `json:"State"` // Needed for remote api version <= 1.11
 	Root            string         `json:"-"` // Path to the "home" of the container, including metadata.
 	Root            string         `json:"-"` // Path to the "home" of the container, including metadata.
@@ -109,7 +110,7 @@ func NewBaseContainer(id, root string) *Container {
 			ExecCommands:  exec.NewStore(),
 			ExecCommands:  exec.NewStore(),
 			Root:          root,
 			Root:          root,
 			MountPoints:   make(map[string]*volume.MountPoint),
 			MountPoints:   make(map[string]*volume.MountPoint),
-			StreamConfig:  runconfig.NewStreamConfig(),
+			StreamConfig:  stream.NewConfig(),
 			attachContext: &attachContext{},
 			attachContext: &attachContext{},
 		},
 		},
 	}
 	}
@@ -377,7 +378,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr
 
 
 // AttachStreams connects streams to a TTY.
 // AttachStreams connects streams to a TTY.
 // Used by exec too. Should this move somewhere else?
 // Used by exec too. Should this move somewhere else?
-func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error {
+func AttachStreams(ctx context.Context, streamConfig *stream.Config, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error {
 	var (
 	var (
 		cStdout, cStderr io.ReadCloser
 		cStdout, cStderr io.ReadCloser
 		cStdin           io.WriteCloser
 		cStdin           io.WriteCloser
@@ -1064,6 +1065,26 @@ func (container *Container) startLogging() error {
 	return nil
 	return nil
 }
 }
 
 
+// StdinPipe gets the stdin stream of the container
+func (container *Container) StdinPipe() io.WriteCloser {
+	return container.StreamConfig.StdinPipe()
+}
+
+// StdoutPipe gets the stdout stream of the container
+func (container *Container) StdoutPipe() io.ReadCloser {
+	return container.StreamConfig.StdoutPipe()
+}
+
+// StderrPipe gets the stderr stream of the container
+func (container *Container) StderrPipe() io.ReadCloser {
+	return container.StreamConfig.StderrPipe()
+}
+
+// CloseStreams closes the container's stdio streams
+func (container *Container) CloseStreams() error {
+	return container.StreamConfig.CloseStreams()
+}
+
 // InitializeStdio is called by libcontainerd to connect the stdio.
 // InitializeStdio is called by libcontainerd to connect the stdio.
 func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error {
 func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error {
 	if err := container.startLogging(); err != nil {
 	if err := container.startLogging(); err != nil {
@@ -1073,7 +1094,7 @@ func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error {
 
 
 	container.StreamConfig.CopyToPipe(iop)
 	container.StreamConfig.CopyToPipe(iop)
 
 
-	if container.Stdin() == nil && !container.Config.Tty {
+	if container.StreamConfig.Stdin() == nil && !container.Config.Tty {
 		if iop.Stdin != nil {
 		if iop.Stdin != nil {
 			if err := iop.Stdin.Close(); err != nil {
 			if err := iop.Stdin.Close(); err != nil {
 				logrus.Warnf("error closing stdin: %+v", err)
 				logrus.Warnf("error closing stdin: %+v", err)

+ 1 - 1
container/monitor.go

@@ -23,7 +23,7 @@ func (container *Container) Reset(lock bool) {
 
 
 	// Re-create a brand new stdin pipe once the container exited
 	// Re-create a brand new stdin pipe once the container exited
 	if container.Config.OpenStdin {
 	if container.Config.OpenStdin {
-		container.NewInputPipes()
+		container.StreamConfig.NewInputPipes()
 	}
 	}
 
 
 	if container.LogDriver != nil {
 	if container.LogDriver != nil {

+ 35 - 35
runconfig/streams.go → container/stream/streams.go

@@ -1,4 +1,4 @@
-package runconfig
+package stream
 
 
 import (
 import (
 	"fmt"
 	"fmt"
@@ -14,16 +14,16 @@ import (
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/pools"
 )
 )
 
 
-// StreamConfig holds information about I/O streams managed together.
+// Config holds information about I/O streams managed together.
 //
 //
-// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
+// config.StdinPipe returns a WriteCloser which can be used to feed data
 // to the standard input of the streamConfig's active process.
 // to the standard input of the streamConfig's active process.
-// streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser
+// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser
 // which can be used to retrieve the standard output (and error) generated
 // which can be used to retrieve the standard output (and error) generated
 // by the container's active process. The output (and error) are actually
 // by the container's active process. The output (and error) are actually
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
 // a kind of "broadcaster".
 // a kind of "broadcaster".
-type StreamConfig struct {
+type Config struct {
 	sync.WaitGroup
 	sync.WaitGroup
 	stdout    *broadcaster.Unbuffered
 	stdout    *broadcaster.Unbuffered
 	stderr    *broadcaster.Unbuffered
 	stderr    *broadcaster.Unbuffered
@@ -31,76 +31,76 @@ type StreamConfig struct {
 	stdinPipe io.WriteCloser
 	stdinPipe io.WriteCloser
 }
 }
 
 
-// NewStreamConfig creates a stream config and initializes
+// NewConfig creates a stream config and initializes
 // the standard err and standard out to new unbuffered broadcasters.
 // the standard err and standard out to new unbuffered broadcasters.
-func NewStreamConfig() *StreamConfig {
-	return &StreamConfig{
+func NewConfig() *Config {
+	return &Config{
 		stderr: new(broadcaster.Unbuffered),
 		stderr: new(broadcaster.Unbuffered),
 		stdout: new(broadcaster.Unbuffered),
 		stdout: new(broadcaster.Unbuffered),
 	}
 	}
 }
 }
 
 
 // Stdout returns the standard output in the configuration.
 // Stdout returns the standard output in the configuration.
-func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered {
-	return streamConfig.stdout
+func (c *Config) Stdout() *broadcaster.Unbuffered {
+	return c.stdout
 }
 }
 
 
 // Stderr returns the standard error in the configuration.
 // Stderr returns the standard error in the configuration.
-func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered {
-	return streamConfig.stderr
+func (c *Config) Stderr() *broadcaster.Unbuffered {
+	return c.stderr
 }
 }
 
 
 // Stdin returns the standard input in the configuration.
 // Stdin returns the standard input in the configuration.
-func (streamConfig *StreamConfig) Stdin() io.ReadCloser {
-	return streamConfig.stdin
+func (c *Config) Stdin() io.ReadCloser {
+	return c.stdin
 }
 }
 
 
 // StdinPipe returns an input writer pipe as an io.WriteCloser.
 // StdinPipe returns an input writer pipe as an io.WriteCloser.
-func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
-	return streamConfig.stdinPipe
+func (c *Config) StdinPipe() io.WriteCloser {
+	return c.stdinPipe
 }
 }
 
 
 // StdoutPipe creates a new io.ReadCloser with an empty bytes pipe.
 // StdoutPipe creates a new io.ReadCloser with an empty bytes pipe.
 // It adds this new out pipe to the Stdout broadcaster.
 // It adds this new out pipe to the Stdout broadcaster.
-func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
+func (c *Config) StdoutPipe() io.ReadCloser {
 	bytesPipe := ioutils.NewBytesPipe()
 	bytesPipe := ioutils.NewBytesPipe()
-	streamConfig.stdout.Add(bytesPipe)
+	c.stdout.Add(bytesPipe)
 	return bytesPipe
 	return bytesPipe
 }
 }
 
 
 // StderrPipe creates a new io.ReadCloser with an empty bytes pipe.
 // StderrPipe creates a new io.ReadCloser with an empty bytes pipe.
 // It adds this new err pipe to the Stderr broadcaster.
 // It adds this new err pipe to the Stderr broadcaster.
-func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
+func (c *Config) StderrPipe() io.ReadCloser {
 	bytesPipe := ioutils.NewBytesPipe()
 	bytesPipe := ioutils.NewBytesPipe()
-	streamConfig.stderr.Add(bytesPipe)
+	c.stderr.Add(bytesPipe)
 	return bytesPipe
 	return bytesPipe
 }
 }
 
 
 // NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe.
 // NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe.
-func (streamConfig *StreamConfig) NewInputPipes() {
-	streamConfig.stdin, streamConfig.stdinPipe = io.Pipe()
+func (c *Config) NewInputPipes() {
+	c.stdin, c.stdinPipe = io.Pipe()
 }
 }
 
 
 // NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
 // NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
-func (streamConfig *StreamConfig) NewNopInputPipe() {
-	streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
+func (c *Config) NewNopInputPipe() {
+	c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
 }
 }
 
 
 // CloseStreams ensures that the configured streams are properly closed.
 // CloseStreams ensures that the configured streams are properly closed.
-func (streamConfig *StreamConfig) CloseStreams() error {
+func (c *Config) CloseStreams() error {
 	var errors []string
 	var errors []string
 
 
-	if streamConfig.stdin != nil {
-		if err := streamConfig.stdin.Close(); err != nil {
+	if c.stdin != nil {
+		if err := c.stdin.Close(); err != nil {
 			errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
 			errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
 		}
 		}
 	}
 	}
 
 
-	if err := streamConfig.stdout.Clean(); err != nil {
+	if err := c.stdout.Clean(); err != nil {
 		errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
 		errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
 	}
 	}
 
 
-	if err := streamConfig.stderr.Clean(); err != nil {
+	if err := c.stderr.Clean(); err != nil {
 		errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
 		errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
 	}
 	}
 
 
@@ -112,25 +112,25 @@ func (streamConfig *StreamConfig) CloseStreams() error {
 }
 }
 
 
 // CopyToPipe connects streamconfig with a libcontainerd.IOPipe
 // CopyToPipe connects streamconfig with a libcontainerd.IOPipe
-func (streamConfig *StreamConfig) CopyToPipe(iop libcontainerd.IOPipe) {
+func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) {
 	copyFunc := func(w io.Writer, r io.Reader) {
 	copyFunc := func(w io.Writer, r io.Reader) {
-		streamConfig.Add(1)
+		c.Add(1)
 		go func() {
 		go func() {
 			if _, err := pools.Copy(w, r); err != nil {
 			if _, err := pools.Copy(w, r); err != nil {
 				logrus.Errorf("stream copy error: %+v", err)
 				logrus.Errorf("stream copy error: %+v", err)
 			}
 			}
-			streamConfig.Done()
+			c.Done()
 		}()
 		}()
 	}
 	}
 
 
 	if iop.Stdout != nil {
 	if iop.Stdout != nil {
-		copyFunc(streamConfig.Stdout(), iop.Stdout)
+		copyFunc(c.Stdout(), iop.Stdout)
 	}
 	}
 	if iop.Stderr != nil {
 	if iop.Stderr != nil {
-		copyFunc(streamConfig.Stderr(), iop.Stderr)
+		copyFunc(c.Stderr(), iop.Stderr)
 	}
 	}
 
 
-	if stdin := streamConfig.Stdin(); stdin != nil {
+	if stdin := c.Stdin(); stdin != nil {
 		if iop.Stdin != nil {
 		if iop.Stdin != nil {
 			go func() {
 			go func() {
 				pools.Copy(iop.Stdin, stdin)
 				pools.Copy(iop.Stdin, stdin)

+ 19 - 2
contrib/completion/bash/docker

@@ -1121,7 +1121,7 @@ _docker_container_ls() {
 
 
 	case "$prev" in
 	case "$prev" in
 		--filter|-f)
 		--filter|-f)
-			COMPREPLY=( $( compgen -S = -W "ancestor before exited health id label name network since status volume" -- "$cur" ) )
+			COMPREPLY=( $( compgen -S = -W "ancestor before exited health id is-task label name network since status volume" -- "$cur" ) )
 			__docker_nospace
 			__docker_nospace
 			return
 			return
 			;;
 			;;
@@ -1901,6 +1901,7 @@ _docker_image() {
 _docker_image_build() {
 _docker_image_build() {
 	local options_with_args="
 	local options_with_args="
 		--build-arg
 		--build-arg
+		--cache-from
 		--cgroup-parent
 		--cgroup-parent
 		--cpuset-cpus
 		--cpuset-cpus
 		--cpuset-mems
 		--cpuset-mems
@@ -1936,6 +1937,10 @@ _docker_image_build() {
 			__docker_nospace
 			__docker_nospace
 			return
 			return
 			;;
 			;;
+		--cache-from)
+			__docker_complete_image_repos_and_tags
+			return
+			;;
 		--file|-f)
 		--file|-f)
 			_filedir
 			_filedir
 			return
 			return
@@ -2582,7 +2587,7 @@ _docker_service_ps() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter=$(__docker_pos_first_nonflag '--filter|-f')
 			local counter=$(__docker_pos_first_nonflag '--filter|-f')
@@ -2643,11 +2648,15 @@ _docker_service_update() {
 	if [ "$subcommand" = "create" ] ; then
 	if [ "$subcommand" = "create" ] ; then
 		options_with_args="$options_with_args
 		options_with_args="$options_with_args
 			--container-label
 			--container-label
+			--dns
+			--dns-option
+			--dns-search
 			--env-file
 			--env-file
 			--group
 			--group
 			--hostname
 			--hostname
 			--mode
 			--mode
 			--name
 			--name
+			--port
 		"
 		"
 
 
 		case "$prev" in
 		case "$prev" in
@@ -2670,9 +2679,17 @@ _docker_service_update() {
 			--arg
 			--arg
 			--container-label-add
 			--container-label-add
 			--container-label-rm
 			--container-label-rm
+			--dns-add
+			--dns-option-add
+			--dns-option-rm
+			--dns-rm
+			--dns-search-add
+			--dns-search-rm
 			--group-add
 			--group-add
 			--group-rm
 			--group-rm
 			--image
 			--image
+			--port-add
+			--port-rm
 		"
 		"
 
 
 		case "$prev" in
 		case "$prev" in

+ 134 - 1
contrib/completion/zsh/_docker

@@ -900,7 +900,8 @@ __docker_image_subcommand() {
         (build)
         (build)
             _arguments $(__docker_arguments) \
             _arguments $(__docker_arguments) \
                 $opts_help \
                 $opts_help \
-                "($help)*--build-arg[Build-time variables]:<varname>=<value>: " \
+                "($help)*--build-arg=[Build-time variables]:<varname>=<value>: " \
+                "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \
                 "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" \
                 "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" \
                 "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \
                 "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \
                 "($help)--compress[Compress the build context using gzip]" \
                 "($help)--compress[Compress the build context using gzip]" \
@@ -917,6 +918,7 @@ __docker_image_subcommand() {
                 "($help)*--label=[Set metadata for an image]:label=value: " \
                 "($help)*--label=[Set metadata for an image]:label=value: " \
                 "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \
                 "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \
                 "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \
                 "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \
+                "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)"
                 "($help)--no-cache[Do not use cache when building the image]" \
                 "($help)--no-cache[Do not use cache when building the image]" \
                 "($help)--pull[Attempt to pull a newer version of the image]" \
                 "($help)--pull[Attempt to pull a newer version of the image]" \
                 "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \
                 "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \
@@ -1514,6 +1516,107 @@ __docker_plugin_subcommand() {
 
 
 # EO plugin
 # EO plugin
 
 
+# BO secret
+
+__docker_secrets() {
+    [[ $PREFIX = -* ]] && return 1
+    integer ret=1
+    local line s
+    declare -a lines secrets
+
+    type=$1; shift
+
+    lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}})
+
+    # Parse header line to find columns
+    local i=1 j=1 k header=${lines[1]}
+    declare -A begin end
+    while (( j < ${#header} - 1 )); do
+        i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+        j=$(( i + ${${header[$i,-1]}[(i)  ]} - 1 ))
+        k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
+        begin[${header[$i,$((j-1))]}]=$i
+        end[${header[$i,$((j-1))]}]=$k
+    done
+    end[${header[$i,$((j-1))]}]=-1
+    lines=(${lines[2,-1]})
+
+    # ID
+    if [[ $type = (ids|all) ]]; then
+        for line in $lines; do
+            s="${line[${begin[ID]},${end[ID]}]%% ##}"
+            secrets=($secrets $s)
+        done
+    fi
+
+    # Names
+    if [[ $type = (names|all) ]]; then
+        for line in $lines; do
+            s="${line[${begin[NAME]},${end[NAME]}]%% ##}"
+            secrets=($secrets $s)
+        done
+    fi
+
+    _describe -t secrets-list "secrets" secrets "$@" && ret=0
+    return ret
+}
+
+__docker_complete_secrets() {
+    [[ $PREFIX = -* ]] && return 1
+    __docker_secrets all "$@"
+}
+
+__docker_secret_commands() {
+    local -a _docker_secret_subcommands
+    _docker_secret_subcommands=(
+        "create:Create a secret using stdin as content"
+        "inspect:Display detailed information on one or more secrets"
+        "ls:List secrets"
+        "rm:Remove one or more secrets"
+    )
+    _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands
+}
+
+__docker_secret_subcommand() {
+    local -a _command_args opts_help
+    local expl help="--help"
+    integer ret=1
+
+    opts_help=("(: -)--help[Print usage]")
+
+    case "$words[1]" in
+        (create)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help)*"{-l=,--label=}"[Secret labels]:label: " \
+                "($help -):secret: " && ret=0
+            ;;
+        (inspect)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \
+                "($help -)*:secret:__docker_complete_secrets" && ret=0
+            ;;
+        (ls|list)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0
+            ;;
+        (rm|remove)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -)*:secret:__docker_complete_secrets" && ret=0
+            ;;
+        (help)
+            _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0
+            ;;
+    esac
+
+    return ret
+}
+
+# EO secret
+
 # BO service
 # BO service
 
 
 __docker_service_complete_ls_filters() {
 __docker_service_complete_ls_filters() {
@@ -1666,6 +1769,7 @@ __docker_service_subcommand() {
         "($help)--restart-delay=[Delay between restart attempts]:delay: "
         "($help)--restart-delay=[Delay between restart attempts]:delay: "
         "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: "
         "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: "
         "($help)--restart-window=[Window used to evaluate the restart policy]:window: "
         "($help)--restart-window=[Window used to evaluate the restart policy]:window: "
+        "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets"
         "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: "
         "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: "
         "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]"
         "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]"
         "($help)--update-delay=[Delay between updates]:delay: "
         "($help)--update-delay=[Delay between updates]:delay: "
@@ -1684,10 +1788,14 @@ __docker_service_subcommand() {
                 $opts_help \
                 $opts_help \
                 $opts_create_update \
                 $opts_create_update \
                 "($help)*--container-label=[Container labels]:label: " \
                 "($help)*--container-label=[Container labels]:label: " \
+                "($help)*--dns=[Set custom DNS servers]:DNS: " \
+                "($help)*--dns-option=[Set DNS options]:DNS option: " \
+                "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \
                 "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \
                 "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \
                 "($help)--hostname=[Service containers hostname]:hostname: " \
                 "($help)--hostname=[Service containers hostname]:hostname: " \
                 "($help)--mode=[Service Mode]:mode:(global replicated)" \
                 "($help)--mode=[Service Mode]:mode:(global replicated)" \
                 "($help)--name=[Service name]:name: " \
                 "($help)--name=[Service name]:name: " \
+                "($help)*--port=[Publish a port]:port: " \
                 "($help -): :__docker_complete_images" \
                 "($help -): :__docker_complete_images" \
                 "($help -):command: _command_names -e" \
                 "($help -):command: _command_names -e" \
                 "($help -)*::arguments: _normal" && ret=0
                 "($help -)*::arguments: _normal" && ret=0
@@ -1750,10 +1858,18 @@ __docker_service_subcommand() {
                 "($help)--arg=[Service command args]:arguments: _normal" \
                 "($help)--arg=[Service command args]:arguments: _normal" \
                 "($help)*--container-label-add=[Add or update container labels]:label: " \
                 "($help)*--container-label-add=[Add or update container labels]:label: " \
                 "($help)*--container-label-rm=[Remove a container label by its key]:label: " \
                 "($help)*--container-label-rm=[Remove a container label by its key]:label: " \
+                "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \
+                "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \
+                "($help)*--dns-option-add=[Add or update DNS options]:DNS option: " \
+                "($help)*--dns-option-rm=[Remove DNS options]:DNS option: " \
+                "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \
+                "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \
                 "($help)--force[Force update]" \
                 "($help)--force[Force update]" \
                 "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \
                 "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \
                 "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \
                 "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \
                 "($help)--image=[Service image tag]:image:__docker_complete_repositories" \
                 "($help)--image=[Service image tag]:image:__docker_complete_repositories" \
+                "($help)*--port-add=[Add or update a port]:port: " \
+                "($help)*--port-rm=[Remove a port (target-port mandatory)]:port: " \
                 "($help)--rollback[Rollback to previous specification]" \
                 "($help)--rollback[Rollback to previous specification]" \
                 "($help -)1:service:__docker_complete_services" && ret=0
                 "($help -)1:service:__docker_complete_services" && ret=0
             ;;
             ;;
@@ -2298,6 +2414,23 @@ __docker_subcommand() {
                     ;;
                     ;;
             esac
             esac
             ;;
             ;;
+        (secret)
+            local curcontext="$curcontext" state
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -): :->command" \
+                "($help -)*:: :->option-or-argument" && ret=0
+
+            case $state in
+                (command)
+                    __docker_secret_commands && ret=0
+                    ;;
+                (option-or-argument)
+                    curcontext=${curcontext%:*:*}:docker-${words[-1]}:
+                    __docker_secret_subcommand && ret=0
+                    ;;
+            esac
+            ;;
         (service)
         (service)
             local curcontext="$curcontext" state
             local curcontext="$curcontext" state
             _arguments $(__docker_arguments) \
             _arguments $(__docker_arguments) \

+ 7 - 0
daemon/cluster/secrets.go

@@ -9,6 +9,13 @@ import (
 
 
 // GetSecret returns a secret from a managed swarm cluster
 // GetSecret returns a secret from a managed swarm cluster
 func (c *Cluster) GetSecret(id string) (types.Secret, error) {
 func (c *Cluster) GetSecret(id string) (types.Secret, error) {
+	c.RLock()
+	defer c.RUnlock()
+
+	if !c.isActiveManager() {
+		return types.Secret{}, c.errNoManager()
+	}
+
 	ctx, cancel := c.getRequestContext()
 	ctx, cancel := c.getRequestContext()
 	defer cancel()
 	defer cancel()
 
 

+ 2 - 2
daemon/container.go

@@ -91,9 +91,9 @@ func (daemon *Daemon) load(id string) (*container.Container, error) {
 func (daemon *Daemon) Register(c *container.Container) error {
 func (daemon *Daemon) Register(c *container.Container) error {
 	// Attach to stdout and stderr
 	// Attach to stdout and stderr
 	if c.Config.OpenStdin {
 	if c.Config.OpenStdin {
-		c.NewInputPipes()
+		c.StreamConfig.NewInputPipes()
 	} else {
 	} else {
-		c.NewNopInputPipe()
+		c.StreamConfig.NewNopInputPipe()
 	}
 	}
 
 
 	daemon.containers.Add(c.ID, c)
 	daemon.containers.Add(c.ID, c)

+ 2 - 2
daemon/exec.go

@@ -195,9 +195,9 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 	}
 	}
 
 
 	if ec.OpenStdin {
 	if ec.OpenStdin {
-		ec.NewInputPipes()
+		ec.StreamConfig.NewInputPipes()
 	} else {
 	} else {
-		ec.NewNopInputPipe()
+		ec.StreamConfig.NewNopInputPipe()
 	}
 	}
 
 
 	p := libcontainerd.Process{
 	p := libcontainerd.Process{

+ 25 - 20
daemon/exec/exec.go

@@ -5,9 +5,9 @@ import (
 	"sync"
 	"sync"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/runconfig"
 )
 )
 
 
 // Config holds the configurations for execs. The Daemon keeps
 // Config holds the configurations for execs. The Daemon keeps
@@ -15,30 +15,30 @@ import (
 // examined both during and after completion.
 // examined both during and after completion.
 type Config struct {
 type Config struct {
 	sync.Mutex
 	sync.Mutex
-	*runconfig.StreamConfig
-	ID          string
-	Running     bool
-	ExitCode    *int
-	OpenStdin   bool
-	OpenStderr  bool
-	OpenStdout  bool
-	CanRemove   bool
-	ContainerID string
-	DetachKeys  []byte
-	Entrypoint  string
-	Args        []string
-	Tty         bool
-	Privileged  bool
-	User        string
-	Env         []string
-	Pid         int
+	StreamConfig *stream.Config
+	ID           string
+	Running      bool
+	ExitCode     *int
+	OpenStdin    bool
+	OpenStderr   bool
+	OpenStdout   bool
+	CanRemove    bool
+	ContainerID  string
+	DetachKeys   []byte
+	Entrypoint   string
+	Args         []string
+	Tty          bool
+	Privileged   bool
+	User         string
+	Env          []string
+	Pid          int
 }
 }
 
 
 // NewConfig initializes the a new exec configuration
 // NewConfig initializes the a new exec configuration
 func NewConfig() *Config {
 func NewConfig() *Config {
 	return &Config{
 	return &Config{
 		ID:           stringid.GenerateNonCryptoID(),
 		ID:           stringid.GenerateNonCryptoID(),
-		StreamConfig: runconfig.NewStreamConfig(),
+		StreamConfig: stream.NewConfig(),
 	}
 	}
 }
 }
 
 
@@ -46,7 +46,7 @@ func NewConfig() *Config {
 func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error {
 func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error {
 	c.StreamConfig.CopyToPipe(iop)
 	c.StreamConfig.CopyToPipe(iop)
 
 
-	if c.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
+	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
 		if iop.Stdin != nil {
 		if iop.Stdin != nil {
 			if err := iop.Stdin.Close(); err != nil {
 			if err := iop.Stdin.Close(); err != nil {
 				logrus.Errorf("error closing exec stdin: %+v", err)
 				logrus.Errorf("error closing exec stdin: %+v", err)
@@ -57,6 +57,11 @@ func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error {
 	return nil
 	return nil
 }
 }
 
 
+// CloseStreams closes the stdio streams for the exec
+func (c *Config) CloseStreams() error {
+	return c.StreamConfig.CloseStreams()
+}
+
 // Store keeps track of the exec configurations.
 // Store keeps track of the exec configurations.
 type Store struct {
 type Store struct {
 	commands map[string]*Config
 	commands map[string]*Config

+ 2 - 60
daemon/info.go

@@ -1,11 +1,8 @@
 package daemon
 package daemon
 
 
 import (
 import (
-	"context"
 	"os"
 	"os"
-	"os/exec"
 	"runtime"
 	"runtime"
-	"strings"
 	"sync/atomic"
 	"sync/atomic"
 	"time"
 	"time"
 
 
@@ -135,63 +132,8 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 		Isolation:          daemon.defaultIsolation,
 		Isolation:          daemon.defaultIsolation,
 	}
 	}
 
 
-	// TODO Windows. Refactor this more once sysinfo is refactored into
-	// platform specific code. On Windows, sysinfo.cgroupMemInfo and
-	// sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if
-	// an attempt is made to access through them.
-	if runtime.GOOS != "windows" {
-		v.MemoryLimit = sysInfo.MemoryLimit
-		v.SwapLimit = sysInfo.SwapLimit
-		v.KernelMemory = sysInfo.KernelMemory
-		v.OomKillDisable = sysInfo.OomKillDisable
-		v.CPUCfsPeriod = sysInfo.CPUCfsPeriod
-		v.CPUCfsQuota = sysInfo.CPUCfsQuota
-		v.CPUShares = sysInfo.CPUShares
-		v.CPUSet = sysInfo.Cpuset
-		v.Runtimes = daemon.configStore.GetAllRuntimes()
-		v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
-		v.InitBinary = daemon.configStore.GetInitPath()
-
-		v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID
-		if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil {
-			v.ContainerdCommit.ID = sv.Revision
-		} else {
-			logrus.Warnf("failed to retrieve containerd version: %v", err)
-			v.ContainerdCommit.ID = "N/A"
-		}
-
-		v.RuncCommit.Expected = dockerversion.RuncCommitID
-		if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil {
-			parts := strings.Split(strings.TrimSpace(string(rv)), "\n")
-			if len(parts) == 3 {
-				parts = strings.Split(parts[1], ": ")
-				if len(parts) == 2 {
-					v.RuncCommit.ID = strings.TrimSpace(parts[1])
-				}
-			}
-		} else {
-			logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err)
-			v.RuncCommit.ID = "N/A"
-		}
-		if v.RuncCommit.ID == "" {
-			logrus.Warnf("failed to retrieve %s version: unknown output format", DefaultRuntimeBinary)
-			v.RuncCommit.ID = "N/A"
-		}
-
-		v.InitCommit.Expected = dockerversion.InitCommitID
-		if rv, err := exec.Command(DefaultInitBinary, "--version").Output(); err == nil {
-			parts := strings.Split(string(rv), " ")
-			if len(parts) == 3 {
-				v.InitCommit.ID = strings.TrimSpace(parts[2])
-			} else {
-				logrus.Warnf("failed to retrieve %s version: unknown output format", DefaultInitBinary)
-				v.InitCommit.ID = "N/A"
-			}
-		} else {
-			logrus.Warnf("failed to retrieve %s version", DefaultInitBinary)
-			v.InitCommit.ID = "N/A"
-		}
-	}
+	// Retrieve platform specific info
+	daemon.FillPlatformInfo(v, sysInfo)
 
 
 	hostname := ""
 	hostname := ""
 	if hn, err := os.Hostname(); err != nil {
 	if hn, err := os.Hostname(); err != nil {

+ 81 - 0
daemon/info_unix.go

@@ -0,0 +1,81 @@
+// +build !windows
+
+package daemon
+
+import (
+	"context"
+	"os/exec"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/sysinfo"
+)
+
+func (daemon *Daemon) FillPlatformInfo(v *types.InfoBase, sysInfo *sysinfo.SysInfo) {
+	v.MemoryLimit = sysInfo.MemoryLimit
+	v.SwapLimit = sysInfo.SwapLimit
+	v.KernelMemory = sysInfo.KernelMemory
+	v.OomKillDisable = sysInfo.OomKillDisable
+	v.CPUCfsPeriod = sysInfo.CPUCfsPeriod
+	v.CPUCfsQuota = sysInfo.CPUCfsQuota
+	v.CPUShares = sysInfo.CPUShares
+	v.CPUSet = sysInfo.Cpuset
+	v.Runtimes = daemon.configStore.GetAllRuntimes()
+	v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
+	v.InitBinary = daemon.configStore.GetInitPath()
+
+	v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID
+	if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil {
+		v.ContainerdCommit.ID = sv.Revision
+	} else {
+		logrus.Warnf("failed to retrieve containerd version: %v", err)
+		v.ContainerdCommit.ID = "N/A"
+	}
+
+	v.RuncCommit.Expected = dockerversion.RuncCommitID
+	if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil {
+		parts := strings.Split(strings.TrimSpace(string(rv)), "\n")
+		if len(parts) == 3 {
+			parts = strings.Split(parts[1], ": ")
+			if len(parts) == 2 {
+				v.RuncCommit.ID = strings.TrimSpace(parts[1])
+			}
+		}
+
+		if v.RuncCommit.ID == "" {
+			logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv))
+			v.RuncCommit.ID = "N/A"
+		}
+	} else {
+		logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err)
+		v.RuncCommit.ID = "N/A"
+	}
+
+	v.InitCommit.Expected = dockerversion.InitCommitID
+	if rv, err := exec.Command(DefaultInitBinary, "--version").Output(); err == nil {
+		parts := strings.Split(strings.TrimSpace(string(rv)), " - ")
+		if len(parts) == 2 {
+			if dockerversion.InitCommitID[0] == 'v' {
+				vs := strings.TrimPrefix(parts[0], "tini version ")
+				v.InitCommit.ID = "v" + vs
+			} else {
+				// Get the sha1
+				gitParts := strings.Split(parts[1], ".")
+				if len(gitParts) == 2 && gitParts[0] == "git" {
+					v.InitCommit.ID = gitParts[1]
+					v.InitCommit.Expected = dockerversion.InitCommitID[0:len(gitParts[1])]
+				}
+			}
+		}
+
+		if v.InitCommit.ID == "" {
+			logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultInitBinary, string(rv))
+			v.InitCommit.ID = "N/A"
+		}
+	} else {
+		logrus.Warnf("failed to retrieve %s version", DefaultInitBinary)
+		v.InitCommit.ID = "N/A"
+	}
+}

+ 9 - 0
daemon/info_windows.go

@@ -0,0 +1,9 @@
+package daemon
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/sysinfo"
+)
+
+func (daemon *Daemon) FillPlatformInfo(v *types.InfoBase, sysInfo *sysinfo.SysInfo) {
+}

+ 2 - 2
daemon/monitor.go

@@ -39,7 +39,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		}
 		}
 
 
 		c.Lock()
 		c.Lock()
-		c.Wait()
+		c.StreamConfig.Wait()
 		c.Reset(false)
 		c.Reset(false)
 
 
 		restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, false, time.Since(c.StartedAt))
 		restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, false, time.Since(c.StartedAt))
@@ -88,7 +88,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 			defer execConfig.Unlock()
 			defer execConfig.Unlock()
 			execConfig.ExitCode = &ec
 			execConfig.ExitCode = &ec
 			execConfig.Running = false
 			execConfig.Running = false
-			execConfig.Wait()
+			execConfig.StreamConfig.Wait()
 			if err := execConfig.CloseStreams(); err != nil {
 			if err := execConfig.CloseStreams(); err != nil {
 				logrus.Errorf("%s: %s", c.ID, err)
 				logrus.Errorf("%s: %s", c.ID, err)
 			}
 			}

+ 7 - 4
dockerversion/version_lib.go

@@ -6,8 +6,11 @@ package dockerversion
 // Default build-time variable for library-import.
 // Default build-time variable for library-import.
 // This file is overridden on build with build-time informations.
 // This file is overridden on build with build-time informations.
 const (
 const (
-	GitCommit string = "library-import"
-	Version   string = "library-import"
-	BuildTime string = "library-import"
-	IAmStatic string = "library-import"
+	GitCommit          string = "library-import"
+	Version            string = "library-import"
+	BuildTime          string = "library-import"
+	IAmStatic          string = "library-import"
+	ContainerdCommitID string = "library-import"
+	RuncCommitID       string = "library-import"
+	InitCommitID       string = "library-import"
 )
 )

+ 1 - 1
docs/deprecated.md

@@ -73,7 +73,7 @@ filesystem does not support `d_type`. For example, XFS does not support `d_type`
 if it is formatted with the `ftype=0` option.
 if it is formatted with the `ftype=0` option.
 
 
 Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for
 Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for
-futher information.
+further information.
 
 
 ### Three argument form in `docker import`
 ### Three argument form in `docker import`
 **Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**
 **Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)**

+ 3 - 3
docs/reference/api/docker_remote_api_v1.24.md

@@ -4113,9 +4113,9 @@ an empty value or the default cluster-wide value.
 
 
 JSON Parameters:
 JSON Parameters:
 
 
-- **Annotations** – Optional medata to associate with the service.
-    - **Name** – User-defined name for the service.
-    - **Labels** – A map of labels to associate with the service (e.g.,
+- **Annotations** – Optional metadata to associate with the node.
+    - **Name** – User-defined name for the node.
+    - **Labels** – A map of labels to associate with the node (e.g.,
       `{"key":"value", "key2":"value2"}`).
       `{"key":"value", "key2":"value2"}`).
 - **Role** - Role of the node (worker/manager).
 - **Role** - Role of the node (worker/manager).
 - **Availability** - Availability of the node (active/pause/drain).
 - **Availability** - Availability of the node (active/pause/drain).

+ 8 - 7
docs/reference/api/docker_remote_api_v1.25.md

@@ -1686,7 +1686,7 @@ Delete stopped containers
 
 
 **Example request, with digest information**:
 **Example request, with digest information**:
 
 
-    GET /v1.25/v1.25/images/json?digests=1 HTTP/1.1
+    GET /v1.25/images/json?digests=1 HTTP/1.1
 
 
 **Example response, with digest information**:
 **Example response, with digest information**:
 
 
@@ -1743,7 +1743,7 @@ Build an image from a Dockerfile
 
 
 **Example request**:
 **Example request**:
 
 
-    POST /v1.25/v1.25/build HTTP/1.1
+    POST /v1.25/build HTTP/1.1
 
 
     {% raw %}
     {% raw %}
     {{ TAR STREAM }}
     {{ TAR STREAM }}
@@ -4713,9 +4713,9 @@ an empty value or the default cluster-wide value.
 
 
 JSON Parameters:
 JSON Parameters:
 
 
-- **Annotations** – Optional medata to associate with the service.
-    - **Name** – User-defined name for the service.
-    - **Labels** – A map of labels to associate with the service (e.g.,
+- **Annotations** – Optional metadata to associate with the node.
+    - **Name** – User-defined name for the node.
+    - **Labels** – A map of labels to associate with the node (e.g.,
       `{"key":"value", "key2":"value2"}`).
       `{"key":"value", "key2":"value2"}`).
 - **Role** - Role of the node (worker/manager).
 - **Role** - Role of the node (worker/manager).
 - **Availability** - Availability of the node (active/pause/drain).
 - **Availability** - Availability of the node (active/pause/drain).
@@ -6060,9 +6060,9 @@ Create a secret
 
 
 ### Inspect a secret
 ### Inspect a secret
 
 
-`GET /secrets/(secret id)`
+`GET /secrets/(id)`
 
 
-Get details on a secret
+Get details on the secret `id`
 
 
 **Example request**:
 **Example request**:
 
 
@@ -6088,6 +6088,7 @@ Get details on a secret
 
 
 - **200** – no error
 - **200** – no error
 - **404** – unknown secret
 - **404** – unknown secret
+- **406** – node is not part of a swarm
 - **500** – server error
 - **500** – server error
 
 
 ### Remove a secret
 ### Remove a secret

+ 4 - 2
docs/reference/commandline/deploy.md

@@ -14,7 +14,7 @@ advisory: "experimental"
      will be rejected.
      will be rejected.
 -->
 -->
 
 
-# stack deploy (experimental)
+# deploy (alias for stack deploy) (experimental)
 
 
 ```markdown
 ```markdown
 Usage:  docker deploy [OPTIONS] STACK
 Usage:  docker deploy [OPTIONS] STACK
@@ -58,5 +58,7 @@ axqh55ipl40h  vossibility-stack_vossibility-collector  replicated   1/1
 
 
 * [stack config](stack_config.md)
 * [stack config](stack_config.md)
 * [stack deploy](stack_deploy.md)
 * [stack deploy](stack_deploy.md)
+* [stack ls](stack_ls.md)
+* [stack ps](stack_ps.md)
 * [stack rm](stack_rm.md)
 * [stack rm](stack_rm.md)
-* [stack tasks](stack_tasks.md)
+* [stack services](stack_services.md)

+ 1 - 2
docs/reference/commandline/dockerd.md

@@ -62,7 +62,7 @@ Options:
       --iptables                              Enable addition of iptables rules (default true)
       --iptables                              Enable addition of iptables rules (default true)
       --ipv6                                  Enable IPv6 networking
       --ipv6                                  Enable IPv6 networking
       --label value                           Set key=value labels to the daemon (default [])
       --label value                           Set key=value labels to the daemon (default [])
-      --live-restore                          Enable live restore of docker when containers are still running
+      --live-restore                          Enable live restore of docker when containers are still running (Linux only)
       --log-driver string                     Default driver for container logs (default "json-file")
       --log-driver string                     Default driver for container logs (default "json-file")
   -l, --log-level string                      Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info")
   -l, --log-level string                      Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info")
       --log-opt value                         Default log driver options for containers (default map[])
       --log-opt value                         Default log driver options for containers (default map[])
@@ -1234,7 +1234,6 @@ This is a full example of the allowed configuration options on Windows:
     "storage-driver": "",
     "storage-driver": "",
     "storage-opts": [],
     "storage-opts": [],
     "labels": [],
     "labels": [],
-    "live-restore": true,
     "log-driver": "",
     "log-driver": "",
     "mtu": 0,
     "mtu": 0,
     "pidfile": "",
     "pidfile": "",

+ 3 - 2
docs/reference/commandline/plugin_create.md

@@ -49,10 +49,11 @@ The plugin can subsequently be enabled for local use or pushed to the public reg
 
 
 ## Related information
 ## Related information
 
 
-* [plugin ls](plugin_ls.md)
-* [plugin enable](plugin_enable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 2 - 1
docs/reference/commandline/plugin_disable.md

@@ -53,10 +53,11 @@ tiborvass/no-remove   latest              A test plugin for Docker   false
 
 
 ## Related information
 ## Related information
 
 
-* [plugin ls](plugin_ls.md)
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
 * [plugin enable](plugin_enable.md)
 * [plugin enable](plugin_enable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 2 - 1
docs/reference/commandline/plugin_enable.md

@@ -54,9 +54,10 @@ tiborvass/no-remove   latest              A test plugin for Docker   true
 ## Related information
 ## Related information
 
 
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
-* [plugin ls](plugin_ls.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 2 - 1
docs/reference/commandline/plugin_inspect.md

@@ -154,9 +154,10 @@ $ docker plugin inspect -f '{{.Id}}' tiborvass/no-remove:latest
 ## Related information
 ## Related information
 
 
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
-* [plugin ls](plugin_ls.md)
 * [plugin enable](plugin_enable.md)
 * [plugin enable](plugin_enable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 3 - 2
docs/reference/commandline/plugin_install.md

@@ -60,9 +60,10 @@ tiborvass/no-remove   latest              A test plugin for Docker   true
 ## Related information
 ## Related information
 
 
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
-* [plugin ls](plugin_ls.md)
-* [plugin enable](plugin_enable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 2 - 1
docs/reference/commandline/plugin_ls.md

@@ -43,9 +43,10 @@ tiborvass/no-remove   latest              A test plugin for Docker   true
 ## Related information
 ## Related information
 
 
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
-* [plugin enable](plugin_enable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 50 - 0
docs/reference/commandline/plugin_push.md

@@ -0,0 +1,50 @@
+---
+title: "plugin push"
+description: "the plugin push command description and usage"
+keywords: "plugin, push"
+---
+
+<!-- This file is maintained within the docker/docker Github
+     repository at https://github.com/docker/docker/. Make all
+     pull requests against that repo. If you see this file in
+     another repository, consider it read-only there, as it will
+     periodically be overwritten by the definitive file. Pull
+     requests which include edits to this file in other repositories
+     will be rejected.
+-->
+
+```markdown
+Usage:  docker plugin push NAME[:TAG]
+
+Push a plugin to a registry
+
+Options:
+      --help       Print usage
+```
+
+Use `docker plugin create` to create the plugin. Once the plugin is ready for distribution,
+use `docker plugin push` to share your plugin with the Docker Hub registry or with a self-hosted one.
+
+Registry credentials are managed by [docker login](login.md).
+
+The following example shows how to push a sample `user/plugin`.
+
+```bash
+
+$ docker plugin ls
+NAME                  	TAG                 DESCRIPTION                  ENABLED
+user/plugin             latest              A sample plugin for Docker   false
+
+$ docker plugin push user/plugin
+```
+
+## Related information
+
+* [plugin create](plugin_create.md)
+* [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
+* [plugin inspect](plugin_inspect.md)
+* [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin rm](plugin_rm.md)
+* [plugin set](plugin_set.md)

+ 3 - 2
docs/reference/commandline/plugin_rm.md

@@ -46,9 +46,10 @@ tiborvass/no-remove
 ## Related information
 ## Related information
 
 
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
-* [plugin ls](plugin_ls.md)
-* [plugin enable](plugin_enable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin set](plugin_set.md)
 * [plugin set](plugin_set.md)

+ 3 - 2
docs/reference/commandline/plugin_set.md

@@ -90,9 +90,10 @@ $ docker plugin inspect -f '{{.Settings.Args}}' myplugin
 ## Related information
 ## Related information
 
 
 * [plugin create](plugin_create.md)
 * [plugin create](plugin_create.md)
-* [plugin ls](plugin_ls.md)
-* [plugin enable](plugin_enable.md)
 * [plugin disable](plugin_disable.md)
 * [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin inspect](plugin_inspect.md)
 * [plugin install](plugin_install.md)
 * [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
 * [plugin rm](plugin_rm.md)
 * [plugin rm](plugin_rm.md)

+ 3 - 2
docs/reference/commandline/stack_deploy.md

@@ -60,7 +60,8 @@ axqh55ipl40h  vossibility-stack_vossibility-collector  replicated  1/1       ice
 ## Related information
 ## Related information
 
 
 * [stack config](stack_config.md)
 * [stack config](stack_config.md)
+* [stack ls](stack_ls.md)
+* [stack ps](stack_ps.md)
 * [stack rm](stack_rm.md)
 * [stack rm](stack_rm.md)
 * [stack services](stack_services.md)
 * [stack services](stack_services.md)
-* [stack ps](stack_ps.md)
-* [stack ls](stack_ls.md)
+* [deploy](deploy.md)

+ 2 - 2
docs/reference/glossary.md

@@ -212,7 +212,7 @@ environment.
 
 
 ## service discovery
 ## service discovery
 
 
-Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/) is a DNS component
+Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery) is a DNS component
 internal to the swarm that automatically assigns each service on an overlay
 internal to the swarm that automatically assigns each service on an overlay
 network in the swarm a VIP and DNS entry. Containers on the network share DNS
 network in the swarm a VIP and DNS entry. Containers on the network share DNS
 mappings for the service via gossip so any container on the network can access
 mappings for the service via gossip so any container on the network can access
@@ -226,7 +226,7 @@ automatically distributes requests to the service VIP among the active tasks.
 
 
 A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode).
 A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode).
 
 
-## Swarm
+## Docker Swarm
 
 
 Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine.
 Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine.
 
 

+ 0 - 4
docs/reference/run.md

@@ -1238,10 +1238,6 @@ since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be nec
 to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding
 to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding
 capabilities.
 capabilities.
 
 
-It is only possible to grant capabilities to a container running as a user other than `root`
-on a system with a Linux kernel version of 4.3 or later, as this requires "ambient capabilities"
-to be granted. These will be added if the kernel allows it from Docker version 1.13.
-
 ## Logging drivers (--log-driver)
 ## Logging drivers (--log-driver)
 
 
 The container can have a different logging driver than the Docker daemon. Use
 The container can have a different logging driver than the Docker daemon. Use

+ 14 - 0
hack/make/.go-autogen

@@ -17,6 +17,20 @@ const (
 	Version            string = "$VERSION"
 	Version            string = "$VERSION"
 	BuildTime          string = "$BUILDTIME"
 	BuildTime          string = "$BUILDTIME"
 	IAmStatic          string = "${IAMSTATIC:-true}"
 	IAmStatic          string = "${IAMSTATIC:-true}"
+)
+
+// AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen
+DVEOF
+
+cat > dockerversion/version_autogen_unix.go <<DVEOF
+// +build autogen,!windows
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variable for library-import.
+// This file is overridden on build with build-time information.
+const (
 	ContainerdCommitID string = "${CONTAINERD_COMMIT}"
 	ContainerdCommitID string = "${CONTAINERD_COMMIT}"
 	RuncCommitID       string = "${RUNC_COMMIT}"
 	RuncCommitID       string = "${RUNC_COMMIT}"
 	InitCommitID       string = "${TINI_COMMIT}"
 	InitCommitID       string = "${TINI_COMMIT}"

+ 2 - 0
integration-cli/docker_cli_authz_plugin_v2_test.go

@@ -41,6 +41,7 @@ func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) {
 }
 }
 
 
 func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) {
 func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) {
+	testRequires(c, IsAmd64)
 	// Install authz plugin
 	// Install authz plugin
 	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
 	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
@@ -70,6 +71,7 @@ func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) {
 }
 }
 
 
 func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) {
 func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) {
+	testRequires(c, IsAmd64)
 	// Install authz plugin
 	// Install authz plugin
 	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
 	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)

+ 3 - 3
integration-cli/docker_cli_daemon_plugins_test.go

@@ -81,7 +81,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) {
 // TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore.
 // TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore.
 // Plugins should continue to run.
 // Plugins should continue to run.
 func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) {
 func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) {
-	testRequires(c, Network)
+	testRequires(c, Network, IsAmd64)
 
 
 	if err := s.d.Start("--live-restore"); err != nil {
 	if err := s.d.Start("--live-restore"); err != nil {
 		c.Fatalf("Could not start daemon: %v", err)
 		c.Fatalf("Could not start daemon: %v", err)
@@ -114,7 +114,7 @@ func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) {
 // TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore.
 // TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore.
 // Plugins should continue to run.
 // Plugins should continue to run.
 func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) {
 func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) {
-	testRequires(c, Network)
+	testRequires(c, Network, IsAmd64)
 
 
 	if err := s.d.Start("--live-restore"); err != nil {
 	if err := s.d.Start("--live-restore"); err != nil {
 		c.Fatalf("Could not start daemon: %v", err)
 		c.Fatalf("Could not start daemon: %v", err)
@@ -185,7 +185,7 @@ func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) {
 
 
 // TestVolumePlugin tests volume creation using a plugin.
 // TestVolumePlugin tests volume creation using a plugin.
 func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) {
 func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) {
-	testRequires(c, Network)
+	testRequires(c, Network, IsAmd64)
 
 
 	volName := "plugin-volume"
 	volName := "plugin-volume"
 	volRoot := "/data"
 	volRoot := "/data"

+ 11 - 5
integration-cli/docker_cli_logs_test.go

@@ -117,22 +117,28 @@ func (s *DockerSuite) TestLogsTail(c *check.C) {
 	id := strings.TrimSpace(out)
 	id := strings.TrimSpace(out)
 	dockerCmd(c, "wait", id)
 	dockerCmd(c, "wait", id)
 
 
-	out, _ = dockerCmd(c, "logs", "--tail", "5", id)
-
+	out, _ = dockerCmd(c, "logs", "--tail", "0", id)
 	lines := strings.Split(out, "\n")
 	lines := strings.Split(out, "\n")
+	c.Assert(lines, checker.HasLen, 1)
 
 
+	out, _ = dockerCmd(c, "logs", "--tail", "5", id)
+	lines = strings.Split(out, "\n")
 	c.Assert(lines, checker.HasLen, 6)
 	c.Assert(lines, checker.HasLen, 6)
 
 
-	out, _ = dockerCmd(c, "logs", "--tail", "all", id)
+	out, _ = dockerCmd(c, "logs", "--tail", "99", id)
+	lines = strings.Split(out, "\n")
+	c.Assert(lines, checker.HasLen, 100)
 
 
+	out, _ = dockerCmd(c, "logs", "--tail", "all", id)
 	lines = strings.Split(out, "\n")
 	lines = strings.Split(out, "\n")
+	c.Assert(lines, checker.HasLen, testLen+1)
 
 
+	out, _ = dockerCmd(c, "logs", "--tail", "-1", id)
+	lines = strings.Split(out, "\n")
 	c.Assert(lines, checker.HasLen, testLen+1)
 	c.Assert(lines, checker.HasLen, testLen+1)
 
 
 	out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id)
 	out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id)
-
 	lines = strings.Split(out, "\n")
 	lines = strings.Split(out, "\n")
-
 	c.Assert(lines, checker.HasLen, testLen+1)
 	c.Assert(lines, checker.HasLen, testLen+1)
 }
 }
 
 

+ 1 - 1
integration-cli/docker_cli_network_unix_test.go

@@ -769,7 +769,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) {
 }
 }
 
 
 func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) {
 func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) {
-	testRequires(c, DaemonIsLinux, Network)
+	testRequires(c, DaemonIsLinux, Network, IsAmd64)
 
 
 	var (
 	var (
 		npName        = "mavenugo/test-docker-netplugin"
 		npName        = "mavenugo/test-docker-netplugin"

+ 1 - 1
integration-cli/docker_cli_plugins_test.go

@@ -60,7 +60,7 @@ func (s *DockerSuite) TestPluginForceRemove(c *check.C) {
 }
 }
 
 
 func (s *DockerSuite) TestPluginActive(c *check.C) {
 func (s *DockerSuite) TestPluginActive(c *check.C) {
-	testRequires(c, DaemonIsLinux, Network)
+	testRequires(c, DaemonIsLinux, Network, IsAmd64)
 	out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag)
 	out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 
 

+ 146 - 112
man/dockerd.8.md

@@ -71,13 +71,13 @@ dockerd - Enable daemon mode
 [**--userns-remap**[=*default*]]
 [**--userns-remap**[=*default*]]
 
 
 # DESCRIPTION
 # DESCRIPTION
-**dockerd** is used for starting the Docker daemon(i.e., to command the daemon to manage images,
-containers etc.) So **dockerd** is a server, as a daemon.
+**dockerd** is used for starting the Docker daemon (i.e., to command the daemon
+to manage images, containers etc).  So **dockerd** is a server, as a daemon.
 
 
 To run the Docker daemon you can specify **dockerd**.
 To run the Docker daemon you can specify **dockerd**.
 You can check the daemon options using **dockerd --help**.
 You can check the daemon options using **dockerd --help**.
-Daemon options should be specified after the **dockerd** keyword in the following
-format.
+Daemon options should be specified after the **dockerd** keyword in the
+following format.
 
 
 **dockerd [OPTIONS]**
 **dockerd [OPTIONS]**
 
 
@@ -87,27 +87,31 @@ format.
   Set additional OCI compatible runtime.
   Set additional OCI compatible runtime.
 
 
 **--api-cors-header**=""
 **--api-cors-header**=""
-  Set CORS headers in the remote API. Default is cors disabled. Give urls like "http://foo, http://bar, ...". Give "*" to allow all.
+  Set CORS headers in the remote API. Default is cors disabled. Give urls like
+  "http://foo, http://bar, ...". Give "*" to allow all.
 
 
 **--authorization-plugin**=""
 **--authorization-plugin**=""
   Set authorization plugins to load
   Set authorization plugins to load
 
 
 **-b**, **--bridge**=""
 **-b**, **--bridge**=""
-  Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
+  Attach containers to a pre\-existing network bridge; use 'none' to disable
+  container networking
 
 
 **--bip**=""
 **--bip**=""
-  Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
+  Use the provided CIDR notation address for the dynamically created bridge
+  (docker0); Mutually exclusive of \-b
 
 
 **--cgroup-parent**=""
 **--cgroup-parent**=""
-  Set parent cgroup for all containers. Default is "/docker" for fs cgroup driver and "system.slice" for systemd cgroup driver.
+  Set parent cgroup for all containers. Default is "/docker" for fs cgroup
+  driver and "system.slice" for systemd cgroup driver.
 
 
 **--cluster-store**=""
 **--cluster-store**=""
   URL of the distributed storage backend
   URL of the distributed storage backend
 
 
 **--cluster-advertise**=""
 **--cluster-advertise**=""
-  Specifies the 'host:port' or `interface:port` combination that this particular
-  daemon instance should use when advertising itself to the cluster. The daemon
-  is reached through this value.
+  Specifies the 'host:port' or `interface:port` combination that this
+  particular daemon instance should use when advertising itself to the cluster.
+  The daemon is reached through this value.
 
 
 **--cluster-store-opt**=""
 **--cluster-store-opt**=""
   Specifies options for the Key/Value store.
   Specifies options for the Key/Value store.
@@ -122,7 +126,8 @@ format.
   Enable debug mode. Default is false.
   Enable debug mode. Default is false.
 
 
 **--default-gateway**=""
 **--default-gateway**=""
-  IPv4 address of the container default gateway; this address must be part of the bridge subnet (which is defined by \-b or \--bip)
+  IPv4 address of the container default gateway; this address must be part of
+  the bridge subnet (which is defined by \-b or \--bip)
 
 
 **--default-gateway-v6**=""
 **--default-gateway-v6**=""
   IPv6 address of the container default gateway
   IPv6 address of the container default gateway
@@ -146,13 +151,15 @@ format.
   Set runtime execution options. See RUNTIME EXECUTION OPTIONS.
   Set runtime execution options. See RUNTIME EXECUTION OPTIONS.
 
 
 **--exec-root**=""
 **--exec-root**=""
-  Path to use as the root of the Docker execution state files. Default is `/var/run/docker`.
+  Path to use as the root of the Docker execution state files. Default is
+  `/var/run/docker`.
 
 
 **--experimental**=""
 **--experimental**=""
   Enable the daemon experimental features.
   Enable the daemon experimental features.
 
 
 **--fixed-cidr**=""
 **--fixed-cidr**=""
-  IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
+  IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in
+  the bridge subnet (which is defined by \-b or \-\-bip).
 
 
 **--fixed-cidr-v6**=""
 **--fixed-cidr-v6**=""
   IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64)
   IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64)
@@ -173,28 +180,46 @@ unix://[/path/to/socket] to use.
   Print usage statement
   Print usage statement
 
 
 **--icc**=*true*|*false*
 **--icc**=*true*|*false*
-  Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using the **--link** option (see **docker-run(1)**). Default is true.
+  Allow unrestricted inter\-container and Docker daemon host communication. If
+  disabled, containers can still be linked together using the **--link** option
+  (see **docker-run(1)**). Default is true.
 
 
 **--init**
 **--init**
-Run an init process inside containers for signal forwarding and process reaping.
+  Run an init process inside containers for signal forwarding and process
+  reaping.
 
 
 **--init-path**
 **--init-path**
-Path to the docker-init binary.
+  Path to the docker-init binary.
 
 
 **--insecure-registry**=[]
 **--insecure-registry**=[]
-  Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication.
+  Enable insecure registry communication, i.e., enable un-encrypted and/or
+  untrusted communication.
 
 
-  List of insecure registries can contain an element with CIDR notation to specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs.
+  List of insecure registries can contain an element with CIDR notation to
+  specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS
+  with certificates from unknown CAs.
 
 
-  Enabling `--insecure-registry` is useful when running a local registry.  However, because its use creates security vulnerabilities it should ONLY be enabled for testing purposes.  For increased security, users should add their CA to their system's list of trusted CAs instead of using `--insecure-registry`.
+  Enabling `--insecure-registry` is useful when running a local registry.
+  However, because its use creates security vulnerabilities it should ONLY be
+  enabled for testing purposes.  For increased security, users should add their
+  CA to their system's list of trusted CAs instead of using
+  `--insecure-registry`.
 
 
 **--ip**=""
 **--ip**=""
   Default IP address to use when binding container ports. Default is `0.0.0.0`.
   Default IP address to use when binding container ports. Default is `0.0.0.0`.
 
 
 **--ip-forward**=*true*|*false*
 **--ip-forward**=*true*|*false*
-  Enables IP forwarding on the Docker host. The default is `true`. This flag interacts with the IP forwarding setting on your host system's kernel. If your system has IP forwarding disabled, this setting enables it. If your system has IP forwarding enabled, setting this flag to `--ip-forward=false` has no effect.
-
-  This setting will also enable IPv6 forwarding if you have both `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject Router Advertisements and interfere with the host's existing IPv6 configuration. For more information, please consult the documentation about "Advanced Networking - IPv6".
+  Enables IP forwarding on the Docker host. The default is `true`. This flag
+  interacts with the IP forwarding setting on your host system's kernel. If
+  your system has IP forwarding disabled, this setting enables it. If your
+  system has IP forwarding enabled, setting this flag to `--ip-forward=false`
+  has no effect.
+
+  This setting will also enable IPv6 forwarding if you have both
+  `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject
+  Router Advertisements and interfere with the host's existing IPv6
+  configuration. For more information, please consult the documentation about
+  "Advanced Networking - IPv6".
 
 
 **--ip-masq**=*true*|*false*
 **--ip-masq**=*true*|*false*
   Enable IP masquerading for bridge's IP range. Default is true.
   Enable IP masquerading for bridge's IP range. Default is true.
@@ -203,12 +228,18 @@ Path to the docker-init binary.
   Enable Docker's addition of iptables rules. Default is true.
   Enable Docker's addition of iptables rules. Default is true.
 
 
 **--ipv6**=*true*|*false*
 **--ipv6**=*true*|*false*
-  Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6".
+  Enable IPv6 support. Default is false. Docker will create an IPv6-enabled
+  bridge with address fe80::1 which will allow you to create IPv6-enabled
+  containers. Use together with `--fixed-cidr-v6` to provide globally routable
+  IPv6 addresses. IPv6 forwarding will be enabled if not used with
+  `--ip-forward=false`. This may collide with your host's current IPv6
+  settings. For more information please consult the documentation about
+  "Advanced Networking - IPv6".
 
 
 **--isolation**="*default*"
 **--isolation**="*default*"
-   Isolation specifies the type of isolation technology used by containers. Note
-that the default on Windows server is `process`, and the default on Windows client
-is `hyperv`. Linux only supports `default`.
+   Isolation specifies the type of isolation technology used by containers.
+   Note that the default on Windows server is `process`, and the default on
+   Windows client is `hyperv`. Linux only supports `default`.
 
 
 **-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
 **-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
   Set the logging level. Default is `info`.
   Set the logging level. Default is `info`.
@@ -217,7 +248,9 @@ is `hyperv`. Linux only supports `default`.
   Set key=value labels to the daemon (displayed in `docker info`)
   Set key=value labels to the daemon (displayed in `docker info`)
 
 
 **--live-restore**=*false*
 **--live-restore**=*false*
-  Enable live restore of running containers when the daemon starts so that they are not restarted.
+  Enable live restore of running containers when the daemon starts so that they
+  are not restarted. This option is applicable only for docker daemon running
+  on Linux host.
 
 
 **--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
 **--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
   Default driver for container logs. Default is `json-file`.
   Default driver for container logs. Default is `json-file`.
@@ -239,12 +272,13 @@ is `hyperv`. Linux only supports `default`.
   Path to use for daemon PID file. Default is `/var/run/docker.pid`
   Path to use for daemon PID file. Default is `/var/run/docker.pid`
 
 
 **--raw-logs**
 **--raw-logs**
-Output daemon logs in full timestamp format without ANSI coloring. If this flag is not set,
-the daemon outputs condensed, colorized logs if a terminal is detected, or full ("raw")
-output otherwise.
+  Output daemon logs in full timestamp format without ANSI coloring. If this
+  flag is not set, the daemon outputs condensed, colorized logs if a terminal
+  is detected, or full ("raw") output otherwise.
 
 
 **--registry-mirror**=*<scheme>://<host>*
 **--registry-mirror**=*<scheme>://<host>*
-  Prepend a registry mirror to be used for image pulls. May be specified multiple times.
+  Prepend a registry mirror to be used for image pulls. May be specified
+  multiple times.
 
 
 **-s**, **--storage-driver**=""
 **-s**, **--storage-driver**=""
   Force the Docker runtime to use a specific storage driver.
   Force the Docker runtime to use a specific storage driver.
@@ -262,9 +296,10 @@ output otherwise.
   Set storage driver options. See STORAGE DRIVER OPTIONS.
   Set storage driver options. See STORAGE DRIVER OPTIONS.
 
 
 **--swarm-default-advertise-addr**=*IP|INTERFACE*
 **--swarm-default-advertise-addr**=*IP|INTERFACE*
-  Set default address or interface for swarm to advertise as its externally-reachable address to other cluster
-  members. This can be a hostname, an IP address, or an interface such as `eth0`. A port cannot be specified with
-  this option.
+  Set default address or interface for swarm to advertise as its
+  externally-reachable address to other cluster members. This can be a
+  hostname, an IP address, or an interface such as `eth0`. A port cannot be
+  specified with this option.
 
 
 **--tls**=*true*|*false*
 **--tls**=*true*|*false*
   Use TLS; implied by --tlsverify. Default is false.
   Use TLS; implied by --tlsverify. Default is false.
@@ -283,13 +318,19 @@ output otherwise.
   Default is false.
   Default is false.
 
 
 **--userland-proxy**=*true*|*false*
 **--userland-proxy**=*true*|*false*
-    Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true.
+  Rely on a userland proxy implementation for inter-container and
+  outside-to-container loopback communications. Default is true.
 
 
 **--userland-proxy-path**=""
 **--userland-proxy-path**=""
   Path to the userland proxy binary.
   Path to the userland proxy binary.
 
 
 **--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid*
 **--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid*
-    Enable user namespaces for containers on the daemon. Specifying "default" will cause a new user and group to be created to handle UID and GID range remapping for the user namespace mappings used for contained processes. Specifying a user (or uid) and optionally a group (or gid) will cause the daemon to lookup the user and group's subordinate ID ranges for use as the user namespace mappings for contained processes.
+  Enable user namespaces for containers on the daemon. Specifying "default"
+  will cause a new user and group to be created to handle UID and GID range
+  remapping for the user namespace mappings used for contained processes.
+  Specifying a user (or uid) and optionally a group (or gid) will cause the
+  daemon to lookup the user and group's subordinate ID ranges for use as the
+  user namespace mappings for contained processes.
 
 
 # STORAGE DRIVER OPTIONS
 # STORAGE DRIVER OPTIONS
 
 
@@ -402,8 +443,8 @@ exits.
 For example, when a container exits, its associated thin device is removed. If
 For example, when a container exits, its associated thin device is removed. If
 that device has leaked into some other mount namespace and can't be removed,
 that device has leaked into some other mount namespace and can't be removed,
 the container exit still succeeds and this option causes the system to schedule
 the container exit still succeeds and this option causes the system to schedule
-the device for deferred removal. It does not wait in a loop trying to remove a busy
-device.
+the device for deferred removal. It does not wait in a loop trying to remove a
+busy device.
 
 
 Example use: `dockerd --storage-opt dm.use_deferred_removal=true`
 Example use: `dockerd --storage-opt dm.use_deferred_removal=true`
 
 
@@ -431,23 +472,23 @@ namespaces.
 
 
 #### dm.loopdatasize
 #### dm.loopdatasize
 
 
-**Note**: This option configures devicemapper loopback, which should not be used in production.
+**Note**: This option configures devicemapper loopback, which should not be
+used in production.
 
 
-Specifies the size to use when creating the loopback file for the
-"data" device which is used for the thin pool. The default size is
-100G. The file is sparse, so it will not initially take up
-this much space.
+Specifies the size to use when creating the loopback file for the "data" device
+which is used for the thin pool. The default size is 100G. The file is sparse,
+so it will not initially take up this much space.
 
 
 Example use: `dockerd --storage-opt dm.loopdatasize=200G`
 Example use: `dockerd --storage-opt dm.loopdatasize=200G`
 
 
 #### dm.loopmetadatasize
 #### dm.loopmetadatasize
 
 
-**Note**: This option configures devicemapper loopback, which should not be used in production.
+**Note**: This option configures devicemapper loopback, which should not be
+used in production.
 
 
-Specifies the size to use when creating the loopback file for the
-"metadata" device which is used for the thin pool. The default size
-is 2G. The file is sparse, so it will not initially take up
-this much space.
+Specifies the size to use when creating the loopback file for the "metadata"
+device which is used for the thin pool. The default size is 2G. The file is
+sparse, so it will not initially take up this much space.
 
 
 Example use: `dockerd --storage-opt dm.loopmetadatasize=4G`
 Example use: `dockerd --storage-opt dm.loopmetadatasize=4G`
 
 
@@ -455,17 +496,16 @@ Example use: `dockerd --storage-opt dm.loopmetadatasize=4G`
 
 
 (Deprecated, use `dm.thinpooldev`)
 (Deprecated, use `dm.thinpooldev`)
 
 
-Specifies a custom blockdevice to use for data for a
-Docker-managed thin pool.  It is better to use `dm.thinpooldev` - see
-the documentation for it above for discussion of the advantages.
+Specifies a custom blockdevice to use for data for a Docker-managed thin pool.
+It is better to use `dm.thinpooldev` - see the documentation for it above for
+discussion of the advantages.
 
 
 #### dm.metadatadev
 #### dm.metadatadev
 
 
 (Deprecated, use `dm.thinpooldev`)
 (Deprecated, use `dm.thinpooldev`)
 
 
-Specifies a custom blockdevice to use for metadata for a
-Docker-managed thin pool.  See `dm.datadev` for why this is
-deprecated.
+Specifies a custom blockdevice to use for metadata for a Docker-managed thin
+pool.  See `dm.datadev` for why this is deprecated.
 
 
 #### dm.blocksize
 #### dm.blocksize
 
 
@@ -476,24 +516,22 @@ Example use: `dockerd --storage-opt dm.blocksize=512K`
 
 
 #### dm.blkdiscard
 #### dm.blkdiscard
 
 
-Enables or disables the use of `blkdiscard` when removing devicemapper
-devices.  This is disabled by default due to the additional latency,
-but as a special case with loopback devices it will be enabled, in
-order to re-sparsify the loopback file on image/container removal.
+Enables or disables the use of `blkdiscard` when removing devicemapper devices.
+This is disabled by default due to the additional latency, but as a special
+case with loopback devices it will be enabled, in order to re-sparsify the
+loopback file on image/container removal.
 
 
-Disabling this on loopback can lead to *much* faster container removal
-times, but it also prevents the space used in `/var/lib/docker` directory
-from being returned to the system for other use when containers are
-removed.
+Disabling this on loopback can lead to *much* faster container removal times,
+but it also prevents the space used in `/var/lib/docker` directory from being
+returned to the system for other use when containers are removed.
 
 
 Example use: `dockerd --storage-opt dm.blkdiscard=false`
 Example use: `dockerd --storage-opt dm.blkdiscard=false`
 
 
 #### dm.override_udev_sync_check
 #### dm.override_udev_sync_check
 
 
-By default, the devicemapper backend attempts to synchronize with the
-`udev` device manager for the Linux kernel.  This option allows
-disabling that synchronization, to continue even though the
-configuration may be buggy.
+By default, the devicemapper backend attempts to synchronize with the `udev`
+device manager for the Linux kernel.  This option allows disabling that
+synchronization, to continue even though the configuration may be buggy.
 
 
 To view the `udev` sync support of a Docker daemon that is using the
 To view the `udev` sync support of a Docker daemon that is using the
 `devicemapper` driver, run:
 `devicemapper` driver, run:
@@ -506,10 +544,9 @@ To view the `udev` sync support of a Docker daemon that is using the
 When `udev` sync support is `true`, then `devicemapper` and `udev` can
 When `udev` sync support is `true`, then `devicemapper` and `udev` can
 coordinate the activation and deactivation of devices for containers.
 coordinate the activation and deactivation of devices for containers.
 
 
-When `udev` sync support is `false`, a race condition occurs between
-the `devicemapper` and `udev` during create and cleanup. The race
-condition results in errors and failures. (For information on these
-failures, see
+When `udev` sync support is `false`, a race condition occurs between the
+`devicemapper` and `udev` during create and cleanup. The race condition results
+in errors and failures. (For information on these failures, see
 [docker#4036](https://github.com/docker/docker/issues/4036))
 [docker#4036](https://github.com/docker/docker/issues/4036))
 
 
 To allow the `docker` daemon to start, regardless of whether `udev` sync is
 To allow the `docker` daemon to start, regardless of whether `udev` sync is
@@ -517,15 +554,14 @@ To allow the `docker` daemon to start, regardless of whether `udev` sync is
 
 
         $ dockerd --storage-opt dm.override_udev_sync_check=true
         $ dockerd --storage-opt dm.override_udev_sync_check=true
 
 
-When this value is `true`, the driver continues and simply warns you
-the errors are happening.
+When this value is `true`, the driver continues and simply warns you the errors
+are happening.
 
 
-**Note**: The ideal is to pursue a `docker` daemon and environment
-that does support synchronizing with `udev`. For further discussion on
-this topic, see
+**Note**: The ideal is to pursue a `docker` daemon and environment that does
+support synchronizing with `udev`. For further discussion on this topic, see
 [docker#4036](https://github.com/docker/docker/issues/4036).
 [docker#4036](https://github.com/docker/docker/issues/4036).
-Otherwise, set this flag for migrating existing Docker daemons to a
-daemon with a supported environment.
+Otherwise, set this flag for migrating existing Docker daemons to a daemon with
+a supported environment.
 
 
 #### dm.min_free_space
 #### dm.min_free_space
 
 
@@ -536,14 +572,13 @@ free space checking logic. If user does not specify a value for this option,
 the Engine uses a default value of 10%.
 the Engine uses a default value of 10%.
 
 
 Whenever a new a thin pool device is created (during `docker pull` or during
 Whenever a new a thin pool device is created (during `docker pull` or during
-container creation), the Engine checks if the minimum free space is
-available. If the space is unavailable, then device creation fails and any
-relevant `docker` operation fails.
+container creation), the Engine checks if the minimum free space is available.
+If the space is unavailable, then device creation fails and any relevant
+`docker` operation fails.
 
 
 To recover from this error, you must create more free space in the thin pool to
 To recover from this error, you must create more free space in the thin pool to
-recover from the error. You can create free space by deleting some images
-and containers from tge thin pool. You can also add
-more storage to the thin pool.
+recover from the error. You can create free space by deleting some images and
+containers from the thin pool. You can also add more storage to the thin pool.
 
 
 To add more space to an LVM (logical volume management) thin pool, just add
 To add more space to an LVM (logical volume management) thin pool, just add
 more storage to the  group container thin pool; this should automatically
 more storage to the  group container thin pool; this should automatically
@@ -555,13 +590,13 @@ Example use:: `dockerd --storage-opt dm.min_free_space=10%`
 
 
 #### dm.xfs_nospace_max_retries
 #### dm.xfs_nospace_max_retries
 
 
-Specifies the maximum number of retries XFS should attempt to complete
-IO when ENOSPC (no space) error is returned by underlying storage device.
+Specifies the maximum number of retries XFS should attempt to complete IO when
+ENOSPC (no space) error is returned by underlying storage device.
 
 
-By default XFS retries infinitely for IO to finish and this can result
-in unkillable process. To change this behavior one can set
-xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting
-ENOSPC and will shutdown filesystem.
+By default XFS retries infinitely for IO to finish and this can result in
+unkillable process. To change this behavior one can set xfs_nospace_max_retries
+to say 0 and XFS will not retry IO after getting ENOSPC and will shutdown
+filesystem.
 
 
 Example use:
 Example use:
 
 
@@ -572,9 +607,9 @@ Example use:
 
 
 #### zfs.fsname
 #### zfs.fsname
 
 
-Set zfs filesystem under which docker will create its own datasets.
-By default docker will pick up the zfs filesystem where docker graph
-(`/var/lib/docker`) is located.
+Set zfs filesystem under which docker will create its own datasets.  By default
+docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`)
+is located.
 
 
 Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker`
 Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker`
 
 
@@ -582,20 +617,19 @@ Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker`
 
 
 #### btrfs.min_space
 #### btrfs.min_space
 
 
-Specifies the mininum size to use when creating the subvolume which is used
-for containers. If user uses disk quota for btrfs when creating or running
-a container with **--storage-opt size** option, docker should ensure the
-**size** cannot be smaller than **btrfs.min_space**.
+Specifies the minimum size to use when creating the subvolume which is used for
+containers. If user uses disk quota for btrfs when creating or running a
+container with **--storage-opt size** option, docker should ensure the **size**
+cannot be smaller than **btrfs.min_space**.
 
 
 Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G`
 Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G`
 
 
 # CLUSTER STORE OPTIONS
 # CLUSTER STORE OPTIONS
 
 
-The daemon uses libkv to advertise
-the node within the cluster.  Some Key/Value backends support mutual
-TLS, and the client TLS settings used by the daemon can be configured
-using the **--cluster-store-opt** flag, specifying the paths to PEM encoded
-files.
+The daemon uses libkv to advertise the node within the cluster.  Some Key/Value
+backends support mutual TLS, and the client TLS settings used by the daemon can
+be configured using the **--cluster-store-opt** flag, specifying the paths to
+PEM encoded files.
 
 
 #### kv.cacertfile
 #### kv.cacertfile
 
 
@@ -604,19 +638,19 @@ Specifies the path to a local file with PEM encoded CA certificates to trust
 #### kv.certfile
 #### kv.certfile
 
 
 Specifies the path to a local file with a PEM encoded certificate.  This
 Specifies the path to a local file with a PEM encoded certificate.  This
-certificate is used as the client cert for communication with the
-Key/Value store.
+certificate is used as the client cert for communication with the Key/Value
+store.
 
 
 #### kv.keyfile
 #### kv.keyfile
 
 
 Specifies the path to a local file with a PEM encoded private key.  This
 Specifies the path to a local file with a PEM encoded private key.  This
-private key is used as the client key for communication with the
-Key/Value store.
+private key is used as the client key for communication with the Key/Value
+store.
 
 
 # Access authorization
 # Access authorization
 
 
-Docker's access authorization can be extended by authorization plugins that your
-organization can purchase or build themselves. You can install one or more
+Docker's access authorization can be extended by authorization plugins that
+your organization can purchase or build themselves. You can install one or more
 authorization plugins when you start the Docker `daemon` using the
 authorization plugins when you start the Docker `daemon` using the
 `--authorization-plugin=PLUGIN_ID` option.
 `--authorization-plugin=PLUGIN_ID` option.
 
 
@@ -624,10 +658,10 @@ authorization plugins when you start the Docker `daemon` using the
 dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
 dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
 ```
 ```
 
 
-The `PLUGIN_ID` value is either the plugin's name or a path to its specification
-file. The plugin's implementation determines whether you can specify a name or
-path. Consult with your Docker administrator to get information about the
-plugins available to you.
+The `PLUGIN_ID` value is either the plugin's name or a path to its
+specification file. The plugin's implementation determines whether you can
+specify a name or path. Consult with your Docker administrator to get
+information about the plugins available to you.
 
 
 Once a plugin is installed, requests made to the `daemon` through the command
 Once a plugin is installed, requests made to the `daemon` through the command
 line or Docker's remote API are allowed or denied by the plugin.  If you have
 line or Docker's remote API are allowed or denied by the plugin.  If you have

+ 1 - 1
pkg/tailfile/tailfile.go

@@ -44,7 +44,7 @@ func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
 			break
 			break
 		} else {
 		} else {
 			b = make([]byte, blockSize)
 			b = make([]byte, blockSize)
-			if _, err := f.Seek(step, os.SEEK_END); err != nil {
+			if _, err := f.Seek(left, os.SEEK_SET); err != nil {
 				return nil, err
 				return nil, err
 			}
 			}
 			if _, err := f.Read(b); err != nil {
 			if _, err := f.Read(b); err != nil {

+ 23 - 18
plugin/manager_linux.go

@@ -9,6 +9,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/oci"
 	"github.com/docker/docker/oci"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/plugin/v2"
@@ -48,6 +49,25 @@ func (pm *Manager) restore(p *v2.Plugin) error {
 	return pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID()))
 	return pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID()))
 }
 }
 
 
+func shutdownPlugin(p *v2.Plugin, containerdClient libcontainerd.Client) {
+	pluginID := p.GetID()
+
+	err := containerdClient.Signal(pluginID, int(syscall.SIGTERM))
+	if err != nil {
+		logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err)
+	} else {
+		select {
+		case <-p.ExitChan:
+			logrus.Debug("Clean shutdown of plugin")
+		case <-time.After(time.Second * 10):
+			logrus.Debug("Force shutdown plugin")
+			if err := containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil {
+				logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err)
+			}
+		}
+	}
+}
+
 func (pm *Manager) disable(p *v2.Plugin) error {
 func (pm *Manager) disable(p *v2.Plugin) error {
 	if !p.IsEnabled() {
 	if !p.IsEnabled() {
 		return fmt.Errorf("plugin %s is already disabled", p.Name())
 		return fmt.Errorf("plugin %s is already disabled", p.Name())
@@ -55,9 +75,8 @@ func (pm *Manager) disable(p *v2.Plugin) error {
 	p.Lock()
 	p.Lock()
 	p.Restart = false
 	p.Restart = false
 	p.Unlock()
 	p.Unlock()
-	if err := pm.containerdClient.Signal(p.GetID(), int(syscall.SIGKILL)); err != nil {
-		logrus.Error(err)
-	}
+
+	shutdownPlugin(p, pm.containerdClient)
 	pm.pluginStore.SetState(p, false)
 	pm.pluginStore.SetState(p, false)
 	return nil
 	return nil
 }
 }
@@ -71,25 +90,11 @@ func (pm *Manager) Shutdown() {
 			continue
 			continue
 		}
 		}
 		if pm.containerdClient != nil && p.IsEnabled() {
 		if pm.containerdClient != nil && p.IsEnabled() {
-			pluginID := p.GetID()
 			p.Lock()
 			p.Lock()
 			p.ExitChan = make(chan bool)
 			p.ExitChan = make(chan bool)
 			p.Restart = false
 			p.Restart = false
 			p.Unlock()
 			p.Unlock()
-			err := pm.containerdClient.Signal(p.PluginObj.ID, int(syscall.SIGTERM))
-			if err != nil {
-				logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err)
-			} else {
-				select {
-				case <-p.ExitChan:
-					logrus.Debug("Clean shutdown of plugin")
-				case <-time.After(time.Second * 10):
-					logrus.Debug("Force shutdown plugin")
-					if err := pm.containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil {
-						logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err)
-					}
-				}
-			}
+			shutdownPlugin(p, pm.containerdClient)
 		}
 		}
 	}
 	}
 }
 }

+ 1 - 1
volume/local/local.go

@@ -269,7 +269,7 @@ func (r *Root) validateName(name string) error {
 		return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")}
 		return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")}
 	}
 	}
 	if !volumeNameRegex.MatchString(name) {
 	if !volumeNameRegex.MatchString(name) {
-		return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed", name, utils.RestrictedNameChars)}
+		return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path.", name, utils.RestrictedNameChars)}
 	}
 	}
 	return nil
 	return nil
 }
 }