
Remove unneeded references to execDriver

This includes:
 - updating the docs
 - removing dangling variables

Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
Kenfe-Mickael Laventure committed 9 years ago
commit 8af4f89cba

+ 1 - 1
contrib/completion/fish/docker.fish

@@ -51,7 +51,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'

+ 2 - 2
contrib/completion/zsh/_docker

@@ -650,8 +650,8 @@ __docker_subcommand() {
                 "($help)*--dns-opt=[DNS options to use]:DNS option: " \
                 "($help)*--dns-opt=[DNS options to use]:DNS option: " \
                 "($help)*--default-ulimit=[Default ulimit settings for containers]:ulimit: " \
                 "($help)*--default-ulimit=[Default ulimit settings for containers]:ulimit: " \
                 "($help)--disable-legacy-registry[Do not contact legacy registries]" \
                 "($help)--disable-legacy-registry[Do not contact legacy registries]" \
-                "($help)*--exec-opt=[Exec driver options]:exec driver options: " \
-                "($help)--exec-root=[Root of the Docker execdriver]:path:_directories" \
+                "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \
+                "($help)--exec-root=[Root directory for execution state files]:path:_directories" \
                 "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \
                 "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \
                 "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \
                 "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \
                 "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \
                 "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \

+ 2 - 2
daemon/config.go

@@ -112,10 +112,10 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string)
 
 	cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options"))
 	cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last"))
-	cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options"))
+	cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set runtime execution options"))
 	cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file"))
 	cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime"))
-	cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root of the Docker execdriver"))
+	cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files"))
 	cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run"))
 	cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use"))
 	cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
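As an aside, the `--exec-opt` flag registered above is a repeatable list flag. A minimal sketch of that accumulation using only the standard library `flag` package (Docker itself uses its `pkg/mflag` and `opts` packages; the `native.foo=bar` value is purely illustrative):

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// listOpts approximates what opts.NewNamedListOptsRef provides: a flag.Value
// that appends every occurrence of the flag to a single string slice.
type listOpts struct{ values *[]string }

func (l listOpts) String() string { return strings.Join(*l.values, ",") }
func (l listOpts) Set(v string) error {
	*l.values = append(*l.values, v)
	return nil
}

func main() {
	var execOptions []string
	fs := flag.NewFlagSet("daemon", flag.ExitOnError)
	fs.Var(listOpts{&execOptions}, "exec-opt", "Set runtime execution options")

	// Repeated flags accumulate rather than overwrite each other.
	fs.Parse([]string{"-exec-opt", "native.cgroupdriver=cgroupfs", "-exec-opt", "native.foo=bar"})
	fmt.Println(execOptions) // [native.cgroupdriver=cgroupfs native.foo=bar]
}
```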

+ 16 - 11
daemon/daemon_unix.go

@@ -51,6 +51,10 @@ const (
 	// constants for remapped root settings
 	defaultIDSpecifier string = "default"
 	defaultRemappedID  string = "dockremap"
+
+	// constant for cgroup drivers
+	cgroupFsDriver      = "cgroupfs"
+	cgroupSystemdDriver = "systemd"
 )
 
 func getMemoryResources(config containertypes.Resources) *specs.Memory {
@@ -460,29 +464,30 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi
 }
 
 func (daemon *Daemon) getCgroupDriver() string {
-	cgroupDriver := "cgroupfs"
-	if daemon.usingSystemd() {
-		cgroupDriver = "systemd"
-	}
-	return cgroupDriver
-}
+	cgroupDriver := cgroupFsDriver
 
-func usingSystemd(config *Config) bool {
-	for _, option := range config.ExecOptions {
+	// No other cgroup drivers are supported at the moment. Warn the
+	// user if they tried to set one other than cgroupfs
+	for _, option := range daemon.configStore.ExecOptions {
 		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
 			continue
 		}
-		if val == "systemd" {
-			return true
+		if val != cgroupFsDriver {
+			logrus.Warnf("cgroupdriver '%s' is not supported", val)
 		}
 	}
 
+	return cgroupDriver
+}
+
+func usingSystemd(config *Config) bool {
+	// No support for systemd cgroup atm
 	return false
 }
 
 func (daemon *Daemon) usingSystemd() bool {
-	return usingSystemd(daemon.configStore)
+	return daemon.getCgroupDriver() == cgroupSystemdDriver
 }
 
 // verifyPlatformContainerSettings performs platform-specific validation of the
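Taken together, the patched logic reduces to the sketch below: scan the exec options for `native.cgroupdriver`, warn on anything other than `cgroupfs`, and always report `cgroupfs`. This is a standalone approximation; `parseKeyValueOpt` stands in for `parsers.ParseKeyValueOpt` and `fmt` for `logrus`:

```go
package main

import (
	"fmt"
	"strings"
)

const cgroupFsDriver = "cgroupfs"

// parseKeyValueOpt is a local stand-in for parsers.ParseKeyValueOpt:
// it splits "key=value" into its two halves.
func parseKeyValueOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

// getCgroupDriver mirrors the patched daemon logic: cgroupfs is the only
// supported driver, and anything else merely produces a warning.
func getCgroupDriver(execOptions []string) string {
	for _, option := range execOptions {
		key, val, err := parseKeyValueOpt(option)
		if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
			continue
		}
		if val != cgroupFsDriver {
			fmt.Printf("warning: cgroupdriver '%s' is not supported\n", val)
		}
	}
	return cgroupFsDriver
}

func main() {
	// Prints the warning, then falls back to "cgroupfs".
	fmt.Println(getCgroupDriver([]string{"native.cgroupdriver=systemd"}))
}
```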

+ 11 - 12
daemon/graphdriver/devmapper/README.md

@@ -9,21 +9,21 @@ daemon via the `--storage-opt dm.thinpooldev` option.
 
 As a fallback if no thin pool is provided, loopback files will be
 created.  Loopback is very slow, but can be used without any
-pre-configuration of storage.  It is strongly recommended that you do 
+pre-configuration of storage.  It is strongly recommended that you do
 not use loopback in production.  Ensure your Docker daemon has a
 `--storage-opt dm.thinpooldev` argument provided.
 
 In loopback, a thin pool is created at `/var/lib/docker/devicemapper`
-(devicemapper graph location) based on two block devices, one for 
-data and one for metadata. By default these block devices are created 
-automatically by using loopback mounts of automatically created sparse 
+(devicemapper graph location) based on two block devices, one for
+data and one for metadata. By default these block devices are created
+automatically by using loopback mounts of automatically created sparse
 files.
 
-The default loopback files used are 
-`/var/lib/docker/devicemapper/devicemapper/data` and 
-`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata 
-required to map from docker entities to the corresponding devicemapper 
-volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` 
+The default loopback files used are
+`/var/lib/docker/devicemapper/devicemapper/data` and
+`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata
+required to map from docker entities to the corresponding devicemapper
+volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json`
 file (encoded as Json).
 
 In order to support multiple devicemapper graphs on a system, the thin
@@ -92,6 +92,5 @@ This uses the `dm` prefix and would be used something like `docker daemon --stor
 
 These options are currently documented both in [the man
 page](../../../man/docker.1.md) and in [the online
-documentation](https://docs.docker.com/reference/commandline/daemon/#docker-
-execdriver-option).  If you add an options, update both the `man` page and the
-documentation.
+documentation](https://docs.docker.com/reference/commandline/daemon/#storage-driver-options).
+If you add an option, update both the `man` page and the documentation.

+ 1 - 1
daemon/kill.go

@@ -103,7 +103,7 @@ func (daemon *Daemon) Kill(container *container.Container) error {
 		// because if we can't stop the container by this point then
 		// its probably because its already stopped. Meaning, between
 		// the time of the IsRunning() call above and now it stopped.
-		// Also, since the err return will be exec driver specific we can't
+		// Also, since the err return will be environment specific we can't
 		// look for any particular (common) error that would indicate
 		// that the process is already dead vs something else going wrong.
 		// So, instead we'll give it up to 2 more seconds to complete and if
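The two-second grace period the comment describes is a standard Go timeout pattern. A hedged sketch, with `stopped` standing in for the daemon's internal wait-for-exit notification (not the actual kill.go API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStopped gives the container up to the supplied grace period to be
// reported dead before treating the original kill error as fatal.
func waitStopped(stopped <-chan struct{}, grace time.Duration) error {
	select {
	case <-stopped:
		return nil // it stopped on its own; swallow the kill error
	case <-time.After(grace):
		return errors.New("container did not stop within the grace period")
	}
}

func main() {
	stopped := make(chan struct{})
	go func() {
		time.Sleep(500 * time.Millisecond) // simulate the container exiting
		close(stopped)
	}()
	fmt.Println(waitStopped(stopped, 2*time.Second)) // <nil>
}
```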

+ 2 - 2
daemon/volumes_windows.go

@@ -11,8 +11,8 @@ import (
 )
 
 // setupMounts configures the mount points for a container by appending each
-// of the configured mounts on the container to the oci mount structure
-// which will ultimately be passed into the exec driver during container creation.
+// of the configured mounts on the container to the OCI mount structure
+// which will ultimately be passed into the OCI runtime during container creation.
 // It also ensures each of the mounts are lexographically sorted.
 
 // BUGBUG TODO Windows containerd. This would be much better if it returned

+ 2 - 2
docs/admin/configuring.md

@@ -162,7 +162,7 @@ can be located at `/var/log/upstart/docker.log`
 
     $ tail -f /var/log/upstart/docker.log
     INFO[0000] Loading containers: done.
-    INFO[0000] docker daemon: 1.6.0 4749651; execdriver: native-0.2; graphdriver: aufs
+    INFO[0000] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev
     INFO[0000] +job acceptconnections()
     INFO[0000] -job acceptconnections() = OK (0)
     INFO[0000] Daemon has completed initialization
@@ -273,7 +273,7 @@ be viewed using `journalctl -u docker`
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)"
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start."
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done."
-    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="docker daemon: 1.5.0-dev fc0329b/1.5.0; execdriver: native-0.2; graphdriver: devicemapper"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev"
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()"
     May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)"
 

+ 13 - 11
docs/reference/commandline/daemon.md

@@ -32,8 +32,8 @@ weight = -1
       --dns-opt=[]                           DNS options to use
       --dns-search=[]                        DNS search domains to use
       --default-ulimit=[]                    Set default ulimit settings for containers
-      --exec-opt=[]                          Set exec driver options
-      --exec-root="/var/run/docker"          Root of the Docker execdriver
+      --exec-opt=[]                          Set runtime execution options
+      --exec-root="/var/run/docker"          Root directory for execution state files
       --fixed-cidr=""                        IPv4 subnet for fixed IPs
       --fixed-cidr=""                        IPv4 subnet for fixed IPs
       --fixed-cidr-v6=""                     IPv6 subnet for fixed IPs
       --fixed-cidr-v6=""                     IPv6 subnet for fixed IPs
       -G, --group="docker"                   Group for the unix socket
       -G, --group="docker"                   Group for the unix socket
@@ -476,24 +476,26 @@ Currently supported options of `zfs`:
 
         $ docker daemon -s zfs --storage-opt zfs.fsname=zroot/docker
 
-## Docker execdriver option
+## Docker runtime execution options
 
-The Docker daemon uses a specifically built `libcontainer` execution driver as
-its interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`.
+The Docker daemon relies on an
+[OCI](https://github.com/opencontainers/specs) compliant runtime
+(invoked via the `containerd` daemon) as its interface to the Linux
+kernel `namespaces`, `cgroups`, and `SELinux`.
 
-## Options for the native execdriver
+## Options for the runtime
 
-You can configure the `native` (libcontainer) execdriver using options specified
+You can configure the runtime using options specified
 with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
 single `native.cgroupdriver` option is available.
 
 The `native.cgroupdriver` option specifies the management of the container's
-cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and
-it is not available, the system uses `cgroupfs`. If you omit the
+cgroups. You can only specify `cgroupfs` at the moment. If you omit the
 `native.cgroupdriver` option, `cgroupfs` is used.
-This example sets the `cgroupdriver` to `systemd`:
 
-    $ sudo docker daemon --exec-opt native.cgroupdriver=systemd
+This example explicitly sets the `cgroupdriver` to `cgroupfs`:
+
+    $ sudo docker daemon --exec-opt native.cgroupdriver=cgroupfs
 
 Setting this option applies to all containers the daemon launches.
 

+ 4 - 4
docs/security/security.md

@@ -198,7 +198,7 @@ to the host.
 This won't affect regular web apps; but malicious users will find that
 the arsenal at their disposal has shrunk considerably! By default Docker
 drops all capabilities except [those
-needed](https://github.com/docker/docker/blob/87de5fdd5972343a11847922e0f41d9898b5cff7/daemon/execdriver/native/template/default_template_linux.go#L16-L29),
+needed](https://github.com/docker/docker/blob/master/oci/defaults_linux.go#L64-L79),
 a whitelist instead of a blacklist approach. You can see a full list of
 available capabilities in [Linux
 manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
@@ -243,11 +243,11 @@ with e.g., special network topologies or shared filesystems, you can
 expect to see tools to harden existing Docker containers without
 affecting Docker's core.
 
-As of Docker 1.10 User Namespaces are supported directly by the docker 
-daemon. This feature allows for the root user in a container to be mapped 
+As of Docker 1.10 User Namespaces are supported directly by the docker
+daemon. This feature allows for the root user in a container to be mapped
 to a non uid-0 user outside the container, which can help to mitigate the
 risks of container breakout. This facility is available but not enabled
-by default. 
+by default.
 
 Refer to the [daemon command](../reference/commandline/daemon.md#daemon-user-namespace-options)
 in the command line reference for more information on this feature.

+ 99 - 99
docs/userguide/storagedriver/device-mapper-driver.md

@@ -51,46 +51,46 @@ Device Mapper technology works at the block level rather than the file level.
 This means that `devicemapper` storage driver's thin provisioning and
 copy-on-write operations work with blocks rather than entire files.
 
->**Note**: Snapshots are also referred to as *thin devices* or *virtual 
->devices*. They all mean the same thing in the context of the `devicemapper` 
+>**Note**: Snapshots are also referred to as *thin devices* or *virtual
+>devices*. They all mean the same thing in the context of the `devicemapper`
 >storage driver.
 
 With `devicemapper` the high level process for creating images is as follows:
 
 1. The `devicemapper` storage driver creates a thin pool.
 
-    The pool is created from block devices or loop mounted sparse files (more 
+    The pool is created from block devices or loop mounted sparse files (more
 on this later).
 
 2. Next it creates a *base device*.
 
-    A base device is a thin device with a filesystem. You can see which 
-filesystem is in use by running the `docker info` command and checking the 
+    A base device is a thin device with a filesystem. You can see which
+filesystem is in use by running the `docker info` command and checking the
 `Backing filesystem` value.
 
 3. Each new image (and image layer) is a snapshot of this base device.
 
-    These are thin provisioned copy-on-write snapshots. This means that they 
-are initially empty and only consume space from the pool when data is written 
+    These are thin provisioned copy-on-write snapshots. This means that they
+are initially empty and only consume space from the pool when data is written
 to them.
 
-With `devicemapper`, container layers are snapshots of the image they are 
-created from. Just as with images, container snapshots are thin provisioned 
-copy-on-write snapshots. The container snapshot stores all updates to the 
-container. The `devicemapper` allocates space to them on-demand from the pool 
+With `devicemapper`, container layers are snapshots of the image they are
+created from. Just as with images, container snapshots are thin provisioned
+copy-on-write snapshots. The container snapshot stores all updates to the
+container. The `devicemapper` allocates space to them on-demand from the pool
 as and when data is written to the container.
 
-The high level diagram below shows a thin pool with a base device and two 
+The high level diagram below shows a thin pool with a base device and two
 images.
 
 ![](images/base_device.jpg)
 
-If you look closely at the diagram you'll see that it's snapshots all the way 
+If you look closely at the diagram you'll see that it's snapshots all the way
 down. Each image layer is a snapshot of the layer below it. The lowest layer of
- each image is a snapshot of the base device that exists in the pool. This 
+ each image is a snapshot of the base device that exists in the pool. This
 base device is a `Device Mapper` artifact and not a Docker image layer.
 
-A container is a snapshot of the image it is created from. The diagram below 
+A container is a snapshot of the image it is created from. The diagram below
 shows two containers - one based on the Ubuntu image and the other based on the
  Busybox image.
 
@@ -99,22 +99,22 @@ shows two containers - one based on the Ubuntu image and the other based on the
 
 ## Reads with the devicemapper
 
-Let's look at how reads and writes occur using the `devicemapper` storage 
-driver. The diagram below shows the high level process for reading a single 
+Let's look at how reads and writes occur using the `devicemapper` storage
+driver. The diagram below shows the high level process for reading a single
 block (`0x44f`) in an example container.
 
 ![](images/dm_container.jpg)
 
 1. An application makes a read request for block `0x44f` in the container.
 
-    Because the container is a thin snapshot of an image it does not have the 
-data. Instead, it has a pointer (PTR) to where the data is stored in the image 
+    Because the container is a thin snapshot of an image it does not have the
+data. Instead, it has a pointer (PTR) to where the data is stored in the image
 snapshot lower down in the image stack.
 
-2. The storage driver follows the pointer to block `0xf33` in the snapshot 
+2. The storage driver follows the pointer to block `0xf33` in the snapshot
 relating to image layer `a005...`.
 
-3. The `devicemapper` copies the contents of block `0xf33` from the image 
+3. The `devicemapper` copies the contents of block `0xf33` from the image
 snapshot to memory in the container.
 
 4. The storage driver returns the data to the requesting application.
@@ -122,11 +122,11 @@ snapshot to memory in the container.
 ### Write examples
 
 With the `devicemapper` driver, writing new data to a container is accomplished
- by an *allocate-on-demand* operation. Updating existing data uses a 
-copy-on-write operation. Because Device Mapper is a block-based technology 
+ by an *allocate-on-demand* operation. Updating existing data uses a
+copy-on-write operation. Because Device Mapper is a block-based technology
 these operations occur at the block level.
 
-For example, when making a small change to a large file in a container, the 
+For example, when making a small change to a large file in a container, the
 `devicemapper` storage driver does not copy the entire file. It only copies the
  blocks to be modified. Each block is 64KB.
 
@@ -136,10 +136,10 @@ To write 56KB of new data to a container:
 
 1. An application makes a request to write 56KB of new data to the container.
 
-2. The allocate-on-demand operation allocates a single new 64KB block to the 
+2. The allocate-on-demand operation allocates a single new 64KB block to the
 container's snapshot.
 
-    If the write operation is larger than 64KB, multiple new blocks are 
+    If the write operation is larger than 64KB, multiple new blocks are
 allocated to the container's snapshot.
 
 3. The data is written to the newly allocated block.
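The 64KB arithmetic above is easy to check. A small sketch, assuming only the fixed block size stated in the text:

```go
package main

import "fmt"

const blockSize = 64 * 1024 // devicemapper allocates in 64KB blocks

// blocksForWrite returns how many new 64KB blocks an allocate-on-demand
// write of n bytes consumes: writes always round up to whole blocks.
func blocksForWrite(n int64) int64 {
	return (n + blockSize - 1) / blockSize
}

func main() {
	fmt.Println(blocksForWrite(56 * 1024))  // 1 block for the 56KB example
	fmt.Println(blocksForWrite(200 * 1024)) // 4 blocks for a larger write
}
```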
@@ -152,7 +152,7 @@ To modify existing data for the first time:
 
 2. A copy-on-write operation locates the blocks that need updating.
 
-3. The operation allocates new empty blocks to the container snapshot and 
+3. The operation allocates new empty blocks to the container snapshot and
 copies the data into those blocks.
 
 4. The modified data is written into the newly allocated blocks.
@@ -164,18 +164,18 @@ to the application's read and write operations.
 ## Configuring Docker with Device Mapper
 
 The `devicemapper` is the default Docker storage driver on some Linux
-distributions. This includes RHEL and most of its forks. Currently, the 
+distributions. This includes RHEL and most of its forks. Currently, the
 following distributions support the driver:
 
 * RHEL/CentOS/Fedora
-* Ubuntu 12.04          
-* Ubuntu 14.04          
-* Debian  
+* Ubuntu 12.04
+* Ubuntu 14.04
+* Debian
 
 Docker hosts running the `devicemapper` storage driver default to a
 configuration mode known as `loop-lvm`. This mode uses sparse files to build
-the thin pool used by image and container snapshots. The mode is designed to 
-work out-of-the-box with no additional configuration. However, production 
+the thin pool used by image and container snapshots. The mode is designed to
+work out-of-the-box with no additional configuration. However, production
 deployments should not run under `loop-lvm` mode.
 
 You can detect the mode by viewing the `docker info` command:
@@ -193,83 +193,83 @@ You can detect the mode by viewing the `docker info` command:
      Library Version: 1.02.93-RHEL7 (2015-01-28)
      ...
 
-The output above shows a Docker host running with the `devicemapper` storage 
-driver operating in `loop-lvm` mode. This is indicated by the fact that the 
-`Data loop file` and a `Metadata loop file` are on files under 
-`/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse 
+The output above shows a Docker host running with the `devicemapper` storage
+driver operating in `loop-lvm` mode. This is indicated by the fact that the
+`Data loop file` and a `Metadata loop file` are on files under
+`/var/lib/docker/devicemapper/devicemapper`. These are loopback mounted sparse
 files.
 
 ### Configure direct-lvm mode for production
 
 The preferred configuration for production deployments is `direct lvm`. This
 mode uses block devices to create the thin pool. The following procedure shows
-you how to configure a Docker host to use the `devicemapper` storage driver in 
+you how to configure a Docker host to use the `devicemapper` storage driver in
 a `direct-lvm` configuration.
 
-> **Caution:** If you have already run the Docker daemon on your Docker host 
-> and have images you want to keep, `push` them Docker Hub or your private 
+> **Caution:** If you have already run the Docker daemon on your Docker host
+> and have images you want to keep, `push` them to Docker Hub or your private
 > Docker Trusted Registry before attempting this procedure.
 
-The procedure below will create a 90GB data volume and 4GB metadata volume to 
-use as backing for the storage pool. It assumes that you have a spare block 
-device at `/dev/xvdf` with enough free space to complete the task. The device 
-identifier and volume sizes may be be different in your environment and you 
-should substitute your own values throughout the procedure. The procedure also 
+The procedure below will create a 90GB data volume and 4GB metadata volume to
+use as backing for the storage pool. It assumes that you have a spare block
+device at `/dev/xvdf` with enough free space to complete the task. The device
+identifier and volume sizes may be different in your environment and you
+should substitute your own values throughout the procedure. The procedure also
 assumes that the Docker daemon is in the `stopped` state.
 
 1. Log in to the Docker host you want to configure and stop the Docker daemon.
 
-2. If it exists, delete your existing image store by removing the 
+2. If it exists, delete your existing image store by removing the
 `/var/lib/docker` directory.
 
         $ sudo rm -rf /var/lib/docker
 
-3. Create an LVM physical volume (PV) on your spare block device using the 
+3. Create an LVM physical volume (PV) on your spare block device using the
 `pvcreate` command.
 
         $ sudo pvcreate /dev/xvdf
         Physical volume `/dev/xvdf` successfully created
 
-    The device identifier may be different on your system. Remember to 
+    The device identifier may be different on your system. Remember to
 substitute your value in the command above.
 
-4. Create a new volume group (VG) called `vg-docker` using the PV created in 
+4. Create a new volume group (VG) called `vg-docker` using the PV created in
 the previous step.
 
         $ sudo vgcreate vg-docker /dev/xvdf
         Volume group `vg-docker` successfully created
 
-5. Create a new 90GB logical volume (LV) called `data` from space in the 
+5. Create a new 90GB logical volume (LV) called `data` from space in the
 `vg-docker` volume group.
 
         $ sudo lvcreate -L 90G -n data vg-docker
         Logical volume `data` created.
 
-    The command creates an LVM logical volume called `data` and an associated 
-block device file at `/dev/vg-docker/data`. In a later step, you instruct the 
-`devicemapper` storage driver to use this block device to store image and 
+    The command creates an LVM logical volume called `data` and an associated
+block device file at `/dev/vg-docker/data`. In a later step, you instruct the
+`devicemapper` storage driver to use this block device to store image and
 container data.
 
-    If you receive a signature detection warning, make sure you are working on 
-the correct devices before continuing. Signature warnings indicate that the 
-device you're working on is currently in use by LVM or has been used by LVM in 
+    If you receive a signature detection warning, make sure you are working on
+the correct devices before continuing. Signature warnings indicate that the
+device you're working on is currently in use by LVM or has been used by LVM in
 the past.
 
-6. Create a new logical volume (LV) called `metadata` from space in the 
+6. Create a new logical volume (LV) called `metadata` from space in the
 `vg-docker` volume group.
 
         $ sudo lvcreate -L 4G -n metadata vg-docker
         Logical volume `metadata` created.
 
-    This creates an LVM logical volume called `metadata` and an associated 
-block device file at `/dev/vg-docker/metadata`. In the next step you instruct 
-the `devicemapper` storage driver to use this block device to store image and 
+    This creates an LVM logical volume called `metadata` and an associated
+block device file at `/dev/vg-docker/metadata`. In the next step you instruct
+the `devicemapper` storage driver to use this block device to store image and
 container metadata.
 
-7. Start the Docker daemon with the `devicemapper` storage driver and the 
+7. Start the Docker daemon with the `devicemapper` storage driver and the
 `--storage-opt` flags.
 
-    The `data` and `metadata` devices that you pass to the `--storage-opt` 
+    The `data` and `metadata` devices that you pass to the `--storage-opt`
 options were created in the previous steps.
 
           $ sudo docker daemon --storage-driver=devicemapper --storage-opt dm.datadev=/dev/vg-docker/data --storage-opt dm.metadatadev=/dev/vg-docker/metadata &
@@ -279,13 +279,13 @@ options were created in the previous steps.
           INFO[0027] Option DefaultNetwork: bridge
           <output truncated>
           INFO[0027] Daemon has completed initialization
-          INFO[0027] Docker daemon commit=0a8c2e3 execdriver=native-0.2 graphdriver=devicemapper version=1.8.2
+          INFO[0027] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev
 
     It is also possible to set the `--storage-driver` and `--storage-opt` flags
  in the Docker config file and start the daemon normally using the `service` or
  `systemd` commands.
 
-8. Use the `docker info` command to verify that the daemon is using `data` and 
+8. Use the `docker info` command to verify that the daemon is using `data` and
 `metadata` devices you created.
 
         $ sudo docker info
@@ -301,12 +301,12 @@ options were created in the previous steps.
         [...]
 
     The output of the command above shows the storage driver as `devicemapper`.
- The last two lines also confirm that the correct devices are being used for 
+ The last two lines also confirm that the correct devices are being used for
 the `Data file` and the `Metadata file`.
 
 ### Examine devicemapper structures on the host
 
-You can use the `lsblk` command to see the device files created above and the 
+You can use the `lsblk` command to see the device files created above and the
 `pool` that the `devicemapper` storage driver creates on top of them.
 
     $ sudo lsblk
@@ -319,7 +319,7 @@ You can use the `lsblk` command to see the device files created above and the
     └─vg--docker-metadata      253:1    0    4G  0 lvm
       └─docker-202:1-1032-pool 253:2    0   10G  0 dm
 
-The diagram below shows the image from prior examples updated with the detail 
+The diagram below shows the image from prior examples updated with the detail
 from the `lsblk` command above.
 
 ![](http://farm1.staticflickr.com/703/22116692899_0471e5e160_b.jpg)
@@ -335,73 +335,73 @@ Docker-MAJ:MIN-INO-pool
 `MAJ`, `MIN` and `INO` refer to the major and minor device numbers and inode.
 
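The pool name is straightforward to assemble from those three values. A tiny sketch using the `202:1-1032` numbers from the `lsblk` output above (illustrative values only):

```go
package main

import "fmt"

func main() {
	// Major/minor device numbers and inode backing /var/lib/docker;
	// the values below are illustrative only.
	maj, min, ino := 202, 1, 1032
	fmt.Printf("docker-%d:%d-%d-pool\n", maj, min, ino) // docker-202:1-1032-pool
}
```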
 Because Device Mapper operates at the block level it is more difficult to see
-diffs between image layers and containers. Docker 1.10 and later no longer 
-matches image layer IDs with directory names in `/var/lib/docker`.  However, 
+diffs between image layers and containers. Docker 1.10 and later no longer
+matches image layer IDs with directory names in `/var/lib/docker`.  However,
 there are two key directories. The `/var/lib/docker/devicemapper/mnt` directory
- contains the mount points for image and container layers. The 
-`/var/lib/docker/devicemapper/metadata`directory contains one file for every 
-image layer and container snapshot. The files contain metadata about each 
+ contains the mount points for image and container layers. The
+`/var/lib/docker/devicemapper/metadata` directory contains one file for every
+image layer and container snapshot. The files contain metadata about each
 snapshot in JSON format.
 
 ## Device Mapper and Docker performance
 
-It is important to understand the impact that allocate-on-demand and 
+It is important to understand the impact that allocate-on-demand and
 copy-on-write operations can have on overall container performance.
 
 ### Allocate-on-demand performance impact
 
-The `devicemapper` storage driver allocates new blocks to a container via an 
-allocate-on-demand operation. This means that each time an app writes to 
-somewhere new inside a container, one or more empty blocks has to be located 
+The `devicemapper` storage driver allocates new blocks to a container via an
+allocate-on-demand operation. This means that each time an app writes to
+somewhere new inside a container, one or more empty blocks has to be located
 from the pool and mapped into the container.
 
 All blocks are 64KB. A write that uses less than 64KB still results in a single
- 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB 
-blocks. This can impact container performance, especially in containers that 
+ 64KB block being allocated. Writing more than 64KB of data uses multiple 64KB
+blocks. This can impact container performance, especially in containers that
 perform lots of small writes. However, once a block is allocated to a container
  subsequent reads and writes can operate directly on that block.
 
 ### Copy-on-write performance impact
 
-Each time a container updates existing data for the first time, the 
-`devicemapper` storage driver has to perform a copy-on-write operation. This 
-copies the data from the image snapshot to the container's snapshot. This 
+Each time a container updates existing data for the first time, the
+`devicemapper` storage driver has to perform a copy-on-write operation. This
+copies the data from the image snapshot to the container's snapshot. This
 process can have a noticeable impact on container performance.
 
-All copy-on-write operations have a 64KB granularity. As a results, updating 
-32KB of a 1GB file causes the driver to copy a single 64KB block into the 
-container's snapshot. This has obvious performance advantages over file-level 
-copy-on-write operations which would require copying the entire 1GB file into 
+All copy-on-write operations have a 64KB granularity. As a result, updating
+32KB of a 1GB file causes the driver to copy a single 64KB block into the
+container's snapshot. This has obvious performance advantages over file-level
+copy-on-write operations which would require copying the entire 1GB file into
 the container layer.
 
-In practice, however, containers that perform lots of small block writes 
+In practice, however, containers that perform lots of small block writes
 (<64KB) can perform worse with `devicemapper` than with AUFS.
 
 ### Other device mapper performance considerations
 
-There are several other things that impact the performance of the 
+There are several other things that impact the performance of the
 `devicemapper` storage driver.
 
-- **The mode.** The default mode for Docker running the `devicemapper` storage 
-driver is `loop-lvm`. This mode uses sparse files and suffers from poor 
+- **The mode.** The default mode for Docker running the `devicemapper` storage
+driver is `loop-lvm`. This mode uses sparse files and suffers from poor
 performance. It is **not recommended for production**. The recommended mode for
- production environments is `direct-lvm` where the storage driver writes 
+ production environments is `direct-lvm` where the storage driver writes
 directly to raw block devices.
 
 - **High speed storage.** For best performance you should place the `Data file`
- and `Metadata file` on high speed storage such as SSD. This can be direct 
+ and `Metadata file` on high speed storage such as SSD. This can be direct
 attached storage or from a SAN or NAS array.
 
-- **Memory usage.** `devicemapper` is not the most memory efficient Docker 
-storage driver. Launching *n* copies of the same container loads *n* copies of 
-its files into memory. This can have a memory impact on your Docker host. As a 
-result, the `devicemapper` storage driver may not be the best choice for PaaS 
+- **Memory usage.** `devicemapper` is not the most memory efficient Docker
+storage driver. Launching *n* copies of the same container loads *n* copies of
+its files into memory. This can have a memory impact on your Docker host. As a
+result, the `devicemapper` storage driver may not be the best choice for PaaS
 and other high density use cases.
 
-One final point, data volumes provide the best and most predictable 
-performance. This is because they bypass the storage driver and do not incur 
-any of the potential overheads introduced by thin provisioning and 
-copy-on-write. For this reason, you should to place heavy write workloads on 
+One final point, data volumes provide the best and most predictable
+performance. This is because they bypass the storage driver and do not incur
+any of the potential overheads introduced by thin provisioning and
+copy-on-write. For this reason, you should place heavy write workloads on
 data volumes.
 
 ## Related Information

+ 1 - 2
integration-cli/docker_test_vars.go

@@ -21,8 +21,7 @@ var (
 
 	// TODO Windows CI. These are incorrect and need fixing into
 	// platform specific pieces.
-	runtimePath    = "/var/run/docker"
-	execDriverPath = runtimePath + "/execdriver/native"
+	runtimePath = "/var/run/docker"
 
 	workingDirectory string
 

+ 6 - 6
man/docker-daemon.8.md

@@ -126,10 +126,10 @@ format.
   DNS search domains to use.
 
 **--exec-opt**=[]
-  Set exec driver options. See EXEC DRIVER OPTIONS.
+  Set runtime execution options. See RUNTIME EXECUTION OPTIONS.
 
 **--exec-root**=""
-  Path to use as the root of the Docker exec driver. Default is `/var/run/docker`.
+  Path to use as the root of the Docker execution state files. Default is `/var/run/docker`.
 
 **--fixed-cidr**=""
   IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
@@ -289,13 +289,13 @@ will use more space for base images the larger the device
 is.
 
 The base device size can be increased at daemon restart which will allow
-all future images and containers (based on those new images) to be of the 
+all future images and containers (based on those new images) to be of the
 new base device size.
 
-Example use: `docker daemon --storage-opt dm.basesize=50G` 
+Example use: `docker daemon --storage-opt dm.basesize=50G`
 
-This will increase the base device size to 50G. The Docker daemon will throw an 
-error if existing base device size is larger than 50G. A user can use 
+This will increase the base device size to 50G. The Docker daemon will throw an
+error if existing base device size is larger than 50G. A user can use
 this option to expand the base device size however shrinking is not permitted.
 
 This value affects the system-wide "base" empty filesystem that may already

+ 1 - 2
man/docker-inspect.1.md

@@ -16,7 +16,7 @@ CONTAINER|IMAGE [CONTAINER|IMAGE...]
 
 This displays all the information available in Docker for a given
 container or image. By default, this will render all results in a JSON
-array. If the container and image have the same name, this will return 
+array. If the container and image have the same name, this will return
 container JSON for unspecified type. If a format is specified, the given
 template will be executed for each result.
 
@@ -110,7 +110,6 @@ To get information on a container use its ID or instance name:
     "Name": "/adoring_wozniak",
     "RestartCount": 0,
     "Driver": "devicemapper",
-    "ExecDriver": "native-0.2",
     "MountLabel": "",
     "MountLabel": "",
     "ProcessLabel": "",
     "ProcessLabel": "",
     "Mounts": [
     "Mounts": [

+ 3 - 4
man/docker.1.md

@@ -224,15 +224,14 @@ inside it)
   See **docker-wait(1)** for full documentation on the **wait** command.
 
 
-# EXEC DRIVER OPTIONS
+# RUNTIME EXECUTION OPTIONS
 
 Use the **--exec-opt** flags to specify options to the execution driver.
 The following options are available:
 
 #### native.cgroupdriver
-Specifies the management of the container's `cgroups`. You can specify 
-`cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the 
-system uses `cgroupfs`.
+Specifies the management of the container's `cgroups`. Only `cgroupfs` can be
+specified at the moment.
 
 #### Client
 For specific client examples please see the man page for the specific Docker
 For specific client examples please see the man page for the specific Docker