Browse source code

Merge pull request #21705 from tiborvass/cherrypicks-1.11.0-rc3

Cherrypicks 1.11.0 rc3
Tibor Vass 9 years ago
parent
commit
2f69842afa
100 changed files with 1441 additions and 958 deletions
  1. Dockerfile (+5 -4)
  2. Dockerfile.aarch64 (+3 -3)
  3. Dockerfile.armhf (+3 -3)
  4. Dockerfile.gccgo (+3 -2)
  5. Dockerfile.ppc64le (+3 -3)
  6. Dockerfile.s390x (+3 -3)
  7. Dockerfile.simple (+2 -2)
  8. Makefile (+2 -1)
  9. api/client/trust.go (+1 -1)
  10. api/server/router/system/system_routes.go (+3 -4)
  11. builder/dockerfile/builder.go (+11 -0)
  12. builder/dockerfile/internals.go (+14 -1)
  13. contrib/completion/bash/docker (+1 -0)
  14. daemon/config_unix.go (+1 -1)
  15. daemon/container_operations.go (+1 -1)
  16. daemon/daemon.go (+5 -4)
  17. daemon/daemon_linux.go (+26 -64)
  18. daemon/daemon_linux_test.go (+2 -2)
  19. daemon/daemon_unix.go (+26 -15)
  20. daemon/graphdriver/aufs/aufs.go (+7 -0)
  21. daemon/logger/journald/read.go (+29 -12)
  22. daemon/oci_linux.go (+17 -8)
  23. daemon/start.go (+4 -2)
  24. distribution/pull_v1.go (+14 -1)
  25. distribution/pull_v2.go (+13 -1)
  26. distribution/xfer/download_test.go (+4 -0)
  27. docker/daemon_unix.go (+4 -0)
  28. docs/admin/systemd.md (+5 -5)
  29. docs/deprecated.md (+0 -11)
  30. docs/installation/linux/ubuntulinux.md (+7 -1)
  31. docs/reference/api/docker_remote_api_v1.14.md (+1 -1)
  32. docs/reference/api/docker_remote_api_v1.15.md (+5 -5)
  33. docs/reference/api/docker_remote_api_v1.16.md (+5 -5)
  34. docs/reference/api/docker_remote_api_v1.17.md (+5 -5)
  35. docs/reference/api/docker_remote_api_v1.18.md (+5 -5)
  36. docs/reference/api/docker_remote_api_v1.19.md (+5 -5)
  37. docs/reference/api/docker_remote_api_v1.20.md (+5 -5)
  38. docs/reference/api/docker_remote_api_v1.21.md (+5 -5)
  39. docs/reference/api/docker_remote_api_v1.22.md (+26 -109)
  40. docs/reference/api/docker_remote_api_v1.23.md (+26 -109)
  41. docs/reference/commandline/build.md (+1 -0)
  42. docs/reference/commandline/daemon.md (+9 -7)
  43. docs/reference/commandline/network_create.md (+1 -0)
  44. docs/reference/commandline/pull.md (+2 -0)
  45. docs/reference/commandline/push.md (+2 -0)
  46. docs/reference/commandline/volume_create.md (+1 -0)
  47. docs/reference/run.md (+0 -3)
  48. docs/userguide/containers/dockervolumes.md (+1 -1)
  49. docs/userguide/networking/work-with-networks.md (+1 -1)
  50. hack/make.sh (+1 -1)
  51. hack/make/.build-deb/control (+1 -2)
  52. hack/make/.build-deb/rules (+0 -2)
  53. hack/make/.build-rpm/docker-engine.spec (+0 -7)
  54. hack/make/binary (+1 -1)
  55. hack/make/tgz (+41 -6)
  56. hack/release.sh (+15 -10)
  57. hack/vendor.sh (+3 -3)
  58. integration-cli/daemon.go (+1 -0)
  59. integration-cli/docker_cli_build_test.go (+26 -8)
  60. integration-cli/docker_cli_daemon_experimental_test.go (+45 -0)
  61. integration-cli/docker_cli_daemon_not_experimental_test.go (+39 -0)
  62. integration-cli/docker_cli_daemon_test.go (+35 -6)
  63. integration-cli/docker_utils.go (+15 -0)
  64. layer/layer.go (+1 -0)
  65. layer/layer_store.go (+24 -2)
  66. layer/mounted_layer.go (+21 -0)
  67. libcontainerd/client.go (+7 -19)
  68. libcontainerd/client_shutdownrestore_linux.go (+2 -0)
  69. libcontainerd/container_linux.go (+2 -0)
  70. libcontainerd/remote_linux.go (+41 -6)
  71. libcontainerd/remote_windows.go (+4 -4)
  72. man/docker-build.1.md (+4 -0)
  73. man/docker-daemon.8.md (+5 -1)
  74. man/docker-network-create.1.md (+4 -0)
  75. man/docker-pull.1.md (+2 -0)
  76. man/docker-push.1.md (+2 -0)
  77. man/docker-volume-create.1.md (+4 -0)
  78. man/docker.1.md (+3 -2)
  79. project/PACKAGERS.md (+0 -1)
  80. registry/auth.go (+2 -2)
  81. vendor/src/github.com/boltdb/bolt/Makefile (+7 -43)
  82. vendor/src/github.com/boltdb/bolt/README.md (+197 -24)
  83. vendor/src/github.com/boltdb/bolt/appveyor.yml (+18 -0)
  84. vendor/src/github.com/boltdb/bolt/batch.go (+0 -138)
  85. vendor/src/github.com/boltdb/bolt/bolt_linux.go (+0 -2)
  86. vendor/src/github.com/boltdb/bolt/bolt_openbsd.go (+0 -2)
  87. vendor/src/github.com/boltdb/bolt/bolt_ppc.go (+9 -0)
  88. vendor/src/github.com/boltdb/bolt/bolt_ppc64.go (+9 -0)
  89. vendor/src/github.com/boltdb/bolt/bolt_unix.go (+5 -16)
  90. vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go (+6 -17)
  91. vendor/src/github.com/boltdb/bolt/bolt_windows.go (+73 -5)
  92. vendor/src/github.com/boltdb/bolt/boltsync_unix.go (+0 -2)
  93. vendor/src/github.com/boltdb/bolt/bucket.go (+2 -1)
  94. vendor/src/github.com/boltdb/bolt/cursor.go (+35 -19)
  95. vendor/src/github.com/boltdb/bolt/db.go (+211 -10)
  96. vendor/src/github.com/boltdb/bolt/node.go (+0 -37)
  97. vendor/src/github.com/boltdb/bolt/tx.go (+64 -18)
  98. vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go (+122 -120)
  99. vendor/src/github.com/docker/containerd/api/grpc/types/api.proto (+1 -0)
  100. vendor/src/github.com/docker/libnetwork/CHANGELOG.md (+6 -0)

+ 5 - 4
Dockerfile

@@ -74,6 +74,7 @@ RUN apt-get update && apt-get install -y \
 	xfsprogs \
 	libzfs-dev \
 	tar \
+	zip \
 	--no-install-recommends \
 	&& pip install awscli==1.10.15 \
 	&& ln -snf /usr/bin/clang-3.8 /usr/local/bin/clang \
@@ -102,7 +103,7 @@ RUN set -x \
 	&& export OSXCROSS_PATH="/osxcross" \
 	&& git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \
 	&& ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \
-	&& curl -sSL https://s3.dockerproject.org/darwin/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \
+	&& curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \
 	&& UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh
 ENV PATH /osxcross/target/bin:$PATH

@@ -202,7 +203,7 @@ RUN useradd --create-home --gid docker unprivilegeduser

 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux

 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
@@ -247,7 +248,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -257,7 +258,7 @@ RUN set -x \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 3 - 3
Dockerfile.aarch64

@@ -145,7 +145,7 @@ RUN useradd --create-home --gid docker unprivilegeduser

 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux

 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
@@ -181,7 +181,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -191,7 +191,7 @@ RUN set -x \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 3 - 3
Dockerfile.armhf

@@ -154,7 +154,7 @@ RUN useradd --create-home --gid docker unprivilegeduser

 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux

 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
@@ -198,7 +198,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -208,7 +208,7 @@ RUN set -x \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 3 - 2
Dockerfile.gccgo

@@ -74,17 +74,18 @@ WORKDIR /go/src/github.com/docker/docker
 ENV DOCKER_BUILDTAGS apparmor seccomp selinux

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
+	&& (find . -name "*_ffjson.go" | xargs rm) \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 3 - 3
Dockerfile.ppc64le

@@ -155,7 +155,7 @@ RUN useradd --create-home --gid docker unprivilegeduser

 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor selinux
+ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux

 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
@@ -199,7 +199,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -209,7 +209,7 @@ RUN set -x \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 3 - 3
Dockerfile.s390x

@@ -134,7 +134,7 @@ RUN useradd --create-home --gid docker unprivilegeduser

 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor selinux
+ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux

 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
@@ -178,7 +178,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -188,7 +188,7 @@ RUN set -x \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 2 - 2
Dockerfile.simple

@@ -30,7 +30,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 	&& rm -rf /var/lib/apt/lists/*

 # Install runc
-ENV RUNC_COMMIT d563bd134293c1026976a8f5764d5df5612f1dbf
+ENV RUNC_COMMIT 0c1c615ebd6a15545b6a82ead01d2745ea49b242
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
@@ -40,7 +40,7 @@ RUN set -x \
 	&& cp runc /usr/local/bin/docker-runc

 # Install containerd
-ENV CONTAINERD_COMMIT c761085e92be09df9d5298f852c328b538f5dc2f
+ENV CONTAINERD_COMMIT 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone git://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \

+ 2 - 1
Makefile

@@ -69,10 +69,11 @@ bundles:
 cross: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross

-
 win: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh win

+tgz: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz

 deb: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb

+ 1 - 1
api/client/trust.go

@@ -252,7 +252,7 @@ func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Can
 	// Resolve the Auth config relevant for this server
 	authConfig := cli.resolveAuthConfig(repoInfo.Index)

-	notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig)
+	notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull")
 	if err != nil {
 		fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err)
 		return nil, err

+ 3 - 4
api/server/router/system/system_routes.go

@@ -55,11 +55,10 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
 		return err
 	}

-	timer := time.NewTimer(0)
-	timer.Stop()
+	var timeout <-chan time.Time
 	if until > 0 || untilNano > 0 {
 		dur := time.Unix(until, untilNano).Sub(time.Now())
-		timer = time.NewTimer(dur)
+		timeout = time.NewTimer(dur).C
 	}

 	ef, err := filters.FromParam(r.Form.Get("filters"))
@@ -99,7 +98,7 @@ func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *
 			if err := enc.Encode(jev); err != nil {
 				return err
 			}
-		case <-timer.C:
			return nil
+		case <-timeout:
 		case <-closeNotify:
 			logrus.Debug("Client disconnected, stop sending events")
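
The `timeout` rewrite above leans on a Go property: a receive from a nil channel blocks forever, so a `select` case on a nil channel is simply never chosen, and no stopped-timer bookkeeping is needed. A minimal standalone sketch of the idea (hypothetical event loop, not the daemon's code):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // With no deadline, leave the channel nil: its case never fires
        // and the loop would wait on events indefinitely.
        var timeout <-chan time.Time
        deadline := 100 * time.Millisecond // pretend an --until value was given
        if deadline > 0 {
            timeout = time.NewTimer(deadline).C
        }

        events := make(chan string)
        for {
            select {
            case ev := <-events:
                fmt.Println(ev)
            case <-timeout:
                fmt.Println("deadline reached")
                return
            }
        }
    }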

+ 11 - 0
builder/dockerfile/builder.go

@@ -236,6 +236,17 @@ func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context
 			}
 			return "", err
 		}
+
+		// Commit the layer when the Dockerfile has only one
+		// child, i.e. just the `FROM` instruction, and build
+		// labels were given. Otherwise, the new image won't be
+		// labeled properly.
+		// Commit here, so the ID of the final image is reported
+		// properly.
+		if len(b.dockerfile.Children) == 1 && len(b.options.Labels) > 0 {
+			b.commit("", b.runConfig.Cmd, "")
+		}
+
 		shortImgID = stringid.TruncateID(b.image)
 		fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID)
 		if b.options.Remove {

+ 14 - 1
builder/dockerfile/internals.go

@@ -413,7 +413,20 @@ func (b *Builder) processImageFrom(img builder.Image) error {
 		b.image = img.ImageID()

 		if img.RunConfig() != nil {
-			b.runConfig = img.RunConfig()
+			imgConfig := *img.RunConfig()
+			// inherit runConfig labels from the current
+			// state if they've been set already.
+			// Ensures that images with only a FROM
+			// get the labels populated properly.
+			if b.runConfig.Labels != nil {
+				if imgConfig.Labels == nil {
+					imgConfig.Labels = make(map[string]string)
+				}
+				for k, v := range b.runConfig.Labels {
+					imgConfig.Labels[k] = v
+				}
+			}
+			b.runConfig = &imgConfig
 		}
 	}
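
The change above copies the image's run config by value and then folds the builder's labels into the copy, so labels set before `FROM` survive. A small self-contained sketch of the same merge (hypothetical types, not the builder's):

    package main

    import "fmt"

    type runConfig struct {
        Labels map[string]string
    }

    // mergeLabels returns a copy of base with overrides folded in,
    // allocating a fresh map so base's Labels are never mutated.
    func mergeLabels(base runConfig, overrides map[string]string) runConfig {
        merged := base
        if overrides != nil {
            merged.Labels = make(map[string]string, len(base.Labels)+len(overrides))
            for k, v := range base.Labels {
                merged.Labels[k] = v
            }
            for k, v := range overrides {
                merged.Labels[k] = v
            }
        }
        return merged
    }

    func main() {
        img := runConfig{Labels: map[string]string{"maintainer": "someone"}}
        out := mergeLabels(img, map[string]string{"build": "rc3"})
        fmt.Println(out.Labels, img.Labels) // img.Labels is unchanged
    }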
 
 

+ 1 - 0
contrib/completion/bash/docker

@@ -887,6 +887,7 @@ _docker_daemon() {
 				dm.fs
 				dm.loopdatasize
 				dm.loopmetadatasize
+				dm.min_free_space
 				dm.mkfsarg
 				dm.mountopt
 				dm.override_udev_sync_check

+ 1 - 1
daemon/config_unix.go

@@ -82,7 +82,7 @@ func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) strin
 	cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API"))
 	cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers"))
 	cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces"))
-	cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerD socket"))
+	cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerd socket"))

 	config.attachExperimentalFlags(cmd, usageFn)
 }

+ 1 - 1
daemon/container_operations.go

@@ -720,7 +720,7 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) {

 	sb, err := daemon.netController.SandboxByID(sid)
 	if err != nil {
-		logrus.Errorf("error locating sandbox id %s: %v", sid, err)
+		logrus.Warnf("error locating sandbox id %s: %v", sid, err)
 		return
 	}
 
 

+ 5 - 4
daemon/daemon.go

@@ -293,6 +293,11 @@ func (daemon *Daemon) restore() error {
 		go func(c *container.Container) {
 			defer wg.Done()
 			if c.IsRunning() || c.IsPaused() {
+				// Fix activityCount such that graph mounts can be unmounted later
+				if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil {
+					logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err)
+					return
+				}
 				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(c.RestartManager(true))); err != nil {
 					logrus.Errorf("Failed to restore with containerd: %q", err)
 					return
@@ -304,10 +309,6 @@ func (daemon *Daemon) restore() error {
 				mapLock.Lock()
 				restartContainers[c] = make(chan struct{})
 				mapLock.Unlock()
-			} else if !c.IsRunning() && !c.IsPaused() {
-				if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
-					daemon.cleanupMountsByID(mountid)
-				}
 			}

 			// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated

+ 26 - 64
daemon/daemon_linux.go

@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path/filepath"
+	"regexp"
 	"strings"

 	"github.com/Sirupsen/logrus"
@@ -28,91 +28,53 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
 		return nil
 	}
 	var errors []string
-	mountRoot := ""
-	shmSuffix := "/" + id + "/shm"
-	mergedSuffix := "/" + id + "/merged"
+
+	regexps := getCleanPatterns(id)
 	sc := bufio.NewScanner(reader)
 	for sc.Scan() {
-		line := sc.Text()
-		fields := strings.Fields(line)
-		if strings.HasPrefix(fields[4], daemon.root) {
-			logrus.Debugf("Mount base: %v", fields[4])
-			mnt := fields[4]
-			if strings.HasSuffix(mnt, shmSuffix) || strings.HasSuffix(mnt, mergedSuffix) {
-				logrus.Debugf("Unmounting %v", mnt)
-				if err := unmount(mnt); err != nil {
-					logrus.Error(err)
-					errors = append(errors, err.Error())
+		if fields := strings.Fields(sc.Text()); len(fields) >= 4 {
+			if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) {
+				for _, p := range regexps {
+					if p.MatchString(mnt) {
+						if err := unmount(mnt); err != nil {
+							logrus.Error(err)
+							errors = append(errors, err.Error())
+						}
+					}
 				}
-			} else if mountBase := filepath.Base(mnt); mountBase == id {
-				mountRoot = mnt
 			}
 		}
 	}

-	if mountRoot != "" {
-		logrus.Debugf("Unmounting %v", mountRoot)
-		if err := unmount(mountRoot); err != nil {
-			logrus.Error(err)
-			errors = append(errors, err.Error())
-		}
-	}
-
 	if err := sc.Err(); err != nil {
 		return err
 	}

 	if len(errors) > 0 {
-		return fmt.Errorf("Error cleaningup mounts:\n%v", strings.Join(errors, "\n"))
+		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
 	}

-	logrus.Debugf("Cleaning up old container shm/mqueue/rootfs mounts: done.")
+	logrus.Debugf("Cleaning up old mountid %v: done.", id)
 	return nil
 }

 // cleanupMounts umounts shm/mqueue mounts for old containers
 func (daemon *Daemon) cleanupMounts() error {
-	logrus.Debugf("Cleaning up old container shm/mqueue/rootfs mounts: start.")
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	return daemon.cleanupMountsFromReader(f, mount.Unmount)
+	return daemon.cleanupMountsByID("")
 }

-func (daemon *Daemon) cleanupMountsFromReader(reader io.Reader, unmount func(target string) error) error {
-	if daemon.root == "" {
-		return nil
+func getCleanPatterns(id string) (regexps []*regexp.Regexp) {
+	var patterns []string
+	if id == "" {
+		id = "[0-9a-f]{64}"
+		patterns = append(patterns, "containers/"+id+"/shm")
 	}
-	sc := bufio.NewScanner(reader)
-	var errors []string
-	for sc.Scan() {
-		line := sc.Text()
-		fields := strings.Fields(line)
-		if strings.HasPrefix(fields[4], daemon.root) {
-			logrus.Debugf("Mount base: %v", fields[4])
-			mnt := fields[4]
-			mountBase := filepath.Base(mnt)
-			if mountBase == "shm" || mountBase == "merged" {
-				logrus.Debugf("Unmounting %v", mnt)
-				if err := unmount(mnt); err != nil {
-					logrus.Error(err)
-					errors = append(errors, err.Error())
-				}
-			}
+	patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$")
+	for _, p := range patterns {
+		r, err := regexp.Compile(p)
+		if err == nil {
+			regexps = append(regexps, r)
 		}
 	}
-
-	if err := sc.Err(); err != nil {
-		return err
-	}
-
-	if len(errors) > 0 {
-		return fmt.Errorf("Error cleaningup mounts:\n%v", strings.Join(errors, "\n"))
-	}
-
-	logrus.Debugf("Cleaning up old container shm/mqueue/rootfs mounts: done.")
-	return nil
+	return
 }
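
The rewrite above matches candidate mount points against a small set of per-graphdriver regular expressions instead of ad-hoc suffix checks. A rough standalone sketch of that matching loop (simplified mountinfo parsing, hypothetical paths):

    package main

    import (
        "bufio"
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        id := "[0-9a-f]{64}" // any container ID when none is given
        patterns := []*regexp.Regexp{
            regexp.MustCompile("containers/" + id + "/shm"),
            regexp.MustCompile("aufs/mnt/" + id + "$"),
            regexp.MustCompile("overlay/" + id + "/merged$"),
        }

        // Two fake mountinfo lines: the fifth field is the mount point.
        mountinfo := "1 2 0:1 / /var/lib/docker/overlay/" + strings.Repeat("a", 64) + "/merged rw - overlay\n" +
            "3 4 0:2 / /var/lib/docker/tmp rw - tmpfs"

        sc := bufio.NewScanner(strings.NewReader(mountinfo))
        for sc.Scan() {
            fields := strings.Fields(sc.Text())
            if len(fields) < 5 {
                continue
            }
            mnt := fields[4]
            for _, p := range patterns {
                if p.MatchString(mnt) {
                    fmt.Println("would unmount:", mnt)
                }
            }
        }
    }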

+ 2 - 2
daemon/daemon_linux_test.go

@@ -59,7 +59,7 @@ func TestCleanupMounts(t *testing.T) {
 		return nil
 	}

-	d.cleanupMountsFromReader(strings.NewReader(mountsFixture), unmount)
+	d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount)

 	if unmounted != 1 {
 		t.Fatalf("Expected to unmount the shm (and the shm only)")
@@ -97,7 +97,7 @@ func TestNotCleanupMounts(t *testing.T) {
 		return nil
 	}
 	mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k`
-	d.cleanupMountsFromReader(strings.NewReader(mountInfo), unmount)
+	d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount)
 	if unmounted {
 		t.Fatalf("Expected not to clean up /dev/shm")
 	}

+ 26 - 15
daemon/daemon_unix.go

@@ -466,28 +466,36 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi
 func (daemon *Daemon) getCgroupDriver() string {
 	cgroupDriver := cgroupFsDriver

-	// No other cgroup drivers are supported at the moment. Warn the
-	// user if they tried to set one other than cgroupfs
-	for _, option := range daemon.configStore.ExecOptions {
+	if UsingSystemd(daemon.configStore) {
+		cgroupDriver = cgroupSystemdDriver
+	}
+	return cgroupDriver
+}
+
+// getCD gets the raw value of the native.cgroupdriver option, if set.
+func getCD(config *Config) string {
+	for _, option := range config.ExecOptions {
 		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
 			continue
 		}
-		if val != cgroupFsDriver {
-			logrus.Warnf("cgroupdriver '%s' is not supported", val)
-		}
+		return val
 	}
-
-	return cgroupDriver
+	return ""
 }

-func usingSystemd(config *Config) bool {
-	// No support for systemd cgroup atm
-	return false
+// VerifyCgroupDriver validates native.cgroupdriver
+func VerifyCgroupDriver(config *Config) error {
+	cd := getCD(config)
+	if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
+		return nil
+	}
+	return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
 }

-func (daemon *Daemon) usingSystemd() bool {
-	return daemon.getCgroupDriver() == cgroupSystemdDriver
+// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
+func UsingSystemd(config *Config) bool {
+	return getCD(config) == cgroupSystemdDriver
 }

 // verifyPlatformContainerSettings performs platform-specific validation of the
@@ -533,7 +541,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
 			return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled")
 		}
 	}
-	if hostConfig.CgroupParent != "" && daemon.usingSystemd() {
+	if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
 		// CgroupParent for systemd cgroup should be named as "xxx.slice"
 		if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
 			return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
@@ -554,7 +562,10 @@ func verifyDaemonSettings(config *Config) error {
 	if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq {
 		config.bridgeConfig.EnableIPMasq = false
 	}
-	if config.CgroupParent != "" && usingSystemd(config) {
+	if err := VerifyCgroupDriver(config); err != nil {
+		return err
+	}
+	if config.CgroupParent != "" && UsingSystemd(config) {
 		if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") {
 			return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
 		}
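
The new helpers boil down to scanning the daemon's `--exec-opt key=value` list for `native.cgroupdriver` and accepting only `cgroupfs` or `systemd`. A minimal sketch of that validation rule (standalone, with an inlined key=value split instead of the daemon's parsers package):

    package main

    import (
        "fmt"
        "strings"
    )

    // cgroupDriver returns the configured driver, defaulting to cgroupfs.
    func cgroupDriver(execOptions []string) (string, error) {
        for _, option := range execOptions {
            parts := strings.SplitN(option, "=", 2)
            if len(parts) != 2 || !strings.EqualFold(parts[0], "native.cgroupdriver") {
                continue
            }
            switch val := parts[1]; val {
            case "cgroupfs", "systemd":
                return val, nil
            default:
                return "", fmt.Errorf("native.cgroupdriver option %s not supported", val)
            }
        }
        return "cgroupfs", nil
    }

    func main() {
        fmt.Println(cgroupDriver([]string{"native.cgroupdriver=systemd"}))
        fmt.Println(cgroupDriver([]string{"native.cgroupdriver=lxc"}))
    }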

+ 7 - 0
daemon/graphdriver/aufs/aufs.go

@@ -67,6 +67,7 @@ func init() {

 // Driver contains information about the filesystem mounted.
 type Driver struct {
+	sync.Mutex
 	root          string
 	uidMaps       []idtools.IDMap
 	gidMaps       []idtools.IDMap
@@ -418,6 +419,9 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
 }

 func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+	a.Lock()
+	defer a.Unlock()
+
 	// If the id is mounted or we get an error return
 	if mounted, err := a.mounted(target); err != nil || mounted {
 		return err
@@ -432,6 +436,9 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
 }

 func (a *Driver) unmount(mountPath string) error {
+	a.Lock()
+	defer a.Unlock()
+
 	if mounted, err := a.mounted(mountPath); err != nil || !mounted {
 		return err
 	}

+ 29 - 12
daemon/logger/journald/read.go

@@ -63,11 +63,11 @@ package journald
//		fds[0].events = POLLHUP;
//		fds[1].fd = sd_journal_get_fd(j);
//		if (fds[1].fd < 0) {
-//			return -1;
+//			return fds[1].fd;
//		}
//		jevents = sd_journal_get_events(j);
//		if (jevents < 0) {
-//			return -1;
+//			return jevents;
//		}
//		fds[1].events = jevents;
//		sd_journal_get_timeout(j, &when);
@@ -81,7 +81,7 @@ package journald
//		i = poll(fds, 2, timeout);
//		if ((i == -1) && (errno != EINTR)) {
//			/* An unexpected error. */
-//			return -1;
+//			return (errno != 0) ? -errno : -EINTR;
//		}
//		if (fds[0].revents & POLLHUP) {
//			/* The close notification pipe was closed. */
@@ -101,6 +101,7 @@ import (
 	"time"
 	"unsafe"

+	"github.com/Sirupsen/logrus"
 	"github.com/coreos/go-systemd/journal"
 	"github.com/docker/docker/daemon/logger"
 )
@@ -177,9 +178,18 @@ func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.Re
 	s.readers.readers[logWatcher] = logWatcher
 	s.readers.mu.Unlock()
 	go func() {
-		// Keep copying journal data out until we're notified to stop.
-		for C.wait_for_data_or_close(j, pfd[0]) == 1 {
+		// Keep copying journal data out until we're notified to stop
+		// or we hit an error.
+		status := C.wait_for_data_or_close(j, pfd[0])
+		for status == 1 {
 			cursor = s.drainJournal(logWatcher, config, j, cursor)
+			status = C.wait_for_data_or_close(j, pfd[0])
+		}
+		if status < 0 {
+			cerrstr := C.strerror(C.int(-status))
+			errstr := C.GoString(cerrstr)
+			fmtstr := "error %q while attempting to follow journal for container %q"
+			logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"])
 		}
 		// Clean up.
 		C.close(pfd[0])
@@ -293,14 +303,21 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon
 	}
 	cursor = s.drainJournal(logWatcher, config, j, "")
 	if config.Follow {
-		// Create a pipe that we can poll at the same time as the journald descriptor.
-		if C.pipe(&pipes[0]) == C.int(-1) {
-			logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
+		// Allocate a descriptor for following the journal, if we'll
+		// need one.  Do it here so that we can report if it fails.
+		if fd := C.sd_journal_get_fd(j); fd < C.int(0) {
+			logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd)))
 		} else {
-			s.followJournal(logWatcher, config, j, pipes, cursor)
-			// Let followJournal handle freeing the journal context
-			// object and closing the channel.
-			following = true
+			// Create a pipe that we can poll at the same time as
+			// the journald descriptor.
+			if C.pipe(&pipes[0]) == C.int(-1) {
+				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
+			} else {
+				s.followJournal(logWatcher, config, j, pipes, cursor)
+				// Let followJournal handle freeing the journal context
+				// object and closing the channel.
+				following = true
+			}
 		}
 	}
 	return

+ 17 - 8
daemon/oci_linux.go

@@ -8,6 +8,7 @@ import (
 	"strconv"
 	"strings"

+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/caps"
 	"github.com/docker/docker/libcontainerd"
@@ -583,16 +584,24 @@ func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, e
 	}

 	var cgroupsPath string
+	scopePrefix := "docker"
+	parent := "/docker"
+	useSystemd := UsingSystemd(daemon.configStore)
+	if useSystemd {
+		parent = "system.slice"
+	}
+
 	if c.HostConfig.CgroupParent != "" {
-		cgroupsPath = filepath.Join(c.HostConfig.CgroupParent, c.ID)
+		parent = c.HostConfig.CgroupParent
+	} else if daemon.configStore.CgroupParent != "" {
+		parent = daemon.configStore.CgroupParent
+	}
+
+	if useSystemd {
+		cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
+		logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
 	} else {
-		defaultCgroupParent := "/docker"
-		if daemon.configStore.CgroupParent != "" {
-			defaultCgroupParent = daemon.configStore.CgroupParent
-		} else if daemon.usingSystemd() {
-			defaultCgroupParent = "system.slice"
-		}
-		cgroupsPath = filepath.Join(defaultCgroupParent, c.ID)
+		cgroupsPath = filepath.Join(parent, c.ID)
 	}
 	s.Linux.CgroupsPath = &cgroupsPath
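
Under the systemd cgroup driver the runtime expects a `parent:scope-prefix:name` triple rather than a filesystem path, which is why the two branches above format the value differently. A tiny sketch of just that formatting decision (hypothetical helper, not daemon code):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // cgroupsPath mirrors the two layouts: systemd slices take a
    // "parent:prefix:id" triple, cgroupfs takes a plain path.
    func cgroupsPath(useSystemd bool, parent, id string) string {
        if useSystemd {
            return parent + ":docker:" + id
        }
        return filepath.Join(parent, id)
    }

    func main() {
        fmt.Println(cgroupsPath(true, "system.slice", "abc123")) // system.slice:docker:abc123
        fmt.Println(cgroupsPath(false, "/docker", "abc123"))     // /docker/abc123
    }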
 	s.Linux.CgroupsPath = &cgroupsPath
 
 

+ 4 - 2
daemon/start.go

@@ -174,8 +174,10 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
 		daemon.unregisterExecCommand(container, eConfig)
 	}

-	if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil {
-		logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
+	if container.BaseFS != "" {
+		if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil {
+			logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
+		}
 	}
 	container.CancelAttachContext()
 }

+ 14 - 1
distribution/pull_v1.go

@@ -330,7 +330,20 @@ func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progre
 	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())

 	ld.tmpFile.Seek(0, 0)
-	return ld.tmpFile, ld.layerSize, nil
+
+	// hand off the temporary file to the download manager, so it will only
+	// be closed once
+	tmpFile := ld.tmpFile
+	ld.tmpFile = nil
+
+	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
+		tmpFile.Close()
+		err := os.RemoveAll(tmpFile.Name())
+		if err != nil {
+			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
+		}
+		return err
+	}), ld.layerSize, nil
 }

 func (ld *v1LayerDescriptor) Close() {

+ 13 - 1
distribution/pull_v2.go

@@ -278,7 +278,19 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
 		ld.verifier = nil
 		return nil, 0, xfer.DoNotRetry{Err: err}
 	}
-	return tmpFile, size, nil
+
+	// hand off the temporary file to the download manager, so it will only
+	// be closed once
+	ld.tmpFile = nil
+
+	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
+		tmpFile.Close()
+		err := os.RemoveAll(tmpFile.Name())
+		if err != nil {
+			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
+		}
+		return err
+	}), size, nil
 }

 func (ld *v2LayerDescriptor) Close() {
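
Both pull paths now wrap the temp file in a read-closer whose Close both closes and deletes the file, so cleanup happens exactly once, wherever the reader ends up. A self-contained sketch of that wrapper pattern (plain io, not the docker/pkg/ioutils implementation):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "os"
    )

    // readCloserWrapper runs a custom closer instead of the reader's own Close.
    type readCloserWrapper struct {
        io.Reader
        closer func() error
    }

    func (r *readCloserWrapper) Close() error { return r.closer() }

    func main() {
        tmp, err := ioutil.TempFile("", "layer-")
        if err != nil {
            panic(err)
        }
        rc := &readCloserWrapper{Reader: tmp, closer: func() error {
            tmp.Close()
            // Delete the temp file exactly once, when the consumer closes.
            return os.Remove(tmp.Name())
        }}
        defer rc.Close()
        if _, err := io.Copy(ioutil.Discard, rc); err != nil {
            fmt.Println("read failed:", err)
        }
    }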

+ 4 - 0
distribution/xfer/download_test.go

@@ -121,6 +121,10 @@ func (ls *mockLayerStore) GetMountID(string) (string, error) {
 	return "", errors.New("not implemented")
 }

+func (ls *mockLayerStore) ReinitRWLayer(layer.RWLayer) error {
+	return errors.New("not implemented")
+}
+
 func (ls *mockLayerStore) Cleanup() error {
 	return nil
 }

+ 4 - 0
docker/daemon_unix.go

@@ -74,5 +74,9 @@ func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
 	} else {
 		opts = append(opts, libcontainerd.WithStartDaemon(true))
 	}
+	if daemon.UsingSystemd(cli.Config) {
+		args := []string{"--systemd-cgroup=true"}
+		opts = append(opts, libcontainerd.WithRuntimeArgs(args))
+	}
 	return opts
 }

+ 5 - 5
docs/admin/systemd.md

@@ -56,14 +56,14 @@ directory including the following:

 To check if the `docker.service` uses an `EnvironmentFile`:

-    $ sudo systemctl show docker | grep EnvironmentFile
+    $ systemctl show docker | grep EnvironmentFile
     EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes)

 Alternatively, find out where the service file is located:

-    $ sudo systemctl status docker | grep Loaded
-       Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled)
-    $ sudo grep EnvironmentFile /usr/lib/systemd/system/docker.service
+    $ systemctl show --property=FragmentPath docker
+    FragmentPath=/usr/lib/systemd/system/docker.service
+    $ grep EnvironmentFile /usr/lib/systemd/system/docker.service
     EnvironmentFile=-/etc/sysconfig/docker

 You can customize the Docker daemon options using override files as explained in the
@@ -143,7 +143,7 @@ Flush changes:

 Verify that the configuration has been loaded:

-    $ sudo systemctl show docker --property Environment
+    $ systemctl show --property=Environment docker
     Environment=HTTP_PROXY=http://proxy.example.com:80/

 Restart Docker:

+ 0 - 11
docs/deprecated.md

@@ -133,17 +133,6 @@ The following double-dash options are deprecated and have no replacement:
     docker ps --before-id
     docker search --trusted

-### Auto-creating missing host paths for bind mounts
-**Deprecated in Release: v1.9**
-
-**Target for Removal in Release: 1.11**
-
-When creating a container with a bind-mounted volume-- `docker run -v /host/path:/container/path` --
-docker was automatically creating the `/host/path` if it didn't already exist.
-
-This auto-creation of the host path is deprecated and docker will error out if
-the path does not exist.
-
 ### Interacting with V1 registries

 Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the docker daemon from `pull`, `push`, and `login` operations against v1 registries.  Though disabled by default, this signals the intent to deprecate the v1 protocol.

+ 7 - 1
docs/installation/linux/ubuntulinux.md

@@ -14,6 +14,7 @@ weight = -6

 Docker is supported on these Ubuntu operating systems:

+- Ubuntu Xenial 16.04 (LTS)
 - Ubuntu Wily 15.10
 - Ubuntu Trusty 14.04 (LTS)
 - Ubuntu Precise 12.04 (LTS)
@@ -85,6 +86,10 @@ packages from the new repository:

             deb https://apt.dockerproject.org/repo ubuntu-wily main

+    - Ubuntu Xenial 16.04 (LTS)
+
+            deb https://apt.dockerproject.org/repo ubuntu-xenial main
+
     > **Note**: Docker does not provide packages for all architectures. You can find
 	> nightly built binaries in https://master.dockerproject.org. To install docker on
     > a multi-architecture system, add an `[arch=...]` clause to the entry. Refer to the
@@ -109,10 +114,11 @@ packages from the new repository:

 ### Prerequisites by Ubuntu Version

+- Ubuntu Xenial 16.04 (LTS)
 - Ubuntu Wily 15.10
 - Ubuntu Trusty 14.04 (LTS)

-For Ubuntu Trusty and Wily, it's recommended to install the
+For Ubuntu Trusty, Wily, and Xenial, it's recommended to install the
 `linux-image-extra` kernel package. The `linux-image-extra` package
 allows you use the `aufs` storage driver.
 
 

+ 1 - 1
docs/reference/api/docker_remote_api_v1.14.md

@@ -996,7 +996,7 @@ Tag the image `name` into a repository

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created

 Query Parameters:
 
 

+ 5 - 5
docs/reference/api/docker_remote_api_v1.15.md

@@ -1147,7 +1147,7 @@ Tag the image `name` into a repository

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created

 Query Parameters:

@@ -1644,7 +1644,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: application/json

         {
@@ -1685,8 +1685,8 @@ interactive session with the `exec` command.

 **Example response**:

-        HTTP/1.1 201 OK
-        Content-Type: application/json
+        HTTP/1.1 200 OK
+        Content-Type: application/vnd.docker.raw-stream

         {{ STREAM }}

@@ -1717,7 +1717,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: plain/text

 Query Parameters:

+ 5 - 5
docs/reference/api/docker_remote_api_v1.16.md

@@ -1095,7 +1095,7 @@ Tag the image `name` into a repository

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created

 Query Parameters:

@@ -1606,7 +1606,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: application/json

         {
@@ -1647,8 +1647,8 @@ interactive session with the `exec` command.

 **Example response**:

-        HTTP/1.1 201 OK
-        Content-Type: application/json
+        HTTP/1.1 200 OK
+        Content-Type: application/vnd.docker.raw-stream

         {{ STREAM }}

@@ -1679,7 +1679,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: plain/text

 Query Parameters:

+ 5 - 5
docs/reference/api/docker_remote_api_v1.17.md

@@ -1308,7 +1308,7 @@ Tag the image `name` into a repository

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created

 Query Parameters:

@@ -1769,7 +1769,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: application/json

         {
@@ -1810,8 +1810,8 @@ interactive session with the `exec` command.

 **Example response**:

-        HTTP/1.1 201 OK
-        Content-Type: application/json
+        HTTP/1.1 200 OK
+        Content-Type: application/vnd.docker.raw-stream

         {{ STREAM }}

@@ -1842,7 +1842,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: text/plain

 Query Parameters:

+ 5 - 5
docs/reference/api/docker_remote_api_v1.18.md

@@ -1408,7 +1408,7 @@ Tag the image `name` into a repository

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created

 Query Parameters:

@@ -1887,7 +1887,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: application/json

         {
@@ -1929,8 +1929,8 @@ interactive session with the `exec` command.

 **Example response**:

-        HTTP/1.1 201 OK
-        Content-Type: application/json
+        HTTP/1.1 200 OK
+        Content-Type: application/vnd.docker.raw-stream

         {{ STREAM }}

@@ -1961,7 +1961,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-        HTTP/1.1 201 OK
+        HTTP/1.1 201 Created
         Content-Type: text/plain

 Query Parameters:

+ 5 - 5
docs/reference/api/docker_remote_api_v1.19.md

@@ -1477,7 +1477,7 @@ Tag the image `name` into a repository

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created

 Query Parameters:

@@ -1970,7 +1970,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: application/json

     {
@@ -2012,8 +2012,8 @@ interactive session with the `exec` command.

 **Example response**:

-    HTTP/1.1 201 OK
-    Content-Type: application/json
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream

     {{ STREAM }}

@@ -2044,7 +2044,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: text/plain

 Query Parameters:

+ 5 - 5
docs/reference/api/docker_remote_api_v1.20.md

@@ -1622,7 +1622,7 @@ Tag the image `name` into a repository

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created

 Query Parameters:

@@ -2116,7 +2116,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: application/json

     {
@@ -2158,8 +2158,8 @@ interactive session with the `exec` command.

 **Example response**:

-    HTTP/1.1 201 OK
-    Content-Type: application/json
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream

     {{ STREAM }}

@@ -2190,7 +2190,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: text/plain

 Query Parameters:

+ 5 - 5
docs/reference/api/docker_remote_api_v1.21.md

@@ -1773,7 +1773,7 @@ Tag the image `name` into a repository

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created

 Query Parameters:

@@ -2270,7 +2270,7 @@ Sets up an exec instance in a running container `id`

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: application/json

     {
@@ -2314,8 +2314,8 @@ interactive session with the `exec` command.

 **Example response**:

-    HTTP/1.1 201 OK
-    Content-Type: application/json
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream

     {{ STREAM }}

@@ -2347,7 +2347,7 @@ This API is valid only if `tty` was specified as part of creating and starting t

 **Example response**:

-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: text/plain

 Query Parameters:

+ 26 - 109
docs/reference/api/docker_remote_api_v1.22.md

@@ -1984,7 +1984,7 @@ Tag the image `name` into a repository
 
 **Example response**:
 
-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
 
 Query Parameters:
 
@@ -2547,7 +2547,7 @@ Sets up an exec instance in a running container `id`
 
 **Example response**:
 
-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: application/json
 
     {
@@ -2594,8 +2594,8 @@ interactive session with the `exec` command.
 
 **Example response**:
 
-    HTTP/1.1 201 OK
-    Content-Type: application/json
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream
 
     {{ STREAM }}
 
@@ -2627,7 +2627,7 @@ This API is valid only if `tty` was specified as part of creating and starting t
 
 **Example response**:
 
-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: text/plain
 
 Query Parameters:
@@ -2653,112 +2653,28 @@ Return low-level information about the `exec` command `id`.
 **Example response**:
 
     HTTP/1.1 200 OK
-    Content-Type: plain/text
+    Content-Type: application/json
 
     {
-      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
-      "Running" : false,
-      "ExitCode" : 2,
-      "ProcessConfig" : {
-        "privileged" : false,
-        "user" : "",
-        "tty" : false,
-        "entrypoint" : "sh",
-        "arguments" : [
-          "-c",
-          "exit 2"
-        ]
-      },
-      "OpenStdin" : false,
-      "OpenStderr" : false,
-      "OpenStdout" : false,
-      "Container" : {
-        "State" : {
-          "Status" : "running",
-          "Running" : true,
-          "Paused" : false,
-          "Restarting" : false,
-          "OOMKilled" : false,
-          "Pid" : 3650,
-          "ExitCode" : 0,
-          "Error" : "",
-          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
-          "FinishedAt" : "0001-01-01T00:00:00Z"
-        },
-        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
-        "Created" : "2014-11-17T22:26:03.626304998Z",
-        "Path" : "date",
-        "Args" : [],
-        "Config" : {
-          "Hostname" : "8f177a186b97",
-          "Domainname" : "",
-          "User" : "",
-          "AttachStdin" : false,
-          "AttachStdout" : false,
-          "AttachStderr" : false,
-          "ExposedPorts" : null,
-          "Tty" : false,
-          "OpenStdin" : false,
-          "StdinOnce" : false,
-          "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
-          "Cmd" : [
-            "date"
-          ],
-          "Image" : "ubuntu",
-          "Volumes" : null,
-          "WorkingDir" : "",
-          "Entrypoint" : null,
-          "NetworkDisabled" : false,
-          "MacAddress" : "",
-          "OnBuild" : null,
-          "SecurityOpt" : null
-        },
-        "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
-        "NetworkSettings": {
-            "Bridge": "",
-            "SandboxID": "",
-            "HairpinMode": false,
-            "LinkLocalIPv6Address": "",
-            "LinkLocalIPv6PrefixLen": 0,
-            "Ports": null,
-            "SandboxKey": "",
-            "SecondaryIPAddresses": null,
-            "SecondaryIPv6Addresses": null,
-            "EndpointID": "",
-            "Gateway": "",
-            "GlobalIPv6Address": "",
-            "GlobalIPv6PrefixLen": 0,
-            "IPAddress": "",
-            "IPPrefixLen": 0,
-            "IPv6Gateway": "",
-            "MacAddress": "",
-            "Networks": {
-                "bridge": {
-                    "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
-                    "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d",
-                    "Gateway": "172.17.0.1",
-                    "IPAddress": "172.17.0.2",
-                    "IPPrefixLen": 16,
-                    "IPv6Gateway": "",
-                    "GlobalIPv6Address": "",
-                    "GlobalIPv6PrefixLen": 0,
-                    "MacAddress": "02:42:ac:12:00:02"
-                }
-            }
+        "CanRemove": false,
+        "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126",
+        "DetachKeys": "",
+        "ExitCode": 2,
+        "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b",
+        "OpenStderr": true,
+        "OpenStdin": true,
+        "OpenStdout": true,
+        "ProcessConfig": {
+            "arguments": [
+                "-c",
+                "exit 2"
+            ],
+            "entrypoint": "sh",
+            "privileged": false,
+            "tty": true,
+            "user": "1000"
         },
-        "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
-        "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
-        "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
-        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
-        "Name" : "/test",
-        "Driver" : "aufs",
-        "ExecDriver" : "native-0.2",
-        "MountLabel" : "",
-        "ProcessLabel" : "",
-        "AppArmorProfile" : "",
-        "RestartCount" : 0,
-        "Mounts" : []
-      }
+        "Running": false
     }
 
 Status Codes:
@@ -2789,7 +2705,8 @@ Status Codes:
           "Driver": "local",
           "Mountpoint": "/var/lib/docker/volumes/tardis"
         }
-      ]
+      ],
+      "Warnings": []
     }
 
 Query Parameters:

+ 26 - 109
docs/reference/api/docker_remote_api_v1.23.md

@@ -2020,7 +2020,7 @@ Tag the image `name` into a repository
 
 **Example response**:
 
-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
 
 Query Parameters:
 
@@ -2590,7 +2590,7 @@ Sets up an exec instance in a running container `id`
 
 **Example response**:
 
-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: application/json
 
     {
@@ -2637,8 +2637,8 @@ interactive session with the `exec` command.
 
 **Example response**:
 
-    HTTP/1.1 201 OK
-    Content-Type: application/json
+    HTTP/1.1 200 OK
+    Content-Type: application/vnd.docker.raw-stream
 
     {{ STREAM }}
 
@@ -2670,7 +2670,7 @@ This API is valid only if `tty` was specified as part of creating and starting t
 
 **Example response**:
 
-    HTTP/1.1 201 OK
+    HTTP/1.1 201 Created
     Content-Type: text/plain
 
 Query Parameters:
@@ -2696,112 +2696,28 @@ Return low-level information about the `exec` command `id`.
 **Example response**:
 
     HTTP/1.1 200 OK
-    Content-Type: plain/text
+    Content-Type: application/json
 
     {
-      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
-      "Running" : false,
-      "ExitCode" : 2,
-      "ProcessConfig" : {
-        "privileged" : false,
-        "user" : "",
-        "tty" : false,
-        "entrypoint" : "sh",
-        "arguments" : [
-          "-c",
-          "exit 2"
-        ]
-      },
-      "OpenStdin" : false,
-      "OpenStderr" : false,
-      "OpenStdout" : false,
-      "Container" : {
-        "State" : {
-          "Status" : "running",
-          "Running" : true,
-          "Paused" : false,
-          "Restarting" : false,
-          "OOMKilled" : false,
-          "Pid" : 3650,
-          "ExitCode" : 0,
-          "Error" : "",
-          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
-          "FinishedAt" : "0001-01-01T00:00:00Z"
-        },
-        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
-        "Created" : "2014-11-17T22:26:03.626304998Z",
-        "Path" : "date",
-        "Args" : [],
-        "Config" : {
-          "Hostname" : "8f177a186b97",
-          "Domainname" : "",
-          "User" : "",
-          "AttachStdin" : false,
-          "AttachStdout" : false,
-          "AttachStderr" : false,
-          "ExposedPorts" : null,
-          "Tty" : false,
-          "OpenStdin" : false,
-          "StdinOnce" : false,
-          "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
-          "Cmd" : [
-            "date"
-          ],
-          "Image" : "ubuntu",
-          "Volumes" : null,
-          "WorkingDir" : "",
-          "Entrypoint" : null,
-          "NetworkDisabled" : false,
-          "MacAddress" : "",
-          "OnBuild" : null,
-          "SecurityOpt" : null
-        },
-        "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
-        "NetworkSettings": {
-            "Bridge": "",
-            "SandboxID": "",
-            "HairpinMode": false,
-            "LinkLocalIPv6Address": "",
-            "LinkLocalIPv6PrefixLen": 0,
-            "Ports": null,
-            "SandboxKey": "",
-            "SecondaryIPAddresses": null,
-            "SecondaryIPv6Addresses": null,
-            "EndpointID": "",
-            "Gateway": "",
-            "GlobalIPv6Address": "",
-            "GlobalIPv6PrefixLen": 0,
-            "IPAddress": "",
-            "IPPrefixLen": 0,
-            "IPv6Gateway": "",
-            "MacAddress": "",
-            "Networks": {
-                "bridge": {
-                    "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
-                    "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d",
-                    "Gateway": "172.17.0.1",
-                    "IPAddress": "172.17.0.2",
-                    "IPPrefixLen": 16,
-                    "IPv6Gateway": "",
-                    "GlobalIPv6Address": "",
-                    "GlobalIPv6PrefixLen": 0,
-                    "MacAddress": "02:42:ac:12:00:02"
-                }
-            }
+        "CanRemove": false,
+        "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126",
+        "DetachKeys": "",
+        "ExitCode": 2,
+        "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b",
+        "OpenStderr": true,
+        "OpenStdin": true,
+        "OpenStdout": true,
+        "ProcessConfig": {
+            "arguments": [
+                "-c",
+                "exit 2"
+            ],
+            "entrypoint": "sh",
+            "privileged": false,
+            "tty": true,
+            "user": "1000"
         },
-        "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
-        "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
-        "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
-        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
-        "Name" : "/test",
-        "Driver" : "aufs",
-        "ExecDriver" : "native-0.2",
-        "MountLabel" : "",
-        "ProcessLabel" : "",
-        "AppArmorProfile" : "",
-        "RestartCount" : 0,
-        "Mounts" : []
-      }
+        "Running": false
     }
 
 Status Codes:
@@ -2832,7 +2748,8 @@ Status Codes:
           "Driver": "local",
           "Mountpoint": "/var/lib/docker/volumes/tardis"
         }
-      ]
+      ],
+      "Warnings": []
     }
 
 Query Parameters:

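The corrected `Content-Type` and payload for `GET /exec/(id)/json` above can be spot-checked against a live daemon. A minimal sketch, assuming curl 7.40+ for `--unix-socket` support and using a placeholder exec ID:

    $ curl --unix-socket /var/run/docker.sock http://localhost/v1.23/exec/f33bbfb39f5b/json

The daemon should answer with the JSON document shown in the new example, served as `application/json`.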
+ 1 - 0
docs/reference/commandline/build.md

@@ -26,6 +26,7 @@ parent = "smn_cli"
       --force-rm                      Always remove intermediate containers
       --help                          Print usage
       --isolation=""                  Container isolation technology
+      --label=[]                      Set metadata for an image
       -m, --memory=""                 Memory limit for all build containers
       --memory-swap=""                A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap.
       --no-cache                      Do not use cache when building the image

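The new `--label` flag takes the same `key=value` form as a Dockerfile `LABEL` instruction; a quick sketch, with a hypothetical label and image name:

    $ docker build --label com.example.version=0.1 -t test-image .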
+ 9 - 7
docs/reference/commandline/daemon.md

@@ -21,13 +21,14 @@ weight = -1
       -b, --bridge=""                        Attach containers to a network bridge
       --bip=""                               Specify network bridge IP
       --cgroup-parent=                       Set parent cgroup for all containers
-      -D, --debug                            Enable debug mode
-      --default-gateway=""                   Container default gateway IPv4 address
-      --default-gateway-v6=""                Container default gateway IPv6 address
       --cluster-store=""                     URL of the distributed storage backend
       --cluster-advertise=""                 Address of the daemon instance on the cluster
       --cluster-store-opt=map[]              Set cluster options
       --config-file=/etc/docker/daemon.json  Daemon configuration file
+      --containerd                           Path to containerd socket
+      -D, --debug                            Enable debug mode
+      --default-gateway=""                   Container default gateway IPv4 address
+      --default-gateway-v6=""                Container default gateway IPv6 address
       --dns=[]                               DNS server to use
       --dns-opt=[]                           DNS options to use
       --dns-search=[]                        DNS search domains to use
@@ -462,7 +463,7 @@ options for `zfs` start with `zfs`.
 
     Example use:
 
-        $ docker daemon --storage-opt dm.min_free_space_percent=10%
+        $ docker daemon --storage-opt dm.min_free_space=10%
 
 Currently supported options of `zfs`:
 
@@ -490,12 +491,13 @@ with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
 single `native.cgroupdriver` option is available.
 
 The `native.cgroupdriver` option specifies the management of the container's
-cgroups. You can specify only specify `cgroupfs` at the moment.  If you omit the
+cgroups. You can only specify `cgroupfs` or `systemd`. If you specify
+`systemd` and it is not available, the system errors out. If you omit the
 `native.cgroupdriver` option, `cgroupfs` is used.
 
-This example explicitly sets the `cgroupdriver` to `cgroupfs`:
+This example sets the `cgroupdriver` to `systemd`:
 
-    $ sudo docker daemon --exec-opt native.cgroupdriver=cgroupfs
+    $ sudo docker daemon --exec-opt native.cgroupdriver=systemd
 
 Setting this option applies to all containers the daemon launches.
 

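For reference, a sketch combining the newly documented `--containerd` flag with the systemd cgroup driver; the socket path is the one used by this PR's integration tests, and the cgroup driver assumes a systemd host:

    $ sudo docker daemon \
        --containerd /var/run/docker/libcontainerd/docker-containerd.sock \
        --exec-opt native.cgroupdriver=systemd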
+ 1 - 0
docs/reference/commandline/network_create.md

@@ -23,6 +23,7 @@ parent = "smn_cli"
     --ipam-driver=default    IP Address Management Driver
     --ipam-opt=map[]         Set custom IPAM driver specific options
     --ipv6                   Enable IPv6 networking
+    --label=[]               Set metadata on a network
     -o --opt=map[]           Set custom driver specific options
     --subnet=[]              Subnet in CIDR format that represents a network segment
 

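Like image labels, network labels are `key=value` pairs; a sketch with hypothetical values:

    $ docker network create --label com.example.env=test my-network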
+ 2 - 0
docs/reference/commandline/pull.md

@@ -165,6 +165,8 @@ listening on port 5000 (`myregistry.local:5000`):
 $ docker pull myregistry.local:5000/testing/test-image
 ```
 
+Registry credentials are managed by [docker login](login.md).
+
 Docker uses the `https://` protocol to communicate with a registry, unless the
 registry is allowed to be accessed over an insecure connection. Refer to the
 [insecure registries](daemon.md#insecure-registries) section for more information.

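For a private registry like the one above, that usually means logging in first; a sketch:

    $ docker login myregistry.local:5000
    $ docker pull myregistry.local:5000/testing/test-image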
+ 2 - 0
docs/reference/commandline/push.md

@@ -22,3 +22,5 @@ registry or to a self-hosted one.
 
 Killing the `docker push` process, for example by pressing `CTRL-c` while it is
 running in a terminal, will terminate the push operation.
+
+Registry credentials are managed by [docker login](login.md).

+ 1 - 0
docs/reference/commandline/volume_create.md

@@ -16,6 +16,7 @@ parent = "smn_cli"
 
       -d, --driver=local    Specify volume driver name
       --help                Print usage
+      --label=[]            Set metadata for a volume
       --name=               Specify volume name
       -o, --opt=map[]       Set driver specific options
 

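A sketch of the new flag, with a hypothetical label and the volume name used elsewhere in these docs:

    $ docker volume create --name tardis --label com.example.backup=nightly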
+ 0 - 3
docs/reference/run.md

@@ -1414,9 +1414,6 @@ The example below mounts an empty tmpfs into the container with the `rw`,
 
     --volumes-from="": Mount all volumes from the given container(s)
 
-> **Note**:
-> The auto-creation of the host path has been [*deprecated*](../deprecated.md#auto-creating-missing-host-paths-for-bind-mounts).
-
 > **Note**:
 > When using systemd to manage the Docker daemon's start and stop, in the systemd
 > unit file there is an option to control mount propagation for the Docker daemon

+ 1 - 1
docs/userguide/containers/dockervolumes.md

@@ -144,7 +144,7 @@ Mounting a host directory can be useful for testing. For example, you can mount
 source code inside a container. Then, change the source code and see its effect
 on the application in real time. The directory on the host must be specified as
 an absolute path and if the directory doesn't exist the Engine daemon automatically
-creates it for you.  This auto-creation of the host path has been [*deprecated*](#auto-creating-missing-host-paths-for-bind-mounts).
+creates it for you.
 
 Docker volumes default to mount in read-write mode, but you can also set it to
 be mounted read-only.

+ 1 - 1
docs/userguide/networking/work-with-networks.md

@@ -87,7 +87,7 @@ specify a single subnet. An `overlay` network supports multiple subnets.
 > in your infrastructure that is not managed by docker. Such overlaps can cause
 > connectivity issues or failures when containers are connected to that network.
 
-In addition to the `--subnetwork` option, you also specify the `--gateway` `--ip-range` and `--aux-address` options.
+In addition to the `--subnet` option, you can also specify the `--gateway`, `--ip-range`, and `--aux-address` options.
 
 ```bash
 $ docker network create -d overlay

+ 1 - 1
hack/make.sh

@@ -121,7 +121,7 @@ fi
 if [ "$DOCKER_EXPERIMENTAL" ]; then
 	echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
 	echo >&2
-	DOCKER_BUILDTAGS+=" experimental pkcs11"
+	DOCKER_BUILDTAGS+=" experimental"
 fi
 
 if [ -z "$DOCKER_CLIENTONLY" ]; then

+ 1 - 2
hack/make/.build-deb/control

@@ -15,8 +15,7 @@ Recommends: aufs-tools,
             cgroupfs-mount | cgroup-lite,
             git,
             xz-utils,
-            ${apparmor:Recommends},
-            ${yubico:Recommends}
+            ${apparmor:Recommends}
 Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs
 Description: Docker: the open-source application container engine
  Docker is an open source project to build, ship and run any application as a

+ 0 - 2
hack/make/.build-deb/rules

@@ -5,8 +5,6 @@ VERSION = $(shell cat VERSION)
 override_dh_gencontrol:
 	# if we're on Ubuntu, we need to Recommends: apparmor
 	echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
-	# if we are building experimental we recommend yubico-piv-tool
-	echo 'yubico:Recommends=$(shell [ "$DOCKER_EXPERIMENTAL" ] && echo "yubico-piv-tool (>= 1.1.0~)")' >> debian/docker-engine.substvars
 	dh_gencontrol
 
 override_dh_auto_build:

+ 0 - 7
hack/make/.build-rpm/docker-engine.spec

@@ -60,13 +60,6 @@ Requires: device-mapper >= 1.02.90-2
 %global with_selinux 1
 %endif
 
-%if 0%{?_experimental}
-# yubico-piv-tool conditional
-%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7
-Requires: yubico-piv-tool >= 1.1.0
-%endif
-%endif
-
 # start if with_selinux
 %if 0%{?with_selinux}
 # Version of SELinux we were using

+ 1 - 1
hack/make/binary

@@ -36,7 +36,7 @@ if [ "$(go env GOOS)" == "linux" ] ; then
 	esac
 fi
 
-if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ] && [ "$DOCKER_EXPERIMENTAL" ]; then
+if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then
	if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then
 		export CGO_ENABLED=1
 		export CC=o64-clang

+ 41 - 6
hack/make/tgz

@@ -15,19 +15,54 @@ for d in "$CROSS/"*/*; do
 	export GOOS="$(basename "$(dirname "$d")")"
 	BINARY_NAME="docker-$VERSION"
 	BINARY_EXTENSION="$(export GOOS && binary_extension)"
+	if [ "$GOOS" = 'windows' ]; then
+		# if windows use a zip, not tgz
+		BUNDLE_EXTENSION=".zip"
+		IS_TAR="false"
+	else
+		BUNDLE_EXTENSION=".tgz"
+		IS_TAR="true"
+	fi
 	BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
 	mkdir -p "$DEST/$GOOS/$GOARCH"
-	TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz"
+	TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION"
 
-	mkdir -p "$DEST/build/usr/local/bin"
-	cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION"
-	copy_containerd "$DEST/build/usr/local/bin/"
+	# The staging directory for the files in the tgz
+	BUILD_PATH="$DEST/build"
 
-	tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr
+	# The directory that is at the root of the tar file
+	TAR_BASE_DIRECTORY="docker"
+
+	# $DEST/build/docker
+	TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY"
+
+	# Copy the correct docker binary
+	mkdir -p "$TAR_PATH"
+	cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/docker$BINARY_EXTENSION"
+
+	# copy over all the containerd binaries
+	copy_containerd "$TAR_PATH"
+
+	if [ "$IS_TAR" == "true" ]; then
+		echo "Creating tgz from $BUILD_PATH and naming it $TGZ"
+		tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" "$TAR_BASE_DIRECTORY"
+	else
+		# zip needs the full absolute path to the archive, not a relative one
+		ZIP="$(pwd)/$TGZ"
+		# keep track of where we are, for later.
+		pushd .
+		# go into the BUILD_PATH since zip does not have a -C equivalent.
+		cd "$BUILD_PATH"
+		echo "Creating zip from $BUILD_PATH and naming it $ZIP"
+		zip -q -r "$ZIP" "$TAR_BASE_DIRECTORY"
+		# go back to where we started
+		popd
+	fi
 
 	hash_files "$TGZ"
 
-	rm -rf "$DEST/build"
+	# cleanup after ourselves
+	rm -rf "$BUILD_PATH"
 
 	echo "Created tgz: $TGZ"
 done

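With this change the bundle extension varies by platform; a sketch of the resulting artifact layout, assuming the `bundles/$VERSION/tgz/$GOOS/$GOARCH` convention from hack/release.sh below:

    bundles/$VERSION/tgz/linux/amd64/docker-$VERSION.tgz
    bundles/$VERSION/tgz/windows/amd64/docker-$VERSION.zip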
+ 15 - 10
hack/release.sh

@@ -181,7 +181,9 @@ release_build() {
 	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
 	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
 	binary=docker-$VERSION
-	tgz=docker-$VERSION.tgz
+	zipExt=".tgz"
+	binaryExt=""
+	tgz=$binary$zipExt
 
 	latestBase=
 	if [ -z "$NOLATEST" ]; then
@@ -204,11 +206,12 @@ release_build() {
 			s3Os=Linux
 			;;
 		windows)
+			# on Windows, use the .zip and .exe extensions for the files
 			s3Os=Windows
-			binary+='.exe'
-			if [ "$latestBase" ]; then
-				latestBase+='.exe'
-			fi
+			zipExt=".zip"
+			binaryExt=".exe"
+			tgz=$binary$zipExt
+			binary+=$binaryExt
 			;;
 		*)
 			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
@@ -235,11 +238,13 @@ release_build() {
 	esac
 
 	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
-	latest=
+	# latest=
 	latestTgz=
 	if [ "$latestBase" ]; then
-		latest="$s3Dir/$latestBase"
-		latestTgz="$s3Dir/$latestBase.tgz"
+		# commented out since we aren't uploading binaries right now.
+		# latest="$s3Dir/$latestBase$binaryExt"
+		# we don't include the $binaryExt because we don't want docker.exe.zip
+		latestTgz="$s3Dir/$latestBase$zipExt"
 	fi
 
 	if [ ! -f "$tgzDir/$tgz" ]; then
@@ -308,6 +313,6 @@ echo "We have just pushed $VERSION to $(s3_url). You can download it with the fo
 echo
 echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
 echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
-echo "Windows 64bit client tgz: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.tgz"
-echo "Windows 32bit client tgz: $(s3_url)/builds/Windows/i386/docker-$VERSION.tgz"
+echo "Windows 64bit client zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
+echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
 echo

+ 3 - 3
hack/vendor.sh

@@ -29,7 +29,7 @@ clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
 clone git github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork v0.7.0-dev.10
+clone git github.com/docker/libnetwork v0.7.0-rc.1
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
 clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
@@ -44,7 +44,7 @@ clone git github.com/coreos/etcd v2.2.0
 fix_rewritten_imports github.com/coreos/etcd
 clone git github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec
 clone git github.com/hashicorp/consul v0.5.2
-clone git github.com/boltdb/bolt v1.1.0
+clone git github.com/boltdb/bolt v1.2.0
 clone git github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
 
 # get graph and distribution packages
@@ -89,5 +89,5 @@ clone git google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 https:/
 clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud
 
 # containerd
-clone git github.com/docker/containerd 142e22a4dce86f3b8ce068a0b043489d21976bb8
+clone git github.com/docker/containerd 07c95162cdcead88dfe4ca0ffb3cea02375ec54d
 clean

+ 1 - 0
integration-cli/daemon.go

@@ -144,6 +144,7 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
 		d.Command,
 		"--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock",
 		"--graph", d.root,
+		"--exec-root", filepath.Join(d.folder, "exec-root"),
 		"--pidfile", fmt.Sprintf("%s/docker.pid", d.folder),
 		fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
 	)

+ 26 - 8
integration-cli/docker_cli_build_test.go

@@ -6680,11 +6680,9 @@ func (s *DockerSuite) TestBuildLabel(c *check.C) {
 	_, err := buildImage(name, `
   FROM `+minimalBaseImage()+`
   LABEL default foo
-`, false, []string{"--label", testLabel}...)
+`, false, "--label", testLabel)
 
-	if err != nil {
-		c.Fatal("error building image with labels", err)
-	}
+	c.Assert(err, checker.IsNil)
 
 	res := inspectFieldJSON(c, name, "Config.Labels")
 
@@ -6699,6 +6697,28 @@ func (s *DockerSuite) TestBuildLabel(c *check.C) {
 	}
 }
 
+func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) {
+	name := "testbuildlabel"
+
+	_, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar")
+
+	c.Assert(err, checker.IsNil)
+
+	res, err := inspectImage(name, "json .Config.Labels")
+	c.Assert(err, checker.IsNil)
+	var labels map[string]string
+
+	if err := json.Unmarshal([]byte(res), &labels); err != nil {
+		c.Fatal(err)
+	}
+
+	v, ok := labels["foo"]
+	if !ok {
+		c.Fatal("label `foo` not found in image")
+	}
+	c.Assert(v, checker.Equals, "bar")
+}
+
 func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
 	name := "testbuildlabelcachecommit"
 	testLabel := "foo"
@@ -6713,11 +6733,9 @@ func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
 	_, err := buildImage(name, `
   FROM `+minimalBaseImage()+`
   LABEL default foo
-`, true, []string{"--label", testLabel}...)
+`, true, "--label", testLabel)
 
-	if err != nil {
-		c.Fatal("error building image with labels", err)
-	}
+	c.Assert(err, checker.IsNil)
 
 	res := inspectFieldJSON(c, name, "Config.Labels")
 

+ 45 - 0
integration-cli/docker_cli_daemon_experimental_test.go

@@ -3,6 +3,8 @@
 package main
 
 import (
+	"io/ioutil"
+	"os"
 	"os/exec"
 	"strings"
 	"time"
@@ -56,6 +58,49 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check
 
 }
 
+// os.Kill should kill daemon ungracefully, leaving behind live containers.
+// The live containers should be known to the restarted daemon. Stopping
+// them now should remove the mounts.
+func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+	out, err := s.d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	id := strings.TrimSpace(out)
+
+	c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
+	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+
+	// container mounts should exist even after daemon has crashed.
+	comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
+
+	// restart daemon.
+	if err := s.d.Restart(); err != nil {
+		c.Fatal(err)
+	}
+
+	// container should be running.
+	out, err = s.d.Cmd("inspect", "--format='{{.State.Running}}'", id)
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	out = strings.TrimSpace(out)
+	if out != "true" {
+		c.Fatalf("Container %s expected to stay alive after daemon restart", id)
+	}
+
+	// 'docker stop' should work.
+	out, err = s.d.Cmd("stop", id)
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+
+	// Now, container mounts should be gone.
+	mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+	comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
+}
+
 // TestDaemonRestartWithPausedRunningContainer requires live restore of running containers
 func (s *DockerDaemonSuite) TestDaemonRestartWithPausedRunningContainer(t *check.C) {
 	if err := s.d.StartWithBusybox(); err != nil {

+ 39 - 0
integration-cli/docker_cli_daemon_not_experimental_test.go

@@ -0,0 +1,39 @@
+// +build daemon,!windows,!experimental
+
+package main
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+// os.Kill should kill daemon ungracefully, leaving behind container mounts.
+// A subsequent daemon restart should clean up said mounts.
+func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonKill(c *check.C) {
+	c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+	out, err := s.d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	id := strings.TrimSpace(out)
+	c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
+	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+
+	// container mounts should exist even after daemon has crashed.
+	comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
+
+	// restart daemon.
+	if err := s.d.Restart(); err != nil {
+		c.Fatal(err)
+	}
+
+	// Now, container mounts should be gone.
+	mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+	comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
+}

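Both tests reduce to checking the daemon host's mount table for the container ID; a sketch of the manual equivalent, with a placeholder ID variable:

    $ grep "$container_id" /proc/self/mountinfo || echo "no leftover mounts"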
+ 35 - 6
integration-cli/docker_cli_daemon_test.go

@@ -1501,25 +1501,54 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) {
 	c.Assert(s.d.Restart(), check.IsNil)
 }
 
-func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) {
+// os.Kill should kill daemon ungracefully, leaving behind container mounts.
+// A subsequent daemon restart should clean up said mounts.
+func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) {
 	c.Assert(s.d.StartWithBusybox(), check.IsNil)
 
 	out, err := s.d.Cmd("run", "-d", "busybox", "top")
 	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
 	id := strings.TrimSpace(out)
-	c.Assert(s.d.Kill(), check.IsNil)
+	c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
+	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+
+	// container mounts should exist even after daemon has crashed.
+	comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
 
 
 	// kill the container
 	runCmd := exec.Command(ctrBinary, "--address", "/var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id)
 	if out, ec, err := runCommandWithOutput(runCmd); err != nil {
-		c.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, id)
+		c.Fatalf("Failed to run ctr, ExitCode: %d, err: %v output: %s id: %s\n", ec, err, out, id)
+	}
+
+	// restart daemon.
+	if err := s.d.Restart(); err != nil {
+		c.Fatal(err)
 	}
 	}
 
-	// the exit event might be received after we do the inspect
+	// Now, container mounts should be gone.
+	mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+	comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
+}
+
+// os.Interrupt should perform a graceful daemon shutdown and hence clean up mounts.
+func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) {
+	c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+	out, err := s.d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	id := strings.TrimSpace(out)
+
+	// Send SIGINT and daemon should clean up
+	c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil)
+
+	// Wait a bit for the daemon to handle cleanups.
 	time.Sleep(3 * time.Second)
 
-	c.Assert(s.d.Start(), check.IsNil)
 	mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
 	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
 

+ 15 - 0
integration-cli/docker_utils.go

@@ -886,6 +886,21 @@ func inspectMountPointJSON(j, destination string) (types.MountPoint, error) {
 	return *m, nil
 }
 
+func inspectImage(name, filter string) (string, error) {
+	args := []string{"inspect", "--type", "image"}
+	if filter != "" {
+		format := fmt.Sprintf("{{%s}}", filter)
+		args = append(args, "-f", format)
+	}
+	args = append(args, name)
+	inspectCmd := exec.Command(dockerBinary, args...)
+	out, exitCode, err := runCommandWithOutput(inspectCmd)
+	if err != nil || exitCode != 0 {
+		return "", fmt.Errorf("failed to inspect %s: %s", name, out)
+	}
+	return strings.TrimSpace(out), nil
+}
+
 func getIDByName(name string) (string, error) {
 	return inspectFieldWithError(name, "Id")
 }

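The `inspectImage` helper above builds the same command line you would run by hand; a sketch with a hypothetical image name:

    $ docker inspect --type image -f '{{json .Config.Labels}}' test-image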
+ 1 - 0
layer/layer.go

@@ -174,6 +174,7 @@ type Store interface {
 	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error)
 	GetRWLayer(id string) (RWLayer, error)
 	GetMountID(id string) (string, error)
+	ReinitRWLayer(l RWLayer) error
 	ReleaseRWLayer(RWLayer) ([]Metadata, error)
 
 	Cleanup() error

+ 24 - 2
layer/layer_store.go

@@ -334,7 +334,10 @@ func (ls *layerStore) get(l ChainID) *roLayer {
 }
 
 func (ls *layerStore) Get(l ChainID) (Layer, error) {
-	layer := ls.get(l)
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	layer := ls.getWithoutLock(l)
 	if layer == nil {
 		return nil, ErrLayerDoesNotExist
 	}
@@ -487,11 +490,30 @@ func (ls *layerStore) GetMountID(id string) (string, error) {
 	if !ok {
 		return "", ErrMountDoesNotExist
 	}
-	logrus.Debugf("GetRWLayer id: %s -> mountID: %s", id, mount.mountID)
+	logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
 
 	return mount.mountID, nil
 }
 
+// ReinitRWLayer reinitializes a given mount to the layerstore, specifically
+// initializing the usage count. It should strictly only be used in the
+// daemon's restore path to restore state of live containers.
+func (ls *layerStore) ReinitRWLayer(l RWLayer) error {
+	ls.mountL.Lock()
+	defer ls.mountL.Unlock()
+
+	m, ok := ls.mounts[l.Name()]
+	if !ok {
+		return ErrMountDoesNotExist
+	}
+
+	if err := m.incActivityCount(l); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
 	ls.mountL.Lock()
 	defer ls.mountL.Unlock()

+ 21 - 0
layer/mounted_layer.go

@@ -83,6 +83,18 @@ func (ml *mountedLayer) hasReferences() bool {
 	return len(ml.references) > 0
 }
 
+func (ml *mountedLayer) incActivityCount(ref RWLayer) error {
+	rl, ok := ml.references[ref]
+	if !ok {
+		return ErrLayerNotRetained
+	}
+
+	if err := rl.acquire(); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (ml *mountedLayer) deleteReference(ref RWLayer) error {
 	rl, ok := ml.references[ref]
 	if !ok {
@@ -111,6 +123,15 @@ type referencedRWLayer struct {
 	activityCount int
 }
 
+func (rl *referencedRWLayer) acquire() error {
+	rl.activityL.Lock()
+	defer rl.activityL.Unlock()
+
+	rl.activityCount++
+
+	return nil
+}
+
 func (rl *referencedRWLayer) release() error {
 	rl.activityL.Lock()
 	defer rl.activityL.Unlock()

+ 7 - 19
libcontainerd/client.go

@@ -4,35 +4,23 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/locker"
 )
 
 // clientCommon contains the platform agnostic fields used in the client structure
 type clientCommon struct {
-	backend          Backend
-	containers       map[string]*container
-	containerMutexes map[string]*sync.Mutex // lock by container ID
-	mapMutex         sync.RWMutex           // protects read/write oprations from containers map
-	sync.Mutex                              // lock for containerMutexes map access
+	backend    Backend
+	containers map[string]*container
+	locker     *locker.Locker
+	mapMutex   sync.RWMutex // protects read/write operations from containers map
 }
 
 func (clnt *client) lock(containerID string) {
-	clnt.Lock()
-	if _, ok := clnt.containerMutexes[containerID]; !ok {
-		clnt.containerMutexes[containerID] = &sync.Mutex{}
-	}
-	clnt.Unlock()
-	clnt.containerMutexes[containerID].Lock()
+	clnt.locker.Lock(containerID)
 }
 
 func (clnt *client) unlock(containerID string) {
-	clnt.Lock()
-	if l, ok := clnt.containerMutexes[containerID]; ok {
-		l.Unlock()
-	} else {
-		logrus.Warnf("unlock of non-existing mutex: %s", containerID)
-	}
-	clnt.Unlock()
+	clnt.locker.Unlock(containerID)
 }
 
 // must hold a lock for cont.containerID

+ 2 - 0
libcontainerd/client_shutdownrestore_linux.go

@@ -31,8 +31,10 @@ func (clnt *client) Restore(containerID string, options ...CreateOption) error {
 			select {
 			case <-time.After(2 * time.Second):
 			case <-w.wait():
+				return nil
 			}
 		case <-w.wait():
+			return nil
 		}
 	}
 	return clnt.setExited(containerID)

+ 2 - 0
libcontainerd/container_linux.go

@@ -64,6 +64,8 @@ func (ctr *container) start() error {
 		Stdin:      ctr.fifo(syscall.Stdin),
 		Stdout:     ctr.fifo(syscall.Stdout),
 		Stderr:     ctr.fifo(syscall.Stderr),
+		// check to see if we are running in ramdisk to disable pivot root
+		NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
 	}
 	ctr.client.appendContainer(ctr)
 

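Per the check added above, pivot root is disabled whenever `DOCKER_RAMDISK` is set to any non-empty value in the daemon's environment; a sketch:

    $ DOCKER_RAMDISK=1 docker daemon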
+ 41 - 6
libcontainerd/remote_linux.go

@@ -10,12 +10,14 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strconv"
+	"strings"
 	"sync"
 	"syscall"
 	"time"
 
 	"github.com/Sirupsen/logrus"
 	containerd "github.com/docker/containerd/api/grpc/types"
+	"github.com/docker/docker/pkg/locker"
 	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/utils"
 	"golang.org/x/net/context"
@@ -45,6 +47,7 @@ type remote struct {
 	clients     []*client
 	eventTsPath string
 	pastEvents  map[string]*containerd.Event
+	runtimeArgs []string
 }
 
 // New creates a fresh instance of libcontainerd remote.
@@ -169,9 +172,9 @@ func (r *remote) Cleanup() {
 func (r *remote) Client(b Backend) (Client, error) {
 	c := &client{
 		clientCommon: clientCommon{
-			backend:          b,
-			containerMutexes: make(map[string]*sync.Mutex),
-			containers:       make(map[string]*container),
+			backend:    b,
+			containers: make(map[string]*container),
+			locker:     locker.New(),
 		},
 		remote:        r,
 		exitNotifiers: make(map[string]*exitNotifier),
@@ -210,7 +213,7 @@ func (r *remote) getLastEventTimestamp() int64 {
 	t := time.Now()
 
 	fi, err := os.Stat(r.eventTsPath)
-	if os.IsNotExist(err) {
+	if os.IsNotExist(err) || fi.Size() == 0 {
 		return t.Unix()
 	}
 
@@ -340,11 +343,28 @@ func (r *remote) runContainerdDaemon() error {
 	// Start a new instance
 	args := []string{"-l", r.rpcAddr, "--runtime", "docker-runc"}
 	if r.debugLog {
-		args = append(args, "--debug", "true")
+		args = append(args, "--debug", "--metrics-interval=0")
 	}
+	if len(r.runtimeArgs) > 0 {
+		for _, v := range r.runtimeArgs {
+			args = append(args, "--runtime-args")
+			args = append(args, v)
+		}
+		logrus.Debugf("runContainerdDaemon: runtimeArgs: %s", args)
+	}
+
 	cmd := exec.Command(containerdBinary, args...)
-	// TODO: store logs?
+	// redirect containerd logs to docker logs
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
+	cmd.Env = nil
+	// clear the NOTIFY_SOCKET from the env when starting containerd
+	for _, e := range os.Environ() {
+		if !strings.HasPrefix(e, "NOTIFY_SOCKET") {
+			cmd.Env = append(cmd.Env, e)
+		}
+	}
 	if err := cmd.Start(); err != nil {
 		return err
 	}
@@ -375,6 +395,21 @@ func (a rpcAddr) Apply(r Remote) error {
 	return fmt.Errorf("WithRemoteAddr option not supported for this remote")
 	return fmt.Errorf("WithRemoteAddr option not supported for this remote")
 }
 }
 
 
+// WithRuntimeArgs sets the list of runtime args passed to containerd
+func WithRuntimeArgs(args []string) RemoteOption {
+	return runtimeArgs(args)
+}
+
+type runtimeArgs []string
+
+func (rt runtimeArgs) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.runtimeArgs = rt
+		return nil
+	}
+	return fmt.Errorf("WithRuntimeArgs option not supported for this remote")
+}
+
 // WithStartDaemon defines if libcontainerd should also run containerd daemon.
 func WithStartDaemon(start bool) RemoteOption {
 	return startDaemon(start)

+ 4 - 4
libcontainerd/remote_windows.go

@@ -1,6 +1,6 @@
 package libcontainerd
 
-import "sync"
+import "github.com/docker/docker/pkg/locker"
 
 type remote struct {
 }
@@ -8,9 +8,9 @@ type remote struct {
 func (r *remote) Client(b Backend) (Client, error) {
 	c := &client{
 		clientCommon: clientCommon{
-			backend:          b,
-			containerMutexes: make(map[string]*sync.Mutex),
-			containers:       make(map[string]*container),
+			backend:    b,
+			containers: make(map[string]*container),
+			locker:     locker.New(),
 		},
 	}
 	return c, nil

+ 4 - 0
man/docker-build.1.md

@@ -13,6 +13,7 @@ docker-build - Build a new image from the source code at PATH
 [**-f**|**--file**[=*PATH/Dockerfile*]]
 [**--force-rm**]
 [**--isolation**[=*default*]]
+[**--label**[=*[]*]]
 [**--no-cache**]
 [**--pull**]
 [**-q**|**--quiet**]
@@ -71,6 +72,9 @@ set as the **URL**, the repository is cloned locally and then sent as the contex
 **--isolation**="*default*"
    Isolation specifies the type of isolation technology used by containers.
 
+**--label**=*label*
+   Set metadata for an image
+
 **--no-cache**=*true*|*false*
    Do not use cache when building the image. The default is *false*.
 

+ 5 - 1
man/docker-daemon.8.md

@@ -15,6 +15,7 @@ docker-daemon - Enable daemon mode
 [**--cluster-advertise**[=*[]*]]
 [**--cluster-store-opt**[=*map[]*]]
 [**--config-file**[=*/etc/docker/daemon.json*]]
+[**--containerd**[=*SOCKET-PATH*]]
 [**-D**|**--debug**]
 [**--default-gateway**[=*DEFAULT-GATEWAY*]]
 [**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]]
@@ -101,6 +102,9 @@ format.
 **--config-file**="/etc/docker/daemon.json"
   Specifies the JSON file path to load the configuration from.
 
+**--containerd**=""
+  Path to containerd socket.
+
 **-D**, **--debug**=*true*|*false*
   Enable debug mode. Default is false.
 
@@ -487,7 +491,7 @@ pool and that should automatically resolve it. If loop devices are being
 used, then stop docker, grow the size of loop files and restart docker and
 that should resolve the issue.
 
-Example use: `docker daemon --storage-opt dm.min_free_space_percent=10%`
+Example use: `docker daemon --storage-opt dm.min_free_space=10%`
 
 ## ZFS options
 

+ 4 - 0
man/docker-network-create.1.md

@@ -15,6 +15,7 @@ docker-network-create - create a new network
 [**--ipam-driver**=*default*]
 [**--ipam-opt**=*map[]*]
 [**--ipv6**]
+[**--label**[=*[]*]]
 [**-o**|**--opt**=*map[]*]
 [**--subnet**=*[]*]
 NETWORK-NAME
@@ -156,6 +157,9 @@ If you want to create an externally isolated `overlay` network, you can specify
 **--ipv6**
   Enable IPv6 networking
 
+**--label**=*label*
+   Set metadata for a network
+
 **-o**, **--opt**=map[]
   Set custom driver options
 

+ 2 - 0
man/docker-pull.1.md

@@ -151,6 +151,8 @@ listening on port 5000 (`myregistry.local:5000`):
 
     $ docker pull myregistry.local:5000/testing/test-image
 
+Registry credentials are managed by **docker-login(1)**.
+
 Docker uses the `https://` protocol to communicate with a registry, unless the
 registry is allowed to be accessed over an insecure connection. Refer to the
 [insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries)

+ 2 - 0
man/docker-push.1.md

@@ -44,6 +44,8 @@ Check that this worked by running:
 You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd`
 listed.
 
+Registry credentials are managed by **docker-login(1)**.
+
 # HISTORY
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.

+ 4 - 0
man/docker-volume-create.1.md

@@ -8,6 +8,7 @@ docker-volume-create - Create a new volume
 **docker volume create**
 [**-d**|**--driver**[=*DRIVER*]]
 [**--help**]
+[**--label**[=*[]*]]
 [**--name**[=*NAME*]]
 [**-o**|**--opt**[=*[]*]]
 
@@ -51,6 +52,9 @@ Another example:
 **--help**
   Print usage statement
 
+**--label**=*label*
+   Set metadata for a volume
+
 **--name**=""
   Specify volume name
 

+ 3 - 2
man/docker.1.md

@@ -230,8 +230,9 @@ Use the **--exec-opt** flags to specify options to the execution driver.
 The following options are available:
 
 #### native.cgroupdriver
-Specifies the management of the container's `cgroups`. Only `cgroupfs` can be specified
-`cgroupfs` at the moment.
+Specifies the management of the container's `cgroups`. You can specify `cgroupfs`
+or `systemd`. If you specify `systemd` and it is not available, the system errors
+out.
 
 #### Client
 For specific client examples please see the man page for the specific Docker

+ 0 - 1
project/PACKAGERS.md

@@ -60,7 +60,6 @@ To build the Docker daemon, you will additionally need:
 * btrfs-progs version 3.16.1 or later (unless using an older version is
   absolutely necessary, in which case 3.8 is the minimum)
 * libseccomp version 2.2.1 or later (for build tag seccomp)
-* yubico-piv-tool version 1.1.0 or later (for experimental)
 
 Be sure to also check out Docker's Dockerfile for the most up-to-date list of
 these build-time dependencies.

+ 2 - 2
registry/auth.go

@@ -29,7 +29,7 @@ func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent st
 
 	serverAddress := registryEndpoint.String()
 
-	logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint)
+	logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress)
 
 	if serverAddress == "" {
 		return "", "", fmt.Errorf("Server Error: Server Address not set.")
@@ -103,7 +103,7 @@ func (err fallbackError) Error() string {
 // endpoint will be pinged to get authorization challenges. These challenges
 // will be used to authenticate against the registry to validate credentials.
 func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
-	logrus.Debugf("attempting v2 login to registry endpoint %s", endpoint)
+	logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
 
 	modifiers := DockerHeaders(userAgent, nil)
 	authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)

+ 7 - 43
vendor/src/github.com/boltdb/bolt/Makefile

@@ -1,54 +1,18 @@
-TEST=.
-BENCH=.
-COVERPROFILE=/tmp/c.out
 BRANCH=`git rev-parse --abbrev-ref HEAD`
 COMMIT=`git rev-parse --short HEAD`
 GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
 
 default: build
 
-bench:
-	go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH)
-
-# http://cloc.sourceforge.net/
-cloc:
-	@cloc --not-match-f='Makefile|_test.go' .
-
-cover: fmt
-	go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) .
-	go tool cover -html=$(COVERPROFILE)
-	rm $(COVERPROFILE)
-
-cpuprofile: fmt
-	@go test -c
-	@./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof
+race:
+	@go test -v -race -test.run="TestSimulate_(100op|1000op)"
 
 # go get github.com/kisielk/errcheck
 errcheck:
-	@echo "=== errcheck ==="
-	@errcheck github.com/boltdb/bolt
+	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
 
-fmt:
-	@go fmt ./...
-
-get:
-	@go get -d ./...
-
-build: get
-	@mkdir -p bin
-	@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt
-
-test: fmt
-	@go get github.com/stretchr/testify/assert
-	@echo "=== TESTS ==="
-	@go test -v -cover -test.run=$(TEST)
-	@echo ""
-	@echo ""
-	@echo "=== CLI ==="
-	@go test -v -test.run=$(TEST) ./cmd/bolt
-	@echo ""
-	@echo ""
-	@echo "=== RACE DETECTOR ==="
-	@go test -v -race -test.run="TestSimulate_(100op|1000op)"
+test: 
+	@go test -v -cover .
+	@go test -v ./cmd/bolt
 
-.PHONY: bench cloc cover cpuprofile fmt memprofile test
+.PHONY: fmt test

+ 197 - 24
vendor/src/github.com/boltdb/bolt/README.md

@@ -1,8 +1,8 @@
-Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png)
+Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
 ====
 
-Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and
-the [LMDB project][lmdb]. The goal of the project is to provide a simple,
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
 fast, and reliable database for projects that don't require a full database
 server such as Postgres or MySQL.
 
@@ -13,7 +13,6 @@ and setting values. That's it.
 [hyc_symas]: https://twitter.com/hyc_symas
 [lmdb]: http://symas.com/mdb/
 
-
 ## Project Status
 
 Bolt is stable and the API is fixed. Full unit test coverage and randomized
@@ -22,6 +21,36 @@ Bolt is currently in high-load production environments serving databases as
 large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
 services every day.
 
+## Table of Contents
+
+- [Getting Started](#getting-started)
+  - [Installing](#installing)
+  - [Opening a database](#opening-a-database)
+  - [Transactions](#transactions)
+    - [Read-write transactions](#read-write-transactions)
+    - [Read-only transactions](#read-only-transactions)
+    - [Batch read-write transactions](#batch-read-write-transactions)
+    - [Managing transactions manually](#managing-transactions-manually)
+  - [Using buckets](#using-buckets)
+  - [Using key/value pairs](#using-keyvalue-pairs)
+  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+  - [Iterating over keys](#iterating-over-keys)
+    - [Prefix scans](#prefix-scans)
+    - [Range scans](#range-scans)
+    - [ForEach()](#foreach)
+  - [Nested buckets](#nested-buckets)
+  - [Database backups](#database-backups)
+  - [Statistics](#statistics)
+  - [Read-Only Mode](#read-only-mode)
+  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+- [Resources](#resources)
+- [Comparison with other databases](#comparison-with-other-databases)
+  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+  - [LevelDB, RocksDB](#leveldb-rocksdb)
+  - [LMDB](#lmdb)
+- [Caveats & Limitations](#caveats--limitations)
+- [Reading the Source](#reading-the-source)
+- [Other Projects Using Bolt](#other-projects-using-bolt)
 
 ## Getting Started
 
@@ -180,8 +209,8 @@ and then safely close your transaction if an error is returned. This is the
 recommended way to use Bolt transactions.
 
 However, sometimes you may want to manually start and end your transactions.
-You can use the `Tx.Begin()` function directly but _please_ be sure to close the
-transaction.
+You can use the `Tx.Begin()` function directly but **please** be sure to close
+the transaction.
 
 ```go
 // Start a writable transaction.
@@ -269,7 +298,7 @@ then you must use `copy()` to copy it to another byte slice.
 
 
 ### Autoincrementing integer for the bucket
-By using the NextSequence() function, you can let Bolt determine a sequence
+By using the `NextSequence()` function, you can let Bolt determine a sequence
 which can be used as the unique identifier for your key/value pairs. See the
 example below.
 
@@ -309,7 +338,6 @@ type User struct {
     ID int
     ...
 }
-
 ```
 
 ### Iterating over keys
@@ -320,7 +348,9 @@ iteration over these keys extremely fast. To iterate over keys we'll use a
 
 ```go
 db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
 	b := tx.Bucket([]byte("MyBucket"))
+
 	c := b.Cursor()
 
 	for k, v := c.First(); k != nil; k, v = c.Next() {
@@ -344,10 +374,15 @@ Next()   Move to the next key.
 Prev()   Move to the previous key.
 ```
 
-When you have iterated to the end of the cursor then `Next()` will return `nil`.
-You must seek to a position using `First()`, `Last()`, or `Seek()` before
-calling `Next()` or `Prev()`. If you do not seek to a position then these
-functions will return `nil`.
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor then `Next()` will return a
+`nil` key.  You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value.  Use `Bucket.Bucket()` to
+access the sub-bucket.
 
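A minimal sketch of the nested-bucket case just described, assuming a bucket named "Root" that may contain sub-buckets (the names are illustrative):

```go
db.View(func(tx *bolt.Tx) error {
	root := tx.Bucket([]byte("Root"))
	c := root.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			// A nil value means k names a nested bucket.
			sub := root.Bucket(k)
			_ = sub // descend with sub.Cursor() as needed
			continue
		}
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```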
 
 #### Prefix scans
@@ -356,6 +391,7 @@ To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
 
 ```go
 db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
 	c := tx.Bucket([]byte("MyBucket")).Cursor()
 
 	prefix := []byte("1234")
@@ -375,7 +411,7 @@ date range like this:
 
 ```go
 db.View(func(tx *bolt.Tx) error {
-	// Assume our events bucket has RFC3339 encoded time keys.
+	// Assume our events bucket exists and has RFC3339 encoded time keys.
 	c := tx.Bucket([]byte("Events")).Cursor()
 
 	// Our time range spans the 90's decade.
@@ -399,7 +435,9 @@ all the keys in a bucket:
 
 ```go
 db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
 	b := tx.Bucket([]byte("MyBucket"))
+	
 	b.ForEach(func(k, v []byte) error {
 		fmt.Printf("key=%s, value=%s\n", k, v)
 		return nil
@@ -426,8 +464,11 @@ func (*Bucket) DeleteBucket(key []byte) error
 Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
 function to write a consistent view of the database to a writer. If you call
 this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes. It will also use `O_DIRECT` when available
-to prevent page cache trashing.
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
 
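As a concrete illustration of the paragraph above, a hot backup to a local file can be done from a read-only transaction; the "backup.db" path is only an example:

```go
err := db.View(func(tx *bolt.Tx) error {
	f, err := os.Create("backup.db")
	if err != nil {
		return err
	}
	defer f.Close()

	// WriteTo streams a consistent snapshot without blocking other readers or writers.
	_, err = tx.WriteTo(f)
	return err
})
```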
 One common use case is to backup over HTTP so you can use tools like `cURL` to
 do database backups:
@@ -509,6 +550,84 @@ if err != nil {
 }
 ```
 
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS require extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return &BoltDB{db}
+}
+
+type BoltDB struct {
+	db *bolt.DB
+	...
+}
+
+func (b *BoltDB) Path() string {
+	return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+	b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language (both platforms now sync
+their local storage to the cloud; these snippets disable that functionality for the
+database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else{
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+                                                          NSUserDomainMask,
+                                                          YES) objectAtIndex:0];
+	GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+	[self addSkipBackupAttributeToItemAtPath:demo.path];
+	//Some DB Logic would go here
+	[demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+    NSURL* URL= [NSURL fileURLWithPath: filePathString];
+    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+    
+    NSError *error = nil;
+    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
+    if(!success){
+        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+    }
+    return success;
+}
+
+```
 
 ## Resources
 
@@ -544,7 +663,7 @@ they are libraries bundled into the application, however, their underlying
 structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
 random writes by using a write ahead log and multi-tiered, sorted files called
 SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade offs.
+have trade-offs.
 
 If you require a high random write throughput (>10,000 w/sec) or you need to use
 spinning disks then LevelDB could be a good choice. If your application is
@@ -580,9 +699,8 @@ It's important to pick the right tool for the job and Bolt is no exception.
 Here are a few things to note when evaluating and using Bolt:
 
 * Bolt is good for read intensive workloads. Sequential write performance is
-  also fast but random writes can be slow. You can add a write-ahead log or
-  [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt
-  to mitigate this issue.
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
 
 * Bolt uses a B+tree internally so there can be a lot of random page access.
   SSDs provide a significant performance boost over spinning disks.
@@ -618,7 +736,7 @@ Here are a few things to note when evaluating and using Bolt:
 
 * The data structures in the Bolt database are memory mapped so the data file
   will be endian specific. This means that you cannot copy a Bolt file from a
-  little endian machine to a big endian machine and have it work. For most 
+  little endian machine to a big endian machine and have it work. For most
   users this is not a concern since most modern CPUs are little endian.
 
 * Because of the way pages are laid out on disk, Bolt cannot truncate data files
@@ -633,6 +751,56 @@ Here are a few things to note when evaluating and using Bolt:
 [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
 
 
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where they key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
 ## Other Projects Using Bolt
 
 Below is a list of public, open source projects that use Bolt:
@@ -643,21 +811,21 @@ Below is a list of public, open source projects that use Bolt:
 * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
 * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
 * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
 * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
 * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
 * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
 * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
 * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
 * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
 * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
 * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
 * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
 * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
 * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
-* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
 * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
 * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
 * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
@@ -667,5 +835,10 @@ Below is a list of public, open source projects that use Bolt:
   backed by boltdb.
 * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
   simple tx and key scans.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
 
 If you are using Bolt in a project please send a pull request to add it to the list.

+ 18 - 0
vendor/src/github.com/boltdb/bolt/appveyor.yml

@@ -0,0 +1,18 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\boltdb\bolt
+
+environment:
+  GOPATH: c:\gopath
+
+install:
+  - echo %PATH%
+  - echo %GOPATH%
+  - go version
+  - go env
+  - go get -v -t ./...
+
+build_script:
+  - go test -v ./...

+ 0 - 138
vendor/src/github.com/boltdb/bolt/batch.go

@@ -1,138 +0,0 @@
-package bolt
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-)
-
-// Batch calls fn as part of a batch. It behaves similar to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in
-// caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
-	errCh := make(chan error, 1)
-
-	db.batchMu.Lock()
-	if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
-		// There is no existing batch, or the existing batch is full; start a new one.
-		db.batch = &batch{
-			db: db,
-		}
-		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
-	}
-	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
-	if len(db.batch.calls) >= db.MaxBatchSize {
-		// wake up batch, it's ready to run
-		go db.batch.trigger()
-	}
-	db.batchMu.Unlock()
-
-	err := <-errCh
-	if err == trySolo {
-		err = db.Update(fn)
-	}
-	return err
-}
-
-type call struct {
-	fn  func(*Tx) error
-	err chan<- error
-}
-
-type batch struct {
-	db    *DB
-	timer *time.Timer
-	start sync.Once
-	calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
-	b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
-	b.db.batchMu.Lock()
-	b.timer.Stop()
-	// Make sure no new work is added to this batch, but don't break
-	// other batches.
-	if b.db.batch == b {
-		b.db.batch = nil
-	}
-	b.db.batchMu.Unlock()
-
-retry:
-	for len(b.calls) > 0 {
-		var failIdx = -1
-		err := b.db.Update(func(tx *Tx) error {
-			for i, c := range b.calls {
-				if err := safelyCall(c.fn, tx); err != nil {
-					failIdx = i
-					return err
-				}
-			}
-			return nil
-		})
-
-		if failIdx >= 0 {
-			// take the failing transaction out of the batch. it's
-			// safe to shorten b.calls here because db.batch no longer
-			// points to us, and we hold the mutex anyway.
-			c := b.calls[failIdx]
-			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
-			// tell the submitter re-run it solo, continue with the rest of the batch
-			c.err <- trySolo
-			continue retry
-		}
-
-		// pass success, or bolt internal errors, to all callers
-		for _, c := range b.calls {
-			if c.err != nil {
-				c.err <- err
-			}
-		}
-		break retry
-	}
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
-var trySolo = errors.New("batch function returned an error and should be re-run solo")
-
-type panicked struct {
-	reason interface{}
-}
-
-func (p panicked) Error() string {
-	if err, ok := p.reason.(error); ok {
-		return err.Error()
-	}
-	return fmt.Sprintf("panic: %v", p.reason)
-}
-
-func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
-	defer func() {
-		if p := recover(); p != nil {
-			err = panicked{p}
-		}
-	}()
-	return fn(tx)
-}

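`batch.go` is deleted here because `Batch()` moves into `db.go` later in this diff; the calling convention is unchanged. A minimal usage sketch, assuming `db` is an open `*bolt.DB` shared by many goroutines; note that the function must be idempotent because it may be retried:

```go
err := db.Batch(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("events"))
	if err != nil {
		return err
	}
	// Idempotent write: re-running it yields the same state.
	return b.Put([]byte("k1"), []byte("v1"))
})
```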
+ 0 - 2
vendor/src/github.com/boltdb/bolt/bolt_linux.go

@@ -4,8 +4,6 @@ import (
 	"syscall"
 	"syscall"
 )
 )
 
 
-var odirect = syscall.O_DIRECT
-
 // fdatasync flushes written data to a file descriptor.
 // fdatasync flushes written data to a file descriptor.
 func fdatasync(db *DB) error {
 func fdatasync(db *DB) error {
 	return syscall.Fdatasync(int(db.file.Fd()))
 	return syscall.Fdatasync(int(db.file.Fd()))

+ 0 - 2
vendor/src/github.com/boltdb/bolt/bolt_openbsd.go

@@ -11,8 +11,6 @@ const (
 	msInvalidate             // invalidate cached data
 )
 
-var odirect int
-
 func msync(db *DB) error {
 	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
 	if errno != 0 {

+ 9 - 0
vendor/src/github.com/boltdb/bolt/bolt_ppc.go

@@ -0,0 +1,9 @@
+// +build ppc
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF

+ 9 - 0
vendor/src/github.com/boltdb/bolt/bolt_ppc64.go

@@ -0,0 +1,9 @@
+// +build ppc64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF

+ 5 - 16
vendor/src/github.com/boltdb/bolt/bolt_unix.go

@@ -11,7 +11,7 @@ import (
 )
 
 // flock acquires an advisory lock on a file descriptor.
-func flock(f *os.File, exclusive bool, timeout time.Duration) error {
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
 	for {
 		// If we're beyond our timeout then return an error.
@@ -27,7 +27,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
 		}
 
 		// Otherwise attempt to obtain an exclusive lock.
-		err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
+		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
 		if err == nil {
 			return nil
 		} else if err != syscall.EWOULDBLOCK {
@@ -40,25 +40,14 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
 }
 
 // funlock releases an advisory lock on a file descriptor.
-func funlock(f *os.File) error {
-	return syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
+func funlock(db *DB) error {
+	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
 }
 
 // mmap memory maps a DB's data file.
 func mmap(db *DB, sz int) error {
-	// Truncate and fsync to ensure file size metadata is flushed.
-	// https://github.com/boltdb/bolt/issues/284
-	if !db.NoGrowSync && !db.readOnly {
-		if err := db.file.Truncate(int64(sz)); err != nil {
-			return fmt.Errorf("file resize error: %s", err)
-		}
-		if err := db.file.Sync(); err != nil {
-			return fmt.Errorf("file sync error: %s", err)
-		}
-	}
-
 	// Map the data file to memory.
-	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
+	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
 	if err != nil {
 		return err
 	}

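The `|db.MmapFlags` bits above come from the new `Options.MmapFlags` field. A minimal sketch of opening a database with `MAP_POPULATE` for sequential read-ahead, which per the comment in `db.go` applies to Linux 2.6.23+ only (leave the flag unset elsewhere):

```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{
	Timeout:   1 * time.Second,
	MmapFlags: syscall.MAP_POPULATE, // pre-faults the mapping for fast full scans
})
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```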
+ 6 - 17
vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go

@@ -1,4 +1,3 @@
-
 package bolt
 
 import (
@@ -7,11 +6,12 @@ import (
 	"syscall"
 	"syscall"
 	"time"
 	"time"
 	"unsafe"
 	"unsafe"
+
 	"golang.org/x/sys/unix"
 	"golang.org/x/sys/unix"
 )
 )
 
 
 // flock acquires an advisory lock on a file descriptor.
 // flock acquires an advisory lock on a file descriptor.
-func flock(f *os.File, exclusive bool, timeout time.Duration) error {
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
 	var t time.Time
 	for {
 	for {
 		// If we're beyond our timeout then return an error.
 		// If we're beyond our timeout then return an error.
@@ -32,7 +32,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
 		} else {
 			lock.Type = syscall.F_RDLCK
 		}
-		err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
+		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
 		if err == nil {
 			return nil
 		} else if err != syscall.EAGAIN {
@@ -45,30 +45,19 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error {
 }
 
 // funlock releases an advisory lock on a file descriptor.
-func funlock(f *os.File) error {
+func funlock(db *DB) error {
 	var lock syscall.Flock_t
 	lock.Start = 0
 	lock.Len = 0
 	lock.Type = syscall.F_UNLCK
 	lock.Whence = 0
-	return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
+	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
 }
 
 // mmap memory maps a DB's data file.
 func mmap(db *DB, sz int) error {
-	// Truncate and fsync to ensure file size metadata is flushed.
-	// https://github.com/boltdb/bolt/issues/284
-	if !db.NoGrowSync && !db.readOnly {
-		if err := db.file.Truncate(int64(sz)); err != nil {
-			return fmt.Errorf("file resize error: %s", err)
-		}
-		if err := db.file.Sync(); err != nil {
-			return fmt.Errorf("file sync error: %s", err)
-		}
-	}
-
 	// Map the data file to memory.
-	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
+	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
 	if err != nil {
 		return err
 	}

+ 73 - 5
vendor/src/github.com/boltdb/bolt/bolt_windows.go

@@ -8,7 +8,39 @@ import (
 	"unsafe"
 	"unsafe"
 )
 )
 
 
-var odirect int
+// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
+var (
+	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
+	procLockFileEx   = modkernel32.NewProc("LockFileEx")
+	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+)
+
+const (
+	lockExt = ".lock"
+
+	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+	flagLockExclusive       = 2
+	flagLockFailImmediately = 1
+
+	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+	errLockViolation syscall.Errno = 0x21
+)
+
+func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+	if r == 0 {
+		return err
+	}
+	return nil
+}
+
+func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
+	if r == 0 {
+		return err
+	}
+	return nil
+}
 
 // fdatasync flushes written data to a file descriptor.
 func fdatasync(db *DB) error {
@@ -16,13 +48,49 @@ func fdatasync(db *DB) error {
 }
 
 // flock acquires an advisory lock on a file descriptor.
-func flock(f *os.File, _ bool, _ time.Duration) error {
-	return nil
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	// Create a separate lock file on windows because a process
+	// cannot share an exclusive lock on the same file. This is
+	// needed during Tx.WriteTo().
+	f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	db.lockfile = f
+
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+
+		var flag uint32 = flagLockFailImmediately
+		if exclusive {
+			flag |= flagLockExclusive
+		}
+
+		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
+		if err == nil {
+			return nil
+		} else if err != errLockViolation {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
 }
 
 // funlock releases an advisory lock on a file descriptor.
-func funlock(f *os.File) error {
-	return nil
+func funlock(db *DB) error {
+	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
+	db.lockfile.Close()
+	os.Remove(db.path+lockExt)
+	return err
 }
 
 // mmap memory maps a DB's data file.

+ 0 - 2
vendor/src/github.com/boltdb/bolt/boltsync_unix.go

@@ -2,8 +2,6 @@
 
 package bolt
 
-var odirect int
-
 // fdatasync flushes written data to a file descriptor.
 func fdatasync(db *DB) error {
 	return db.file.Sync()

+ 2 - 1
vendor/src/github.com/boltdb/bolt/bucket.go

@@ -11,7 +11,7 @@ const (
 	MaxKeySize = 32768
 
 	// MaxValueSize is the maximum length of a value, in bytes.
-	MaxValueSize = 4294967295
+	MaxValueSize = (1 << 31) - 2
 )
 
 const (
@@ -273,6 +273,7 @@ func (b *Bucket) Get(key []byte) []byte {
 
 // Put sets the value for a key in the bucket.
 // If the key exist then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
 // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
 func (b *Bucket) Put(key []byte, value []byte) error {
 	if b.tx.db == nil {

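A minimal sketch of the constraint documented in the new comment: the slice handed to `Put()` must stay valid (unmodified) until the transaction commits, so a reused buffer should be copied first. Bucket and key names here are illustrative:

```go
err := db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))

	buf := make([]byte, 8)
	for i := 0; i < 2; i++ {
		copy(buf, fmt.Sprintf("value-%d", i))
		// Copy before Put: buf is overwritten on the next iteration, but the
		// supplied slice must remain valid until the transaction commits.
		v := append([]byte(nil), buf...)
		if err := b.Put([]byte(fmt.Sprintf("k%d", i)), v); err != nil {
			return err
		}
	}
	return nil
})
```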
+ 35 - 19
vendor/src/github.com/boltdb/bolt/cursor.go

@@ -34,6 +34,13 @@ func (c *Cursor) First() (key []byte, value []byte) {
 	p, n := c.bucket.pageNode(c.bucket.root)
 	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
 	c.first()
+
+	// If we land on an empty page then move to the next value.
+	// https://github.com/boltdb/bolt/issues/450
+	if c.stack[len(c.stack)-1].count() == 0 {
+		c.next()
+	}
+
 	k, v, flags := c.keyValue()
 	if (flags & uint32(bucketLeafFlag)) != 0 {
 		return k, nil
@@ -209,28 +216,37 @@ func (c *Cursor) last() {
 // next moves to the next leaf element and returns the key and value.
 // If the cursor is at the last leaf element then it stays there and returns nil.
 func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
-	// Attempt to move over one element until we're successful.
-	// Move up the stack as we hit the end of each page in our stack.
-	var i int
-	for i = len(c.stack) - 1; i >= 0; i-- {
-		elem := &c.stack[i]
-		if elem.index < elem.count()-1 {
-			elem.index++
-			break
+	for {
+		// Attempt to move over one element until we're successful.
+		// Move up the stack as we hit the end of each page in our stack.
+		var i int
+		for i = len(c.stack) - 1; i >= 0; i-- {
+			elem := &c.stack[i]
+			if elem.index < elem.count()-1 {
+				elem.index++
+				break
+			}
 		}
-	}
 
-	// If we've hit the root page then stop and return. This will leave the
-	// cursor on the last element of the last page.
-	if i == -1 {
-		return nil, nil, 0
-	}
+		// If we've hit the root page then stop and return. This will leave the
+		// cursor on the last element of the last page.
+		if i == -1 {
+			return nil, nil, 0
+		}
 
-	// Otherwise start from where we left off in the stack and find the
-	// first element of the first leaf page.
-	c.stack = c.stack[:i+1]
-	c.first()
-	return c.keyValue()
+		// Otherwise start from where we left off in the stack and find the
+		// first element of the first leaf page.
+		c.stack = c.stack[:i+1]
+		c.first()
+
+		// If this is an empty page then restart and move back up the stack.
+		// https://github.com/boltdb/bolt/issues/450
+		if c.stack[len(c.stack)-1].count() == 0 {
+			continue
+		}
+
+		return c.keyValue()
+	}
 }
 
 // search recursively performs a binary search against a given page/node until it finds a given key.

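Both cursor hunks above guard against landing on an empty page (boltdb/bolt issue 450). A minimal sketch of the case they fix, assuming a bucket whose keys have all been deleted: with the fix, `First()` skips ahead to the next non-empty element or yields a `nil` key, so the loop body is never entered with a stale key:

```go
err := db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("Emptied")).Cursor()
	for k, _ := c.First(); k != nil; k, _ = c.Next() {
		fmt.Printf("unexpected key: %s\n", k) // unreachable once the bucket is empty
	}
	return nil
})
```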
+ 211 - 10
vendor/src/github.com/boltdb/bolt/db.go

@@ -1,8 +1,10 @@
 package bolt
 
 import (
+	"errors"
 	"fmt"
 	"hash/fnv"
+	"log"
 	"os"
 	"runtime"
 	"runtime/debug"
@@ -24,13 +26,14 @@ const magic uint32 = 0xED0CDAED
 // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
 // syncing changes to a file.  This is required as some operating systems,
 // such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronzied using the msync(2) syscall.
+// must be synchronized using the msync(2) syscall.
 const IgnoreNoSync = runtime.GOOS == "openbsd"
 
 // Default values if not set in a DB instance.
 const (
 	DefaultMaxBatchSize  int = 1000
 	DefaultMaxBatchDelay     = 10 * time.Millisecond
+	DefaultAllocSize         = 16 * 1024 * 1024
 )
 
 // DB represents a collection of buckets persisted to a file on disk.
@@ -63,6 +66,10 @@ type DB struct {
 	// https://github.com/boltdb/bolt/issues/284
 	NoGrowSync bool
 
+	// If you want to read the entire database fast, you can set MmapFlag to
+	// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+	MmapFlags int
+
 	// MaxBatchSize is the maximum size of a batch. Default value is
 	// copied from DefaultMaxBatchSize in Open.
 	//
@@ -79,11 +86,18 @@ type DB struct {
 	// Do not change concurrently with calls to Batch.
 	MaxBatchDelay time.Duration
 
+	// AllocSize is the amount of space allocated when the database
+	// needs to create new pages. This is done to amortize the cost
+	// of truncate() and fsync() when growing the data file.
+	AllocSize int
+
 	path     string
 	file     *os.File
+	lockfile *os.File // windows only
 	dataref  []byte // mmap'ed readonly, write throws SEGV
 	data     *[maxMapSize]byte
 	datasz   int
+	filesz   int // current on disk file size
 	meta0    *meta
 	meta1    *meta
 	pageSize int
@@ -136,10 +150,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 		options = DefaultOptions
 	}
 	db.NoGrowSync = options.NoGrowSync
+	db.MmapFlags = options.MmapFlags
 
 	// Set default values for later DB operations.
 	db.MaxBatchSize = DefaultMaxBatchSize
 	db.MaxBatchDelay = DefaultMaxBatchDelay
+	db.AllocSize = DefaultAllocSize
 
 	flag := os.O_RDWR
 	if options.ReadOnly {
@@ -162,7 +178,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 	// if !options.ReadOnly.
 	// The database file is locked using the shared lock (more than one process may
 	// hold a lock at the same time) otherwise (options.ReadOnly is set).
-	if err := flock(db.file, !db.readOnly, options.Timeout); err != nil {
+	if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
 		_ = db.close()
 		return nil, err
 	}
@@ -172,7 +188,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 
 	// Initialize the database if it doesn't exist.
 	if info, err := db.file.Stat(); err != nil {
-		return nil, fmt.Errorf("stat error: %s", err)
+		return nil, err
 	} else if info.Size() == 0 {
 		// Initialize new files with meta pages.
 		if err := db.init(); err != nil {
@@ -184,14 +200,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
 			m := db.pageInBuffer(buf[:], 0).meta()
 			if err := m.validate(); err != nil {
-				return nil, fmt.Errorf("meta0 error: %s", err)
+				return nil, err
 			}
 			db.pageSize = int(m.pageSize)
 		}
 	}
 
 	// Memory map the data file.
-	if err := db.mmap(0); err != nil {
+	if err := db.mmap(options.InitialMmapSize); err != nil {
 		_ = db.close()
 		return nil, err
 	}
@@ -248,10 +264,10 @@ func (db *DB) mmap(minsz int) error {
 
 	// Validate the meta pages.
 	if err := db.meta0.validate(); err != nil {
-		return fmt.Errorf("meta0 error: %s", err)
+		return err
 	}
 	if err := db.meta1.validate(); err != nil {
-		return fmt.Errorf("meta1 error: %s", err)
+		return err
 	}
 
 	return nil
@@ -266,7 +282,7 @@ func (db *DB) munmap() error {
 }
 
 // mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 1MB and doubles until it reaches 1GB.
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
 // Returns an error if the new mmap size is greater than the max allowed.
 func (db *DB) mmapSize(size int) (int, error) {
 	// Double the size from 32KB until 1GB.
@@ -364,6 +380,10 @@ func (db *DB) Close() error {
 }
 
 func (db *DB) close() error {
+	if !db.opened {
+		return nil
+	}
+	
 	db.opened = false
 
 	db.freelist = nil
@@ -382,7 +402,9 @@ func (db *DB) close() error {
 		// No need to unlock read-only file.
 		if !db.readOnly {
 			// Unlock the file.
-			_ = funlock(db.file)
+			if err := funlock(db); err != nil {
+				log.Printf("bolt.Close(): funlock error: %s", err)
+			}
 		}
 
 		// Close the file descriptor.
@@ -401,11 +423,15 @@ func (db *DB) close() error {
 // will cause the calls to block and be serialized until the current write
 // transaction finishes.
 //
-// Transactions should not be depedent on one another. Opening a read
+// Transactions should not be dependent on one another. Opening a read
 // transaction and a write transaction in the same goroutine can cause the
 // writer to deadlock because the database periodically needs to re-mmap itself
 // as it grows and it cannot do that while a read transaction is open.
 //
+// If a long running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potential blocking of write transaction.
+//
 // IMPORTANT: You must close read-only transactions after you are finished or
 // IMPORTANT: You must close read-only transactions after you are finished or
 // else the database will not reclaim old pages.
 // else the database will not reclaim old pages.
 func (db *DB) Begin(writable bool) (*Tx, error) {
 func (db *DB) Begin(writable bool) (*Tx, error) {
@@ -589,6 +615,136 @@ func (db *DB) View(fn func(*Tx) error) error {
 	return nil
 	return nil
 }
 }
 
 
+// Batch calls fn as part of a batch. It behaves similar to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen in
+// caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
+func (db *DB) Batch(fn func(*Tx) error) error {
+	errCh := make(chan error, 1)
+
+	db.batchMu.Lock()
+	if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
+		// There is no existing batch, or the existing batch is full; start a new one.
+		db.batch = &batch{
+			db: db,
+		}
+		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+	}
+	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
+	if len(db.batch.calls) >= db.MaxBatchSize {
+		// wake up batch, it's ready to run
+		go db.batch.trigger()
+	}
+	db.batchMu.Unlock()
+
+	err := <-errCh
+	if err == trySolo {
+		err = db.Update(fn)
+	}
+	return err
+}
+
+type call struct {
+	fn  func(*Tx) error
+	err chan<- error
+}
+
+type batch struct {
+	db    *DB
+	timer *time.Timer
+	start sync.Once
+	calls []call
+}
+
+// trigger runs the batch if it hasn't already been run.
+func (b *batch) trigger() {
+	b.start.Do(b.run)
+}
+
+// run performs the transactions in the batch and communicates results
+// back to DB.Batch.
+func (b *batch) run() {
+	b.db.batchMu.Lock()
+	b.timer.Stop()
+	// Make sure no new work is added to this batch, but don't break
+	// other batches.
+	if b.db.batch == b {
+		b.db.batch = nil
+	}
+	b.db.batchMu.Unlock()
+
+retry:
+	for len(b.calls) > 0 {
+		var failIdx = -1
+		err := b.db.Update(func(tx *Tx) error {
+			for i, c := range b.calls {
+				if err := safelyCall(c.fn, tx); err != nil {
+					failIdx = i
+					return err
+				}
+			}
+			return nil
+		})
+
+		if failIdx >= 0 {
+			// take the failing transaction out of the batch. it's
+			// safe to shorten b.calls here because db.batch no longer
+			// points to us, and we hold the mutex anyway.
+			c := b.calls[failIdx]
+			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+			// tell the submitter re-run it solo, continue with the rest of the batch
+			c.err <- trySolo
+			continue retry
+		}
+
+		// pass success, or bolt internal errors, to all callers
+		for _, c := range b.calls {
+			if c.err != nil {
+				c.err <- err
+			}
+		}
+		break retry
+	}
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+	reason interface{}
+}
+
+func (p panicked) Error() string {
+	if err, ok := p.reason.(error); ok {
+		return err.Error()
+	}
+	return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+	defer func() {
+		if p := recover(); p != nil {
+			err = panicked{p}
+		}
+	}()
+	return fn(tx)
+}
+
 // Sync executes fdatasync() against the database file handle.
 //
 // This is not necessary under normal operation, however, if you use NoSync
@@ -655,6 +811,38 @@ func (db *DB) allocate(count int) (*page, error) {
 	return p, nil
 }

+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+	// Ignore if the new size is less than available file size.
+	if sz <= db.filesz {
+		return nil
+	}
+
+	// If the data is smaller than the alloc size then only allocate what's needed.
+	// Once it goes over the allocation size then allocate in chunks.
+	if db.datasz < db.AllocSize {
+		sz = db.datasz
+	} else {
+		sz += db.AllocSize
+	}
+
+	// Truncate and fsync to ensure file size metadata is flushed.
+	// https://github.com/boltdb/bolt/issues/284
+	if !db.NoGrowSync && !db.readOnly {
+		if runtime.GOOS != "windows" {
+			if err := db.file.Truncate(int64(sz)); err != nil {
+				return fmt.Errorf("file resize error: %s", err)
+			}
+		}
+		if err := db.file.Sync(); err != nil {
+			return fmt.Errorf("file sync error: %s", err)
+		}
+	}
+
+	db.filesz = sz
+	return nil
+}
+
 func (db *DB) IsReadOnly() bool {
 	return db.readOnly
 }
@@ -672,6 +860,19 @@ type Options struct {
 	// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
 	// grab a shared lock (UNIX).
 	ReadOnly bool
+
+	// Sets the DB.MmapFlags flag before memory mapping the file.
+	MmapFlags int
+
+	// InitialMmapSize is the initial mmap size of the database
+	// in bytes. Read transactions won't block write transaction
+	// if the InitialMmapSize is large enough to hold database mmap
+	// size. (See DB.Begin for more information)
+	//
+	// If <=0, the initial map size is 0.
+	// If initialMmapSize is smaller than the previous database size,
+	// it takes no effect.
+	InitialMmapSize int
 }

 // DefaultOptions represent the options used if nil options are passed into Open().

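The two user-visible additions in this db.go diff are `DB.Batch` and `Options.InitialMmapSize`. The following sketch is an editor's illustration, not part of the diff: it shows how a consumer of the vendored package might exercise both, with a hypothetical file name, bucket name, and keys.

```go
package main

import (
	"log"
	"sync"

	"github.com/boltdb/bolt"
)

func main() {
	// Pre-size the mmap (1GB here) so long-running read transactions do not
	// force writers to wait for a re-mmap, per the DB.Begin comment above.
	db, err := bolt.Open("test.db", 0600, &bolt.Options{InitialMmapSize: 1 << 30})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Batch coalesces concurrent callers into a single write transaction.
	// The batched function may run more than once, so it must be idempotent.
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i byte) {
			defer wg.Done()
			if err := db.Batch(func(tx *bolt.Tx) error {
				b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
				if err != nil {
					return err
				}
				return b.Put([]byte{i}, []byte("x"))
			}); err != nil {
				log.Println(err)
			}
		}(byte(i))
	}
	wg.Wait()
}
```

The idempotence requirement follows from the implementation above: when one call in a batch fails, it is plucked out and re-run solo via the `trySolo` sentinel, so the remaining functions in the batch execute again in a fresh transaction.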
+ 0 - 37
vendor/src/github.com/boltdb/bolt/node.go

@@ -463,43 +463,6 @@ func (n *node) rebalance() {
 		target = n.prevSibling()
 	}

-	// If target node has extra nodes then just move one over.
-	if target.numChildren() > target.minKeys() {
-		if useNextSibling {
-			// Reparent and move node.
-			if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok {
-				child.parent.removeChild(child)
-				child.parent = n
-				child.parent.children = append(child.parent.children, child)
-			}
-			n.inodes = append(n.inodes, target.inodes[0])
-			target.inodes = target.inodes[1:]
-
-			// Update target key on parent.
-			target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
-			target.key = target.inodes[0].key
-			_assert(len(target.key) > 0, "rebalance(1): zero-length node key")
-		} else {
-			// Reparent and move node.
-			if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
-				child.parent.removeChild(child)
-				child.parent = n
-				child.parent.children = append(child.parent.children, child)
-			}
-			n.inodes = append(n.inodes, inode{})
-			copy(n.inodes[1:], n.inodes)
-			n.inodes[0] = target.inodes[len(target.inodes)-1]
-			target.inodes = target.inodes[:len(target.inodes)-1]
-		}
-
-		// Update parent key for node.
-		n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0)
-		n.key = n.inodes[0].key
-		_assert(len(n.key) > 0, "rebalance(2): zero-length node key")
-
-		return
-	}
-
 	// If both this node and the target node are too small then merge them.
 	if useNextSibling {
 		// Reparent all child nodes being moved.

+ 64 - 18
vendor/src/github.com/boltdb/bolt/tx.go

@@ -5,6 +5,7 @@ import (
 	"io"
 	"io"
 	"os"
 	"os"
 	"sort"
 	"sort"
+	"strings"
 	"time"
 	"time"
 	"unsafe"
 	"unsafe"
 )
 )
@@ -29,6 +30,14 @@ type Tx struct {
 	pages          map[pgid]*page
 	stats          TxStats
 	commitHandlers []func()
+
+	// WriteFlag specifies the flag for write-related methods like WriteTo().
+	// Tx opens the database file with the specified flag to copy the data.
+	//
+	// By default, the flag is unset, which works well for mostly in-memory
+	// workloads. For databases that are much larger than available RAM,
+	// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
+	WriteFlag int
 }

 // init initializes the transaction.
@@ -160,6 +169,8 @@ func (tx *Tx) Commit() error {
 	// Free the old root bucket.
 	tx.meta.root.root = tx.root.root

+	opgid := tx.meta.pgid
+
 	// Free the freelist and allocate new pages for it. This will overestimate
 	// the size of the freelist but not underestimate the size (which would be bad).
 	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
@@ -174,6 +185,14 @@ func (tx *Tx) Commit() error {
 	}
 	tx.meta.freelist = p.id

+	// If the high water mark has moved up then attempt to grow the database.
+	if tx.meta.pgid > opgid {
+		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+			tx.rollback()
+			return err
+		}
+	}
+
 	// Write dirty pages to disk.
 	startTime = time.Now()
 	if err := tx.write(); err != nil {
@@ -184,8 +203,17 @@ func (tx *Tx) Commit() error {
 	// If strict mode is enabled then perform a consistency check.
 	// Only the first consistency error is reported in the panic.
 	if tx.db.StrictMode {
-		if err, ok := <-tx.Check(); ok {
-			panic("check fail: " + err.Error())
+		ch := tx.Check()
+		var errs []string
+		for {
+			err, ok := <-ch
+			if !ok {
+				break
+			}
+			errs = append(errs, err.Error())
+		}
+		if len(errs) > 0 {
+			panic("check fail: " + strings.Join(errs, "\n"))
 		}
 	}

@@ -263,7 +291,7 @@ func (tx *Tx) close() {
 }

 // Copy writes the entire database to a writer.
-// This function exists for backwards compatibility. Use WriteTo() in
+// This function exists for backwards compatibility. Use WriteTo() instead.
 func (tx *Tx) Copy(w io.Writer) error {
 	_, err := tx.WriteTo(w)
 	return err
@@ -272,29 +300,47 @@ func (tx *Tx) Copy(w io.Writer) error {
 // WriteTo writes the entire database to a writer.
 // If err == nil then exactly tx.Size() bytes will be written into the writer.
 func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
-	// Attempt to open reader directly.
-	var f *os.File
-	if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil {
-		// Fallback to a regular open if that doesn't work.
-		if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
-			return 0, err
-		}
+	// Attempt to open reader with WriteFlag
+	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+	if err != nil {
+		return 0, err
 	}
+	defer func() { _ = f.Close() }()

-	// Copy the meta pages.
-	tx.db.metalock.Lock()
-	n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
-	tx.db.metalock.Unlock()
+	// Generate a meta page. We use the same page data for both meta pages.
+	buf := make([]byte, tx.db.pageSize)
+	page := (*page)(unsafe.Pointer(&buf[0]))
+	page.flags = metaPageFlag
+	*page.meta() = *tx.meta
+
+	// Write meta 0.
+	page.id = 0
+	page.meta().checksum = page.meta().sum64()
+	nn, err := w.Write(buf)
+	n += int64(nn)
 	if err != nil {
-		_ = f.Close()
-		return n, fmt.Errorf("meta copy: %s", err)
+		return n, fmt.Errorf("meta 0 copy: %s", err)
+	}
+
+	// Write meta 1 with a lower transaction id.
+	page.id = 1
+	page.meta().txid -= 1
+	page.meta().checksum = page.meta().sum64()
+	nn, err = w.Write(buf)
+	n += int64(nn)
+	if err != nil {
+		return n, fmt.Errorf("meta 1 copy: %s", err)
+	}
+
+	// Move past the meta pages in the file.
+	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
+		return n, fmt.Errorf("seek: %s", err)
 	}

 	// Copy data pages.
 	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
 	n += wn
 	if err != nil {
-		_ = f.Close()
 		return n, err
 	}

@@ -501,7 +547,7 @@ func (tx *Tx) writeMeta() error {
 }

 // page returns a reference to the page with a given id.
-// If page has been written to then a temporary bufferred page is returned.
+// If page has been written to then a temporary buffered page is returned.
 func (tx *Tx) page(id pgid) *page {
 	// Check the dirty pages first.
 	if tx.pages != nil {

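This rewrite makes `Tx.WriteTo` synthesize both meta pages from the in-memory transaction state instead of copying them from disk under `db.metalock`, and the new `WriteFlag` field lets callers opt into `syscall.O_DIRECT` reads of the data pages. A hedged sketch of a hot-backup HTTP handler built on the new API follows (editor's illustration, not part of the diff; the handler name and headers are hypothetical):

```go
package main

import (
	"net/http"
	"strconv"
	"syscall"

	"github.com/boltdb/bolt"
)

// backupHandler streams a consistent snapshot of the database. Because
// WriteTo runs inside a read transaction, writers keep making progress.
func backupHandler(db *bolt.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		err := db.View(func(tx *bolt.Tx) error {
			// For databases much larger than RAM, O_DIRECT (Linux-only)
			// avoids trashing the page cache while copying data pages.
			tx.WriteFlag = syscall.O_DIRECT
			w.Header().Set("Content-Type", "application/octet-stream")
			w.Header().Set("Content-Length", strconv.FormatInt(tx.Size(), 10))
			_, err := tx.WriteTo(w)
			return err
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
```

Note that meta page 1 is written with a decremented transaction ID, so on restore the recovery logic prefers meta page 0, which matches the transaction being copied.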
+ 122 - 120
vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go

@@ -89,13 +89,14 @@ func (*UpdateProcessResponse) ProtoMessage()               {}
 func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

 type CreateContainerRequest struct {
-	Id         string   `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	BundlePath string   `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Checkpoint string   `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`
-	Stdin      string   `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout     string   `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr     string   `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
-	Labels     []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`
+	Id          string   `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+	BundlePath  string   `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
+	Checkpoint  string   `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Stdin       string   `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`
+	Stdout      string   `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
+	Stderr      string   `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
+	Labels      []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`
+	NoPivotRoot bool     `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"`
 }

 func (m *CreateContainerRequest) Reset()                    { *m = CreateContainerRequest{} }
@@ -1181,117 +1182,118 @@ var _API_serviceDesc = grpc.ServiceDesc{
 }

 var fileDescriptor0 = []byte{
-	// 1791 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0xd9, 0x8e, 0xdb, 0x54,
-	0x18, 0x9e, 0xec, 0x93, 0x3f, 0x71, 0x66, 0xc6, 0xb3, 0x65, 0xd2, 0x6d, 0x70, 0x0b, 0x1d, 0xa1,
-	0x6a, 0x54, 0x52, 0x96, 0x52, 0x24, 0x44, 0x99, 0x56, 0x14, 0x34, 0x2d, 0xe9, 0x2c, 0x20, 0xae,
-	0x22, 0xc7, 0x3e, 0x4d, 0xcc, 0x38, 0xb6, 0xf1, 0x39, 0x9e, 0xe5, 0x19, 0x10, 0xb7, 0xbc, 0x04,
-	0x12, 0xe2, 0x8a, 0x07, 0xe0, 0x59, 0xb8, 0xe2, 0x29, 0xf8, 0xcf, 0x62, 0xc7, 0x76, 0x96, 0x41,
-	0x42, 0x5c, 0x70, 0x13, 0xe5, 0x9c, 0xf3, 0xaf, 0xdf, 0xbf, 0x9d, 0x63, 0xa8, 0x9b, 0x81, 0xb3,
-	0x1f, 0x84, 0x3e, 0xf3, 0xf5, 0x0a, 0xbb, 0x0a, 0x08, 0x35, 0x06, 0xb0, 0x71, 0x1a, 0xd8, 0x26,
-	0x23, 0xbd, 0xd0, 0xb7, 0x08, 0xa5, 0x47, 0xe4, 0x87, 0x88, 0x50, 0xa6, 0x03, 0x14, 0x1d, 0xbb,
-	0x5d, 0xd8, 0x2d, 0xec, 0xd5, 0xf5, 0x06, 0x94, 0x02, 0x5c, 0x14, 0xc5, 0x02, 0x4f, 0x2c, 0xd7,
-	0xa7, 0xe4, 0x98, 0xd9, 0x8e, 0xd7, 0x2e, 0xe1, 0xde, 0xb2, 0xae, 0x41, 0xe5, 0xc2, 0xb1, 0xd9,
-	0xa8, 0x5d, 0xc6, 0xa5, 0xa6, 0xb7, 0xa0, 0x3a, 0x22, 0xce, 0x70, 0xc4, 0xda, 0x15, 0xbe, 0x36,
-	0xb6, 0x61, 0x33, 0xa7, 0x83, 0x06, 0xbe, 0x47, 0x89, 0xf1, 0x63, 0x01, 0xb6, 0x0e, 0x42, 0x82,
-	0x27, 0x07, 0xbe, 0xc7, 0x4c, 0xc7, 0x23, 0xe1, 0x2c, 0xfd, 0xb8, 0x18, 0x44, 0x9e, 0xed, 0x92,
-	0x9e, 0x89, 0x3a, 0x26, 0x66, 0x8c, 0x88, 0x75, 0x16, 0xf8, 0x8e, 0xc7, 0x84, 0x19, 0x75, 0x6e,
-	0x06, 0x15, 0x56, 0x95, 0xc5, 0x12, 0xcd, 0xc0, 0xa5, 0x1f, 0x49, 0x33, 0xe2, 0x35, 0x09, 0xc3,
-	0x76, 0x35, 0x5e, 0xbb, 0xe6, 0x80, 0xb8, 0xb4, 0x5d, 0xdb, 0x2d, 0xed, 0xd5, 0x8d, 0x4f, 0x61,
-	0x7b, 0xca, 0x18, 0x69, 0xa8, 0x7e, 0x17, 0xea, 0x56, 0xbc, 0x29, 0x8c, 0x6a, 0x74, 0x57, 0xf7,
-	0x05, 0x80, 0xfb, 0x09, 0xb1, 0xf1, 0x18, 0xb4, 0x63, 0x67, 0xe8, 0x99, 0xee, 0xb5, 0x18, 0x72,
-	0x4b, 0x04, 0xa5, 0x30, 0x5c, 0x33, 0x56, 0xa1, 0x15, 0x73, 0x2a, 0x64, 0x7e, 0x2d, 0xc2, 0xda,
-	0x53, 0xdb, 0x5e, 0x10, 0x94, 0x55, 0x58, 0x66, 0x24, 0x1c, 0x3b, 0x5c, 0x4a, 0x51, 0x44, 0x61,
-	0x07, 0xca, 0x11, 0x45, 0xfb, 0x4a, 0xc2, 0xbe, 0x86, 0xb2, 0xef, 0x14, 0xb7, 0xf4, 0x26, 0x94,
-	0xcd, 0x70, 0x48, 0x11, 0x98, 0x92, 0xb4, 0x85, 0x78, 0xe7, 0x88, 0x8a, 0x5a, 0x58, 0x17, 0xb6,
-	0x82, 0x44, 0x59, 0x59, 0xcb, 0xc2, 0xb9, 0x9c, 0x83, 0xb3, 0x9e, 0x83, 0x13, 0xc4, 0x7a, 0x03,
-	0x9a, 0x96, 0x19, 0x98, 0x03, 0xc7, 0x75, 0x98, 0x43, 0x68, 0xbb, 0x21, 0xc4, 0x6f, 0xc3, 0x8a,
-	0x19, 0x04, 0x66, 0x38, 0xf6, 0x43, 0x74, 0xe6, 0x8d, 0xe3, 0x92, 0x76, 0x33, 0x26, 0xa7, 0xc4,
-	0x75, 0xbc, 0xe8, 0xf2, 0x90, 0x07, 0xa1, 0xad, 0x89, 0x5d, 0x24, 0xf7, 0xfc, 0x57, 0xe4, 0xa2,
-	0x17, 0x3a, 0xe7, 0x48, 0x3b, 0x44, 0x39, 0x2d, 0xe1, 0xdc, 0x6d, 0xa8, 0x85, 0xae, 0x33, 0x76,
-	0x18, 0x6d, 0xaf, 0xa0, 0xe0, 0x46, 0x57, 0x53, 0xfe, 0x1d, 0x89, 0x5d, 0xa3, 0x0b, 0x55, 0xf9,
-	0x8f, 0xfb, 0xca, 0x4f, 0x14, 0x4c, 0xb8, 0xa2, 0xfe, 0x1b, 0x26, 0x20, 0x2a, 0xf3, 0xd5, 0xc8,
-	0x0c, 0x6d, 0x01, 0x51, 0x19, 0x03, 0x56, 0x16, 0xe8, 0xa0, 0xd7, 0x91, 0xc2, 0x55, 0xe3, 0x8b,
-	0xa1, 0x0a, 0x94, 0xa6, 0x6f, 0x41, 0xcb, 0xb4, 0x6d, 0xf4, 0xc7, 0x47, 0x98, 0xbf, 0x70, 0x6c,
-	0x8a, 0x9c, 0x25, 0x0c, 0xd8, 0x06, 0xe8, 0xe9, 0xe8, 0xa8, 0xa0, 0x1d, 0x26, 0x09, 0x94, 0x64,
-	0xe6, 0xac, 0xc8, 0xbd, 0x9d, 0x49, 0xdd, 0xa2, 0x88, 0xd6, 0x5a, 0x9c, 0x4d, 0xc9, 0x81, 0xd1,
-	0x81, 0xf6, 0xb4, 0x34, 0xa5, 0xe9, 0x11, 0x6c, 0x3f, 0x23, 0x2e, 0xb9, 0x4e, 0x13, 0xba, 0xeb,
-	0x99, 0x63, 0x22, 0xb3, 0x8e, 0x0b, 0x9c, 0x66, 0x52, 0x02, 0xef, 0xc2, 0xe6, 0xa1, 0x43, 0xd9,
-	0x42, 0x71, 0xc6, 0x77, 0x00, 0x13, 0x82, 0x44, 0x78, 0xa2, 0x8a, 0x5c, 0x3a, 0x4c, 0xa5, 0x22,
-	0x82, 0xc8, 0xac, 0x40, 0x75, 0x87, 0x75, 0x68, 0x44, 0x9e, 0x73, 0x79, 0xec, 0x5b, 0x67, 0x84,
-	0x51, 0x51, 0x9c, 0xa2, 0x65, 0xd0, 0x11, 0x71, 0x5d, 0x51, 0x9b, 0xcb, 0xc6, 0x67, 0xb0, 0x95,
-	0xd7, 0xaf, 0x4a, 0xef, 0x1d, 0x68, 0x4c, 0xd0, 0xa2, 0xa8, 0xad, 0x34, 0x0f, 0xae, 0xe6, 0x31,
-	0x43, 0xb4, 0x66, 0x19, 0xbe, 0x0b, 0xad, 0xa4, 0x4c, 0x05, 0x91, 0x4c, 0x5e, 0x93, 0x45, 0x54,
-	0x51, 0xfc, 0x52, 0x84, 0x9a, 0x0a, 0x67, 0x5c, 0x04, 0xff, 0x61, 0x99, 0xad, 0x41, 0x9d, 0x5e,
-	0x51, 0x46, 0xc6, 0x3d, 0x55, 0x6c, 0xda, 0xff, 0xab, 0xd8, 0x7e, 0x2a, 0x40, 0x3d, 0x01, 0xf4,
-	0xda, 0x56, 0xfd, 0x16, 0xd4, 0x03, 0x09, 0x2d, 0x91, 0xf5, 0xd3, 0xe8, 0xb6, 0x94, 0xbc, 0x18,
-	0xf2, 0x49, 0x38, 0xca, 0xb9, 0xd6, 0x2c, 0xd1, 0x43, 0x60, 0x03, 0x5e, 0x7d, 0x55, 0x5e, 0x7d,
-	0xfa, 0x0a, 0x9a, 0x17, 0x79, 0xcc, 0xc1, 0xe4, 0x13, 0x9d, 0xca, 0xb8, 0x0f, 0xb5, 0x97, 0xa6,
-	0x35, 0x42, 0x6b, 0x38, 0xa5, 0x15, 0xa8, 0xb0, 0x8a, 0x49, 0x34, 0x26, 0x88, 0xc6, 0x95, 0xac,
-	0x7f, 0xe3, 0x1b, 0x6c, 0xd1, 0x32, 0x49, 0x54, 0x76, 0xdd, 0xc3, 0x5a, 0x8c, 0x1d, 0x89, 0x93,
-	0x6b, 0xaa, 0xb3, 0xeb, 0x77, 0xa0, 0x36, 0x96, 0xf2, 0x55, 0xb9, 0xc6, 0xf6, 0x2b, 0xad, 0xc6,
-	0x19, 0x6c, 0xc9, 0x09, 0xb7, 0x70, 0x8e, 0x4d, 0xcd, 0x00, 0xe9, 0xb2, 0x1c, 0x5e, 0x7b, 0x50,
-	0x0f, 0x09, 0xf5, 0xa3, 0x10, 0x01, 0x11, 0x28, 0x34, 0xba, 0x9b, 0x71, 0x6e, 0x09, 0xd1, 0x47,
-	0xea, 0xd4, 0xf8, 0xb3, 0x00, 0xad, 0xec, 0x16, 0x2f, 0xb1, 0x81, 0x7b, 0xe6, 0xf8, 0xdf, 0xca,
-	0xb1, 0x2b, 0x9d, 0xc7, 0x2c, 0x43, 0x28, 0x8e, 0xb1, 0xe1, 0xa1, 0xc4, 0x62, 0x6a, 0xab, 0x47,
-	0x42, 0xc7, 0x97, 0x4d, 0x50, 0xe3, 0x09, 0x8e, 0x5b, 0xaf, 0x23, 0x9f, 0x99, 0x6a, 0x7c, 0xf3,
-	0xd1, 0x8a, 0x10, 0x12, 0x76, 0xc0, 0x81, 0xac, 0x24, 0xe3, 0x56, 0xec, 0xbd, 0x24, 0x63, 0xaa,
-	0xb2, 0x18, 0x95, 0x4a, 0x70, 0x0f, 0x79, 0x52, 0xa8, 0x3c, 0x46, 0x42, 0xb9, 0x79, 0x7c, 0x61,
-	0x06, 0x22, 0x99, 0x35, 0xac, 0x98, 0x35, 0xb9, 0x87, 0xf6, 0x92, 0xf0, 0xdc, 0xe4, 0xed, 0x54,
-	0xe4, 0xb5, 0x38, 0x3a, 0x23, 0xa1, 0x47, 0xdc, 0x97, 0x29, 0x49, 0x20, 0x86, 0xe2, 0x0e, 0x6c,
-	0x4f, 0x61, 0xaa, 0xba, 0x95, 0x01, 0xda, 0xf3, 0x73, 0x82, 0xed, 0x20, 0x46, 0x19, 0xfd, 0xe2,
-	0xe9, 0x80, 0x80, 0x8e, 0x03, 0xe1, 0x7d, 0xd9, 0x78, 0x0d, 0x15, 0x41, 0x93, 0x9b, 0x07, 0x32,
-	0x1e, 0xb3, 0x42, 0xa0, 0xc5, 0xf1, 0x29, 0xc7, 0x35, 0x3a, 0x11, 0x59, 0x11, 0x22, 0x7f, 0x2f,
-	0x40, 0xf3, 0x15, 0x61, 0x17, 0x7e, 0x78, 0xc6, 0xb3, 0x88, 0xe6, 0x5a, 0x20, 0x22, 0x19, 0x5e,
-	0xf6, 0x07, 0x57, 0x4c, 0xc1, 0x5d, 0xe6, 0x60, 0xe0, 0x4e, 0xcf, 0x94, 0x8d, 0x4f, 0x0c, 0x1d,
-	0x2e, 0xf7, 0xe8, 0xb2, 0x8f, 0x95, 0xec, 0x87, 0x32, 0xce, 0x82, 0x0c, 0xb7, 0xec, 0xd0, 0x0f,
-	0x02, 0x62, 0x4b, 0x5d, 0x5c, 0xd8, 0x49, 0x2c, 0xac, 0x1a, 0x53, 0xe1, 0x4e, 0xa0, 0x84, 0xd5,
-	0x62, 0x61, 0x27, 0x89, 0xb0, 0xe5, 0x14, 0x59, 0x2c, 0xac, 0x2e, 0x0c, 0x1f, 0xc3, 0x32, 0xc6,
-	0xf2, 0x94, 0x9a, 0x43, 0x91, 0x2a, 0x0c, 0x63, 0xed, 0xf6, 0x23, 0xbe, 0x94, 0x60, 0xf1, 0xfe,
-	0x10, 0x90, 0x10, 0x23, 0xac, 0x76, 0x8b, 0x58, 0x08, 0x65, 0xfd, 0x06, 0xac, 0x8b, 0x65, 0xdf,
-	0xf1, 0xfa, 0x32, 0x4a, 0x63, 0xdf, 0x26, 0xca, 0x0f, 0x8c, 0x5c, 0x72, 0xc8, 0xfb, 0xa1, 0x38,
-	0x12, 0xfe, 0x18, 0x27, 0xd0, 0x3a, 0x19, 0xe1, 0x25, 0x93, 0x61, 0xc7, 0x19, 0x3e, 0x33, 0x99,
-	0xc9, 0x2b, 0x36, 0x10, 0x49, 0x47, 0x95, 0x42, 0xe4, 0x66, 0x92, 0x84, 0xd8, 0xfd, 0xf8, 0x48,
-	0x82, 0x86, 0x33, 0x77, 0x72, 0x24, 0x8a, 0x5c, 0x4e, 0x6b, 0x26, 0x9c, 0x90, 0xc0, 0x1b, 0x22,
-	0x8f, 0x53, 0x2e, 0x34, 0xba, 0x2b, 0x71, 0xd5, 0xc6, 0x8e, 0xee, 0xc3, 0x0a, 0x4b, 0xac, 0xe8,
-	0x63, 0x22, 0x99, 0xaa, 0x78, 0xe3, 0xb2, 0xca, 0xd9, 0xc8, 0x7b, 0xa4, 0x68, 0xca, 0x4a, 0xac,
-	0xd4, 0x7a, 0x13, 0xea, 0xd8, 0xa4, 0xa9, 0x54, 0x8b, 0x6e, 0x58, 0x51, 0x18, 0x62, 0x56, 0xa9,
-	0x24, 0x7b, 0x05, 0x20, 0x13, 0x57, 0x48, 0xc0, 0x1e, 0x9e, 0x06, 0x15, 0x83, 0x33, 0x36, 0x2f,
-	0x13, 0x44, 0xf9, 0x16, 0x0a, 0x78, 0x63, 0x3a, 0xae, 0xa5, 0xae, 0xac, 0x65, 0xce, 0x22, 0x5a,
-	0xaa, 0x42, 0xee, 0xaf, 0x02, 0x34, 0xa4, 0x40, 0xa9, 0x10, 0x8f, 0x2d, 0x6c, 0x31, 0xb1, 0xc4,
-	0xdd, 0x58, 0x41, 0xf6, 0xd2, 0x90, 0x32, 0x01, 0xef, 0x16, 0x14, 0x0b, 0x2f, 0xe5, 0xc2, 0x4c,
-	0xb2, 0xfb, 0xd0, 0x94, 0x01, 0x55, 0x84, 0xe5, 0x79, 0x84, 0x0f, 0xf8, 0x58, 0x42, 0x4b, 0x44,
-	0x1f, 0x6e, 0x74, 0x6f, 0x65, 0x28, 0x84, 0x8d, 0xfb, 0xe2, 0xf7, 0xb9, 0xc7, 0xc2, 0xab, 0xce,
-	0x03, 0x80, 0xc9, 0x8a, 0x97, 0xd3, 0x19, 0xb9, 0x52, 0xc5, 0x81, 0x9e, 0x9c, 0x9b, 0x6e, 0xa4,
-	0x80, 0x78, 0x52, 0x7c, 0x5c, 0x30, 0xbe, 0x82, 0x95, 0xcf, 0x79, 0xd3, 0x4a, 0xb1, 0x20, 0xd5,
-	0xd8, 0xfc, 0xde, 0x0f, 0x95, 0xbf, 0x7c, 0xe9, 0x78, 0xb8, 0x94, 0xe8, 0x61, 0xed, 0xfa, 0xc1,
-	0xe4, 0xae, 0x2f, 0xe5, 0x49, 0xe0, 0xfe, 0x28, 0x01, 0x4c, 0x84, 0xe9, 0x4f, 0xa0, 0xe3, 0xf8,
-	0x7d, 0xde, 0x6c, 0x1c, 0x8b, 0xc8, 0x2a, 0xea, 0x87, 0x04, 0x63, 0x47, 0x9d, 0x73, 0xa2, 0xda,
-	0xfc, 0x96, 0xf2, 0x25, 0x6f, 0xc3, 0x07, 0xb0, 0x39, 0xe1, 0xb5, 0x53, 0x6c, 0xc5, 0x85, 0x6c,
-	0x8f, 0x60, 0x1d, 0xd9, 0xb0, 0x1d, 0x45, 0x19, 0xa6, 0xd2, 0x42, 0xa6, 0x8f, 0x61, 0x27, 0x65,
-	0x27, 0x4f, 0xf6, 0x14, 0x6b, 0x79, 0x21, 0xeb, 0x87, 0xb0, 0x85, 0xac, 0x17, 0xa6, 0xc3, 0xf2,
-	0x7c, 0x95, 0x7f, 0x60, 0xe7, 0x98, 0x84, 0xc3, 0x8c, 0x9d, 0xd5, 0x85, 0x4c, 0xef, 0xc1, 0x1a,
-	0x32, 0xe5, 0xf4, 0xd4, 0xae, 0x63, 0xa1, 0xc4, 0x62, 0xd8, 0x98, 0x52, 0x2c, 0xcb, 0x8b, 0x58,
-	0x8c, 0x1e, 0x34, 0x5f, 0x44, 0x43, 0xc2, 0xdc, 0x41, 0x92, 0xfd, 0xff, 0xb2, 0x9e, 0x7e, 0x2b,
-	0x42, 0xe3, 0x60, 0x18, 0xfa, 0x51, 0x90, 0xe9, 0x1b, 0x32, 0xa5, 0xa7, 0xfa, 0x86, 0xa4, 0xd9,
-	0x83, 0xa6, 0x9c, 0x56, 0x8a, 0x4c, 0xd6, 0x9a, 0x3e, 0x9d, 0xf9, 0xfc, 0x6a, 0x2a, 0xa6, 0xae,
-	0x22, 0xcc, 0x56, 0x5b, 0x2a, 0x1b, 0x3f, 0x01, 0x6d, 0x24, 0xfd, 0x52, 0x94, 0x32, 0xb2, 0xf7,
-	0x62, 0xcd, 0x13, 0x03, 0xf7, 0xd3, 0xfe, 0x4b, 0x1c, 0xf1, 0x86, 0xc2, 0xaf, 0x3e, 0xfd, 0xb8,
-	0x0c, 0xd3, 0x6f, 0xcf, 0xa4, 0x33, 0x75, 0x5e, 0xc0, 0xda, 0x34, 0x6b, 0xa6, 0x00, 0x8d, 0x74,
-	0x01, 0x36, 0xba, 0xeb, 0x4a, 0x44, 0x9a, 0x4b, 0x54, 0xe5, 0xa5, 0xbc, 0x22, 0x25, 0xaf, 0x1a,
-	0xfd, 0x5d, 0xd0, 0x3c, 0x39, 0xf4, 0x12, 0xdc, 0x4a, 0x29, 0x01, 0x99, 0x81, 0x88, 0xd8, 0x59,
-	0xc2, 0x9b, 0x99, 0xd8, 0xa5, 0x23, 0x91, 0x19, 0xaf, 0xb2, 0xd5, 0xaa, 0x1b, 0xfc, 0xac, 0xd7,
-	0x6e, 0xf7, 0xe7, 0x2a, 0x94, 0x9e, 0xf6, 0xbe, 0xd4, 0x8f, 0x60, 0x25, 0xf7, 0x46, 0xd7, 0xe3,
-	0x9e, 0x34, 0xfb, 0x43, 0x42, 0xe7, 0xf6, 0xbc, 0x63, 0x75, 0x97, 0x58, 0xe2, 0x32, 0x73, 0x17,
-	0x8d, 0x44, 0xe6, 0xec, 0x4b, 0x5d, 0x22, 0x73, 0xde, 0xfd, 0x64, 0x49, 0xff, 0x08, 0xaa, 0xf2,
-	0x45, 0xaf, 0x6f, 0x28, 0xda, 0xcc, 0xa7, 0x81, 0xce, 0x66, 0x6e, 0x37, 0x61, 0x3c, 0x04, 0x2d,
-	0xf3, 0xad, 0x44, 0xbf, 0x91, 0xd1, 0x95, 0xfd, 0x20, 0xd0, 0xb9, 0x39, 0xfb, 0x30, 0x91, 0x76,
-	0x00, 0x30, 0x79, 0xa7, 0xea, 0x6d, 0x45, 0x3d, 0xf5, 0x61, 0xa1, 0xb3, 0x33, 0xe3, 0x24, 0x11,
-	0x72, 0x0a, 0xab, 0xf9, 0x87, 0xa8, 0x9e, 0x43, 0x35, 0xff, 0x6c, 0xec, 0xdc, 0x99, 0x7b, 0x9e,
-	0x16, 0x9b, 0x7f, 0x8e, 0x26, 0x62, 0xe7, 0x3c, 0x6e, 0x13, 0xb1, 0x73, 0xdf, 0xb1, 0x4b, 0xfa,
-	0xd7, 0xd0, 0xca, 0xbe, 0x24, 0xf5, 0x18, 0xa4, 0x99, 0x0f, 0xdc, 0xce, 0xad, 0x39, 0xa7, 0x89,
-	0xc0, 0xf7, 0xa1, 0x22, 0xdf, 0x8c, 0x71, 0xc6, 0xa7, 0x9f, 0x99, 0x9d, 0x8d, 0xec, 0x66, 0xc2,
-	0xf5, 0x10, 0xaa, 0xf2, 0x8a, 0x9a, 0x24, 0x40, 0xe6, 0xc6, 0xda, 0x69, 0xa6, 0x77, 0x8d, 0xa5,
-	0x87, 0x85, 0x58, 0x0f, 0xcd, 0xe8, 0xa1, 0xb3, 0xf4, 0xa4, 0x82, 0x33, 0xa8, 0x8a, 0xaf, 0x79,
-	0x8f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xfa, 0x78, 0x6c, 0xcd, 0xda, 0x13, 0x00, 0x00,
+	// 1806 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0xd9, 0x8e, 0xdb, 0x5e,
+	0x19, 0x9f, 0xec, 0xc9, 0x97, 0x38, 0x33, 0xe3, 0xd9, 0x32, 0xe9, 0x36, 0xb8, 0x85, 0x56, 0xa8,
+	0x1a, 0x95, 0x94, 0xa5, 0x14, 0x09, 0x51, 0xa6, 0x15, 0x05, 0x4d, 0x4b, 0x3a, 0x0b, 0x88, 0xab,
+	0xc8, 0xb1, 0x4f, 0x93, 0xc3, 0x38, 0xb6, 0xf1, 0x39, 0x9e, 0xe5, 0x21, 0xb8, 0xe5, 0x9a, 0x7b,
+	0x24, 0xc4, 0x15, 0x0f, 0xc0, 0xb3, 0x70, 0xc5, 0x53, 0xf0, 0x9d, 0xc5, 0x8e, 0xed, 0x2c, 0x83,
+	0x84, 0xfe, 0x17, 0xff, 0x9b, 0x28, 0xe7, 0x9c, 0x6f, 0xfd, 0x7d, 0xdb, 0x39, 0x86, 0x96, 0x1d,
+	0xd2, 0xe3, 0x30, 0x0a, 0x78, 0x60, 0xd6, 0xf8, 0x5d, 0x48, 0x98, 0x35, 0x86, 0xdd, 0xcb, 0xd0,
+	0xb5, 0x39, 0x19, 0x46, 0x81, 0x43, 0x18, 0x3b, 0x23, 0x7f, 0x8a, 0x09, 0xe3, 0x26, 0x40, 0x99,
+	0xba, 0xbd, 0xd2, 0x51, 0xe9, 0x45, 0xcb, 0x6c, 0x43, 0x25, 0xc4, 0x45, 0x59, 0x2e, 0xf0, 0xc4,
+	0xf1, 0x02, 0x46, 0xce, 0xb9, 0x4b, 0xfd, 0x5e, 0x05, 0xf7, 0x9a, 0xa6, 0x01, 0xb5, 0x1b, 0xea,
+	0xf2, 0x69, 0xaf, 0x8a, 0x4b, 0xc3, 0xec, 0x42, 0x7d, 0x4a, 0xe8, 0x64, 0xca, 0x7b, 0x35, 0xb1,
+	0xb6, 0x0e, 0x60, 0xaf, 0xa0, 0x83, 0x85, 0x81, 0xcf, 0x88, 0xf5, 0xd7, 0x12, 0xec, 0x9f, 0x44,
+	0x04, 0x4f, 0x4e, 0x02, 0x9f, 0xdb, 0xd4, 0x27, 0xd1, 0x32, 0xfd, 0xb8, 0x18, 0xc7, 0xbe, 0xeb,
+	0x91, 0xa1, 0x8d, 0x3a, 0xe6, 0x66, 0x4c, 0x89, 0x73, 0x15, 0x06, 0xd4, 0xe7, 0xd2, 0x8c, 0x96,
+	0x30, 0x83, 0x49, 0xab, 0xaa, 0x72, 0x89, 0x66, 0xe0, 0x32, 0x88, 0x95, 0x19, 0xc9, 0x9a, 0x44,
+	0x51, 0xaf, 0x9e, 0xac, 0x3d, 0x7b, 0x4c, 0x3c, 0xd6, 0x6b, 0x1c, 0x55, 0x70, 0xbd, 0x03, 0x6d,
+	0x3f, 0x18, 0xd2, 0xeb, 0x80, 0x9f, 0x05, 0x01, 0xef, 0x35, 0x85, 0x6b, 0xd6, 0xcf, 0xe1, 0x60,
+	0xc1, 0x42, 0x65, 0xbd, 0xf9, 0x14, 0x5a, 0x4e, 0xb2, 0x29, 0x2d, 0x6d, 0x0f, 0xb6, 0x8e, 0x25,
+	0xaa, 0xc7, 0x29, 0xb1, 0xf5, 0x06, 0x8c, 0x73, 0x3a, 0xf1, 0x6d, 0xef, 0x5e, 0x60, 0x85, 0x79,
+	0x92, 0x52, 0x7a, 0x63, 0x58, 0x5b, 0xd0, 0x4d, 0x38, 0x35, 0x5c, 0x7f, 0x2f, 0xc3, 0xf6, 0x3b,
+	0xd7, 0x5d, 0x13, 0xa9, 0x2d, 0x68, 0x72, 0x12, 0xcd, 0xa8, 0x90, 0x52, 0x96, 0xa1, 0x39, 0x84,
+	0x6a, 0xcc, 0xd0, 0xbe, 0x8a, 0xb4, 0xaf, 0xad, 0xed, 0xbb, 0xc4, 0x2d, 0xb3, 0x03, 0x55, 0x3b,
+	0x9a, 0x30, 0x44, 0xab, 0xa2, 0x6c, 0x21, 0xfe, 0x35, 0x42, 0xa5, 0x17, 0xce, 0x8d, 0xab, 0x71,
+	0xd2, 0x56, 0x36, 0xf2, 0x18, 0x37, 0x0b, 0x18, 0xb7, 0x0a, 0x18, 0x83, 0x5c, 0xef, 0x42, 0xc7,
+	0xb1, 0x43, 0x7b, 0x4c, 0x3d, 0xca, 0x29, 0x61, 0xbd, 0xb6, 0x14, 0x7f, 0x00, 0x9b, 0x76, 0x18,
+	0xda, 0xd1, 0x2c, 0x88, 0xd0, 0x99, 0xaf, 0xd4, 0x23, 0xbd, 0x4e, 0x42, 0xce, 0x88, 0x47, 0xfd,
+	0xf8, 0xf6, 0x54, 0x44, 0xa6, 0x67, 0xc8, 0x5d, 0x24, 0xf7, 0x83, 0xcf, 0xe4, 0x66, 0x18, 0xd1,
+	0x6b, 0xa4, 0x9d, 0xa0, 0x9c, 0xae, 0x74, 0xee, 0x31, 0x34, 0x22, 0x8f, 0xce, 0x28, 0x67, 0xbd,
+	0x4d, 0x14, 0xdc, 0x1e, 0x18, 0xda, 0xbf, 0x33, 0xb9, 0x6b, 0x0d, 0xa0, 0xae, 0xfe, 0x09, 0x5f,
+	0xc5, 0x89, 0x86, 0x09, 0x57, 0x2c, 0xf8, 0xca, 0x25, 0x44, 0x55, 0xb1, 0x9a, 0xda, 0x91, 0x2b,
+	0x21, 0xaa, 0x62, 0xc0, 0xaa, 0x12, 0x1d, 0xf4, 0x3a, 0xd6, 0xb8, 0x1a, 0x62, 0x31, 0xd1, 0x81,
+	0x32, 0xcc, 0x7d, 0xe8, 0xda, 0xae, 0x8b, 0xfe, 0x04, 0x08, 0xf3, 0xaf, 0xa8, 0xcb, 0x90, 0xb3,
+	0x82, 0x01, 0xdb, 0x05, 0x33, 0x1b, 0x1d, 0x1d, 0xb4, 0xd3, 0x34, 0x81, 0xd2, 0x74, 0x5d, 0x16,
+	0xb9, 0xef, 0xe6, 0xf2, 0xb9, 0x2c, 0xa3, 0xb5, 0x9d, 0x64, 0x53, 0x7a, 0x60, 0xf5, 0xa1, 0xb7,
+	0x28, 0x4d, 0x6b, 0x7a, 0x0d, 0x07, 0xef, 0x89, 0x47, 0xee, 0xd3, 0x84, 0xee, 0xfa, 0xf6, 0x8c,
+	0xa8, 0xac, 0x13, 0x02, 0x17, 0x99, 0xb4, 0xc0, 0xa7, 0xb0, 0x77, 0x4a, 0x19, 0x5f, 0x2b, 0xce,
+	0xfa, 0x03, 0xc0, 0x9c, 0x20, 0x15, 0x9e, 0xaa, 0x22, 0xb7, 0x94, 0xeb, 0x54, 0x44, 0x10, 0xb9,
+	0x13, 0xea, 0x96, 0x81, 0xc5, 0x16, 0xfb, 0xf4, 0xf6, 0x3c, 0x70, 0xae, 0x08, 0x67, 0xb2, 0x62,
+	0x65, 0x1f, 0x61, 0x53, 0xe2, 0x79, 0xb2, 0x60, 0x9b, 0xd6, 0x2f, 0x60, 0xbf, 0xa8, 0x5f, 0x97,
+	0xde, 0xf7, 0xa0, 0x3d, 0x47, 0x8b, 0xa1, 0xb6, 0xca, 0x2a, 0xb8, 0x3a, 0xe7, 0x1c, 0xd1, 0x5a,
+	0x66, 0xf8, 0x11, 0x74, 0xd3, 0x32, 0x95, 0x44, 0x2a, 0x79, 0x6d, 0x1e, 0x33, 0x4d, 0xf1, 0xb7,
+	0x32, 0x34, 0x74, 0x38, 0x93, 0x22, 0xf8, 0x06, 0xcb, 0x6c, 0x1b, 0x5a, 0xec, 0x8e, 0x71, 0x32,
+	0x1b, 0xea, 0x62, 0x33, 0xbe, 0x5d, 0xc5, 0xf6, 0xe7, 0x12, 0xb4, 0x52, 0x40, 0xef, 0xed, 0xdf,
+	0xdf, 0x81, 0x56, 0xa8, 0xa0, 0x25, 0xaa, 0x7e, 0xda, 0x83, 0xae, 0x96, 0x97, 0x40, 0x3e, 0x0f,
+	0x47, 0xb5, 0xd0, 0xaf, 0x15, 0x7a, 0x08, 0x6c, 0x28, 0xaa, 0xaf, 0x2e, 0xaa, 0xcf, 0xdc, 0x44,
+	0xf3, 0x62, 0x9f, 0x53, 0x4c, 0x3e, 0xd9, 0xa9, 0xac, 0xe7, 0xd0, 0xf8, 0x64, 0x3b, 0x53, 0xb4,
+	0x46, 0x50, 0x3a, 0xa1, 0x0e, 0xab, 0x1c, 0x4f, 0x33, 0x82, 0x68, 0xdc, 0xa9, 0xfa, 0xb7, 0x7e,
+	0x87, 0x2d, 0x5a, 0x25, 0x89, 0xce, 0xae, 0x67, 0x58, 0x8b, 0x89, 0x23, 0x49, 0x72, 0x2d, 0x74,
+	0x76, 0xf3, 0x09, 0x34, 0x66, 0x4a, 0xbe, 0x2e, 0xd7, 0xc4, 0x7e, 0xad, 0xd5, 0xba, 0x82, 0x7d,
+	0x35, 0xf6, 0xd6, 0x0e, 0xb7, 0x85, 0x19, 0xa0, 0x5c, 0x56, 0x13, 0xed, 0x05, 0xb4, 0x22, 0xc2,
+	0x82, 0x38, 0x42, 0x40, 0x24, 0x0a, 0xed, 0xc1, 0x5e, 0x92, 0x5b, 0x52, 0xf4, 0x99, 0x3e, 0xb5,
+	0xfe, 0x5d, 0x82, 0x6e, 0x7e, 0x4b, 0x94, 0xd8, 0xd8, 0xbb, 0xa2, 0xc1, 0xef, 0xd5, 0x2c, 0x56,
+	0xce, 0x63, 0x96, 0x21, 0x14, 0xe7, 0xd8, 0xf0, 0x50, 0x62, 0x39, 0xb3, 0x35, 0x24, 0x11, 0x0d,
+	0x54, 0x13, 0x34, 0x44, 0x82, 0xe3, 0xd6, 0x97, 0x38, 0xe0, 0xb6, 0x9e, 0xe9, 0x62, 0xde, 0x22,
+	0x84, 0x84, 0x9f, 0x08, 0x20, 0x6b, 0xe9, 0x0c, 0x96, 0x7b, 0x9f, 0xc8, 0x8c, 0xe9, 0x2c, 0x46,
+	0xa5, 0x0a, 0xdc, 0x53, 0x91, 0x14, 0x3a, 0x8f, 0x91, 0x50, 0x6d, 0x9e, 0xdf, 0xd8, 0xa1, 0x4c,
+	0x66, 0x03, 0x2b, 0x66, 0x5b, 0xed, 0xa1, 0xbd, 0x24, 0xba, 0xb6, 0x45, 0x3b, 0x95, 0x79, 0x2d,
+	0x8f, 0xae, 0x48, 0xe4, 0x13, 0xef, 0x53, 0x46, 0x12, 0xc8, 0xa1, 0x78, 0x08, 0x07, 0x0b, 0x98,
+	0xea, 0x6e, 0x65, 0x81, 0xf1, 0xe1, 0x9a, 0x60, 0x3b, 0x48, 0x50, 0x46, 0xbf, 0x44, 0x3a, 0x20,
+	0xa0, 0xb3, 0x50, 0x7a, 0x5f, 0xb5, 0xbe, 0x40, 0x4d, 0xd2, 0x14, 0xe6, 0x81, 0x8a, 0xc7, 0xb2,
+	0x10, 0x18, 0x49, 0x7c, 0xaa, 0x49, 0x8d, 0xce, 0x45, 0xd6, 0xa4, 0xc8, 0x7f, 0x96, 0xa0, 0xf3,
+	0x99, 0xf0, 0x9b, 0x20, 0xba, 0x12, 0x59, 0xc4, 0x0a, 0x2d, 0x10, 0x91, 0x8c, 0x6e, 0x47, 0xe3,
+	0x3b, 0xae, 0xe1, 0xae, 0x0a, 0x30, 0x70, 0x67, 0x68, 0xab, 0xc6, 0x27, 0x87, 0x8e, 0x90, 0x7b,
+	0x76, 0x3b, 0xc2, 0x4a, 0x0e, 0x22, 0x15, 0x67, 0x49, 0x86, 0x5b, 0x6e, 0x14, 0x84, 0x21, 0x71,
+	0x95, 0x2e, 0x21, 0xec, 0x22, 0x11, 0x56, 0x4f, 0xa8, 0x70, 0x27, 0xd4, 0xc2, 0x1a, 0x89, 0xb0,
+	0x8b, 0x54, 0x58, 0x33, 0x43, 0x96, 0x08, 0x6b, 0x49, 0xc3, 0x67, 0xd0, 0xc4, 0x58, 0x5e, 0x32,
+	0x7b, 0x22, 0x53, 0x85, 0x63, 0xac, 0xbd, 0x51, 0x2c, 0x96, 0x0a, 0x2c, 0xd1, 0x1f, 0x42, 0x12,
+	0x61, 0x84, 0xf5, 0x6e, 0x19, 0x0b, 0xa1, 0x6a, 0x3e, 0x80, 0x1d, 0xb9, 0x1c, 0x51, 0x7f, 0xa4,
+	0xa2, 0x34, 0x0b, 0x5c, 0xa2, 0xfd, 0xc0, 0xc8, 0xa5, 0x87, 0xa2, 0x1f, 0xca, 0x23, 0xe9, 0x8f,
+	0x75, 0x01, 0xdd, 0x8b, 0x29, 0xde, 0x3c, 0x39, 0x76, 0x9c, 0xc9, 0x7b, 0x9b, 0xdb, 0xa2, 0x62,
+	0x43, 0x99, 0x74, 0x4c, 0x2b, 0x44, 0x6e, 0xae, 0x48, 0x88, 0x3b, 0x4a, 0x8e, 0x14, 0x68, 0x38,
+	0x73, 0xe7, 0x47, 0xb2, 0xc8, 0xd5, 0xb4, 0xe6, 0xd2, 0x09, 0x05, 0xbc, 0x25, 0xf3, 0x38, 0xe3,
+	0x42, 0x7b, 0xb0, 0x99, 0x54, 0x6d, 0xe2, 0xe8, 0x31, 0x6c, 0xf2, 0xd4, 0x8a, 0x11, 0x26, 0x92,
+	0xad, 0x8b, 0x37, 0x29, 0xab, 0x82, 0x8d, 0xa2, 0x47, 0xca, 0xa6, 0xac, 0xc5, 0x2a, 0xad, 0x0f,
+	0xa1, 0x85, 0x4d, 0x9a, 0x29, 0xb5, 0xe8, 0x86, 0x13, 0x47, 0x11, 0x66, 0x95, 0x4e, 0xb2, 0xcf,
+	0x00, 0x2a, 0x71, 0xa5, 0x04, 0xec, 0xe1, 0x59, 0x50, 0x31, 0x38, 0x33, 0xfb, 0x36, 0x45, 0x54,
+	0x6c, 0xa1, 0x80, 0xaf, 0x36, 0xf5, 0x1c, 0x7d, 0x8f, 0xad, 0x0a, 0x16, 0xd9, 0x52, 0x35, 0x72,
+	0xff, 0x29, 0x41, 0x5b, 0x09, 0x54, 0x0a, 0xf1, 0xd8, 0xc1, 0x16, 0x93, 0x48, 0x3c, 0x4a, 0x14,
+	0xe4, 0x2f, 0x0d, 0x19, 0x13, 0xf0, 0x6e, 0xc1, 0xb0, 0xf0, 0x32, 0x2e, 0x2c, 0x25, 0x7b, 0x0e,
+	0x1d, 0x15, 0x50, 0x4d, 0x58, 0x5d, 0x45, 0xf8, 0x52, 0x8c, 0x25, 0xb4, 0x44, 0xf6, 0xe1, 0xf6,
+	0xe0, 0x51, 0x8e, 0x42, 0xda, 0x78, 0x2c, 0x7f, 0x3f, 0xf8, 0x3c, 0xba, 0xeb, 0xbf, 0x04, 0x98,
+	0xaf, 0x44, 0x39, 0x5d, 0x91, 0x3b, 0x5d, 0x1c, 0xe8, 0xc9, 0xb5, 0xed, 0xc5, 0x1a, 0x88, 0xb7,
+	0xe5, 0x37, 0x25, 0xeb, 0x37, 0xb0, 0xf9, 0x4b, 0xd1, 0xb4, 0x32, 0x2c, 0x48, 0x35, 0xb3, 0xff,
+	0x18, 0x44, 0xda, 0x5f, 0xb1, 0xa4, 0x3e, 0x2e, 0x15, 0x7a, 0x58, 0xbb, 0x41, 0x38, 0x7f, 0x00,
+	0x28, 0x79, 0x0a, 0xb8, 0x7f, 0x55, 0x00, 0xe6, 0xc2, 0xcc, 0xb7, 0xd0, 0xa7, 0xc1, 0x48, 0x34,
+	0x1b, 0xea, 0x10, 0x55, 0x45, 0xa3, 0x88, 0x60, 0xec, 0x18, 0xbd, 0x26, 0xba, 0xcd, 0xef, 0x6b,
+	0x5f, 0x8a, 0x36, 0xfc, 0x08, 0xf6, 0xe6, 0xbc, 0x6e, 0x86, 0xad, 0xbc, 0x96, 0xed, 0x35, 0xec,
+	0x20, 0x1b, 0xb6, 0xa3, 0x38, 0xc7, 0x54, 0x59, 0xcb, 0xf4, 0x53, 0x38, 0xcc, 0xd8, 0x29, 0x92,
+	0x3d, 0xc3, 0x5a, 0x5d, 0xcb, 0xfa, 0x63, 0xd8, 0x47, 0xd6, 0x1b, 0x9b, 0xf2, 0x22, 0x5f, 0xed,
+	0x7f, 0xb0, 0x73, 0x46, 0xa2, 0x49, 0xce, 0xce, 0xfa, 0x5a, 0xa6, 0x1f, 0xc0, 0x36, 0x32, 0x15,
+	0xf4, 0x34, 0xee, 0x63, 0x61, 0xc4, 0xe1, 0xd8, 0x98, 0x32, 0x2c, 0xcd, 0x75, 0x2c, 0xd6, 0x10,
+	0x3a, 0x1f, 0xe3, 0x09, 0xe1, 0xde, 0x38, 0xcd, 0xfe, 0xff, 0xb3, 0x9e, 0xfe, 0x51, 0x86, 0xf6,
+	0xc9, 0x24, 0x0a, 0xe2, 0x30, 0xd7, 0x37, 0x54, 0x4a, 0x2f, 0xf4, 0x0d, 0x45, 0xf3, 0x02, 0x3a,
+	0x6a, 0x5a, 0x69, 0x32, 0x55, 0x6b, 0xe6, 0x62, 0xe6, 0x8b, 0xab, 0xa9, 0x9c, 0xba, 0x9a, 0x30,
+	0x5f, 0x6d, 0x99, 0x6c, 0xfc, 0x19, 0x18, 0x53, 0xe5, 0x97, 0xa6, 0x54, 0x91, 0x7d, 0x96, 0x68,
+	0x9e, 0x1b, 0x78, 0x9c, 0xf5, 0x5f, 0xe1, 0x88, 0x37, 0x14, 0x71, 0xf5, 0x19, 0x25, 0x65, 0x98,
+	0x7d, 0x7b, 0xa6, 0x9d, 0xa9, 0xff, 0x11, 0xb6, 0x17, 0x59, 0x73, 0x05, 0x68, 0x65, 0x0b, 0xb0,
+	0x3d, 0xd8, 0xd1, 0x22, 0xb2, 0x5c, 0xb2, 0x2a, 0x6f, 0xd5, 0x15, 0x29, 0x7d, 0xd5, 0x98, 0xdf,
+	0x07, 0xc3, 0x57, 0x43, 0x2f, 0xc5, 0xad, 0x92, 0x11, 0x90, 0x1b, 0x88, 0x88, 0x9d, 0x23, 0xbd,
+	0x59, 0x8a, 0x5d, 0x36, 0x12, 0xb9, 0xf1, 0xaa, 0x5a, 0xad, 0xbe, 0xc1, 0x2f, 0x7b, 0xed, 0x0e,
+	0xfe, 0x52, 0x87, 0xca, 0xbb, 0xe1, 0xaf, 0xcd, 0x33, 0xd8, 0x2c, 0xbc, 0xd1, 0xcd, 0xa4, 0x27,
+	0x2d, 0xff, 0xba, 0xd0, 0x7f, 0xbc, 0xea, 0x58, 0xdf, 0x25, 0x36, 0x84, 0xcc, 0xc2, 0x45, 0x23,
+	0x95, 0xb9, 0xfc, 0x52, 0x97, 0xca, 0x5c, 0x75, 0x3f, 0xd9, 0x30, 0x7f, 0x02, 0x75, 0xf5, 0xa2,
+	0x37, 0x77, 0x35, 0x6d, 0xee, 0xd3, 0x40, 0x7f, 0xaf, 0xb0, 0x9b, 0x32, 0x9e, 0x82, 0x91, 0xfb,
+	0x80, 0x62, 0x3e, 0xc8, 0xe9, 0xca, 0x7f, 0x10, 0xe8, 0x3f, 0x5c, 0x7e, 0x98, 0x4a, 0x3b, 0x01,
+	0x98, 0xbf, 0x53, 0xcd, 0x9e, 0xa6, 0x5e, 0xf8, 0xb0, 0xd0, 0x3f, 0x5c, 0x72, 0x92, 0x0a, 0xb9,
+	0x84, 0xad, 0xe2, 0x43, 0xd4, 0x2c, 0xa0, 0x5a, 0x7c, 0x36, 0xf6, 0x9f, 0xac, 0x3c, 0xcf, 0x8a,
+	0x2d, 0x3e, 0x47, 0x53, 0xb1, 0x2b, 0x1e, 0xb7, 0xa9, 0xd8, 0x95, 0xef, 0xd8, 0x0d, 0xf3, 0xb7,
+	0xd0, 0xcd, 0xbf, 0x24, 0xcd, 0x04, 0xa4, 0xa5, 0x0f, 0xdc, 0xfe, 0xa3, 0x15, 0xa7, 0xa9, 0xc0,
+	0x1f, 0x42, 0x4d, 0xbd, 0x19, 0x93, 0x8c, 0xcf, 0x3e, 0x33, 0xfb, 0xbb, 0xf9, 0xcd, 0x94, 0xeb,
+	0x15, 0xd4, 0xd5, 0x15, 0x35, 0x4d, 0x80, 0xdc, 0x8d, 0xb5, 0xdf, 0xc9, 0xee, 0x5a, 0x1b, 0xaf,
+	0x4a, 0x89, 0x1e, 0x96, 0xd3, 0xc3, 0x96, 0xe9, 0xc9, 0x04, 0x67, 0x5c, 0x97, 0x9f, 0xf8, 0x5e,
+	0xff, 0x37, 0x00, 0x00, 0xff, 0xff, 0xee, 0xbf, 0x26, 0x33, 0xef, 0x13, 0x00, 0x00,
 }

+ 1 - 0
vendor/src/github.com/docker/containerd/api/grpc/types/api.proto

@@ -35,6 +35,7 @@ message CreateContainerRequest {
 	string stdout = 5; // path to file where stdout will be written (optional)
 	string stderr = 6; // path to file where stderr will be written (optional)
 	repeated string labels = 7;
+	bool noPivotRoot = 8;
 }

 message CreateContainerResponse {

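The new field threads a no-pivot-root setting through to containerd at container creation time. A sketch of what setting it looks like from the generated Go client (editor's illustration, not part of the diff; the helper function, ID, and bundle path are hypothetical, and the client is assumed to be a `types.APIClient` from this generated package):

```go
package main

import (
	"fmt"

	"github.com/docker/containerd/api/grpc/types"
	"golang.org/x/net/context"
)

// createNoPivot asks containerd to start a container without pivot_root(2),
// e.g. when the daemon's root lives on a ramdisk where pivoting fails.
func createNoPivot(ctx context.Context, client types.APIClient, id, bundle string) error {
	_, err := client.CreateContainer(ctx, &types.CreateContainerRequest{
		Id:          id,
		BundlePath:  bundle,
		NoPivotRoot: true, // maps to wire field 8, added above
	})
	if err != nil {
		return fmt.Errorf("create %s: %v", id, err)
	}
	return nil
}
```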
+ 6 - 0
vendor/src/github.com/docker/libnetwork/CHANGELOG.md

@@ -1,5 +1,11 @@
 # Changelog

+## 0.7.0-rc.1 (2016-03-30)
+- Fixes https://github.com/docker/libnetwork/issues/985
+- Fixes https://github.com/docker/libnetwork/issues/945
+- Log time taken to set sandbox key
+- Limit number of concurrent DNS queries
+
 ## 0.7.0-dev.10 (2016-03-21)
 - Add IPv6 service discovery (AAAA records) in embedded DNS server
 - Honor enableIPv6 flag in network create for the IP allocation

Some files were not shown because of the large amount of changes