
Merge branch 'master' of github.com:docker/docker into kill

Docker-DCO-1.1-Signed-off-by: Dan Walsh <dwalsh@redhat.com> (github: rhatdan)
Dan Walsh committed 10 years ago
commit 66121d1b45
100 changed files with 4019 additions and 2395 deletions
  1. .gitignore (+1, -0)
  2. CHANGELOG.md (+17, -0)
  3. CONTRIBUTING.md (+8, -8)
  4. Dockerfile (+18, -10)
  5. MAINTAINERS (+69, -30)
  6. Makefile (+6, -6)
  7. README.md (+47, -0)
  8. api/client/build.go (+15, -6)
  9. api/client/cli.go (+11, -17)
  10. api/client/cp.go (+1, -1)
  11. api/client/create.go (+9, -4)
  12. api/client/events.go (+7, -17)
  13. api/client/exec.go (+2, -2)
  14. api/client/export.go (+5, -1)
  15. api/client/help.go (+3, -6)
  16. api/client/import.go (+7, -1)
  17. api/client/info.go (+5, -3)
  18. api/client/inspect.go (+18, -6)
  19. api/client/kill.go (+6, -3)
  20. api/client/load.go (+6, -1)
  21. api/client/logs.go (+15, -3)
  22. api/client/pause.go (+6, -3)
  23. api/client/pull.go (+2, -2)
  24. api/client/restart.go (+6, -3)
  25. api/client/rm.go (+8, -3)
  26. api/client/rmi.go (+7, -4)
  27. api/client/run.go (+6, -7)
  28. api/client/save.go (+7, -2)
  29. api/client/start.go (+15, -10)
  30. api/client/stats.go (+27, -6)
  31. api/client/stop.go (+6, -3)
  32. api/client/unpause.go (+6, -3)
  33. api/client/utils.go (+13, -20)
  34. api/client/version.go (+14, -18)
  35. api/client/wait.go (+6, -3)
  36. api/common.go (+3, -2)
  37. api/server/form.go (+1, -1)
  38. api/server/form_test.go (+2, -2)
  39. api/server/server.go (+151, -279)
  40. api/server/server_linux.go (+4, -9)
  41. api/server/server_windows.go (+2, -8)
  42. api/types/types.go (+3, -9)
  43. builder/bflag.go (+155, -0)
  44. builder/bflag_test.go (+187, -0)
  45. builder/command/command.go (+0, -2)
  46. builder/dispatchers.go (+104, -13)
  47. builder/evaluator.go (+21, -7)
  48. builder/internals.go (+20, -18)
  49. builder/job.go (+7, -4)
  50. builder/parser/parser.go (+3, -2)
  51. builder/parser/testfiles/flags/Dockerfile (+10, -0)
  52. builder/parser/testfiles/flags/result (+10, -0)
  53. builder/parser/utils.go (+105, -5)
  54. contrib/builder/deb/debian-jessie/Dockerfile (+1, -1)
  55. contrib/builder/deb/debian-stretch/Dockerfile (+14, -0)
  56. contrib/builder/deb/debian-wheezy/Dockerfile (+1, -1)
  57. contrib/builder/deb/generate.sh (+1, -1)
  58. contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile (+1, -1)
  59. contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile (+1, -1)
  60. contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile (+1, -1)
  61. contrib/builder/rpm/README.md (+5, -0)
  62. contrib/builder/rpm/build.sh (+10, -0)
  63. contrib/builder/rpm/centos-6/Dockerfile (+15, -0)
  64. contrib/builder/rpm/centos-7/Dockerfile (+15, -0)
  65. contrib/builder/rpm/fedora-20/Dockerfile (+15, -0)
  66. contrib/builder/rpm/fedora-21/Dockerfile (+15, -0)
  67. contrib/builder/rpm/generate.sh (+73, -0)
  68. contrib/check-config.sh (+33, -2)
  69. contrib/completion/bash/docker (+20, -4)
  70. contrib/completion/fish/docker.fish (+3, -1)
  71. contrib/completion/zsh/_docker (+2, -0)
  72. contrib/download-frozen-image.sh (+8, -4)
  73. contrib/init/upstart/docker.conf (+2, -0)
  74. contrib/mkimage-unittest.sh (+0, -49)
  75. contrib/mkimage/debootstrap (+13, -5)
  76. contrib/syntax/nano/Dockerfile.nanorc (+26, -0)
  77. contrib/syntax/nano/README.md (+32, -0)
  78. daemon/attach.go (+39, -207)
  79. daemon/changes.go (+13, -0)
  80. daemon/commit.go (+5, -1)
  81. daemon/config.go (+52, -49)
  82. daemon/config_linux.go (+43, -0)
  83. daemon/config_windows.go (+32, -0)
  84. daemon/container.go (+455, -907)
  85. daemon/container_linux.go (+979, -0)
  86. daemon/container_windows.go (+171, -0)
  87. daemon/copy.go (+16, -0)
  88. daemon/create.go (+51, -9)
  89. daemon/daemon.go (+270, -286)
  90. daemon/daemon_test.go (+118, -12)
  91. daemon/daemon_zfs.go (+7, -0)
  92. daemon/debugtrap.go (+21, -0)
  93. daemon/debugtrap_unsupported.go (+7, -0)
  94. daemon/delete.go (+26, -51)
  95. daemon/exec.go (+3, -74)
  96. daemon/exec_linux.go (+18, -0)
  97. daemon/exec_windows.go (+9, -0)
  98. daemon/execdriver/driver.go (+23, -155)
  99. daemon/execdriver/driver_linux.go (+159, -0)
  100. daemon/execdriver/execdrivers/execdrivers_linux.go (+2, -0)

+ 1 - 0
.gitignore

@@ -3,6 +3,7 @@
 #  please consider a global .gitignore https://help.github.com/articles/ignoring-files
 *.exe
 *.orig
+*.rej
 *.test
 .*.swp
 .DS_Store

+ 17 - 0
CHANGELOG.md

@@ -1,5 +1,22 @@
 # Changelog
 
 
+## 1.6.2 (2015-05-13)
+
+####  Runtime
+- Revert change prohibiting mounting into /sys
+
+## 1.6.1 (2015-05-07)
+
+####  Security
+- Fix read/write /proc paths (CVE-2015-3630)
+- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
+- Fix opening of file-descriptor 1 (CVE-2015-3627)
+- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
+- Prohibit mount of /sys
+
+#### Runtime
+- Update Apparmor policy to not allow mounts
+
 ## 1.6.0 (2015-04-07)
 
 
 #### Builder

+ 8 - 8
CONTRIBUTING.md

@@ -129,12 +129,12 @@ However, there might be a way to implement that feature *on top of* Docker.
   <col width="45%">
   <col width="65%">
   <tr>
-    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</th>
+    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
     <td>
       <p>
         IRC a direct line to our most knowledgeable Docker users; we have
-        both the  <code>#docker</code> and <code>#docker-dev</code> group on 
-        <strong>irc.freenode.net</strong>.  
+        both the  <code>#docker</code> and <code>#docker-dev</code> group on
+        <strong>irc.freenode.net</strong>.
         IRC is a rich chat protocol but it can overwhelm new users. You can search
         <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
       </p>
@@ -146,9 +146,9 @@ However, there might be a way to implement that feature *on top of* Docker.
     <td>
       There are two groups.
       <a href="https://groups.google.com/forum/#!forum/docker-user" target="_blank">Docker-user</a>
-      is for people using Docker containers. 
-      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a> 
-      group is for contributors and other people contributing to the Docker 
+      is for people using Docker containers.
+      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
+      group is for contributors and other people contributing to the Docker
       project.
     </td>
   </tr>
@@ -156,14 +156,14 @@ However, there might be a way to implement that feature *on top of* Docker.
     <td>Twitter</td>
     <td>
       You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
-      to get updates on our products. You can also tweet us questions or just 
+      to get updates on our products. You can also tweet us questions or just
       share blogs or stories.
     </td>
   </tr>
   <tr>
     <td>Stack Overflow</td>
     <td>
-      Stack Overflow has over 7000K Docker questions listed. We regularly 
+      Stack Overflow has over 7000K Docker questions listed. We regularly
       monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
       and so do many other knowledgeable Docker users.
     </td>

+ 18 - 10
Dockerfile

@@ -26,6 +26,9 @@
 FROM ubuntu:14.04
 MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
 
 
+RUN	apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
+RUN	echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
+
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
 	apparmor \
@@ -50,6 +53,8 @@ RUN apt-get update && apt-get install -y \
 	ruby1.9.1 \
 	ruby1.9.1-dev \
 	s3cmd=1.1.0* \
+	ubuntu-zfs \
+	libzfs-dev \
 	--no-install-recommends
 
 
 # Get lvm2 source for compiling statically
@@ -121,7 +126,8 @@ RUN set -x \
 	&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
 	&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
 	&& GOPATH=/go/src/github.com/docker/distribution/Godeps/_workspace:/go \
-		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry
+		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& rm -rf /go/src/github.com/docker/distribution/
 
 
 # Get the "docker-py" source so we can run their integration tests
 ENV DOCKER_PY_COMMIT 91985b239764fe54714fa0a93d52aa362357d251
@@ -157,23 +163,25 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
 COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/
 RUN ./contrib/download-frozen-image.sh /docker-frozen-images \
 	busybox:latest@4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 \
-	hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5
+	hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5 \
+	jess/unshare@5c9f6ea50341a2a8eb6677527f2bdedbf331ae894a41714fda770fb130f3314d
 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
 
 
-# Install man page generator
-COPY vendor /go/src/github.com/docker/docker/vendor
-# (copy vendor/ because go-md2man needs golang.org/x/net)
+# Download man page generator
 RUN set -x \
 	&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
-	&& git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \
-	&& go install -v github.com/cpuguy83/go-md2man
+	&& git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday
 
 
-# install toml validator
+# Download toml validator
 ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 RUN set -x \
 	&& git clone https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
-	&& (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT) \
-	&& go install -v github.com/BurntSushi/toml/cmd/tomlv
+	&& (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT)
+
+# copy vendor/ because go-md2man needs golang.org/x/net
+COPY vendor /go/src/github.com/docker/docker/vendor
+RUN go install -v github.com/cpuguy83/go-md2man \
+	github.com/BurntSushi/toml/cmd/tomlv
 
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]

+ 69 - 30
MAINTAINERS

@@ -296,7 +296,9 @@ made through a pull request.
 		[Org.Operators.security]
 
 
 			people = [
-				"erw"
+				"erw",
+				"diogomonica",
+				"nathanmccauley"
 			]
 
 
 		[Org.Operators."monthly meetings"]
@@ -312,6 +314,11 @@ made through a pull request.
 				"jfrazelle",
 				"jfrazelle",
 				"crosbymichael"
 				"crosbymichael"
 			]
+		
+		[Org.Operators.community]
+			people = [
+				"theadactyl"
+			]
 
 
 	# The chief maintainer is responsible for all aspects of quality for the project including
 	# code reviews, usability, stability, security, performance, etc.
@@ -319,6 +326,17 @@ made through a pull request.
 	# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
 	# be fine".
 	"Chief Maintainer" = "crosbymichael"
 	"Chief Maintainer" = "crosbymichael"
+	
+	# The community manager is responsible for serving the project community, including users, 
+	# contributors and partners. This involves:
+	#	- facilitating communication between maintainers, contributors and users
+	#	- organizing contributor and maintainer events
+	#	- helping new contributors get involved
+	#	- anything the project community needs to be successful
+	#
+	# The community manager is a point of contact for any contributor who has questions, concerns 
+	# or feedback about project operations. 
+	"Community Manager" = "theadactyl"
 
 
 	[Org."Core maintainers"]
 	[Org."Core maintainers"]
 
 
@@ -345,6 +363,7 @@ made through a pull request.
 			"icecrime",
 			"icecrime",
 			"jfrazelle",
 			"jfrazelle",
 			"lk4d4",
 			"lk4d4",
+			"runcom",
 			"tibor",
 			"tibor",
 			"unclejack",
 			"unclejack",
 			"vbatts",
 			"vbatts",
@@ -365,43 +384,43 @@ made through a pull request.
 	# 1. Exposing a clear road map for improving their subsystem.
 	# 2. Deliver prompt feedback and decisions on pull requests affecting their subsystem.
 	# 3. Be available to anyone with questions, bug reports, criticism etc.
-	#   on their component. This includes IRC, GitHub requests and the mailing
-	#   list.
+	#	on their component. This includes IRC, GitHub requests and the mailing
+	#	list.
 	# 4. Make sure their subsystem respects the philosophy, design and
-	#   road map of the project.
+	#	road map of the project.
 	#
 	# #### How to review patches to your subsystem
 	#
 	# Accepting pull requests:
 	#
-	#   - If the pull request appears to be ready to merge, give it a `LGTM`, which
-	#     stands for "Looks Good To Me".
-	#   - If the pull request has some small problems that need to be changed, make
-	#     a comment adressing the issues.
-	#   - If the changes needed to a PR are small, you can add a "LGTM once the
-	#     following comments are adressed..." this will reduce needless back and
-	#     forth.
-	#   - If the PR only needs a few changes before being merged, any MAINTAINER can
-	#     make a replacement PR that incorporates the existing commits and fixes the
-	#     problems before a fast track merge.
+	#	- If the pull request appears to be ready to merge, give it a `LGTM`, which
+	#	  stands for "Looks Good To Me".
+	#	- If the pull request has some small problems that need to be changed, make
+	#	  a comment adressing the issues.
+	#	- If the changes needed to a PR are small, you can add a "LGTM once the
+	#	  following comments are adressed..." this will reduce needless back and
+	#	  forth.
+	#	- If the PR only needs a few changes before being merged, any MAINTAINER can
+	#	  make a replacement PR that incorporates the existing commits and fixes the
+	#	  problems before a fast track merge.
 	#
 	# Closing pull requests:
 	#
-	#   - If a PR appears to be abandoned, after having attempted to contact the
-	#     original contributor, then a replacement PR may be made.  Once the
-	#     replacement PR is made, any contributor may close the original one.
-	#   - If you are not sure if the pull request implements a good feature or you
-	#     do not understand the purpose of the PR, ask the contributor to provide
-	#     more documentation.  If the contributor is not able to adequately explain
-	#     the purpose of the PR, the PR may be closed by any MAINTAINER.
-	#   - If a MAINTAINER feels that the pull request is sufficiently architecturally
-	#     flawed, or if the pull request needs significantly more design discussion
-	#     before being considered, the MAINTAINER should close the pull request with
-	#     a short explanation of what discussion still needs to be had.  It is
-	#     important not to leave such pull requests open, as this will waste both the
-	#     MAINTAINER's time and the contributor's time.  It is not good to string a
-	#     contributor on for weeks or months, having them make many changes to a PR
-	#     that will eventually be rejected.
+	#	- If a PR appears to be abandoned, after having attempted to contact the
+	#	  original contributor, then a replacement PR may be made. Once the
+	#	  replacement PR is made, any contributor may close the original one.
+	#	- If you are not sure if the pull request implements a good feature or you
+	#	  do not understand the purpose of the PR, ask the contributor to provide
+	#	  more documentation.  If the contributor is not able to adequately explain
+	#	  the purpose of the PR, the PR may be closed by any MAINTAINER.
+	#	- If a MAINTAINER feels that the pull request is sufficiently architecturally
+	#	  flawed, or if the pull request needs significantly more design discussion
+	#	  before being considered, the MAINTAINER should close the pull request with
+	#	  a short explanation of what discussion still needs to be had.  It is
+	#	  important not to leave such pull requests open, as this will waste both the
+	#	  MAINTAINER's time and the contributor's time.  It is not good to string a
+	#	  contributor on for weeks or months, having them make many changes to a PR
+	#	  that will eventually be rejected.
 
 
 		[Org.Subsystems.Documentation]
 
 
@@ -527,6 +546,11 @@ made through a pull request.
 	Email = "crosbymichael@gmail.com"
 	Email = "crosbymichael@gmail.com"
 	GitHub = "crosbymichael"
 	GitHub = "crosbymichael"
 
 
+	[people.diogomonica]
+	Name = "Diogo Monica"
+	Email = "diogo@docker.com"
+	GitHub = "diogomonica"
+
 	[people.duglin]
 	Name = "Doug Davis"
 	Name = "Doug Davis"
 	Email = "dug@us.ibm.com"
 	Email = "dug@us.ibm.com"
@@ -574,7 +598,7 @@ made through a pull request.
 
 
 	[people.jfrazelle]
 	Name = "Jessie Frazelle"
 	Name = "Jessie Frazelle"
-	Email = "jess@docker.com"
+	Email = "j@docker.com"
 	GitHub = "jfrazelle"
 	GitHub = "jfrazelle"
 
 
 	[people.jlhawn]
@@ -592,6 +616,16 @@ made through a pull request.
 	Email = "mary.anthony@docker.com"
 	Email = "mary.anthony@docker.com"
 	GitHub = "moxiegirl"
 	GitHub = "moxiegirl"
 
 
+	[people.nathanmccauley]
+	Name = "Nathan McCauley"
+	Email = "nathan.mccauley@docker.com"
+	GitHub = "nathanmccauley"
+
+	[people.runcom]
+	Name = "Antonio Murdaca"
+	Email = "me@runcom.ninja"
+	GitHub = "runcom"
+
 	[people.sday]
 	Name = "Stephen Day"
 	Name = "Stephen Day"
 	Email = "stephen.day@docker.com"
 	Email = "stephen.day@docker.com"
@@ -616,6 +650,11 @@ made through a pull request.
 	Name = "Sebastiaan van Stijn"
 	Name = "Sebastiaan van Stijn"
 	Email = "github@gone.nl"
 	Email = "github@gone.nl"
 	GitHub = "thaJeztah"
 	GitHub = "thaJeztah"
+	
+	[people.theadactyl]
+	Name = "Thea Lamkin"
+	Email = "thea@docker.com"
+	GitHub = "theadactyl"
 
 
 	[people.tianon]
 	Name = "Tianon Gravi"
 	Name = "Tianon Gravi"

+ 6 - 6
Makefile

@@ -1,4 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration-cli test-docker-py validate
 
 
 # env vars passed through directly to Docker's build scripts
 # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
@@ -7,7 +7,10 @@ DOCKER_ENVS := \
 	-e BUILDFLAGS \
 	-e DOCKER_CLIENTONLY \
 	-e DOCKER_EXECDRIVER \
+	-e DOCKER_EXPERIMENTAL \
 	-e DOCKER_GRAPHDRIVER \
+	-e DOCKER_STORAGE_OPTS \
+	-e DOCKER_USERLANDPROXY \
 	-e TESTDIRS \
 	-e TESTFLAGS \
 	-e TIMEOUT
@@ -26,7 +29,7 @@ DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
 DOCSPORT := 8000
 
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 
 
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
@@ -62,14 +65,11 @@ docs-test: docs-build
 	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
 
 
 test: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-docker-py
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration-cli test-docker-py
 
 
 test-unit: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
 
 
-test-integration: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
-
 test-integration-cli: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
 
 

+ 47 - 0
README.md

@@ -207,6 +207,53 @@ or want to get more involved, the best place to start is [the project directory]
 
 
 We are always open to suggestions on process improvements, and are always looking for more maintainers.
 
 
+### Talking to other Docker users and contributors
+
+<table class="tg">
+  <col width="45%">
+  <col width="65%">
+  <tr>
+    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
+    <td>
+      <p>
+        IRC a direct line to our most knowledgeable Docker users; we have
+        both the  <code>#docker</code> and <code>#docker-dev</code> group on
+        <strong>irc.freenode.net</strong>.
+        IRC is a rich chat protocol but it can overwhelm new users. You can search
+        <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
+      </p>
+      Read our <a href="https://docs.docker.com/project/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
+    </td>
+  </tr>
+  <tr>
+    <td>Google Groups</td>
+    <td>
+      There are two groups.
+      <a href="https://groups.google.com/forum/#!forum/docker-user" target="_blank">Docker-user</a>
+      is for people using Docker containers.
+      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
+      group is for contributors and other people contributing to the Docker
+      project.
+    </td>
+  </tr>
+  <tr>
+    <td>Twitter</td>
+    <td>
+      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
+      to get updates on our products. You can also tweet us questions or just
+      share blogs or stories.
+    </td>
+  </tr>
+  <tr>
+    <td>Stack Overflow</td>
+    <td>
+      Stack Overflow has over 7000K Docker questions listed. We regularly
+      monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
+      and so do many other knowledgeable Docker users.
+    </td>
+  </tr>
+</table>
+
 ### Legal
 
 
 *Brought to you courtesy of our legal counsel. For more context,

+ 15 - 6
api/client/build.go

@@ -17,9 +17,8 @@ import (
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
-	"github.com/docker/docker/graph"
+	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/jsonmessage"
@@ -55,9 +54,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
 	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
 	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+	flCpuPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
 	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
 	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
 	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
+	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
 
 
 	cmd.Require(flag.Exact, 1)
 	cmd.ParseFlags(args, true)
@@ -190,14 +191,14 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	// windows: show error message about modified file permissions
 	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
 	if runtime.GOOS == "windows" {
-		logrus.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
 	}
 
 
 	var body io.Reader
 	// Setup an upload progress bar
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	if context != nil {
-		sf := streamformatter.NewStreamFormatter(false)
+		sf := streamformatter.NewStreamFormatter()
 		body = progressreader.New(progressreader.Config{
 			In:        context,
 			Out:       cli.out,
@@ -239,7 +240,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 			return err
 		}
 		if len(tag) > 0 {
-			if err := graph.ValidateTagName(tag); err != nil {
+			if err := tags.ValidateTagName(tag); err != nil {
 				return err
 			}
 		}
@@ -274,8 +275,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	v.Set("cpusetmems", *flCPUSetMems)
 	v.Set("cpusetmems", *flCPUSetMems)
 	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
 	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
 	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
 	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
+	v.Set("cpuperiod", strconv.FormatInt(*flCpuPeriod, 10))
 	v.Set("memory", strconv.FormatInt(memory, 10))
 	v.Set("memory", strconv.FormatInt(memory, 10))
 	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
 	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
+	v.Set("cgroupparent", *flCgroupParent)
 
 
 	v.Set("dockerfile", *dockerfileName)
 	v.Set("dockerfile", *dockerfileName)
 
 
@@ -289,7 +292,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	if context != nil {
 		headers.Set("Content-Type", "application/tar")
 		headers.Set("Content-Type", "application/tar")
 	}
-	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          body,
+		out:         cli.out,
+		headers:     headers,
+	}
+	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
 	if jerr, ok := err.(*jsonmessage.JSONError); ok {
 		// If no error code is set, default to 1
 		if jerr.Code == 0 {
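Note on the streamOpts change above: this build.go hunk (and the create, events, export, import, load, and logs hunks below) switches cli.stream from positional body/output/header arguments to a single options struct. The struct definition itself is not part of the hunks shown in this commit view, so the sketch below only infers its shape from those call sites; the field types are assumptions for illustration, not the actual definition.

```go
// Sketch only: streamOpts as implied by the call sites in this diff.
// Field names come from the struct literals above; types are assumed.
package client

import "io"

type streamOpts struct {
	rawTerminal bool                // write the raw stream instead of demultiplexing stdout/stderr
	in          io.Reader           // request body, e.g. a build context or image tarball
	out         io.Writer           // destination for the response stream
	err         io.Writer           // separate error stream (used by the logs call site)
	headers     map[string][]string // extra request headers, e.g. Content-Type or X-Registry-Auth
}
```

Grouping the optional knobs in one struct keeps the call sites readable as more options (such as the separate error writer used by logs) are added.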

+ 11 - 17
api/client/cli.go

@@ -6,19 +6,18 @@ import (
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
-	"net"
 	"net/http"
 	"net/http"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
 	"reflect"
 	"reflect"
 	"strings"
 	"strings"
 	"text/template"
 	"text/template"
-	"time"
 
 
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/pkg/homedir"
 	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/utils"
 )
 
 
 // DockerCli represents the docker command line client.
@@ -63,6 +62,14 @@ var funcMap = template.FuncMap{
 	},
 }
 
 
+func (cli *DockerCli) Out() io.Writer {
+	return cli.out
+}
+
+func (cli *DockerCli) Err() io.Writer {
+	return cli.err
+}
+
 func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) {
 	camelArgs := make([]string, len(args))
 	for i, s := range args {
@@ -90,8 +97,7 @@ func (cli *DockerCli) Cmd(args ...string) error {
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
-			fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
-			os.Exit(1)
+			return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
 		}
 		return method(args[1:]...)
 	}
@@ -171,19 +177,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
 	tr := &http.Transport{
 		TLSClientConfig: tlsConfig,
 	}
-
-	// Why 32? See https://github.com/docker/docker/pull/8035.
-	timeout := 32 * time.Second
-	if proto == "unix" {
-		// No need for compression in local communications.
-		tr.DisableCompression = true
-		tr.Dial = func(_, _ string) (net.Conn, error) {
-			return net.DialTimeout(proto, addr, timeout)
-		}
-	} else {
-		tr.Proxy = http.ProxyFromEnvironment
-		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
-	}
+	utils.ConfigureTCPTransport(tr, proto, addr)
 
 
 	configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
 	if e != nil {
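The removed lines in this hunk show exactly what moved out of NewDockerCli: the 32-second dial timeout, the unix-socket special case, and the proxy setup now live behind utils.ConfigureTCPTransport. The helper's implementation is not shown in this diff; the following is a sketch of what it presumably does, reconstructed from the deleted code above.

```go
// Sketch only: assumed body of utils.ConfigureTCPTransport, based on the
// "-" lines removed from NewDockerCli in this commit.
package utils

import (
	"net"
	"net/http"
	"time"
)

func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
	// Why 32? See https://github.com/docker/docker/pull/8035.
	timeout := 32 * time.Second
	if proto == "unix" {
		// No need for compression in local communications.
		tr.DisableCompression = true
		tr.Dial = func(_, _ string) (net.Conn, error) {
			return net.DialTimeout(proto, addr, timeout)
		}
	} else {
		tr.Proxy = http.ProxyFromEnvironment
		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
	}
}
```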

+ 1 - 1
api/client/cp.go

@@ -16,7 +16,7 @@ import (
 //
 // Usage: docker cp CONTAINER:PATH HOSTDIR
 func (cli *DockerCli) CmdCp(args ...string) error {
-	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true)
+	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data as a tar file to STDOUT.", true)
 	cmd.Require(flag.Exact, 2)
 
 
 	cmd.ParseFlags(args, true)

+ 9 - 4
api/client/create.go

@@ -10,7 +10,7 @@ import (
 	"strings"
 	"strings"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/graph"
+	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
@@ -26,7 +26,7 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
 	repos, tag := parsers.ParseRepositoryTag(image)
 	// pull only the image tagged 'latest' if no tag was specified
 	if tag == "" {
-		tag = graph.DEFAULTTAG
+		tag = tags.DEFAULTTAG
 	}
 	v.Set("fromImage", repos)
 	v.Set("fromImage", repos)
 	v.Set("tag", tag)
 	v.Set("tag", tag)
@@ -47,7 +47,12 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
 	registryAuthHeader := []string{
 		base64.URLEncoding.EncodeToString(buf),
 	}
-	if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         out,
+		headers:     map[string][]string{"X-Registry-Auth": registryAuthHeader},
+	}
+	if err := cli.stream("POST", "/images/create?"+v.Encode(), sopts); err != nil {
 		return err
 	}
 	return nil
@@ -95,7 +100,7 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
 	if statusCode == 404 && strings.Contains(err.Error(), config.Image) {
 		repo, tag := parsers.ParseRepositoryTag(config.Image)
 		if tag == "" {
-			tag = graph.DEFAULTTAG
+			tag = tags.DEFAULTTAG
 		}
 		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag))
 
 

+ 7 - 17
api/client/events.go

@@ -2,8 +2,6 @@ package client
 
 
 import (
 	"net/url"
 	"net/url"
-	"strconv"
-	"time"
 
 
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
@@ -26,7 +24,6 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 
 
 	var (
 		v               = url.Values{}
-		loc             = time.FixedZone(time.Now().Zone())
 		eventFilterArgs = filters.Args{}
 	)
 
 
@@ -39,22 +36,11 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 			return err
 		}
 	}
-	var setTime = func(key, value string) {
-		format := timeutils.RFC3339NanoFixed
-		if len(value) < len(format) {
-			format = format[:len(value)]
-		}
-		if t, err := time.ParseInLocation(format, value, loc); err == nil {
-			v.Set(key, strconv.FormatInt(t.Unix(), 10))
-		} else {
-			v.Set(key, value)
-		}
-	}
 	if *since != "" {
-		setTime("since", *since)
+		v.Set("since", timeutils.GetTimestamp(*since))
 	}
 	if *until != "" {
-		setTime("until", *until)
+		v.Set("until", timeutils.GetTimestamp(*until))
 	}
 	if len(eventFilterArgs) > 0 {
 		filterJSON, err := filters.ToParam(eventFilterArgs)
@@ -63,7 +49,11 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 		}
 		v.Set("filters", filterJSON)
 		v.Set("filters", filterJSON)
 	}
-	if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         cli.out,
+	}
+	if err := cli.stream("GET", "/events?"+v.Encode(), sopts); err != nil {
 		return err
 	}
 	return nil
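CmdEvents (and CmdLogs below) now delegate timestamp handling to timeutils.GetTimestamp instead of the removed inline setTime helper. GetTimestamp's implementation is not part of this diff; the sketch below simply reproduces the removed behavior as a standalone function, assuming the helper works the same way: accept a full or truncated RFC3339Nano value, convert it to Unix seconds when it parses, and otherwise pass the string through unchanged (for example, when it is already a Unix timestamp).

```go
// Sketch only: standalone reconstruction of the removed setTime logic,
// assumed to match what pkg/timeutils.GetTimestamp centralizes.
package timeutils

import (
	"strconv"
	"time"
)

// RFC3339NanoFixed is an RFC3339Nano layout with fixed-width fractional seconds.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

func GetTimestamp(value string) string {
	loc := time.FixedZone(time.Now().Zone())
	format := RFC3339NanoFixed
	// Allow prefixes of the layout, e.g. "2015-05-13" or "2015-05-13T10:00".
	if len(value) < len(format) {
		format = format[:len(value)]
	}
	if t, err := time.ParseInLocation(format, value, loc); err == nil {
		return strconv.FormatInt(t.Unix(), 10)
	}
	// Not a parseable date: pass the value through as-is.
	return value
}
```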

+ 2 - 2
api/client/exec.go

@@ -71,7 +71,7 @@ func (cli *DockerCli) CmdExec(args ...string) error {
 	defer func() {
 		logrus.Debugf("End of CmdExec(), Waiting for hijack to finish.")
 		if _, ok := <-hijacked; ok {
-			logrus.Errorf("Hijack did not finish (chan still open)")
+			fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
 		}
 	}()
 
 
@@ -109,7 +109,7 @@ func (cli *DockerCli) CmdExec(args ...string) error {
 
 
 	if execConfig.Tty && cli.isTerminalIn {
 		if err := cli.monitorTtySize(execID, true); err != nil {
-			logrus.Errorf("Error monitoring TTY size: %s", err)
+			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
 		}
 	}
 
 

+ 5 - 1
api/client/export.go

@@ -34,7 +34,11 @@ func (cli *DockerCli) CmdExport(args ...string) error {
 	}
 
 
 	image := cmd.Arg(0)
-	if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil {
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         output,
+	}
+	if err := cli.stream("GET", "/containers/"+image+"/export", sopts); err != nil {
 		return err
 	}
 
 

+ 3 - 6
api/client/help.go

@@ -2,7 +2,6 @@ package client
 
 
 import (
 	"fmt"
 	"fmt"
-	"os"
 
 
 	flag "github.com/docker/docker/pkg/mflag"
 )
@@ -23,12 +22,10 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
-			fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
-			os.Exit(1)
-		} else {
-			method("--help")
-			return nil
+			return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
 		}
+		method("--help")
+		return nil
 	}
 
 
 	flag.Usage()

+ 7 - 1
api/client/import.go

@@ -54,5 +54,11 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		in = cli.in
 	}
 
 
-	return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          in,
+		out:         cli.out,
+	}
+
+	return cli.stream("POST", "/images/create?"+v.Encode(), sopts)
 }

+ 5 - 3
api/client/info.go

@@ -3,7 +3,6 @@ package client
 import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
-	"os"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	flag "github.com/docker/docker/pkg/mflag"
@@ -45,9 +44,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 	fmt.Fprintf(cli.out, "Name: %s\n", info.Name)
 	fmt.Fprintf(cli.out, "Name: %s\n", info.Name)
 	fmt.Fprintf(cli.out, "ID: %s\n", info.ID)
 	fmt.Fprintf(cli.out, "ID: %s\n", info.ID)
 
 
-	if info.Debug || os.Getenv("DEBUG") != "" {
+	if info.Debug {
 		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug)
 		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug)
-		fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
 		fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd)
 		fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd)
 		fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines)
 		fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines)
 		fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime)
 		fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime)
@@ -90,5 +88,9 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 		}
 	}
 
 
+	if info.ExperimentalBuild {
+		fmt.Fprintf(cli.out, "Experimental: true\n")
+	}
+
 	return nil
 }

+ 18 - 6
api/client/inspect.go

@@ -15,7 +15,6 @@ import (
 // CmdInspect displays low-level information on one or more containers or images.
 //
 // Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
-
 func (cli *DockerCli) CmdInspect(args ...string) error {
 	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true)
 	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
@@ -27,7 +26,6 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 	if *tmplStr != "" {
 		var err error
 		if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
-			fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
 			return StatusError{StatusCode: 64,
 				Status: "Template parsing error: " + err.Error()}
 				Status: "Template parsing error: " + err.Error()}
 		}
@@ -61,7 +59,8 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 				continue
 			}
 		} else {
-			dec := json.NewDecoder(bytes.NewReader(obj))
+			rdr := bytes.NewReader(obj)
+			dec := json.NewDecoder(rdr)
 
 
 			if isImage {
 				inspPtr := types.ImageInspect{}
@@ -71,7 +70,14 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 					continue
 				}
 				if err := tmpl.Execute(cli.out, inspPtr); err != nil {
-					return err
+					rdr.Seek(0, 0)
+					var raw interface{}
+					if err := dec.Decode(&raw); err != nil {
+						return err
+					}
+					if err = tmpl.Execute(cli.out, raw); err != nil {
+						return err
+					}
 				}
 			} else {
 				inspPtr := types.ContainerJSON{}
@@ -81,8 +87,14 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 					continue
 				}
 				if err := tmpl.Execute(cli.out, inspPtr); err != nil {
-					return err
-
+					rdr.Seek(0, 0)
+					var raw interface{}
+					if err := dec.Decode(&raw); err != nil {
+						return err
+					}
+					if err = tmpl.Execute(cli.out, raw); err != nil {
+						return err
+					}
 				}
 			}
 			cli.out.Write([]byte{'\n'})
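The inspect hunks above add a fallback: when the template fails against the typed struct (for example, it references a field the struct does not expose), the raw JSON is rewound and the template is retried against an untyped decode. A minimal, self-contained illustration of that pattern follows; the JSON payload and field names are hypothetical, not Docker's types.

```go
// Sketch of the typed-then-raw template fallback used in CmdInspect above.
package main

import (
	"bytes"
	"encoding/json"
	"os"
	"text/template"
)

func main() {
	obj := []byte(`{"Id":"abc123","ExtraField":"only-in-raw-json"}`)
	type container struct{ Id string } // typed view; deliberately missing ExtraField
	tmpl := template.Must(template.New("").Parse("{{.ExtraField}}\n"))

	rdr := bytes.NewReader(obj)
	var typed container
	if err := json.NewDecoder(rdr).Decode(&typed); err != nil {
		panic(err)
	}
	if err := tmpl.Execute(os.Stdout, typed); err != nil {
		// Typed execution failed; seek back and decode generically, as the new code does.
		rdr.Seek(0, 0)
		var raw interface{}
		if err := json.NewDecoder(rdr).Decode(&raw); err != nil {
			panic(err)
		}
		if err := tmpl.Execute(os.Stdout, raw); err != nil {
			panic(err)
		}
	}
}
```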

+ 6 - 3
api/client/kill.go

@@ -16,14 +16,17 @@ func (cli *DockerCli) CmdKill(args ...string) error {
 
 
 	cmd.ParseFlags(args, true)
 
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to kill containers: %v", errNames)
+	}
+	return nil
 }
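The kill, pause, restart, rm, and rmi hunks in this commit all replace a single catch-all error with a list of the names that failed, reported once at the end while still printing each per-name failure as it happens. A standalone sketch of the resulting behavior (hypothetical helper and names, no Docker API calls):

```go
// Minimal illustration of the errNames aggregation pattern adopted above.
package main

import (
	"errors"
	"fmt"
)

func killAll(names []string, kill func(string) error) error {
	var errNames []string
	for _, name := range names {
		if err := kill(name); err != nil {
			fmt.Println("error:", err) // per-name failure, printed immediately
			errNames = append(errNames, name)
			continue
		}
		fmt.Println(name) // success echoes the name, as the CLI does
	}
	if len(errNames) > 0 {
		return fmt.Errorf("Error: failed to kill containers: %v", errNames)
	}
	return nil
}

func main() {
	kill := func(name string) error {
		if name == "broken" {
			return errors.New("no such container: broken")
		}
		return nil
	}
	// Prints "web", the per-container error, then the aggregated error naming "broken".
	fmt.Println(killAll([]string{"web", "broken"}, kill))
}
```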

+ 6 - 1
api/client/load.go

@@ -29,7 +29,12 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
 			return err
 		}
 	}
-	if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          input,
+		out:         cli.out,
+	}
+	if err := cli.stream("POST", "/images/load", sopts); err != nil {
 		return err
 	}
 	return nil

+ 15 - 3
api/client/logs.go

@@ -7,6 +7,7 @@ import (
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/timeutils"
 )
 
 
 // CmdLogs fetches the logs of a given container.
@@ -16,6 +17,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 	var (
 		cmd    = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
 		follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
+		since  = cmd.String([]string{"-since"}, "", "Show logs since timestamp")
 		times  = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
 		tail   = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
 	)
@@ -35,14 +37,18 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		return err
 	}
 
 
-	if c.HostConfig.LogConfig.Type != "json-file" {
-		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver")
+	if logType := c.HostConfig.LogConfig.Type; logType != "json-file" {
+		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType)
 	}
 
 
 	v := url.Values{}
 	v.Set("stdout", "1")
 	v.Set("stdout", "1")
 	v.Set("stderr", "1")
 	v.Set("stderr", "1")
 
 
+	if *since != "" {
+		v.Set("since", timeutils.GetTimestamp(*since))
+	}
+
 	if *times {
 		v.Set("timestamps", "1")
 		v.Set("timestamps", "1")
 	}
@@ -52,5 +58,11 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 	}
 	v.Set("tail", *tail)
 	v.Set("tail", *tail)
 
 
-	return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), c.Config.Tty, nil, cli.out, cli.err, nil)
+	sopts := &streamOpts{
+		rawTerminal: c.Config.Tty,
+		out:         cli.out,
+		err:         cli.err,
+	}
+
+	return cli.stream("GET", "/containers/"+name+"/logs?"+v.Encode(), sopts)
 }

+ 6 - 3
api/client/pause.go

@@ -14,14 +14,17 @@ func (cli *DockerCli) CmdPause(args ...string) error {
 	cmd.Require(flag.Min, 1)
 	cmd.ParseFlags(args, false)
 
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to pause container named %s", name)
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to pause containers: %v", errNames)
+	}
+	return nil
 }

+ 2 - 2
api/client/pull.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"fmt"
 	"net/url"
 	"net/url"
 
 
-	"github.com/docker/docker/graph"
+	"github.com/docker/docker/graph/tags"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
@@ -28,7 +28,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 	)
 	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
 	if tag == "" && !*allTags {
-		newRemote = utils.ImageReference(taglessRemote, graph.DEFAULTTAG)
+		newRemote = utils.ImageReference(taglessRemote, tags.DEFAULTTAG)
 	}
 	if tag != "" && *allTags {
 		return fmt.Errorf("tag can't be used with --all-tags/-a")

+ 6 - 3
api/client/restart.go

@@ -21,15 +21,18 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
 	v := url.Values{}
 	v.Set("t", strconv.Itoa(*nSeconds))
 	v.Set("t", strconv.Itoa(*nSeconds))
 
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil))
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to restart containers: %v", errNames)
+	}
+	return nil
 }

+ 8 - 3
api/client/rm.go

@@ -3,6 +3,7 @@ package client
 import (
 	"fmt"
 	"fmt"
 	"net/url"
 	"net/url"
+	"strings"
 
 
 	flag "github.com/docker/docker/pkg/mflag"
 )
@@ -31,19 +32,23 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 		val.Set("force", "1")
 		val.Set("force", "1")
 	}
 
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		if name == "" {
 			return fmt.Errorf("Container name cannot be empty")
 		}
+		name = strings.Trim(name, "/")
 
 
 		_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil))
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to remove containers: %v", errNames)
+	}
+	return nil
 }

+ 7 - 4
api/client/rmi.go

@@ -29,17 +29,17 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 		v.Set("noprune", "1")
 		v.Set("noprune", "1")
 	}
 
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		rdr, _, err := cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to remove one or more images")
+			errNames = append(errNames, name)
 		} else {
 			dels := []types.ImageDelete{}
 			if err := json.NewDecoder(rdr).Decode(&dels); err != nil {
 				fmt.Fprintf(cli.err, "%s\n", err)
-				encounteredError = fmt.Errorf("Error: failed to remove one or more images")
+				errNames = append(errNames, name)
 				continue
 			}
 
 
@@ -52,5 +52,8 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 			}
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to remove images: %v", errNames)
+	}
+	return nil
 }

+ 6 - 7
api/client/run.go

@@ -9,9 +9,9 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/promise"
-	"github.com/docker/docker/pkg/resolvconf"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/libnetwork/resolvconf/dns"
 )
 )
 
 
 func (cid *cidFile) Close() error {
 func (cid *cidFile) Close() error {
@@ -38,7 +38,6 @@ func (cid *cidFile) Write(id string) error {
 //
 //
 // Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
 // Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
 func (cli *DockerCli) CmdRun(args ...string) error {
 func (cli *DockerCli) CmdRun(args ...string) error {
-	// FIXME: just use runconfig.Parse already
 	cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true)
 	cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true)
 
 
 	// These are flags not stored in Config/HostConfig
 	// These are flags not stored in Config/HostConfig
@@ -65,7 +64,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		// localhost regexp to warn if they are trying to
 		// localhost regexp to warn if they are trying to
 		// set a DNS to a localhost address
 		// set a DNS to a localhost address
 		for _, dnsIP := range hostConfig.Dns {
 		for _, dnsIP := range hostConfig.Dns {
-			if resolvconf.IsLocalhost(dnsIP) {
+			if dns.IsLocalhost(dnsIP) {
 				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
 				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
 				break
 				break
 			}
 			}
@@ -123,7 +122,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
 			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
 		}()
 		}()
 	}
 	}
-	if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
+	if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
 		return ErrConflictRestartPolicyAndAutoRemove
 		return ErrConflictRestartPolicyAndAutoRemove
 	}
 	}
 	// We need to instantiate the chan because the select needs it. It can
 	// We need to instantiate the chan because the select needs it. It can
@@ -133,7 +132,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	defer func() {
 	defer func() {
 		logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.")
 		logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.")
 		if _, ok := <-hijacked; ok {
 		if _, ok := <-hijacked; ok {
-			logrus.Errorf("Hijack did not finish (chan still open)")
+			fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
 		}
 		}
 	}()
 	}()
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
@@ -183,7 +182,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	defer func() {
 	defer func() {
 		if *flAutoRemove {
 		if *flAutoRemove {
 			if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
 			if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
-				logrus.Errorf("Error deleting container: %s", err)
+				fmt.Fprintf(cli.err, "Error deleting container: %s\n", err)
 			}
 			}
 		}
 		}
 	}()
 	}()
@@ -195,7 +194,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 
 
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
 		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
 		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
-			logrus.Errorf("Error monitoring TTY size: %s", err)
+			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
 		}
 		}
 	}
 	}
 
 

+ 7 - 2
api/client/save.go

@@ -34,9 +34,14 @@ func (cli *DockerCli) CmdSave(args ...string) error {
 		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
 		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
 	}
 	}
 
 
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         output,
+	}
+
 	if len(cmd.Args()) == 1 {
 	if len(cmd.Args()) == 1 {
 		image := cmd.Arg(0)
 		image := cmd.Arg(0)
-		if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
+		if err := cli.stream("GET", "/images/"+image+"/get", sopts); err != nil {
 			return err
 			return err
 		}
 		}
 	} else {
 	} else {
@@ -44,7 +49,7 @@ func (cli *DockerCli) CmdSave(args ...string) error {
 		for _, arg := range cmd.Args() {
 		for _, arg := range cmd.Args() {
 			v.Add("names", arg)
 			v.Add("names", arg)
 		}
 		}
-		if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil {
+		if err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}

+ 15 - 10
api/client/start.go

@@ -1,13 +1,14 @@
 package client
 
 import (
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/url"
 	"os"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/api/types"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/signal"
@@ -29,7 +30,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 				}
 			}
 			if sig == "" {
-				logrus.Errorf("Unsupported signal: %v. Discarding.", s)
+				fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s)
 			}
 			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil {
 				logrus.Debugf("Error sending signal: %s", err)
@@ -65,12 +66,12 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 			return err
 		}
 
-		env := engine.Env{}
-		if err := env.Decode(stream); err != nil {
+		var c types.ContainerJSON
+		if err := json.NewDecoder(stream).Decode(&c); err != nil {
 			return err
 		}
-		config := env.GetSubEnv("Config")
-		tty = config.GetBool("Tty")
+
+		tty = c.Config.Tty
 
 		if !tty {
 			sigc := cli.forwardAllSignals(cmd.Arg(0))
@@ -82,7 +83,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		v := url.Values{}
 		v.Set("stream", "1")
 
-		if *openStdin && config.GetBool("OpenStdin") {
+		if *openStdin && c.Config.OpenStdin {
 			v.Set("stdin", "1")
 			in = cli.in
 		}
@@ -95,7 +96,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		defer func() {
 			logrus.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
 			if _, ok := <-hijacked; ok {
-				logrus.Errorf("Hijack did not finish (chan still open)")
+				fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
 			}
 			cli.in.Close()
 		}()
@@ -119,6 +120,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 	}
 
 	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil))
 		if err != nil {
@@ -126,7 +128,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 				// attach and openStdin is false means it could be starting multiple containers
 				// when a container start failed, show the error message and start next
 				fmt.Fprintf(cli.err, "%s\n", err)
-				encounteredError = fmt.Errorf("Error: failed to start one or more containers")
+				errNames = append(errNames, name)
 			} else {
 				encounteredError = err
 			}
@@ -137,6 +139,9 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		}
 	}
 
+	if len(errNames) > 0 {
+		encounteredError = fmt.Errorf("Error: failed to start containers: %v", errNames)
+	}
 	if encounteredError != nil {
 		return encounteredError
 	}
@@ -144,7 +149,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 	if *openStdin || *attach {
 		if tty && cli.isTerminalOut {
 			if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
-				logrus.Errorf("Error monitoring TTY size: %s", err)
+				fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
 			}
 		}
 		if attchErr := <-cErr; attchErr != nil {

+ 27 - 6
api/client/stats.go

@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
+	"net/url"
 	"sort"
 	"sort"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
@@ -27,10 +28,18 @@ type containerStats struct {
 	err              error
 	err              error
 }
 }
 
 
-func (s *containerStats) Collect(cli *DockerCli) {
-	stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats", nil, nil)
+func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
+	v := url.Values{}
+	if streamStats {
+		v.Set("stream", "1")
+	} else {
+		v.Set("stream", "0")
+	}
+	stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats?"+v.Encode(), nil, nil)
 	if err != nil {
 	if err != nil {
+		s.mu.Lock()
 		s.err = err
 		s.err = err
+		s.mu.Unlock()
 		return
 		return
 	}
 	}
 	defer stream.Close()
 	defer stream.Close()
@@ -67,6 +76,9 @@ func (s *containerStats) Collect(cli *DockerCli) {
 			previousCPU = v.CpuStats.CpuUsage.TotalUsage
 			previousCPU = v.CpuStats.CpuUsage.TotalUsage
 			previousSystem = v.CpuStats.SystemUsage
 			previousSystem = v.CpuStats.SystemUsage
 			u <- nil
 			u <- nil
+			if !streamStats {
+				return
+			}
 		}
 		}
 	}()
 	}()
 	for {
 	for {
@@ -87,6 +99,9 @@ func (s *containerStats) Collect(cli *DockerCli) {
 				return
 				return
 			}
 			}
 		}
 		}
+		if !streamStats {
+			return
+		}
 	}
 	}
 }
 }
 
 
@@ -112,6 +127,7 @@ func (s *containerStats) Display(w io.Writer) error {
 // Usage: docker stats CONTAINER [CONTAINER...]
 // Usage: docker stats CONTAINER [CONTAINER...]
 func (cli *DockerCli) CmdStats(args ...string) error {
 func (cli *DockerCli) CmdStats(args ...string) error {
 	cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true)
 	cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true)
+	noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 	cmd.ParseFlags(args, true)
 	cmd.ParseFlags(args, true)
 
 
@@ -122,14 +138,16 @@ func (cli *DockerCli) CmdStats(args ...string) error {
 		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 	)
 	)
 	printHeader := func() {
 	printHeader := func() {
-		io.WriteString(cli.out, "\033[2J")
-		io.WriteString(cli.out, "\033[H")
+		if !*noStream {
+			fmt.Fprint(cli.out, "\033[2J")
+			fmt.Fprint(cli.out, "\033[H")
+		}
 		io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n")
 		io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n")
 	}
 	}
 	for _, n := range names {
 	for _, n := range names {
 		s := &containerStats{Name: n}
 		s := &containerStats{Name: n}
 		cStats = append(cStats, s)
 		cStats = append(cStats, s)
-		go s.Collect(cli)
+		go s.Collect(cli, !*noStream)
 	}
 	}
 	// do a quick pause so that any failed connections for containers that do not exist are able to be
 	// do a quick pause so that any failed connections for containers that do not exist are able to be
 	// evicted before we display the initial or default values.
 	// evicted before we display the initial or default values.
@@ -149,7 +167,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
 		printHeader()
 		printHeader()
 		toRemove := []int{}
 		toRemove := []int{}
 		for i, s := range cStats {
 		for i, s := range cStats {
-			if err := s.Display(w); err != nil {
+			if err := s.Display(w); err != nil && !*noStream {
 				toRemove = append(toRemove, i)
 				toRemove = append(toRemove, i)
 			}
 			}
 		}
 		}
@@ -161,6 +179,9 @@ func (cli *DockerCli) CmdStats(args ...string) error {
 			return nil
 			return nil
 		}
 		}
 		w.Flush()
 		w.Flush()
+		if *noStream {
+			break
+		}
 	}
 	}
 	return nil
 	return nil
 }
 }
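
The new --no-stream flag is carried to the daemon as a stream=0/stream=1 query parameter on the stats endpoint, as shown above. A small standalone sketch of how that URL is built; the helper function name is illustrative, not part of the commit:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    // statsPath builds the endpoint the CLI now calls: stream=1 for the
    // default live mode, stream=0 when --no-stream asks for one sample.
    func statsPath(name string, streamStats bool) string {
    	v := url.Values{}
    	if streamStats {
    		v.Set("stream", "1")
    	} else {
    		v.Set("stream", "0")
    	}
    	return "/containers/" + name + "/stats?" + v.Encode()
    }

    func main() {
    	fmt.Println(statsPath("web", true))  // /containers/web/stats?stream=1
    	fmt.Println(statsPath("web", false)) // /containers/web/stats?stream=0
    }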

+ 6 - 3
api/client/stop.go

@@ -23,15 +23,18 @@ func (cli *DockerCli) CmdStop(args ...string) error {
 	v := url.Values{}
 	v.Set("t", strconv.Itoa(*nSeconds))
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil))
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to stop containers: %v", errNames)
+	}
+	return nil
 }

+ 6 - 3
api/client/unpause.go

@@ -14,14 +14,17 @@ func (cli *DockerCli) CmdUnpause(args ...string) error {
 	cmd.Require(flag.Min, 1)
 	cmd.ParseFlags(args, false)
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name)
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to unpause containers: %v", errNames)
+	}
+	return nil
 }

+ 13 - 20
api/client/utils.go

@@ -22,7 +22,6 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/cliconfig"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/stdcopy"
@@ -42,18 +41,8 @@ func (cli *DockerCli) HTTPClient() *http.Client {
 func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
 func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
 	params := bytes.NewBuffer(nil)
 	params := bytes.NewBuffer(nil)
 	if data != nil {
 	if data != nil {
-		if env, ok := data.(engine.Env); ok {
-			if err := env.Encode(params); err != nil {
-				return nil, err
-			}
-		} else {
-			buf, err := json.Marshal(data)
-			if err != nil {
-				return nil, err
-			}
-			if _, err := params.Write(buf); err != nil {
-				return nil, err
-			}
+		if err := json.NewEncoder(params).Encode(data); err != nil {
+			return nil, err
 		}
 		}
 	}
 	}
 	return params, nil
 	return params, nil
@@ -181,19 +170,23 @@ func (cli *DockerCli) call(method, path string, data interface{}, headers map[st
 	return body, statusCode, err
 	return body, statusCode, err
 }
 }
 
 
-func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
-	return cli.streamHelper(method, path, true, in, out, nil, headers)
+type streamOpts struct {
+	rawTerminal bool
+	in          io.Reader
+	out         io.Writer
+	err         io.Writer
+	headers     map[string][]string
 }
 }
 
 
-func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
-	body, contentType, _, err := cli.clientRequest(method, path, in, headers)
+func (cli *DockerCli) stream(method, path string, opts *streamOpts) error {
+	body, contentType, _, err := cli.clientRequest(method, path, opts.in, opts.headers)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	return cli.streamBody(body, contentType, setRawTerminal, stdout, stderr)
+	return cli.streamBody(body, contentType, opts.rawTerminal, opts.out, opts.err)
 }
 }
 
 
-func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawTerminal bool, stdout, stderr io.Writer) error {
+func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, rawTerminal bool, stdout, stderr io.Writer) error {
 	defer body.Close()
 	defer body.Close()
 
 
 	if api.MatchesContentType(contentType, "application/json") {
 	if api.MatchesContentType(contentType, "application/json") {
@@ -202,7 +195,7 @@ func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawT
 	if stdout != nil || stderr != nil {
 	if stdout != nil || stderr != nil {
 		// When TTY is ON, use regular copy
 		// When TTY is ON, use regular copy
 		var err error
 		var err error
-		if setRawTerminal {
+		if rawTerminal {
 			_, err = io.Copy(stdout, body)
 			_, err = io.Copy(stdout, body)
 		} else {
 		} else {
 			_, err = stdcopy.StdCopy(stdout, stderr, body)
 			_, err = stdcopy.StdCopy(stdout, stderr, body)
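
The positional (in, out, headers, setRawTerminal) arguments of the old streamHelper are folded into a single streamOpts struct above. A self-contained sketch of the same shape, with a stand-in stream function in place of the real HTTP client; everything here is illustrative, not the committed code:

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    	"strings"
    )

    // streamOpts bundles the options that used to be passed positionally.
    type streamOpts struct {
    	rawTerminal bool
    	in          io.Reader
    	out         io.Writer
    	err         io.Writer
    	headers     map[string][]string
    }

    // stream copies a response body to opts.out; the real client would
    // demultiplex stdout/stderr with stdcopy.StdCopy when rawTerminal is off.
    func stream(body io.Reader, opts *streamOpts) error {
    	_, err := io.Copy(opts.out, body)
    	return err
    }

    func main() {
    	opts := &streamOpts{rawTerminal: true, out: os.Stdout, err: os.Stderr}
    	if err := stream(strings.NewReader("image tarball bytes\n"), opts); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    	}
    }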

+ 14 - 18
api/client/version.go

@@ -1,13 +1,13 @@
 package client
 
 import (
+	"encoding/json"
 	"fmt"
 	"runtime"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/engine"
 	flag "github.com/docker/docker/pkg/mflag"
 )
 
@@ -32,28 +32,24 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	}
 	fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
 
-	body, _, err := readBody(cli.call("GET", "/version", nil, nil))
+	stream, _, err := cli.call("GET", "/version", nil, nil)
 	if err != nil {
 		return err
 	}
 
-	out := engine.NewOutput()
-	remoteVersion, err := out.AddEnv()
-	if err != nil {
-		logrus.Errorf("Error reading remote version: %s", err)
-		return err
-	}
-	if _, err := out.Write(body); err != nil {
-		logrus.Errorf("Error reading remote version: %s", err)
+	var v types.Version
+	if err := json.NewDecoder(stream).Decode(&v); err != nil {
+		fmt.Fprintf(cli.err, "Error reading remote version: %s\n", err)
 		return err
 	}
-	out.Close()
-	fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
-	if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
-		fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
+
+	fmt.Fprintf(cli.out, "Server version: %s\n", v.Version)
+	if v.ApiVersion != "" {
+		fmt.Fprintf(cli.out, "Server API version: %s\n", v.ApiVersion)
 	}
-	fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
-	fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
-	fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", remoteVersion.Get("Os"), remoteVersion.Get("Arch"))
+	fmt.Fprintf(cli.out, "Go version (server): %s\n", v.GoVersion)
+	fmt.Fprintf(cli.out, "Git commit (server): %s\n", v.GitCommit)
+	fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", v.Os, v.Arch)
+
 	return nil
 }
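
The /version response is now decoded with encoding/json into types.Version instead of going through engine.Env. A standalone sketch of that decode step, using a local stand-in struct and a hard-coded sample body rather than the real API types:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"strings"
    )

    // version mirrors the fields CmdVersion reads from the /version response.
    type version struct {
    	Version    string
    	ApiVersion string
    	GoVersion  string
    	GitCommit  string
    	Os         string
    	Arch       string
    }

    func main() {
    	body := `{"Version":"1.7.0","ApiVersion":"1.19","GoVersion":"go1.4.2","GitCommit":"abc1234","Os":"linux","Arch":"amd64"}`
    	var v version
    	if err := json.NewDecoder(strings.NewReader(body)).Decode(&v); err != nil {
    		panic(err)
    	}
    	fmt.Printf("Server version: %s (API %s)\n", v.Version, v.ApiVersion)
    }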

+ 6 - 3
api/client/wait.go

@@ -17,15 +17,18 @@ func (cli *DockerCli) CmdWait(args ...string) error {
 
 
 	cmd.ParseFlags(args, true)
 
-	var encounteredError error
+	var errNames []string
 	for _, name := range cmd.Args() {
 		status, err := waitForExit(cli, name)
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
+			errNames = append(errNames, name)
 		} else {
 			fmt.Fprintf(cli.out, "%d\n", status)
 		}
 	}
-	return encounteredError
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to wait containers: %v", errNames)
+	}
+	return nil
 }

+ 3 - 2
api/common.go

@@ -3,13 +3,13 @@ package api
 import (
 	"fmt"
 	"mime"
-	"os"
 	"path/filepath"
 	"sort"
 	"strings"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/version"
 	"github.com/docker/libtrust"
 )
@@ -107,7 +107,8 @@ func MatchesContentType(contentType, expectedType string) bool {
 // LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
 // otherwise generates a new one
 func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
-	if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+	err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
+	if err != nil {
 		return nil, err
 	}
 	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)

+ 1 - 1
api/server/form.go

@@ -11,7 +11,7 @@ func boolValue(r *http.Request, k string) bool {
 	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
 }
 
-func int64Value(r *http.Request, k string) int64 {
+func int64ValueOrZero(r *http.Request, k string) int64 {
 	val, err := strconv.ParseInt(r.FormValue(k), 10, 64)
 	if err != nil {
 		return 0

+ 2 - 2
api/server/form_test.go

@@ -33,7 +33,7 @@ func TestBoolValue(t *testing.T) {
 	}
 }
 
-func TestInt64Value(t *testing.T) {
+func TestInt64ValueOrZero(t *testing.T) {
 	cases := map[string]int64{
 		"":     0,
 		"asdf": 0,
@@ -47,7 +47,7 @@
 		r, _ := http.NewRequest("POST", "", nil)
 		r.Form = v
 
-		a := int64Value(r, "test")
+		a := int64ValueOrZero(r, "test")
 		if a != e {
 			t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
 		}

File diff suppressed because it is too large
+ 151 - 279
api/server/server.go


+ 4 - 9
api/server/server_linux.go

@@ -7,8 +7,8 @@ import (
 	"net"
 	"net"
 	"net/http"
 	"net/http"
 
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/sockets"
 	"github.com/docker/docker/pkg/systemd"
 	"github.com/docker/docker/pkg/systemd"
 )
 )
 
 
@@ -45,17 +45,12 @@ func (s *Server) newServer(proto, addr string) (serverCloser, error) {
 		}
 		}
 		return nil, nil
 		return nil, nil
 	case "tcp":
 	case "tcp":
-		if !s.cfg.TlsVerify {
-			logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
-		}
-		if l, err = NewTcpSocket(addr, tlsConfigFromServerConfig(s.cfg), s.start); err != nil {
-			return nil, err
-		}
-		if err := allocateDaemonPort(addr); err != nil {
+		l, err = s.initTcpSocket(addr)
+		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 	case "unix":
 	case "unix":
-		if l, err = NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil {
+		if l, err = sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 	default:
 	default:

+ 2 - 8
api/server/server_windows.go

@@ -7,7 +7,6 @@ import (
 	"net"
 	"net"
 	"net/http"
 	"net/http"
 
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon"
 )
 )
 
 
@@ -19,13 +18,8 @@ func (s *Server) newServer(proto, addr string) (Server, error) {
 	)
 	)
 	switch proto {
 	switch proto {
 	case "tcp":
 	case "tcp":
-		if !s.cfg.TlsVerify {
-			logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
-		}
-		if l, err = NewTcpSocket(addr, tlsConfigFromServerConfig(s.cfg)); err != nil {
-			return nil, err
-		}
-		if err := allocateDaemonPort(addr); err != nil {
+		l, err = s.initTcpSocket(addr)
+		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 	default:
 	default:

+ 3 - 9
api/types/types.go

@@ -92,15 +92,6 @@ type ImageInspect struct {
 	VirtualSize     int64
 }
 
-type LegacyImage struct {
-	ID          string `json:"Id"`
-	Repository  string
-	Tag         string
-	Created     int
-	Size        int
-	VirtualSize int
-}
-
 // GET  "/containers/json"
 type Port struct {
 	IP          string
@@ -152,10 +143,12 @@ type Info struct {
 	DriverStatus       [][2]string
 	MemoryLimit        bool
 	SwapLimit          bool
+	CpuCfsPeriod       bool
 	CpuCfsQuota        bool
 	IPv4Forwarding     bool
 	Debug              bool
 	NFd                int
+	OomKillDisable     bool
 	NGoroutines        int
 	SystemTime         string
 	ExecutionDriver    string
@@ -175,6 +168,7 @@ type Info struct {
 	NoProxy            string
 	Name               string
 	Labels             []string
+	ExperimentalBuild  bool
 }
 
 // This struct is a temp struct used by execStart

+ 155 - 0
builder/bflag.go

@@ -0,0 +1,155 @@
+package builder
+
+import (
+	"fmt"
+	"strings"
+)
+
+type FlagType int
+
+const (
+	boolType FlagType = iota
+	stringType
+)
+
+type BuilderFlags struct {
+	Args  []string // actual flags/args from cmd line
+	flags map[string]*Flag
+	used  map[string]*Flag
+	Err   error
+}
+
+type Flag struct {
+	bf       *BuilderFlags
+	name     string
+	flagType FlagType
+	Value    string
+}
+
+func NewBuilderFlags() *BuilderFlags {
+	return &BuilderFlags{
+		flags: make(map[string]*Flag),
+		used:  make(map[string]*Flag),
+	}
+}
+
+func (bf *BuilderFlags) AddBool(name string, def bool) *Flag {
+	flag := bf.addFlag(name, boolType)
+	if flag == nil {
+		return nil
+	}
+	if def {
+		flag.Value = "true"
+	} else {
+		flag.Value = "false"
+	}
+	return flag
+}
+
+func (bf *BuilderFlags) AddString(name string, def string) *Flag {
+	flag := bf.addFlag(name, stringType)
+	if flag == nil {
+		return nil
+	}
+	flag.Value = def
+	return flag
+}
+
+func (bf *BuilderFlags) addFlag(name string, flagType FlagType) *Flag {
+	if _, ok := bf.flags[name]; ok {
+		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
+		return nil
+	}
+
+	newFlag := &Flag{
+		bf:       bf,
+		name:     name,
+		flagType: flagType,
+	}
+	bf.flags[name] = newFlag
+
+	return newFlag
+}
+
+func (fl *Flag) IsUsed() bool {
+	if _, ok := fl.bf.used[fl.name]; ok {
+		return true
+	}
+	return false
+}
+
+func (fl *Flag) IsTrue() bool {
+	if fl.flagType != boolType {
+		// Should never get here
+		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
+	}
+	return fl.Value == "true"
+}
+
+func (bf *BuilderFlags) Parse() error {
+	// If there was an error while defining the possible flags
+	// go ahead and bubble it back up here since we didn't do it
+	// earlier in the processing
+	if bf.Err != nil {
+		return fmt.Errorf("Error setting up flags: %s", bf.Err)
+	}
+
+	for _, arg := range bf.Args {
+		if !strings.HasPrefix(arg, "--") {
+			return fmt.Errorf("Arg should start with -- : %s", arg)
+		}
+
+		if arg == "--" {
+			return nil
+		}
+
+		arg = arg[2:]
+		value := ""
+
+		index := strings.Index(arg, "=")
+		if index >= 0 {
+			value = arg[index+1:]
+			arg = arg[:index]
+		}
+
+		flag, ok := bf.flags[arg]
+		if !ok {
+			return fmt.Errorf("Unknown flag: %s", arg)
+		}
+
+		if _, ok = bf.used[arg]; ok {
+			return fmt.Errorf("Duplicate flag specified: %s", arg)
+		}
+
+		bf.used[arg] = flag
+
+		switch flag.flagType {
+		case boolType:
+			// value == "" is only ok if no "=" was specified
+			if index >= 0 && value == "" {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+
+			lower := strings.ToLower(value)
+			if lower == "" {
+				flag.Value = "true"
+			} else if lower == "true" || lower == "false" {
+				flag.Value = lower
+			} else {
+				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
+			}
+
+		case stringType:
+			if index < 0 {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+			flag.Value = value
+
+		default:
+			panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!"))
+		}
+
+	}
+
+	return nil
+}
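
bflag.go above only adds the flag plumbing; this commit does not yet wire any Dockerfile instruction up to a real flag (see the commented-out example in builder/dispatchers.go below). A hypothetical dispatcher-side usage sketch in the same package; the --chown and --force flag names are made up for illustration:

    package builder

    import "fmt"

    // exampleCopyFlags shows how a dispatcher could consume BuilderFlags
    // once an instruction actually defines flags (hypothetical).
    func exampleCopyFlags(args []string) error {
    	bf := NewBuilderFlags()
    	flChown := bf.AddString("chown", "")
    	flForce := bf.AddBool("force", false)
    	bf.Args = args // e.g. []string{"--chown=0:0", "--force"}
    	if err := bf.Parse(); err != nil {
    		return err
    	}
    	if flForce.IsTrue() {
    		fmt.Println("force requested")
    	}
    	fmt.Println("chown value:", flChown.Value)
    	return nil
    }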

+ 187 - 0
builder/bflag_test.go

@@ -0,0 +1,187 @@
+package builder
+
+import (
+	"testing"
+)
+
+func TestBuilderFlags(t *testing.T) {
+	var expected string
+	var err error
+
+	// ---
+
+	bf := NewBuilderFlags()
+	bf.Args = []string{}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	bf.Args = []string{"--"}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 := bf.AddString("str1", "")
+	flBool1 := bf.AddBool("bool1", false)
+	bf.Args = []string{}
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.IsUsed() == true {
+		t.Fatalf("Test3 - str1 was not used!")
+	}
+	if flBool1.IsUsed() == true {
+		t.Fatalf("Test3 - bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "HI" {
+		t.Fatalf("Str1 was supposed to default to: HI")
+	}
+	if flBool1.IsTrue() {
+		t.Fatalf("Bool1 was supposed to default to: false")
+	}
+	if flStr1.IsUsed() == true {
+		t.Fatalf("Str1 was not used!")
+	}
+	if flBool1.IsUsed() == true {
+		t.Fatalf("Bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1="}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = ""
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = "BYE"
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test-b1 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=true"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test-b2 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flBool1.IsTrue() {
+		t.Fatalf("Test-b3 Bool1 was supposed to be false")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool2"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1", "--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "BYE" {
+		t.Fatalf("Teset %s, str1 should be BYE", bf.Args)
+	}
+	if !flBool1.IsTrue() {
+		t.Fatalf("Teset %s, bool1 should be true", bf.Args)
+	}
+}

+ 0 - 2
builder/command/command.go

@@ -16,7 +16,6 @@ const (
 	Expose     = "expose"
 	Volume     = "volume"
 	User       = "user"
-	Insert     = "insert"
 )
 
 // Commands is list of all Dockerfile commands
@@ -35,5 +34,4 @@ var Commands = map[string]struct{}{
 	Expose:     {},
 	Volume:     {},
 	User:       {},
-	Insert:     {},
 }

+ 104 - 13
builder/dispatchers.go

@@ -12,6 +12,7 @@ import (
 	"io/ioutil"
 	"io/ioutil"
 	"path/filepath"
 	"path/filepath"
 	"regexp"
 	"regexp"
+	"runtime"
 	"sort"
 	"sort"
 	"strings"
 	"strings"
 
 
@@ -38,6 +39,9 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
 // in the dockerfile available from the next statement on via ${foo}.
 // in the dockerfile available from the next statement on via ${foo}.
 //
 //
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("ENV is not supported on Windows.")
+	}
 	if len(args) == 0 {
 	if len(args) == 0 {
 		return fmt.Errorf("ENV requires at least one argument")
 		return fmt.Errorf("ENV requires at least one argument")
 	}
 	}
@@ -47,6 +51,26 @@ func env(b *Builder, args []string, attributes map[string]bool, original string)
 		return fmt.Errorf("Bad input to ENV, too many args")
 		return fmt.Errorf("Bad input to ENV, too many args")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
+	// TODO/FIXME/NOT USED
+	// Just here to show how to use the builder flags stuff within the
+	// context of a builder command. Will remove once we actually add
+	// a builder command to something!
+	/*
+		flBool1 := b.BuilderFlags.AddBool("bool1", false)
+		flStr1 := b.BuilderFlags.AddString("str1", "HI")
+
+		if err := b.BuilderFlags.Parse(); err != nil {
+			return err
+		}
+
+		fmt.Printf("Bool1:%v\n", flBool1)
+		fmt.Printf("Str1:%v\n", flStr1)
+	*/
+
 	commitStr := "ENV"
 	commitStr := "ENV"
 
 
 	for j := 0; j < len(args); j++ {
 	for j := 0; j < len(args); j++ {
@@ -81,6 +105,10 @@ func maintainer(b *Builder, args []string, attributes map[string]bool, original
 		return fmt.Errorf("MAINTAINER requires exactly one argument")
 		return fmt.Errorf("MAINTAINER requires exactly one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	b.maintainer = args[0]
 	b.maintainer = args[0]
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
 }
@@ -98,6 +126,10 @@ func label(b *Builder, args []string, attributes map[string]bool, original strin
 		return fmt.Errorf("Bad input to LABEL, too many args")
 		return fmt.Errorf("Bad input to LABEL, too many args")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	commitStr := "LABEL"
 	commitStr := "LABEL"
 
 
 	if b.Config.Labels == nil {
 	if b.Config.Labels == nil {
@@ -126,6 +158,10 @@ func add(b *Builder, args []string, attributes map[string]bool, original string)
 		return fmt.Errorf("ADD requires at least two arguments")
 		return fmt.Errorf("ADD requires at least two arguments")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	return b.runContextCommand(args, true, true, "ADD")
 	return b.runContextCommand(args, true, true, "ADD")
 }
 }
 
 
@@ -138,6 +174,10 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, origina
 		return fmt.Errorf("COPY requires at least two arguments")
 		return fmt.Errorf("COPY requires at least two arguments")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	return b.runContextCommand(args, false, false, "COPY")
 	return b.runContextCommand(args, false, false, "COPY")
 }
 }
 
 
@@ -150,6 +190,10 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 		return fmt.Errorf("FROM requires one argument")
 		return fmt.Errorf("FROM requires one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	name := args[0]
 	name := args[0]
 
 
 	if name == NoBaseImageSpecifier {
 	if name == NoBaseImageSpecifier {
@@ -194,6 +238,10 @@ func onbuild(b *Builder, args []string, attributes map[string]bool, original str
 		return fmt.Errorf("ONBUILD requires at least one argument")
 		return fmt.Errorf("ONBUILD requires at least one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
 	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
 	switch triggerInstruction {
 	switch triggerInstruction {
 	case "ONBUILD":
 	case "ONBUILD":
@@ -217,6 +265,10 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
 		return fmt.Errorf("WORKDIR requires exactly one argument")
 		return fmt.Errorf("WORKDIR requires exactly one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	workdir := args[0]
 	workdir := args[0]
 
 
 	if !filepath.IsAbs(workdir) {
 	if !filepath.IsAbs(workdir) {
@@ -231,10 +283,11 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
 // RUN some command yo
 // RUN some command yo
 //
 //
 // run a command and commit the image. Args are automatically prepended with
 // run a command and commit the image. Args are automatically prepended with
-// 'sh -c' in the event there is only one argument. The difference in
-// processing:
+// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
+// only one argument. The difference in processing:
 //
 //
-// RUN echo hi          # sh -c echo hi
+// RUN echo hi          # sh -c echo hi       (Linux)
+// RUN echo hi          # cmd /S /C echo hi   (Windows)
 // RUN [ "echo", "hi" ] # echo hi
 // RUN [ "echo", "hi" ] # echo hi
 //
 //
 func run(b *Builder, args []string, attributes map[string]bool, original string) error {
 func run(b *Builder, args []string, attributes map[string]bool, original string) error {
@@ -242,10 +295,18 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 		return fmt.Errorf("Please provide a source image with `from` prior to run")
 		return fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	args = handleJsonArgs(args, attributes)
 	args = handleJsonArgs(args, attributes)
 
 
 	if !attributes["json"] {
 	if !attributes["json"] {
-		args = append([]string{"/bin/sh", "-c"}, args...)
+		if runtime.GOOS != "windows" {
+			args = append([]string{"/bin/sh", "-c"}, args...)
+		} else {
+			args = append([]string{"cmd", "/S /C"}, args...)
+		}
 	}
 	}
 
 
 	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
 	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
@@ -301,10 +362,18 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 // Argument handling is the same as RUN.
 // Argument handling is the same as RUN.
 //
 //
 func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
 func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	cmdSlice := handleJsonArgs(args, attributes)
 	cmdSlice := handleJsonArgs(args, attributes)
 
 
 	if !attributes["json"] {
 	if !attributes["json"] {
-		cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+		if runtime.GOOS != "windows" {
+			cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+		} else {
+			cmdSlice = append([]string{"cmd", "/S /C"}, cmdSlice...)
+		}
 	}
 	}
 
 
 	b.Config.Cmd = runconfig.NewCommand(cmdSlice...)
 	b.Config.Cmd = runconfig.NewCommand(cmdSlice...)
@@ -322,13 +391,17 @@ func cmd(b *Builder, args []string, attributes map[string]bool, original string)
 
 
 // ENTRYPOINT /usr/sbin/nginx
 // ENTRYPOINT /usr/sbin/nginx
 //
 //
-// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
-// accept the CMD as the arguments to /usr/sbin/nginx.
+// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
 //
 //
 // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // is initialized at NewBuilder time instead of through argument parsing.
 // is initialized at NewBuilder time instead of through argument parsing.
 //
 //
 func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
 func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	parsed := handleJsonArgs(args, attributes)
 	parsed := handleJsonArgs(args, attributes)
 
 
 	switch {
 	switch {
@@ -340,7 +413,11 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original
 		b.Config.Entrypoint = nil
 		b.Config.Entrypoint = nil
 	default:
 	default:
 		// ENTRYPOINT echo hi
 		// ENTRYPOINT echo hi
-		b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0])
+		if runtime.GOOS != "windows" {
+			b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0])
+		} else {
+			b.Config.Entrypoint = runconfig.NewEntrypoint("cmd", "/S /C", parsed[0])
+		}
 	}
 	}
 
 
 	// when setting the entrypoint if a CMD was not explicitly set then
 	// when setting the entrypoint if a CMD was not explicitly set then
@@ -368,6 +445,10 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri
 		return fmt.Errorf("EXPOSE requires at least one argument")
 		return fmt.Errorf("EXPOSE requires at least one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	if b.Config.ExposedPorts == nil {
 	if b.Config.ExposedPorts == nil {
 		b.Config.ExposedPorts = make(nat.PortSet)
 		b.Config.ExposedPorts = make(nat.PortSet)
 	}
 	}
@@ -408,10 +489,18 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri
 // ENTRYPOINT/CMD at container run time.
 // ENTRYPOINT/CMD at container run time.
 //
 //
 func user(b *Builder, args []string, attributes map[string]bool, original string) error {
 func user(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("USER is not supported on Windows.")
+	}
+
 	if len(args) != 1 {
 	if len(args) != 1 {
 		return fmt.Errorf("USER requires exactly one argument")
 		return fmt.Errorf("USER requires exactly one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	b.Config.User = args[0]
 	b.Config.User = args[0]
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 }
 }
@@ -421,10 +510,17 @@ func user(b *Builder, args []string, attributes map[string]bool, original string
 // Expose the volume /foo for use. Will also accept the JSON array form.
 // Expose the volume /foo for use. Will also accept the JSON array form.
 //
 //
 func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
 func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("VOLUME is not supported on Windows.")
+	}
 	if len(args) == 0 {
 	if len(args) == 0 {
 		return fmt.Errorf("VOLUME requires at least one argument")
 		return fmt.Errorf("VOLUME requires at least one argument")
 	}
 	}
 
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	if b.Config.Volumes == nil {
 	if b.Config.Volumes == nil {
 		b.Config.Volumes = map[string]struct{}{}
 		b.Config.Volumes = map[string]struct{}{}
 	}
 	}
@@ -440,8 +536,3 @@ func volume(b *Builder, args []string, attributes map[string]bool, original stri
 	}
 	}
 	return nil
 	return nil
 }
 }
-
-// INSERT is no longer accepted, but we still parse it.
-func insert(b *Builder, args []string, attributes map[string]bool, original string) error {
-	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
-}
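
As the dispatchers.go changes above show, RUN, CMD and ENTRYPOINT now choose the shell prefix per platform: /bin/sh -c on Linux, cmd /S /C on Windows. A runnable sketch of that selection logic; the helper name is illustrative, not part of the commit:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    // shellPrefix wraps a shell-form command with the platform's shell,
    // mirroring the new behaviour for non-JSON RUN/CMD/ENTRYPOINT arguments.
    func shellPrefix(args []string) []string {
    	if runtime.GOOS != "windows" {
    		return append([]string{"/bin/sh", "-c"}, args...)
    	}
    	return append([]string{"cmd", "/S /C"}, args...)
    }

    func main() {
    	fmt.Println(shellPrefix([]string{"echo hi"})) // [/bin/sh -c echo hi] on Linux
    }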

+ 21 - 7
builder/evaluator.go

@@ -71,7 +71,6 @@ func init() {
 		command.Expose:     expose,
 		command.Expose:     expose,
 		command.Volume:     volume,
 		command.Volume:     volume,
 		command.User:       user,
 		command.User:       user,
-		command.Insert:     insert,
 	}
 	}
 }
 }
 
 
@@ -116,17 +115,20 @@ type Builder struct {
 	image          string        // image name for commit processing
 	image          string        // image name for commit processing
 	maintainer     string        // maintainer name. could probably be removed.
 	maintainer     string        // maintainer name. could probably be removed.
 	cmdSet         bool          // indicates is CMD was set in current Dockerfile
 	cmdSet         bool          // indicates is CMD was set in current Dockerfile
+	BuilderFlags   *BuilderFlags // current cmd's BuilderFlags - temporary
 	context        tarsum.TarSum // the context is a tarball that is uploaded by the client
 	context        tarsum.TarSum // the context is a tarball that is uploaded by the client
 	contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
 	contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
 	noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.
 	noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.
 
 
 	// Set resource restrictions for build containers
 	// Set resource restrictions for build containers
-	cpuSetCpus string
-	cpuSetMems string
-	cpuShares  int64
-	cpuQuota   int64
-	memory     int64
-	memorySwap int64
+	cpuSetCpus   string
+	cpuSetMems   string
+	cpuShares    int64
+	cpuPeriod    int64
+	cpuQuota     int64
+	cgroupParent string
+	memory       int64
+	memorySwap   int64
 
 
 	cancelled <-chan struct{} // When closed, job was cancelled.
 	cancelled <-chan struct{} // When closed, job was cancelled.
 }
 }
@@ -276,9 +278,14 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 	cmd := ast.Value
 	cmd := ast.Value
 	attrs := ast.Attributes
 	attrs := ast.Attributes
 	original := ast.Original
 	original := ast.Original
+	flags := ast.Flags
 	strs := []string{}
 	strs := []string{}
 	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
 	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
 
 
+	if len(ast.Flags) > 0 {
+		msg += " " + strings.Join(ast.Flags, " ")
+	}
+
 	if cmd == "onbuild" {
 	if cmd == "onbuild" {
 		if ast.Next == nil {
 		if ast.Next == nil {
 			return fmt.Errorf("ONBUILD requires at least one argument")
 			return fmt.Errorf("ONBUILD requires at least one argument")
@@ -286,6 +293,11 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 		ast = ast.Next.Children[0]
 		ast = ast.Next.Children[0]
 		strs = append(strs, ast.Value)
 		strs = append(strs, ast.Value)
 		msg += " " + ast.Value
 		msg += " " + ast.Value
+
+		if len(ast.Flags) > 0 {
+			msg += " " + strings.Join(ast.Flags, " ")
+		}
+
 	}
 	}
 
 
 	// count the number of nodes that we are going to traverse first
 	// count the number of nodes that we are going to traverse first
@@ -325,6 +337,8 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// picked these out already.
 	// picked these out already.
 	if f, ok := evaluateTable[cmd]; ok {
 	if f, ok := evaluateTable[cmd]; ok {
+		b.BuilderFlags = NewBuilderFlags()
+		b.BuilderFlags.Args = flags
 		return f(b, strList, attrs, original)
 		return f(b, strList, attrs, original)
 	}
 	}
 
 

+ 20 - 18
builder/internals.go

@@ -155,6 +155,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp
 			dest,
 			dest,
 			allowRemote,
 			allowRemote,
 			allowDecompression,
 			allowDecompression,
+			true,
 		); err != nil {
 		); err != nil {
 			return err
 			return err
 		}
 		}
@@ -225,7 +226,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp
 	return nil
 	return nil
 }
 }
 
 
-func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
+func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
 
 
 	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
 	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
 		origPath = origPath[1:]
 		origPath = origPath[1:]
@@ -350,7 +351,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 	}
 	}
 
 
 	// Deal with wildcards
 	// Deal with wildcards
-	if ContainsWildcards(origPath) {
+	if allowWildcards && ContainsWildcards(origPath) {
 		for _, fileInfo := range b.context.GetSums() {
 		for _, fileInfo := range b.context.GetSums() {
 			if fileInfo.Name() == "" {
 			if fileInfo.Name() == "" {
 				continue
 				continue
@@ -360,7 +361,9 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 				continue
 				continue
 			}
 			}
 
 
-			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
+			// Note we set allowWildcards to false in case the name has
+			// a * in it
+			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
 		}
 		}
 		return nil
 		return nil
 	}
 	}
@@ -455,10 +458,8 @@ func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
 	}
 	}
 
 
 	imagePullConfig := &graph.ImagePullConfig{
 	imagePullConfig := &graph.ImagePullConfig{
-		Parallel:   true,
 		AuthConfig: pullRegistryAuth,
 		AuthConfig: pullRegistryAuth,
 		OutStream:  ioutils.NopWriteCloser(b.OutOld),
 		OutStream:  ioutils.NopWriteCloser(b.OutOld),
-		Json:       b.StreamFormatter.Json(),
 	}
 	}
 
 
 	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
 	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
@@ -552,12 +553,15 @@ func (b *Builder) create() (*daemon.Container, error) {
 	b.Config.Image = b.image
 	b.Config.Image = b.image
 
 
 	hostConfig := &runconfig.HostConfig{
 	hostConfig := &runconfig.HostConfig{
-		CpuShares:  b.cpuShares,
-		CpuQuota:   b.cpuQuota,
-		CpusetCpus: b.cpuSetCpus,
-		CpusetMems: b.cpuSetMems,
-		Memory:     b.memory,
-		MemorySwap: b.memorySwap,
+		CpuShares:    b.cpuShares,
+		CpuPeriod:    b.cpuPeriod,
+		CpuQuota:     b.cpuQuota,
+		CpusetCpus:   b.cpuSetCpus,
+		CpusetMems:   b.cpuSetMems,
+		CgroupParent: b.cgroupParent,
+		Memory:       b.memory,
+		MemorySwap:   b.memorySwap,
+		NetworkMode:  "bridge",
 	}
 	}
 
 
 	config := *b.Config
 	config := *b.Config
@@ -618,7 +622,7 @@ func (b *Builder) run(c *daemon.Container) error {
 	// Wait for it to finish
 	// Wait for it to finish
 	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
 	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
 		return &jsonmessage.JSONError{
 		return &jsonmessage.JSONError{
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
+			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
 			Code:    ret,
 			Code:    ret,
 		}
 		}
 	}
 	}
@@ -760,16 +764,14 @@ func fixPermissions(source, destination string, uid, gid int, destExisted bool)
 
 
 func (b *Builder) clearTmp() {
 func (b *Builder) clearTmp() {
 	for c := range b.TmpContainers {
 	for c := range b.TmpContainers {
-		tmp, err := b.Daemon.Get(c)
-		if err != nil {
-			fmt.Fprint(b.OutStream, err.Error())
+		rmConfig := &daemon.ContainerRmConfig{
+			ForceRemove:  true,
+			RemoveVolume: true,
 		}
 		}
-
-		if err := b.Daemon.Rm(tmp); err != nil {
+		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
 			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
 			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
 			return
 			return
 		}
 		}
-		b.Daemon.DeleteVolumes(tmp.VolumePaths())
 		delete(b.TmpContainers, c)
 		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
 	}

+ 7 - 4
builder/job.go

@@ -13,7 +13,7 @@ import (
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/graph"
+	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
@@ -45,13 +45,14 @@ type Config struct {
 	Remove         bool
 	ForceRemove    bool
 	Pull           bool
-	JSONFormat     bool
 	Memory         int64
 	MemorySwap     int64
 	CpuShares      int64
+	CpuPeriod      int64
 	CpuQuota       int64
 	CpuSetCpus     string
 	CpuSetMems     string
+	CgroupParent   string
 	AuthConfig     *cliconfig.AuthConfig
 	ConfigFile     *cliconfig.ConfigFile

@@ -97,7 +98,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
 			return err
 		}
 		if len(tag) > 0 {
-			if err := graph.ValidateTagName(tag); err != nil {
+			if err := tags.ValidateTagName(tag); err != nil {
 				return err
 			}
 		}
@@ -140,7 +141,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
 	}
 	defer context.Close()

-	sf := streamformatter.NewStreamFormatter(buildConfig.JSONFormat)
+	sf := streamformatter.NewJSONStreamFormatter()

 	builder := &Builder{
 		Daemon: d,
@@ -163,9 +164,11 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
 		ConfigFile:      buildConfig.ConfigFile,
 		dockerfileName:  buildConfig.DockerfileName,
 		cpuShares:       buildConfig.CpuShares,
+		cpuPeriod:       buildConfig.CpuPeriod,
 		cpuQuota:        buildConfig.CpuQuota,
 		cpuSetCpus:      buildConfig.CpuSetCpus,
 		cpuSetMems:      buildConfig.CpuSetMems,
+		cgroupParent:    buildConfig.CgroupParent,
 		memory:          buildConfig.Memory,
 		memorySwap:      buildConfig.MemorySwap,
 		cancelled:       buildConfig.WaitCancelled(),

+ 3 - 2
builder/parser/parser.go

@@ -29,6 +29,7 @@ type Node struct {
 	Children   []*Node         // the children of this sexp
 	Attributes map[string]bool // special attributes for this node
 	Original   string          // original line used before parsing
+	Flags      []string        // only top Node should have this set
 }

 var (
@@ -60,7 +61,6 @@ func init() {
 		command.Entrypoint: parseMaybeJSON,
 		command.Expose:     parseStringsWhitespaceDelimited,
 		command.Volume:     parseMaybeJSONToList,
-		command.Insert:     parseIgnore,
 	}
 }

@@ -75,7 +75,7 @@ func parseLine(line string) (string, *Node, error) {
 		return line, nil, nil
 	}

-	cmd, args, err := splitCommand(line)
+	cmd, flags, args, err := splitCommand(line)
 	if err != nil {
 		return "", nil, err
 	}
@@ -91,6 +91,7 @@ func parseLine(line string) (string, *Node, error) {
 	node.Next = sexp
 	node.Attributes = attrs
 	node.Original = line
+	node.Flags = flags

 	return "", node, nil
 }

+ 10 - 0
builder/parser/testfiles/flags/Dockerfile

@@ -0,0 +1,10 @@
+FROM scratch
+COPY foo /tmp/
+COPY --user=me foo /tmp/
+COPY --doit=true foo /tmp/
+COPY --user=me --doit=true foo /tmp/
+COPY --doit=true -- foo /tmp/
+COPY -- foo /tmp/
+CMD --doit [ "a", "b" ]
+CMD --doit=true -- [ "a", "b" ]
+CMD --doit -- [ ]

+ 10 - 0
builder/parser/testfiles/flags/result

@@ -0,0 +1,10 @@
+(from "scratch")
+(copy "foo" "/tmp/")
+(copy ["--user=me"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy ["--user=me" "--doit=true"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy "foo" "/tmp/")
+(cmd ["--doit"] "a" "b")
+(cmd ["--doit=true"] "a" "b")
+(cmd ["--doit"])

+ 105 - 5
builder/parser/utils.go

@@ -1,8 +1,10 @@
 package parser

 import (
+	"fmt"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
+	"unicode"
 )

 // dumps the AST defined by `node` as a list of sexps. Returns a string
@@ -11,6 +13,10 @@ func (node *Node) Dump() string {
 	str := ""
 	str += node.Value

+	if len(node.Flags) > 0 {
+		str += fmt.Sprintf(" %q", node.Flags)
+	}
+
 	for _, n := range node.Children {
 		str += "(" + n.Dump() + ")\n"
 	}
@@ -48,20 +54,23 @@ func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {

 // splitCommand takes a single line of text and parses out the cmd and args,
 // which are used for dispatching to more exact parsing functions.
-func splitCommand(line string) (string, string, error) {
+func splitCommand(line string) (string, []string, string, error) {
 	var args string
+	var flags []string

 	// Make sure we get the same results irrespective of leading/trailing spaces
 	cmdline := TOKEN_WHITESPACE.Split(strings.TrimSpace(line), 2)
 	cmd := strings.ToLower(cmdline[0])

 	if len(cmdline) == 2 {
-		args = strings.TrimSpace(cmdline[1])
+		var err error
+		args, flags, err = extractBuilderFlags(cmdline[1])
+		if err != nil {
+			return "", nil, "", err
+		}
 	}

-	// the cmd should never have whitespace, but it's possible for the args to
-	// have trailing whitespace.
-	return cmd, args, nil
+	return cmd, flags, strings.TrimSpace(args), nil
 }

 // covers comments and empty lines. Lines should be trimmed before passing to
@@ -74,3 +83,94 @@ func stripComments(line string) string {

 	return line
 }
+
+func extractBuilderFlags(line string) (string, []string, error) {
+	// Parses the BuilderFlags and returns the remaining part of the line
+
+	const (
+		inSpaces = iota // looking for start of a word
+		inWord
+		inQuote
+	)
+
+	words := []string{}
+	phase := inSpaces
+	word := ""
+	quote := '\000'
+	blankOK := false
+	var ch rune
+
+	for pos := 0; pos <= len(line); pos++ {
+		if pos != len(line) {
+			ch = rune(line[pos])
+		}
+
+		if phase == inSpaces { // Looking for start of word
+			if pos == len(line) { // end of input
+				break
+			}
+			if unicode.IsSpace(ch) { // skip spaces
+				continue
+			}
+
+			// Only keep going if the next word starts with --
+			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+				return line[pos:], words, nil
+			}
+
+			phase = inWord // found someting with "--", fall thru
+		}
+		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+			if word != "--" && (blankOK || len(word) > 0) {
+				words = append(words, word)
+			}
+			break
+		}
+		if phase == inWord {
+			if unicode.IsSpace(ch) {
+				phase = inSpaces
+				if word == "--" {
+					return line[pos:], words, nil
+				}
+				if blankOK || len(word) > 0 {
+					words = append(words, word)
+				}
+				word = ""
+				blankOK = false
+				continue
+			}
+			if ch == '\'' || ch == '"' {
+				quote = ch
+				blankOK = true
+				phase = inQuote
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(line) {
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(line[pos])
+			}
+			word += string(ch)
+			continue
+		}
+		if phase == inQuote {
+			if ch == quote {
+				phase = inWord
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(line) {
+					phase = inWord
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(line[pos])
+			}
+			word += string(ch)
+		}
+	}
+
+	return "", words, nil
+}
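
The testfiles above pin down the intended behaviour of the new flag parsing: flags are gathered into Node.Flags, and a bare `--` ends flag parsing so later tokens are kept as arguments. As a rough, hypothetical illustration (not part of this commit), a table-style test living in the parser package could exercise the new splitCommand signature directly; the file and test names here are illustrative only.

```go
package parser

import "testing"

// Hypothetical test (not in this commit) exercising the flag-aware
// splitCommand added above; it sits in the parser package so it can call
// the unexported function directly.
func TestSplitCommandBuilderFlags(t *testing.T) {
	cmd, flags, args, err := splitCommand(`COPY --user=me --doit=true foo /tmp/`)
	if err != nil {
		t.Fatal(err)
	}
	if cmd != "copy" {
		t.Fatalf("cmd: expected %q, got %q", "copy", cmd)
	}
	if len(flags) != 2 || flags[0] != "--user=me" || flags[1] != "--doit=true" {
		t.Fatalf("flags: unexpected %v", flags)
	}
	if args != "foo /tmp/" {
		t.Fatalf("args: expected %q, got %q", "foo /tmp/", args)
	}

	// A bare "--" terminates flag parsing; everything after it is arguments.
	_, flags, args, err = splitCommand(`COPY --doit=true -- foo /tmp/`)
	if err != nil {
		t.Fatal(err)
	}
	if len(flags) != 1 || flags[0] != "--doit=true" || args != "foo /tmp/" {
		t.Fatalf("unexpected result: flags=%v args=%q", flags, args)
	}
}
```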

+ 1 - 1
contrib/builder/deb/debian-jessie/Dockerfile

@@ -7,7 +7,7 @@ FROM debian:jessie
 RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*

 ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin

 ENV AUTO_GOPATH 1

+ 14 - 0
contrib/builder/deb/debian-stretch/Dockerfile

@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM debian:stretch
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux

+ 1 - 1
contrib/builder/deb/debian-wheezy/Dockerfile

@@ -8,7 +8,7 @@ RUN echo deb http://http.debian.net/debian wheezy-backports main > /etc/apt/sour
 RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*

 ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin

 ENV AUTO_GOPATH 1

+ 1 - 1
contrib/builder/deb/generate.sh

@@ -59,7 +59,7 @@ for version in "${versions[@]}"; do
 	echo >> "$version/Dockerfile"
 	echo >> "$version/Dockerfile"
 
 
 	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
 	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
-	echo 'RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local' >> "$version/Dockerfile"
+	echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
 	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"

 	echo >> "$version/Dockerfile"

+ 1 - 1
contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile

@@ -7,7 +7,7 @@ FROM ubuntu-debootstrap:trusty
 RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*

 ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin

 ENV AUTO_GOPATH 1

+ 1 - 1
contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile

@@ -7,7 +7,7 @@ FROM ubuntu-debootstrap:utopic
 RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*

 ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin

 ENV AUTO_GOPATH 1

+ 1 - 1
contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile

@@ -7,7 +7,7 @@ FROM ubuntu-debootstrap:vivid
 RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*

 ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin

 ENV AUTO_GOPATH 1

+ 5 - 0
contrib/builder/rpm/README.md

@@ -0,0 +1,5 @@
+# `dockercore/builder-rpm`
+
+This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets.
+
+To add new tags, see [`contrib/builder/rpm` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file.

+ 10 - 0
contrib/builder/rpm/build.sh

@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+set -x
+./generate.sh
+for d in */; do
+	docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d"
+done

+ 15 - 0
contrib/builder/rpm/centos-6/Dockerfile

@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM centos:6
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs

+ 15 - 0
contrib/builder/rpm/centos-7/Dockerfile

@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM centos:7
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux

+ 15 - 0
contrib/builder/rpm/fedora-20/Dockerfile

@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:20
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux

+ 15 - 0
contrib/builder/rpm/fedora-21/Dockerfile

@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:21
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux

+ 73 - 0
contrib/builder/rpm/generate.sh

@@ -0,0 +1,73 @@
+#!/bin/bash
+set -e
+
+# usage: ./generate.sh [versions]
+#    ie: ./generate.sh
+#        to update all Dockerfiles in this directory
+#    or: ./generate.sh fedora-20
+#        to only update fedora-20/Dockerfile
+#    or: ./generate.sh fedora-newversion
+#        to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+	versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+	distro="${version%-*}"
+	suite="${version##*-}"
+	from="${distro}:${suite}"
+
+	mkdir -p "$version"
+	echo "$version -> FROM $from"
+	cat > "$version/Dockerfile" <<-EOF
+		#
+		# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+		#
+
+		FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	case "$from" in
+		centos:*)
+			# get "Development Tools" packages dependencies
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+			;;
+		*)
+			echo 'RUN yum install -y @development-tools fedora-packager' >> "$version/Dockerfile"
+			;;
+	esac
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
+		device-mapper-devel # for "libdevmapper.h"
+		glibc-static
+		libselinux-devel # for "libselinux.so"
+		sqlite-devel # for "sqlite3.h"
+		tar # older versions of dev-tools don't have tar
+	)
+	echo "RUN yum install -y ${packages[*]}" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
+	echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+	if [ "$from" == "centos:6" ]; then
+		echo 'ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs' >> "$version/Dockerfile"
+	else
+		echo 'ENV DOCKER_BUILDTAGS selinux' >> "$version/Dockerfile"
+	fi
+done

+ 33 - 2
contrib/check-config.sh

@@ -26,6 +26,12 @@ fi
 is_set() {
 	zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
 }
+is_set_in_kernel() {
+	zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
+}
+is_set_as_module() {
+	zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
+}

 # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
 declare -A colors=(
@@ -70,8 +76,10 @@ wrap_warning() {
 }

 check_flag() {
-	if is_set "$1"; then
+	if is_set_in_kernel "$1"; then
 		wrap_good "CONFIG_$1" 'enabled'
 		wrap_good "CONFIG_$1" 'enabled'
+	elif is_set_as_module "$1"; then
+		wrap_good "CONFIG_$1" 'enabled (as module)'
 	else
 		wrap_bad "CONFIG_$1" 'missing'
 	fi
@@ -83,6 +91,22 @@ check_flags() {
 	done
 }

+check_command() {
+	if command -v "$1" >/dev/null 2>&1; then
+		wrap_good "$1 command" 'available'
+	else
+		wrap_bad "$1 command" 'missing'
+	fi
+}
+
+check_device() {
+	if [ -c "$1" ]; then
+		wrap_good "$1" 'present'
+	else
+		wrap_bad "$1" 'missing'
+	fi
+}
+
 if [ ! -e "$CONFIG" ]; then
 if [ ! -e "$CONFIG" ]; then
 	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
 	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
 	for tryConfig in "${possibleConfigs[@]}"; do
 	for tryConfig in "${possibleConfigs[@]}"; do
@@ -139,7 +163,7 @@ flags=(
 	NAMESPACES {NET,PID,IPC,UTS}_NS
 	DEVPTS_MULTIPLE_INSTANCES
 	CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS
-	MACVLAN VETH BRIDGE
+	MACVLAN VETH BRIDGE BRIDGE_NETFILTER
 	NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
 	NF_NAT NF_NAT_NEEDED
@@ -160,6 +184,8 @@ echo 'Optional Features:'
 }
 flags=(
 	RESOURCE_COUNTERS
+	BLK_CGROUP
+	IOSCHED_CFQ
 	CGROUP_PERF
 	CFS_BANDWIDTH
 )
@@ -182,6 +208,11 @@ echo '- Storage Drivers:'

 	echo '- "'$(wrap_color 'overlay' blue)'":'
 	check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/  /'
+
+	echo '- "'$(wrap_color 'zfs' blue)'":'
+	echo "  - $(check_device /dev/zfs)"
+	echo "  - $(check_command zfs)"
+	echo "  - $(check_command zpool)"
 } | sed 's/^/  /'
 echo


+ 20 - 4
contrib/completion/bash/docker

@@ -27,7 +27,7 @@
 # This order should be applied to lists, alternatives and code blocks.
 # This order should be applied to lists, alternatives and code blocks.
 
 
 __docker_q() {
 __docker_q() {
-	docker 2>/dev/null "$@"
+	docker ${host:+-H "$host"} 2>/dev/null "$@"
 }
 }
 
 
 __docker_containers_all() {
 __docker_containers_all() {
@@ -407,7 +407,7 @@ _docker_events() {
 _docker_exec() {
 _docker_exec() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty -u --user" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_running
 			__docker_containers_running
@@ -593,7 +593,7 @@ _docker_logs() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--follow -f --help --tail --timestamps -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--follow -f --help --since --tail --timestamps -t" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter=$(__docker_pos_first_nonflag '--tail')
 			local counter=$(__docker_pos_first_nonflag '--tail')
@@ -679,6 +679,14 @@ _docker_pull() {
 		*)
 		*)
 			local counter=$(__docker_pos_first_nonflag)
 			local counter=$(__docker_pos_first_nonflag)
 			if [ $cword -eq $counter ]; then
 			if [ $cword -eq $counter ]; then
+				for arg in "${COMP_WORDS[@]}"; do
+					case "$arg" in
+						--all-tags|-a)
+							__docker_image_repos
+							return
+							;;
+					esac
+				done
 				__docker_image_repos_and_tags
 				__docker_image_repos_and_tags
 			fi
 			fi
 			;;
 			;;
@@ -770,6 +778,7 @@ _docker_run() {
 		--cidfile
 		--cidfile
 		--cpuset
 		--cpuset
 		--cpu-shares -c
 		--cpu-shares -c
+		--cpu-period
 		--cpu-quota
 		--cpu-quota
 		--device
 		--device
 		--dns
 		--dns
@@ -1003,7 +1012,7 @@ _docker_start() {
 _docker_stats() {
 _docker_stats() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--no-stream --help" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_running
 			__docker_containers_running
@@ -1152,6 +1161,7 @@ _docker() {
 		--dns-search
 		--dns-search
 		--exec-driver -e
 		--exec-driver -e
 		--exec-opt
 		--exec-opt
+		--exec-root
 		--fixed-cidr
 		--fixed-cidr
 		--fixed-cidr-v6
 		--fixed-cidr-v6
 		--graph -g
 		--graph -g
@@ -1173,6 +1183,7 @@ _docker() {
 	"
 	"
 
 
 	local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
 	local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
+	local host
 
 
 	COMPREPLY=()
 	COMPREPLY=()
 	local cur prev words cword
 	local cur prev words cword
@@ -1182,6 +1193,11 @@ _docker() {
 	local counter=1
 	local counter=1
 	while [ $counter -lt $cword ]; do
 	while [ $counter -lt $cword ]; do
 		case "${words[$counter]}" in
 		case "${words[$counter]}" in
+			# save host so that completion can use custom daemon
+			--host|-H)
+				(( counter++ ))
+				host="${words[$counter]}"
+				;;
 			$main_options_with_args_glob )
 			$main_options_with_args_glob )
 				(( counter++ ))
 				(( counter++ ))
 				;;
 				;;

+ 3 - 1
contrib/completion/fish/docker.fish

@@ -16,7 +16,7 @@
 
 
 function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
 function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
     for i in (commandline -opc)
     for i in (commandline -opc)
-        if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait
+        if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats
             return 1
             return 1
         end
         end
     end
     end
@@ -233,6 +233,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the log
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps'
+complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
 
 
@@ -362,6 +363,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_prin
 # stats
 # stats
 complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
 complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
 complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result'
 complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
 complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
 
 
 # stop
 # stop

+ 2 - 0
contrib/completion/zsh/_docker

@@ -305,6 +305,7 @@ __docker_subcommand () {
         (logs)
         (logs)
             _arguments \
             _arguments \
                 {-f,--follow}'[Follow log output]' \
                 {-f,--follow}'[Follow log output]' \
+                '-s,--since[Show logs since timestamp]' \
                 {-t,--timestamps}'[Show timestamps]' \
                 {-t,--timestamps}'[Show timestamps]' \
                 '--tail=-[Output the last K lines]:lines:(1 10 20 50 all)' \
                 '--tail=-[Output the last K lines]:lines:(1 10 20 50 all)' \
                 '*:containers:__docker_containers'
                 '*:containers:__docker_containers'
@@ -326,6 +327,7 @@ __docker_subcommand () {
             ;;
             ;;
         (stats)
         (stats)
             _arguments \
             _arguments \
+                '--no-stream[Disable streaming stats and only pull the first result]' \
                 '*:containers:__docker_runningcontainers'
                 '*:containers:__docker_runningcontainers'
             ;;
             ;;
         (rm)
         (rm)

+ 8 - 4
contrib/download-frozen-image.sh

@@ -42,6 +42,8 @@ while [ $# -gt 0 ]; do
 	[ "$tag" != "$imageTag" ] || tag='latest'
 	[ "$tag" != "$imageTag" ] || tag='latest'
 	tag="${tag%@*}"
 	tag="${tag%@*}"
 
 
+	imageFile="${image//\//_}" # "/" can't be in filenames :)
+
 	token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"
 	token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"
 
 
 	if [ -z "$imageId" ]; then
 	if [ -z "$imageId" ]; then
@@ -60,12 +62,12 @@ while [ $# -gt 0 ]; do
 	ancestry=( ${ancestryJson//[\[\] \"]/} )
 	ancestry=( ${ancestryJson//[\[\] \"]/} )
 	unset IFS
 	unset IFS
 
 
-	if [ -s "$dir/tags-$image.tmp" ]; then
-		echo -n ', ' >> "$dir/tags-$image.tmp"
+	if [ -s "$dir/tags-$imageFile.tmp" ]; then
+		echo -n ', ' >> "$dir/tags-$imageFile.tmp"
 	else
 	else
 		images=( "${images[@]}" "$image" )
 		images=( "${images[@]}" "$image" )
 	fi
 	fi
-	echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp"
+	echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp"
 
 
 	echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
 	echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
 	for imageId in "${ancestry[@]}"; do
 	for imageId in "${ancestry[@]}"; do
@@ -90,10 +92,12 @@ done
 echo -n '{' > "$dir/repositories"
 echo -n '{' > "$dir/repositories"
 firstImage=1
 firstImage=1
 for image in "${images[@]}"; do
 for image in "${images[@]}"; do
+	imageFile="${image//\//_}" # "/" can't be in filenames :)
+
 	[ "$firstImage" ] || echo -n ',' >> "$dir/repositories"
 	[ "$firstImage" ] || echo -n ',' >> "$dir/repositories"
 	firstImage=
 	firstImage=
 	echo -n $'\n\t' >> "$dir/repositories"
 	echo -n $'\n\t' >> "$dir/repositories"
-	echo -n '"'"$image"'": { '"$(cat "$dir/tags-$image.tmp")"' }' >> "$dir/repositories"
+	echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories"
 done
 done
 echo -n $'\n}\n' >> "$dir/repositories"
 echo -n $'\n}\n' >> "$dir/repositories"
 
 

+ 2 - 0
contrib/init/upstart/docker.conf

@@ -7,6 +7,8 @@ limit nproc 524288 1048576

 respawn

+kill timeout 20
+
 pre-start script
 	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
 	if grep -v '^#' /etc/fstab | grep -q cgroup \

+ 0 - 49
contrib/mkimage-unittest.sh

@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-# Generate a very minimal filesystem based on busybox-static,
-# and load it into the local docker under the name "docker-ut".
-
-missing_pkg() {
-    echo "Sorry, I could not locate $1"
-    echo "Try 'apt-get install ${2:-$1}'?"
-    exit 1
-}
-
-BUSYBOX=$(which busybox)
-[ "$BUSYBOX" ] || missing_pkg busybox busybox-static
-SOCAT=$(which socat)
-[ "$SOCAT" ] || missing_pkg socat
-
-shopt -s extglob
-set -ex
-ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX`
-trap "rm -rf $ROOTFS" INT QUIT TERM
-cd $ROOTFS
-
-mkdir bin etc dev dev/pts lib proc sys tmp
-touch etc/resolv.conf
-cp /etc/nsswitch.conf etc/nsswitch.conf
-echo root:x:0:0:root:/:/bin/sh > etc/passwd
-echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd
-echo root:x:0: > etc/group
-echo daemon:x:1: >> etc/group
-ln -s lib lib64
-ln -s bin sbin
-cp $BUSYBOX $SOCAT bin
-for X in $(busybox --list)
-do
-    ln -s busybox bin/$X
-done
-rm bin/init
-ln bin/busybox bin/init
-cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib
-cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib
-cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib
-for X in console null ptmx random stdin stdout stderr tty urandom zero
-do
-    cp -a /dev/$X dev
-done
-
-chmod 0755 $ROOTFS # See #486
-tar --numeric-owner -cf- . | docker import - docker-ut
-docker run -i -u root docker-ut /bin/echo Success.
-rm -rf $ROOTFS

+ 13 - 5
contrib/mkimage/debootstrap

@@ -176,11 +176,19 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
 						s/ $suite / ${suite}-updates /
 						s/ $suite / ${suite}-updates /
 					" "$rootfsDir/etc/apt/sources.list"
 					" "$rootfsDir/etc/apt/sources.list"
 					echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
 					echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
-					# LTS
-					if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then
-						head -1 "$rootfsDir/etc/apt/sources.list" \
-							| sed "s/ $suite / squeeze-lts /" \
-								>> "$rootfsDir/etc/apt/sources.list"
+					# squeeze-lts
+					if [ -f "$rootfsDir/etc/debian_version" ]; then
+						ltsSuite=
+						case "$(cat "$rootfsDir/etc/debian_version")" in
+							6.*) ltsSuite='squeeze-lts' ;;
+							#7.*) ltsSuite='wheezy-lts' ;;
+							#8.*) ltsSuite='jessie-lts' ;;
+						esac
+						if [ "$ltsSuite" ]; then
+							head -1 "$rootfsDir/etc/apt/sources.list" \
+								| sed "s/ $suite / $ltsSuite /" \
+									>> "$rootfsDir/etc/apt/sources.list"
+						fi
 					fi
 					fi
 				)
 				)
 			fi
 			fi

+ 26 - 0
contrib/syntax/nano/Dockerfile.nanorc

@@ -0,0 +1,26 @@
+## Syntax highlighting for Dockerfiles
+syntax "Dockerfile" "Dockerfile[^/]*$"
+
+## Keywords
+icolor red "^(FROM|MAINTAINER|RUN|CMD|LABEL|EXPOSE|ENV|ADD|COPY|ENTRYPOINT|VOLUME|USER|WORKDIR|ONBUILD)[[:space:]]"
+
+## Brackets & parenthesis
+color brightgreen "(\(|\)|\[|\])"
+
+## Double ampersand
+color brightmagenta "&&"
+
+## Comments
+icolor cyan "^[[:space:]]*#.*$"
+
+## Blank space at EOL
+color ,green "[[:space:]]+$"
+
+## Strings, single-quoted
+color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!"
+
+## Strings, double-quoted
+color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!"
+
+## Single and double quotes
+color brightyellow "('|\")"

+ 32 - 0
contrib/syntax/nano/README.md

@@ -0,0 +1,32 @@
+Dockerfile.nanorc
+=================
+
+Dockerfile syntax highlighting for nano
+
+Single User Installation
+------------------------
+1. Create a nano syntax directory in your home directory:
+ * `mkdir -p ~/.nano/syntax`
+
+2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/`
+ * `cp Dockerfile.nanorc ~/.nano/syntax/`
+
+3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file
+  ```
+## Dockerfile files
+include "~/.nano/syntax/Dockerfile.nanorc"
+  ```
+
+System Wide Installation
+------------------------
+1. Create a nano syntax directory: 
+  * `mkdir /usr/local/share/nano`
+
+2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano`
+  * `cp Dockerfile.nanorc /usr/local/share/nano/`
+
+3. Add the following to your `/etc/nanorc`:
+  ```
+## Dockerfile files
+include "/usr/local/share/nano/Dockerfile.nanorc"
+  ```

+ 39 - 207
daemon/attach.go

@@ -1,229 +1,61 @@
 package daemon
 package daemon
 
 
 import (
 import (
-	"encoding/json"
 	"io"
 	"io"
-	"os"
-	"sync"
-	"time"
 
 
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/stdcopy"
 )
 )
 
 
-func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
-	if logs {
-		cLog, err := c.ReadLog("json")
-		if err != nil && os.IsNotExist(err) {
-			// Legacy logs
-			logrus.Debugf("Old logs format")
-			if stdout != nil {
-				cLog, err := c.ReadLog("stdout")
-				if err != nil {
-					logrus.Errorf("Error reading logs (stdout): %s", err)
-				} else if _, err := io.Copy(stdout, cLog); err != nil {
-					logrus.Errorf("Error streaming logs (stdout): %s", err)
-				}
-			}
-			if stderr != nil {
-				cLog, err := c.ReadLog("stderr")
-				if err != nil {
-					logrus.Errorf("Error reading logs (stderr): %s", err)
-				} else if _, err := io.Copy(stderr, cLog); err != nil {
-					logrus.Errorf("Error streaming logs (stderr): %s", err)
-				}
-			}
-		} else if err != nil {
-			logrus.Errorf("Error reading logs (json): %s", err)
-		} else {
-			dec := json.NewDecoder(cLog)
-			for {
-				l := &jsonlog.JSONLog{}
-
-				if err := dec.Decode(l); err == io.EOF {
-					break
-				} else if err != nil {
-					logrus.Errorf("Error streaming logs: %s", err)
-					break
-				}
-				if l.Stream == "stdout" && stdout != nil {
-					io.WriteString(stdout, l.Log)
-				}
-				if l.Stream == "stderr" && stderr != nil {
-					io.WriteString(stderr, l.Log)
-				}
-			}
-		}
-	}
-
-	//stream
-	if stream {
-		var stdinPipe io.ReadCloser
-		if stdin != nil {
-			r, w := io.Pipe()
-			go func() {
-				defer w.Close()
-				defer logrus.Debugf("Closing buffered stdin pipe")
-				io.Copy(w, stdin)
-			}()
-			stdinPipe = r
-		}
-		<-c.Attach(stdinPipe, stdout, stderr)
-		// If we are in stdinonce mode, wait for the process to end
-		// otherwise, simply return
-		if c.Config.StdinOnce && !c.Config.Tty {
-			c.WaitStop(-1 * time.Second)
-		}
-	}
-	return nil
+type ContainerAttachWithLogsConfig struct {
+	InStream                       io.ReadCloser
+	OutStream                      io.Writer
+	UseStdin, UseStdout, UseStderr bool
+	Logs, Stream                   bool
+	Multiplex                      bool
 }
 }
 
 
-func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
-	return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
-}
-
-func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
-	var (
-		cStdout, cStderr io.ReadCloser
-		cStdin           io.WriteCloser
-		wg               sync.WaitGroup
-		errors           = make(chan error, 3)
-	)
-
-	if stdin != nil && openStdin {
-		cStdin = streamConfig.StdinPipe()
-		wg.Add(1)
+func (daemon *Daemon) ContainerAttachWithLogs(name string, c *ContainerAttachWithLogsConfig) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 	}
 
 
-	if stdout != nil {
-		cStdout = streamConfig.StdoutPipe()
-		wg.Add(1)
-	}
+	var errStream io.Writer
 
 
-	if stderr != nil {
-		cStderr = streamConfig.StderrPipe()
-		wg.Add(1)
+	if !container.Config.Tty && c.Multiplex {
+		errStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stderr)
+		c.OutStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stdout)
+	} else {
+		errStream = c.OutStream
 	}
 	}
 
 
-	// Connect stdin of container to the http conn.
-	go func() {
-		if stdin == nil || !openStdin {
-			return
-		}
-		logrus.Debugf("attach: stdin: begin")
-		defer func() {
-			if stdinOnce && !tty {
-				cStdin.Close()
-			} else {
-				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
-				if cStdout != nil {
-					cStdout.Close()
-				}
-				if cStderr != nil {
-					cStderr.Close()
-				}
-			}
-			wg.Done()
-			logrus.Debugf("attach: stdin: end")
-		}()
+	var stdin io.ReadCloser
+	var stdout, stderr io.Writer
 
 
-		var err error
-		if tty {
-			_, err = copyEscapable(cStdin, stdin)
-		} else {
-			_, err = io.Copy(cStdin, stdin)
-
-		}
-		if err == io.ErrClosedPipe {
-			err = nil
-		}
-		if err != nil {
-			logrus.Errorf("attach: stdin: %s", err)
-			errors <- err
-			return
-		}
-	}()
-
-	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
-		if stream == nil {
-			return
-		}
-		defer func() {
-			// Make sure stdin gets closed
-			if stdin != nil {
-				stdin.Close()
-			}
-			streamPipe.Close()
-			wg.Done()
-			logrus.Debugf("attach: %s: end", name)
-		}()
-
-		logrus.Debugf("attach: %s: begin", name)
-		_, err := io.Copy(stream, streamPipe)
-		if err == io.ErrClosedPipe {
-			err = nil
-		}
-		if err != nil {
-			logrus.Errorf("attach: %s: %v", name, err)
-			errors <- err
-		}
+	if c.UseStdin {
+		stdin = c.InStream
+	}
+	if c.UseStdout {
+		stdout = c.OutStream
+	}
+	if c.UseStderr {
+		stderr = errStream
 	}
 	}
 
 
-	go attachStream("stdout", stdout, cStdout)
-	go attachStream("stderr", stderr, cStderr)
+	return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
+}
 
 
-	return promise.Go(func() error {
-		wg.Wait()
-		close(errors)
-		for err := range errors {
-			if err != nil {
-				return err
-			}
-		}
-		return nil
-	})
+type ContainerWsAttachWithLogsConfig struct {
+	InStream             io.ReadCloser
+	OutStream, ErrStream io.Writer
+	Logs, Stream         bool
 }
 }
 
 
-// Code c/c from io.Copy() modified to handle escape sequence
-func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
-	buf := make([]byte, 32*1024)
-	for {
-		nr, er := src.Read(buf)
-		if nr > 0 {
-			// ---- Docker addition
-			// char 16 is C-p
-			if nr == 1 && buf[0] == 16 {
-				nr, er = src.Read(buf)
-				// char 17 is C-q
-				if nr == 1 && buf[0] == 17 {
-					if err := src.Close(); err != nil {
-						return 0, err
-					}
-					return 0, nil
-				}
-			}
-			// ---- End of docker
-			nw, ew := dst.Write(buf[0:nr])
-			if nw > 0 {
-				written += int64(nw)
-			}
-			if ew != nil {
-				err = ew
-				break
-			}
-			if nr != nw {
-				err = io.ErrShortWrite
-				break
-			}
-		}
-		if er == io.EOF {
-			break
-		}
-		if er != nil {
-			err = er
-			break
-		}
+func (daemon *Daemon) ContainerWsAttachWithLogs(name string, c *ContainerWsAttachWithLogsConfig) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 	}
-	return written, err
+
+	return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
 }
 }
+ 13 - 0
daemon/changes.go

@@ -0,0 +1,13 @@
+package daemon
+
+import "github.com/docker/docker/pkg/archive"
+
+// ContainerChanges returns a list of container fs changes
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return container.Changes()
+}

+ 5 - 1
daemon/commit.go

@@ -32,7 +32,11 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut
 	if err != nil {
 		return nil, err
 	}
-	defer rwTar.Close()
+	defer func() {
+		if rwTar != nil {
+			rwTar.Close()
+		}
+	}()

 	// Create a new image from the container's base layers + a new layer from container changes
 	var (

+ 52 - 49
daemon/config.go

@@ -1,11 +1,10 @@
 package daemon
 package daemon
 
 
 import (
 import (
-	"github.com/docker/docker/daemon/networkdriver"
-	"github.com/docker/docker/daemon/networkdriver/bridge"
+	"net"
+
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
 	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
 )
 )
 
 
@@ -14,46 +13,60 @@ const (
 	disableNetworkBridge = "none"
 	disableNetworkBridge = "none"
 )
 )
 
 
-// Config define the configuration of a docker daemon
-// These are the configuration settings that you pass
-// to the docker daemon when you launch it with say: `docker -d -e lxc`
-// FIXME: separate runtime configuration from http api configuration
-type Config struct {
-	Bridge bridge.Config
+// CommonConfig defines the configuration of a docker daemon which are
+// common across platforms.
+type CommonConfig struct {
+	AutoRestart bool
+	// Bridge holds bridge network specific configuration.
+	Bridge         bridgeConfig
+	Context        map[string][]string
+	CorsHeaders    string
+	DisableNetwork bool
+	Dns            []string
+	DnsSearch      []string
+	EnableCors     bool
+	ExecDriver     string
+	ExecRoot       string
+	GraphDriver    string
+	Labels         []string
+	LogConfig      runconfig.LogConfig
+	Mtu            int
+	Pidfile        string
+	Root           string
+	TrustKeyPath   string
+}
 
 
-	Pidfile              string
-	Root                 string
-	AutoRestart          bool
-	Dns                  []string
-	DnsSearch            []string
-	GraphDriver          string
-	GraphOptions         []string
-	ExecDriver           string
-	ExecOptions          []string
-	Mtu                  int
-	SocketGroup          string
-	EnableCors           bool
-	CorsHeaders          string
-	DisableNetwork       bool
-	EnableSelinuxSupport bool
-	Context              map[string][]string
-	TrustKeyPath         string
-	Labels               []string
-	Ulimits              map[string]*ulimit.Ulimit
-	LogConfig            runconfig.LogConfig
+// bridgeConfig stores all the bridge driver specific
+// configuration.
+type bridgeConfig struct {
+	EnableIPv6                  bool
+	EnableIPTables              bool
+	EnableIPForward             bool
+	EnableIPMasq                bool
+	EnableUserlandProxy         bool
+	DefaultIP                   net.IP
+	Iface                       string
+	IP                          string
+	FixedCIDR                   string
+	FixedCIDRv6                 string
+	DefaultGatewayIPv4          string
+	DefaultGatewayIPv6          string
+	InterContainerCommunication bool
 }
 }
 
 
-// InstallFlags adds command-line options to the top-level flag parser for
+// InstallCommonFlags adds command-line options to the top-level flag parser for
 // the current process.
 // the current process.
 // Subsequent calls to `flag.Parse` will populate config with values parsed
 // Subsequent calls to `flag.Parse` will populate config with values parsed
 // from the command-line.
 // from the command-line.
-func (config *Config) InstallFlags() {
-	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
-	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Root of the Docker runtime")
+
+func (config *Config) InstallCommonFlags() {
+	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, "Path to use for daemon PID file")
+	flag.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, "Root of the Docker runtime")
+	flag.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", "Root of the Docker execdriver")
 	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
 	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
-	flag.BoolVar(&config.Bridge.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
-	flag.BoolVar(&config.Bridge.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
-	flag.BoolVar(&config.Bridge.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
+	flag.BoolVar(&config.Bridge.EnableIPTables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
+	flag.BoolVar(&config.Bridge.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+	flag.BoolVar(&config.Bridge.EnableIPMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
 	flag.BoolVar(&config.Bridge.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
 	flag.BoolVar(&config.Bridge.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
 	flag.StringVar(&config.Bridge.IP, []string{"#bip", "-bip"}, "", "Specify network bridge IP")
 	flag.StringVar(&config.Bridge.IP, []string{"#bip", "-bip"}, "", "Specify network bridge IP")
 	flag.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge")
 	flag.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge")
@@ -64,26 +77,16 @@ func (config *Config) InstallFlags() {
 	flag.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
 	flag.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
 	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use")
 	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use")
 	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use")
 	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use")
-	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
 	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU")
 	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU")
-	flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
 	flag.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header")
 	flag.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header")
 	flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API")
 	flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API")
-	opts.IPVar(&config.Bridge.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
-	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
-	opts.ListVar(&config.ExecOptions, []string{"-exec-opt"}, "Set exec driver options")
+	opts.IPVar(&config.Bridge.DefaultIP, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use")
 	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use")
 	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use")
 	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use")
 	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon")
 	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon")
-	config.Ulimits = make(map[string]*ulimit.Ulimit)
-	opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
 	flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Default driver for container logs")
 	flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Default driver for container logs")
-}
+	opts.LogOptsVar(config.LogConfig.Config, []string{"-log-opt"}, "Set log driver options")
+	flag.BoolVar(&config.Bridge.EnableUserlandProxy, []string{"-userland-proxy"}, true, "Use userland proxy for loopback traffic")
 
 
-func getDefaultNetworkMtu() int {
-	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
-		return iface.MTU
-	}
-	return defaultNetworkMtu
 }
 }

+ 43 - 0
daemon/config_linux.go

@@ -0,0 +1,43 @@
+package daemon
+
+import (
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/ulimit"
+)
+
+var (
+	defaultPidFile = "/var/run/docker.pid"
+	defaultGraph   = "/var/lib/docker"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e lxc`
+type Config struct {
+	CommonConfig
+
+	// Fields below here are platform specific.
+	EnableSelinuxSupport bool
+	ExecOptions          []string
+	GraphOptions         []string
+	SocketGroup          string
+	Ulimits              map[string]*ulimit.Ulimit
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+	// First handle install flags which are consistent cross-platform
+	config.InstallCommonFlags()
+
+	// Then platform-specific install flags
+	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
+	opts.ListVar(&config.ExecOptions, []string{"-exec-opt"}, "Set exec driver options")
+	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
+	flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
+	config.Ulimits = make(map[string]*ulimit.Ulimit)
+	opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
+}
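The --default-ulimit option registered above collects repeated name=soft:hard arguments into a map via opts.UlimitMapVar. The accumulating-flag idea behind it, sketched against the standard library flag.Value interface with deliberately simplified parsing (this is not the daemon's actual parser):

package main

import (
	"flag"
	"fmt"
	"strings"
)

// ulimitMap accumulates repeated --default-ulimit style values, e.g. "nofile=1024:2048".
type ulimitMap map[string]string

func (u ulimitMap) String() string { return fmt.Sprint(map[string]string(u)) }

func (u ulimitMap) Set(val string) error {
	parts := strings.SplitN(val, "=", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid ulimit %q, expected name=soft:hard", val)
	}
	u[parts[0]] = parts[1]
	return nil
}

func main() {
	limits := ulimitMap{}
	fs := flag.NewFlagSet("daemon", flag.ExitOnError)
	fs.Var(limits, "default-ulimit", "Set default ulimits for containers")
	fs.Parse([]string{"-default-ulimit", "nofile=1024:2048", "-default-ulimit", "nproc=512:1024"})
	fmt.Println(limits) // prints the collected map, e.g. map[nofile:1024:2048 nproc:512:1024]
}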

+ 32 - 0
daemon/config_windows.go

@@ -0,0 +1,32 @@
+package daemon
+
+import (
+	"os"
+)
+
+var (
+	defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid"
+	defaultGraph   = os.Getenv("programdata") + string(os.PathSeparator) + "docker"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e windows`
+type Config struct {
+	CommonConfig
+
+	// Fields below here are platform specific. (There are none presently
+	// for the Windows daemon.)
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+	// First handle install flags which are consistent cross-platform
+	config.InstallCommonFlags()
+
+	// Then platform-specific install flags. There are none presently on Windows
+
+}
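The Windows defaults at the top of this file are built by concatenating %programdata% with a path separator by hand. The same idea expressed with filepath.Join, as a small illustrative sketch rather than the daemon's real logic:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// defaultDockerRoot mirrors the per-platform defaults above: a fixed path on Linux,
// a path under %programdata% on Windows. filepath.Join picks the right separator.
func defaultDockerRoot() string {
	if runtime.GOOS == "windows" {
		return filepath.Join(os.Getenv("programdata"), "docker")
	}
	return "/var/lib/docker"
}

func main() {
	fmt.Println(defaultDockerRoot())
}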

+ 455 - 907
daemon/container.go

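The hunks below split the old monolithic Container struct into a shared CommonContainer plus a per-platform Container that embeds it (the Linux variant appears in container_linux.go further down). A compressed sketch of that embedding relationship, with a couple of invented representative fields:

package main

import "fmt"

// CommonContainer carries the fields that make sense on every platform
// (the real struct has many more: ID, Name, Config, NetworkSettings, ...).
type CommonContainer struct {
	ID   string
	Name string
}

// Container, as compiled for Linux, embeds the common part and adds
// platform-only state such as the AppArmor profile.
type Container struct {
	CommonContainer
	AppArmorProfile string
}

func main() {
	c := Container{
		CommonContainer: CommonContainer{ID: "abc123", Name: "/web"},
		AppArmorProfile: "docker-default",
	}
	// Embedded fields are promoted, so callers keep using c.ID and c.Name unchanged.
	fmt.Println(c.ID, c.Name, c.AppArmorProfile)
}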
@@ -1,51 +1,38 @@
 package daemon
 
 import (
-	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 
-	"github.com/docker/libcontainer/configs"
-	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/label"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/daemon/logger/journald"
 	"github.com/docker/docker/daemon/logger/jsonfilelog"
-	"github.com/docker/docker/daemon/logger/syslog"
 	"github.com/docker/docker/daemon/network"
-	"github.com/docker/docker/daemon/networkdriver/bridge"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/links"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/broadcastwriter"
-	"github.com/docker/docker/pkg/directory"
-	"github.com/docker/docker/pkg/etchosts"
 	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/promise"
-	"github.com/docker/docker/pkg/resolvconf"
-	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/volume"
 )
 
-const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
 var (
 	ErrNotATTY               = errors.New("The PTY is not a file")
 	ErrNoTTY                 = errors.New("No PTY found")
@@ -60,56 +47,43 @@ type StreamConfig struct {
 	stdinPipe io.WriteCloser
 }
 
-type Container struct {
+// CommonContainer holds the settings for a container which are applicable
+// across all platforms supported by the daemon.
+type CommonContainer struct {
+	StreamConfig
+
 	*State `json:"State"` // Needed for remote api version <= 1.11
 	root   string         // Path to the "home" of the container, including metadata.
 	basefs string         // Path to the graphdriver mountpoint
 
-	ID string
-
-	Created time.Time
-
-	Path string
-	Args []string
-
-	Config  *runconfig.Config
-	ImageID string `json:"Image"`
-
-	NetworkSettings *network.Settings
-
-	ResolvConfPath string
-	HostnamePath   string
-	HostsPath      string
-	LogPath        string
-	Name           string
-	Driver         string
-	ExecDriver     string
-
-	command *execdriver.Command
-	StreamConfig
-
-	daemon                   *Daemon
+	ID                       string
+	Created                  time.Time
+	Path                     string
+	Args                     []string
+	Config                   *runconfig.Config
+	ImageID                  string `json:"Image"`
+	NetworkSettings          *network.Settings
+	ResolvConfPath           string
+	HostnamePath             string
+	HostsPath                string
+	LogPath                  string
+	Name                     string
+	Driver                   string
+	ExecDriver               string
 	MountLabel, ProcessLabel string
-	AppArmorProfile          string
 	RestartCount             int
 	UpdateDns                bool
+	MountPoints              map[string]*mountPoint
 
-	// Maps container paths to volume paths.  The key in this is the path to which
-	// the volume is being mounted inside the container.  Value is the path of the
-	// volume on disk
-	Volumes map[string]string
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
-	// Easier than migrating older container configs :)
-	VolumesRW  map[string]bool
 	hostConfig *runconfig.HostConfig
+	command    *execdriver.Command
 
-	activeLinks  map[string]*links.Link
 	monitor      *containerMonitor
 	execCommands *execStore
+	daemon       *Daemon
 	// logDriver for closing
-	logDriver          logger.Logger
-	logCopier          *logger.Copier
-	AppliedVolumesFrom map[string]struct{}
+	logDriver logger.Logger
+	logCopier *logger.Copier
 }
 
 func (container *Container) FromDisk() error {
@@ -245,184 +219,6 @@ func (container *Container) GetRootResourcePath(path string) (string, error) {
 	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
 }
 
-func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
-	device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
-	// if there was no error, return the device
-	if err == nil {
-		device.Path = deviceMapping.PathInContainer
-		return append(devs, device), nil
-	}
-
-	// if the device is not a device node
-	// try to see if it's a directory holding many devices
-	if err == devices.ErrNotADevice {
-
-		// check if it is a directory
-		if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
-
-			// mount the internal devices recursively
-			filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
-				childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
-				if e != nil {
-					// ignore the device
-					return nil
-				}
-
-				// add the device to userSpecified devices
-				childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
-				devs = append(devs, childDevice)
-
-				return nil
-			})
-		}
-	}
-
-	if len(devs) > 0 {
-		return devs, nil
-	}
-
-	return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
-}
-
-func populateCommand(c *Container, env []string) error {
-	en := &execdriver.Network{
-		Mtu:       c.daemon.config.Mtu,
-		Interface: nil,
-	}
-
-	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
-	switch parts[0] {
-	case "none":
-	case "host":
-		en.HostNetworking = true
-	case "bridge", "": // empty string to support existing containers
-		if !c.Config.NetworkDisabled {
-			network := c.NetworkSettings
-			en.Interface = &execdriver.NetworkInterface{
-				Gateway:              network.Gateway,
-				Bridge:               network.Bridge,
-				IPAddress:            network.IPAddress,
-				IPPrefixLen:          network.IPPrefixLen,
-				MacAddress:           network.MacAddress,
-				LinkLocalIPv6Address: network.LinkLocalIPv6Address,
-				GlobalIPv6Address:    network.GlobalIPv6Address,
-				GlobalIPv6PrefixLen:  network.GlobalIPv6PrefixLen,
-				IPv6Gateway:          network.IPv6Gateway,
-			}
-		}
-	case "container":
-		nc, err := c.getNetworkedContainer()
-		if err != nil {
-			return err
-		}
-		en.ContainerID = nc.ID
-	default:
-		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
-	}
-
-	ipc := &execdriver.Ipc{}
-
-	if c.hostConfig.IpcMode.IsContainer() {
-		ic, err := c.getIpcContainer()
-		if err != nil {
-			return err
-		}
-		ipc.ContainerID = ic.ID
-	} else {
-		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
-	}
-
-	pid := &execdriver.Pid{}
-	pid.HostPid = c.hostConfig.PidMode.IsHost()
-
-	// Build lists of devices allowed and created within the container.
-	var userSpecifiedDevices []*configs.Device
-	for _, deviceMapping := range c.hostConfig.Devices {
-		devs, err := getDevicesFromPath(deviceMapping)
-		if err != nil {
-			return err
-		}
-
-		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
-	}
-	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
-
-	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
-
-	// TODO: this can be removed after lxc-conf is fully deprecated
-	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
-	if err != nil {
-		return err
-	}
-
-	var rlimits []*ulimit.Rlimit
-	ulimits := c.hostConfig.Ulimits
-
-	// Merge ulimits with daemon defaults
-	ulIdx := make(map[string]*ulimit.Ulimit)
-	for _, ul := range ulimits {
-		ulIdx[ul.Name] = ul
-	}
-	for name, ul := range c.daemon.config.Ulimits {
-		if _, exists := ulIdx[name]; !exists {
-			ulimits = append(ulimits, ul)
-		}
-	}
-
-	for _, limit := range ulimits {
-		rl, err := limit.GetRlimit()
-		if err != nil {
-			return err
-		}
-		rlimits = append(rlimits, rl)
-	}
-
-	resources := &execdriver.Resources{
-		Memory:     c.hostConfig.Memory,
-		MemorySwap: c.hostConfig.MemorySwap,
-		CpuShares:  c.hostConfig.CpuShares,
-		CpusetCpus: c.hostConfig.CpusetCpus,
-		CpusetMems: c.hostConfig.CpusetMems,
-		CpuQuota:   c.hostConfig.CpuQuota,
-		Rlimits:    rlimits,
-	}
-
-	processConfig := execdriver.ProcessConfig{
-		Privileged: c.hostConfig.Privileged,
-		Entrypoint: c.Path,
-		Arguments:  c.Args,
-		Tty:        c.Config.Tty,
-		User:       c.Config.User,
-	}
-
-	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
-	processConfig.Env = env
-
-	c.command = &execdriver.Command{
-		ID:                 c.ID,
-		Rootfs:             c.RootfsPath(),
-		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
-		InitPath:           "/.dockerinit",
-		WorkingDir:         c.Config.WorkingDir,
-		Network:            en,
-		Ipc:                ipc,
-		Pid:                pid,
-		Resources:          resources,
-		AllowedDevices:     allowedDevices,
-		AutoCreatedDevices: autoCreatedDevices,
-		CapAdd:             c.hostConfig.CapAdd,
-		CapDrop:            c.hostConfig.CapDrop,
-		ProcessConfig:      processConfig,
-		ProcessLabel:       c.GetProcessLabel(),
-		MountLabel:         c.GetMountLabel(),
-		LxcConfig:          lxcConfig,
-		AppArmorProfile:    c.AppArmorProfile,
-		CgroupParent:       c.hostConfig.CgroupParent,
-	}
-
-	return nil
-}
-
 func (container *Container) Start() (err error) {
 func (container *Container) Start() (err error) {
 	container.Lock()
 	container.Lock()
 	defer container.Unlock()
 	defer container.Unlock()
@@ -449,22 +245,13 @@ func (container *Container) Start() (err error) {
 		}
 		}
 	}()
 	}()
 
 
-	if err := container.setupContainerDns(); err != nil {
-		return err
-	}
 	if err := container.Mount(); err != nil {
 	if err := container.Mount(); err != nil {
 		return err
 		return err
 	}
 	}
 	if err := container.initializeNetworking(); err != nil {
 	if err := container.initializeNetworking(); err != nil {
 		return err
 		return err
 	}
 	}
-	if err := container.updateParentsHosts(); err != nil {
-		return err
-	}
 	container.verifyDaemonSettings()
 	container.verifyDaemonSettings()
-	if err := container.prepareVolumes(); err != nil {
-		return err
-	}
 	linkedEnv, err := container.setupLinkedContainers()
 	linkedEnv, err := container.setupLinkedContainers()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -476,10 +263,13 @@ func (container *Container) Start() (err error) {
 	if err := populateCommand(container, env); err != nil {
 	if err := populateCommand(container, env); err != nil {
 		return err
 		return err
 	}
 	}
-	if err := container.setupMounts(); err != nil {
+
+	mounts, err := container.setupMounts()
+	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
+	container.command.Mounts = mounts
 	return container.waitForStart()
 	return container.waitForStart()
 }
 }
 
 
@@ -538,181 +328,16 @@ func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser {
 	return ioutils.NewBufReader(reader)
 	return ioutils.NewBufReader(reader)
 }
 }
 
 
-func (container *Container) buildHostnameFile() error {
-	hostnamePath, err := container.GetRootResourcePath("hostname")
-	if err != nil {
-		return err
-	}
-	container.HostnamePath = hostnamePath
-
-	if container.Config.Domainname != "" {
-		return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
-	}
-	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
-}
-
-func (container *Container) buildHostsFiles(IP string) error {
-
-	hostsPath, err := container.GetRootResourcePath("hosts")
-	if err != nil {
-		return err
-	}
-	container.HostsPath = hostsPath
-
-	var extraContent []etchosts.Record
-
-	children, err := container.daemon.Children(container.Name)
-	if err != nil {
-		return err
-	}
-
-	for linkAlias, child := range children {
-		_, alias := path.Split(linkAlias)
-		// allow access to the linked container via the alias, real name, and container hostname
-		aliasList := alias + " " + child.Config.Hostname
-		// only add the name if alias isn't equal to the name
-		if alias != child.Name[1:] {
-			aliasList = aliasList + " " + child.Name[1:]
-		}
-		extraContent = append(extraContent, etchosts.Record{Hosts: aliasList, IP: child.NetworkSettings.IPAddress})
-	}
-
-	for _, extraHost := range container.hostConfig.ExtraHosts {
-		// allow IPv6 addresses in extra hosts; only split on first ":"
-		parts := strings.SplitN(extraHost, ":", 2)
-		extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]})
-	}
-
-	return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent)
-}
-
-func (container *Container) buildHostnameAndHostsFiles(IP string) error {
-	if err := container.buildHostnameFile(); err != nil {
-		return err
-	}
-
-	return container.buildHostsFiles(IP)
-}
-
-func (container *Container) AllocateNetwork() error {
-	mode := container.hostConfig.NetworkMode
-	if container.Config.NetworkDisabled || !mode.IsPrivate() {
-		return nil
-	}
-
-	var (
-		err error
-		eng = container.daemon.eng
-	)
-
-	networkSettings, err := bridge.Allocate(container.ID, container.Config.MacAddress, "", "")
-	if err != nil {
-		return err
-	}
-
-	// Error handling: At this point, the interface is allocated so we have to
-	// make sure that it is always released in case of error, otherwise we
-	// might leak resources.
-
-	if container.Config.PortSpecs != nil {
-		if err = migratePortMappings(container.Config, container.hostConfig); err != nil {
-			bridge.Release(container.ID)
-			return err
-		}
-		container.Config.PortSpecs = nil
-		if err = container.WriteHostConfig(); err != nil {
-			bridge.Release(container.ID)
-			return err
-		}
-	}
-
-	var (
-		portSpecs = make(nat.PortSet)
-		bindings  = make(nat.PortMap)
-	)
-
-	if container.Config.ExposedPorts != nil {
-		portSpecs = container.Config.ExposedPorts
-	}
-
-	if container.hostConfig.PortBindings != nil {
-		for p, b := range container.hostConfig.PortBindings {
-			bindings[p] = []nat.PortBinding{}
-			for _, bb := range b {
-				bindings[p] = append(bindings[p], nat.PortBinding{
-					HostIp:   bb.HostIp,
-					HostPort: bb.HostPort,
-				})
-			}
-		}
-	}
-
-	container.NetworkSettings.PortMapping = nil
-
-	for port := range portSpecs {
-		if err = container.allocatePort(eng, port, bindings); err != nil {
-			bridge.Release(container.ID)
-			return err
-		}
-	}
-	container.WriteHostConfig()
-
-	networkSettings.Ports = bindings
-	container.NetworkSettings = networkSettings
-
-	return nil
-}
-
-func (container *Container) ReleaseNetwork() {
-	if container.Config.NetworkDisabled || !container.hostConfig.NetworkMode.IsPrivate() {
-		return
-	}
-
-	bridge.Release(container.ID)
-
-	container.NetworkSettings = &network.Settings{}
-}
-
 func (container *Container) isNetworkAllocated() bool {
 func (container *Container) isNetworkAllocated() bool {
 	return container.NetworkSettings.IPAddress != ""
 	return container.NetworkSettings.IPAddress != ""
 }
 }
 
 
-func (container *Container) RestoreNetwork() error {
-	mode := container.hostConfig.NetworkMode
-	// Don't attempt a restore if we previously didn't allocate networking.
-	// This might be a legacy container with no network allocated, in which case the
-	// allocation will happen once and for all at start.
-	if !container.isNetworkAllocated() || container.Config.NetworkDisabled || !mode.IsPrivate() {
-		return nil
-	}
-
-	eng := container.daemon.eng
-
-	// Re-allocate the interface with the same IP and MAC address.
-	if _, err := bridge.Allocate(container.ID, container.NetworkSettings.MacAddress, container.NetworkSettings.IPAddress, ""); err != nil {
-		return err
-	}
-
-	// Re-allocate any previously allocated ports.
-	for port := range container.NetworkSettings.Ports {
-		if err := container.allocatePort(eng, port, container.NetworkSettings.Ports); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // cleanup releases any network resources allocated to the container along with any rules
 // cleanup releases any network resources allocated to the container along with any rules
 // around how containers are linked together.  It also unmounts the container's root filesystem.
 // around how containers are linked together.  It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
 func (container *Container) cleanup() {
 	container.ReleaseNetwork()
 	container.ReleaseNetwork()
 
 
-	// Disable all active links
-	if container.activeLinks != nil {
-		for _, link := range container.activeLinks {
-			link.Disable()
-		}
-	}
+	disableAllActiveLinks(container)
 
 
 	if err := container.Unmount(); err != nil {
 	if err := container.Unmount(); err != nil {
 		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
 		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
@@ -721,6 +346,8 @@ func (container *Container) cleanup() {
 	for _, eConfig := range container.execCommands.s {
 	for _, eConfig := range container.execCommands.s {
 		container.daemon.unregisterExecCommand(eConfig)
 		container.daemon.unregisterExecCommand(eConfig)
 	}
 	}
+
+	container.UnmountVolumes(false)
 }
 }
 
 
 func (container *Container) KillSig(sig int) error {
 func (container *Container) KillSig(sig int) error {
@@ -762,23 +389,45 @@ func (container *Container) killPossiblyDeadProcess(sig int) error {
 }
 }
 
 
 func (container *Container) Pause() error {
-	if container.IsPaused() {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot Pause the container which is already paused
+	if container.Paused {
 		return fmt.Errorf("Container %s is already paused", container.ID)
 	}
-	if !container.IsRunning() {
+
+	// We cannot Pause the container which is not running
+	if !container.Running {
 		return fmt.Errorf("Container %s is not running", container.ID)
 	}
-	return container.daemon.Pause(container)
+
+	if err := container.daemon.execDriver.Pause(container.command); err != nil {
+		return err
+	}
+	container.Paused = true
+	return nil
 }
 
 func (container *Container) Unpause() error {
-	if !container.IsPaused() {
-		return fmt.Errorf("Container %s is not paused", container.ID)
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot unpause the container which is not paused
+	if !container.Paused {
+		return fmt.Errorf("Container %s is not paused", container.ID)
 	}
-	if !container.IsRunning() {
+
+	// We cannot unpause the container which is not running
+	if !container.Running {
 		return fmt.Errorf("Container %s is not running", container.ID)
 	}
-	return container.daemon.Unpause(container)
+
+	if err := container.daemon.execDriver.Unpause(container.command); err != nil {
+		return err
+	}
+	container.Paused = false
+	return nil
 }
 
 func (container *Container) Kill() error {
@@ -792,17 +441,8 @@ func (container *Container) Kill() error {
 	}
 	}
 
 
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
-	if _, err := container.WaitStop(10 * time.Second); err != nil {
-		// Ensure that we don't kill ourselves
-		if pid := container.GetPid(); pid != 0 {
-			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
-			if err := syscall.Kill(pid, 9); err != nil {
-				if err != syscall.ESRCH {
-					return err
-				}
-				logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
-			}
-		}
+	if err := killProcessDirectly(container); err != nil {
+		return err
 	}
 	}
 
 
 	container.WaitStop(-1 * time.Second)
 	container.WaitStop(-1 * time.Second)
@@ -831,6 +471,7 @@ func (container *Container) Stop(seconds int) error {
 			return err
 			return err
 		}
 		}
 	}
 	}
+
 	return nil
 	return nil
 }
 }
 
 
@@ -855,26 +496,6 @@ func (container *Container) Resize(h, w int) error {
 	return container.command.ProcessConfig.Terminal.Resize(h, w)
 	return container.command.ProcessConfig.Terminal.Resize(h, w)
 }
 }
 
 
-func (container *Container) ExportRw() (archive.Archive, error) {
-	if err := container.Mount(); err != nil {
-		return nil, err
-	}
-	if container.daemon == nil {
-		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
-	}
-	archive, err := container.daemon.Diff(container)
-	if err != nil {
-		container.Unmount()
-		return nil, err
-	}
-	return ioutils.NewReadCloserWrapper(archive, func() error {
-			err := archive.Close()
-			container.Unmount()
-			return err
-		}),
-		nil
-}
-
 func (container *Container) Export() (archive.Archive, error) {
 func (container *Container) Export() (archive.Archive, error) {
 	if err := container.Mount(); err != nil {
 	if err := container.Mount(); err != nil {
 		return nil, err
 		return nil, err
@@ -918,18 +539,6 @@ func (container *Container) Unmount() error {
 	return container.daemon.Unmount(container)
 	return container.daemon.Unmount(container)
 }
 }
 
 
-func (container *Container) logPath(name string) (string, error) {
-	return container.GetRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
-}
-
-func (container *Container) ReadLog(name string) (io.Reader, error) {
-	pth, err := container.logPath(name)
-	if err != nil {
-		return nil, err
-	}
-	return os.Open(pth)
-}
-
 func (container *Container) hostConfigPath() (string, error) {
 func (container *Container) hostConfigPath() (string, error) {
 	return container.GetRootResourcePath("hostconfig.json")
 	return container.GetRootResourcePath("hostconfig.json")
 }
 }
@@ -951,37 +560,6 @@ func validateID(id string) error {
 	return nil
 	return nil
 }
 }
 
 
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
-	var (
-		sizeRw, sizeRootfs int64
-		err                error
-		driver             = container.daemon.driver
-	)
-
-	if err := container.Mount(); err != nil {
-		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
-		return sizeRw, sizeRootfs
-	}
-	defer container.Unmount()
-
-	initID := fmt.Sprintf("%s-init", container.ID)
-	sizeRw, err = driver.DiffSize(container.ID, initID)
-	if err != nil {
-		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
-		// FIXME: GetSize should return an error. Not changing it now in case
-		// there is a side-effect.
-		sizeRw = -1
-	}
-
-	if _, err = os.Stat(container.basefs); err != nil {
-		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
-			sizeRootfs = -1
-		}
-	}
-	return sizeRw, sizeRootfs
-}
-
 func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	container.Lock()
 	container.Lock()
 	defer container.Unlock()
 	defer container.Unlock()
@@ -991,39 +569,42 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	}
 	}
 	defer func() {
 	defer func() {
 		if err != nil {
 		if err != nil {
+			// unmount any volumes
+			container.UnmountVolumes(true)
+			// unmount the container's rootfs
 			container.Unmount()
 			container.Unmount()
 		}
 		}
 	}()
 	}()
-
-	if err = container.mountVolumes(); err != nil {
-		container.unmountVolumes()
+	mounts, err := container.setupMounts()
+	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	defer func() {
+	for _, m := range mounts {
+		dest, err := container.GetResourcePath(m.Destination)
 		if err != nil {
 		if err != nil {
-			container.unmountVolumes()
+			return nil, err
 		}
 		}
-	}()
-
+		if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
+			return nil, err
+		}
+	}
 	basePath, err := container.GetResourcePath(resource)
 	basePath, err := container.GetResourcePath(resource)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-
 	stat, err := os.Stat(basePath)
 	stat, err := os.Stat(basePath)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	var filter []string
 	var filter []string
 	if !stat.IsDir() {
 	if !stat.IsDir() {
-		d, f := path.Split(basePath)
+		d, f := filepath.Split(basePath)
 		basePath = d
 		basePath = d
 		filter = []string{f}
 		filter = []string{f}
 	} else {
 	} else {
-		filter = []string{path.Base(basePath)}
-		basePath = path.Dir(basePath)
+		filter = []string{filepath.Base(basePath)}
+		basePath = filepath.Dir(basePath)
 	}
 	}
-
 	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
 	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
 		Compression:  archive.Uncompressed,
 		Compression:  archive.Uncompressed,
 		IncludeFiles: filter,
 		IncludeFiles: filter,
@@ -1031,10 +612,9 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-
 	return ioutils.NewReadCloserWrapper(archive, func() error {
 	return ioutils.NewReadCloserWrapper(archive, func() error {
 			err := archive.Close()
 			err := archive.Close()
-			container.unmountVolumes()
+			container.UnmountVolumes(true)
 			container.Unmount()
 			container.Unmount()
 			return err
 			return err
 		}),
 		}),
@@ -1048,518 +628,486 @@ func (container *Container) Exposes(p nat.Port) bool {
 }
 }
 
 
 func (container *Container) HostConfig() *runconfig.HostConfig {
 func (container *Container) HostConfig() *runconfig.HostConfig {
-	container.Lock()
-	res := container.hostConfig
-	container.Unlock()
-	return res
+	return container.hostConfig
 }
 }
 
 
 func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
 func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
-	container.Lock()
 	container.hostConfig = hostConfig
 	container.hostConfig = hostConfig
-	container.Unlock()
 }
 }
 
 
-func (container *Container) DisableLink(name string) {
-	if container.activeLinks != nil {
-		if link, exists := container.activeLinks[name]; exists {
-			link.Disable()
-		} else {
-			logrus.Debugf("Could not find active link for %s", name)
-		}
+func (container *Container) getLogConfig() runconfig.LogConfig {
+	cfg := container.hostConfig.LogConfig
+	if cfg.Type != "" { // container has log driver configured
+		return cfg
 	}
 	}
+	// Use daemon's default log config for containers
+	return container.daemon.defaultLogConfig
 }
 }
 
 
-func (container *Container) setupContainerDns() error {
-	if container.ResolvConfPath != "" {
-		// check if this is an existing container that needs DNS update:
-		if container.UpdateDns {
-			// read the host's resolv.conf, get the hash and call updateResolvConf
-			logrus.Debugf("Check container (%s) for update to resolv.conf - UpdateDns flag was set", container.ID)
-			latestResolvConf, latestHash := resolvconf.GetLastModified()
-
-			// clean container resolv.conf re: localhost nameservers and IPv6 NS (if IPv6 disabled)
-			updatedResolvConf, modified := resolvconf.FilterResolvDns(latestResolvConf, container.daemon.config.Bridge.EnableIPv6)
-			if modified {
-				// changes have occurred during resolv.conf localhost cleanup: generate an updated hash
-				newHash, err := ioutils.HashData(bytes.NewReader(updatedResolvConf))
-				if err != nil {
-					return err
-				}
-				latestHash = newHash
-			}
+func (container *Container) getLogger() (logger.Logger, error) {
+	cfg := container.getLogConfig()
+	c, err := logger.GetLogDriver(cfg.Type)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get logging factory: %v", err)
+	}
+	ctx := logger.Context{
+		Config:        cfg.Config,
+		ContainerID:   container.ID,
+		ContainerName: container.Name,
+	}
 
 
-			if err := container.updateResolvConf(updatedResolvConf, latestHash); err != nil {
-				return err
-			}
-			// successful update of the restarting container; set the flag off
-			container.UpdateDns = false
+	// Set the log file path for the "json-file" driver
+	if cfg.Type == jsonfilelog.Name {
+		ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
+		if err != nil {
+			return nil, err
 		}
 		}
-		return nil
 	}
 	}
+	return c(ctx)
+}
 
 
-	var (
-		config = container.hostConfig
-		daemon = container.daemon
-	)
-
-	resolvConf, err := resolvconf.Get()
-	if err != nil {
-		return err
+func (container *Container) startLogging() error {
+	cfg := container.getLogConfig()
+	if cfg.Type == "none" {
+		return nil // do not start logging routines
 	}
 	}
-	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+
+	l, err := container.getLogger()
 	if err != nil {
 	if err != nil {
-		return err
+		return fmt.Errorf("Failed to initialize logging driver: %v", err)
 	}
 	}
 
 
-	if config.NetworkMode != "host" {
-		// check configurations for any container/daemon dns settings
-		if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
-			var (
-				dns       = resolvconf.GetNameservers(resolvConf)
-				dnsSearch = resolvconf.GetSearchDomains(resolvConf)
-			)
-			if len(config.Dns) > 0 {
-				dns = config.Dns
-			} else if len(daemon.config.Dns) > 0 {
-				dns = daemon.config.Dns
-			}
-			if len(config.DnsSearch) > 0 {
-				dnsSearch = config.DnsSearch
-			} else if len(daemon.config.DnsSearch) > 0 {
-				dnsSearch = daemon.config.DnsSearch
-			}
-			return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
-		}
-
-		// replace any localhost/127.*, and remove IPv6 nameservers if IPv6 disabled in daemon
-		resolvConf, _ = resolvconf.FilterResolvDns(resolvConf, daemon.config.Bridge.EnableIPv6)
-	}
-	//get a sha256 hash of the resolv conf at this point so we can check
-	//for changes when the host resolv.conf changes (e.g. network update)
-	resolvHash, err := ioutils.HashData(bytes.NewReader(resolvConf))
+	copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	resolvHashFile := container.ResolvConfPath + ".hash"
-	if err = ioutil.WriteFile(resolvHashFile, []byte(resolvHash), 0644); err != nil {
-		return err
-	}
-	return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644)
-}
-
-// called when the host's resolv.conf changes to check whether container's resolv.conf
-// is unchanged by the container "user" since container start: if unchanged, the
-// container's resolv.conf will be updated to match the host's new resolv.conf
-func (container *Container) updateResolvConf(updatedResolvConf []byte, newResolvHash string) error {
+	container.logCopier = copier
+	copier.Run()
+	container.logDriver = l
 
 
-	if container.ResolvConfPath == "" {
-		return nil
-	}
-	if container.Running {
-		//set a marker in the hostConfig to update on next start/restart
-		container.UpdateDns = true
-		return nil
+	// set LogPath field only for json-file logdriver
+	if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
+		container.LogPath = jl.LogPath()
 	}
 	}
 
 
-	resolvHashFile := container.ResolvConfPath + ".hash"
+	return nil
+}
 
 
-	//read the container's current resolv.conf and compute the hash
-	resolvBytes, err := ioutil.ReadFile(container.ResolvConfPath)
-	if err != nil {
-		return err
-	}
-	curHash, err := ioutils.HashData(bytes.NewReader(resolvBytes))
-	if err != nil {
+func (container *Container) waitForStart() error {
+	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
+
+	// block until we either receive an error from the initial start of the container's
+	// process or until the process is running in the container
+	select {
+	case <-container.monitor.startSignal:
+	case err := <-promise.Go(container.monitor.Start):
 		return err
 		return err
 	}
 	}
 
 
-	//read the hash from the last time we wrote resolv.conf in the container
-	hashBytes, err := ioutil.ReadFile(resolvHashFile)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		// backwards compat: if no hash file exists, this container pre-existed from
-		// a Docker daemon that didn't contain this update feature. Given we can't know
-		// if the user has modified the resolv.conf since container start time, safer
-		// to just never update the container's resolv.conf during it's lifetime which
-		// we can control by setting hashBytes to an empty string
-		hashBytes = []byte("")
-	}
-
-	//if the user has not modified the resolv.conf of the container since we wrote it last
-	//we will replace it with the updated resolv.conf from the host
-	if string(hashBytes) == curHash {
-		logrus.Debugf("replacing %q with updated host resolv.conf", container.ResolvConfPath)
-
-		// for atomic updates to these files, use temporary files with os.Rename:
-		dir := path.Dir(container.ResolvConfPath)
-		tmpHashFile, err := ioutil.TempFile(dir, "hash")
-		if err != nil {
-			return err
-		}
-		tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
-		if err != nil {
-			return err
-		}
-
-		// write the updates to the temp files
-		if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newResolvHash), 0644); err != nil {
-			return err
-		}
-		if err = ioutil.WriteFile(tmpResolvFile.Name(), updatedResolvConf, 0644); err != nil {
-			return err
-		}
+	return nil
+}
 
 
-		// rename the temp files for atomic replace
-		if err = os.Rename(tmpHashFile.Name(), resolvHashFile); err != nil {
-			return err
-		}
-		return os.Rename(tmpResolvFile.Name(), container.ResolvConfPath)
+func (container *Container) GetProcessLabel() string {
+	// even if we have a process label return "" if we are running
+	// in privileged mode
+	if container.hostConfig.Privileged {
+		return ""
 	}
 	}
-	return nil
+	return container.ProcessLabel
 }
 }
 
 
-func (container *Container) updateParentsHosts() error {
-	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
-	for _, ref := range refs {
-		if ref.ParentID == "0" {
-			continue
-		}
+func (container *Container) GetMountLabel() string {
+	if container.hostConfig.Privileged {
+		return ""
+	}
+	return container.MountLabel
+}
 
 
-		c, err := container.daemon.Get(ref.ParentID)
-		if err != nil {
-			logrus.Error(err)
-		}
+func (container *Container) Stats() (*execdriver.ResourceStats, error) {
+	return container.daemon.Stats(container)
+}
 
 
-		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
-			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
-			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil {
-				logrus.Errorf("Failed to update /etc/hosts in parent container %s for alias %s: %v", c.ID, ref.Name, err)
-			}
-		}
+func (c *Container) LogDriverType() string {
+	c.Lock()
+	defer c.Unlock()
+	if c.hostConfig.LogConfig.Type == "" {
+		return c.daemon.defaultLogConfig.Type
 	}
 	}
-	return nil
+	return c.hostConfig.LogConfig.Type
 }
 }
 
 
-func (container *Container) initializeNetworking() error {
-	var err error
-	if container.hostConfig.NetworkMode.IsHost() {
-		container.Config.Hostname, err = os.Hostname()
-		if err != nil {
-			return err
-		}
+func (container *Container) GetExecIDs() []string {
+	return container.execCommands.List()
+}
 
 
-		parts := strings.SplitN(container.Config.Hostname, ".", 2)
-		if len(parts) > 1 {
-			container.Config.Hostname = parts[0]
-			container.Config.Domainname = parts[1]
-		}
+func (container *Container) Exec(execConfig *execConfig) error {
+	container.Lock()
+	defer container.Unlock()
 
 
-		content, err := ioutil.ReadFile("/etc/hosts")
-		if os.IsNotExist(err) {
-			return container.buildHostnameAndHostsFiles("")
-		} else if err != nil {
-			return err
-		}
+	waitStart := make(chan struct{})
 
 
-		if err := container.buildHostnameFile(); err != nil {
-			return err
+	callback := func(processConfig *execdriver.ProcessConfig, pid int) {
+		if processConfig.Tty {
+			// The callback is called after the process Start()
+			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+			// which we close here.
+			if c, ok := processConfig.Stdout.(io.Closer); ok {
+				c.Close()
+			}
 		}
 		}
+		close(waitStart)
+	}
 
 
-		hostsPath, err := container.GetRootResourcePath("hosts")
-		if err != nil {
-			return err
-		}
-		container.HostsPath = hostsPath
+	// We use a callback here instead of a goroutine and a channel for
+	// synchronization purposes
+	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
 
 
-		return ioutil.WriteFile(container.HostsPath, content, 0644)
-	}
-	if container.hostConfig.NetworkMode.IsContainer() {
-		// we need to get the hosts files from the container to join
-		nc, err := container.getNetworkedContainer()
-		if err != nil {
-			return err
-		}
-		container.HostnamePath = nc.HostnamePath
-		container.HostsPath = nc.HostsPath
-		container.ResolvConfPath = nc.ResolvConfPath
-		container.Config.Hostname = nc.Config.Hostname
-		container.Config.Domainname = nc.Config.Domainname
-		return nil
-	}
-	if container.daemon.config.DisableNetwork {
-		container.Config.NetworkDisabled = true
-		return container.buildHostnameAndHostsFiles("127.0.1.1")
-	}
-	if err := container.AllocateNetwork(); err != nil {
+	// Exec should not return until the process is actually running
+	select {
+	case <-waitStart:
+	case err := <-cErr:
 		return err
 		return err
 	}
 	}
-	return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
-}
 
 
-// Make sure the config is compatible with the current kernel
-func (container *Container) verifyDaemonSettings() {
-	if container.hostConfig.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
-		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
-		container.hostConfig.Memory = 0
-	}
-	if container.hostConfig.Memory > 0 && container.hostConfig.MemorySwap != -1 && !container.daemon.sysInfo.SwapLimit {
-		logrus.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
-		container.hostConfig.MemorySwap = -1
-	}
-	if container.daemon.sysInfo.IPv4ForwardingDisabled {
-		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
-	}
+	return nil
 }
 }
 
 
-func (container *Container) setupLinkedContainers() ([]string, error) {
+func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
 	var (
 	var (
-		env    []string
-		daemon = container.daemon
+		err      error
+		exitCode int
 	)
 	)
-	children, err := daemon.Children(container.Name)
+
+	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
+	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
 	if err != nil {
 	if err != nil {
-		return nil, err
+		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
 	}
 	}
 
 
-	if len(children) > 0 {
-		container.activeLinks = make(map[string]*links.Link, len(children))
-
-		// If we encounter an error make sure that we rollback any network
-		// config and iptables changes
-		rollback := func() {
-			for _, link := range container.activeLinks {
-				link.Disable()
-			}
-			container.activeLinks = nil
+	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
+	if execConfig.OpenStdin {
+		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
+		}
+	}
+	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
+	}
+	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
+	}
+	if execConfig.ProcessConfig.Terminal != nil {
+		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
 		}
 		}
+	}
 
 
-		for linkAlias, child := range children {
-			if !child.IsRunning() {
-				return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
-			}
+	return err
+}
 
 
-			link, err := links.NewLink(
-				container.NetworkSettings.IPAddress,
-				child.NetworkSettings.IPAddress,
-				linkAlias,
-				child.Config.Env,
-				child.Config.ExposedPorts,
-			)
+func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+	return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
+}
 
 
-			if err != nil {
-				rollback()
-				return nil, err
-			}
+func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
+	if logs {
+		logDriver, err := c.getLogger()
+		cLog, err := logDriver.GetReader()
 
 
-			container.activeLinks[link.Alias()] = link
-			if err := link.Enable(); err != nil {
-				rollback()
-				return nil, err
+		if err != nil {
+			logrus.Errorf("Error reading logs: %s", err)
+		} else if c.LogDriverType() != jsonfilelog.Name {
+			logrus.Errorf("Reading logs not implemented for driver %s", c.LogDriverType())
+		} else {
+			dec := json.NewDecoder(cLog)
+			for {
+				l := &jsonlog.JSONLog{}
+
+				if err := dec.Decode(l); err == io.EOF {
+					break
+				} else if err != nil {
+					logrus.Errorf("Error streaming logs: %s", err)
+					break
+				}
+				if l.Stream == "stdout" && stdout != nil {
+					io.WriteString(stdout, l.Log)
+				}
+				if l.Stream == "stderr" && stderr != nil {
+					io.WriteString(stderr, l.Log)
+				}
 			}
 			}
+		}
+	}
 
 
-			for _, envVar := range link.ToEnv() {
-				env = append(env, envVar)
-			}
+	//stream
+	if stream {
+		var stdinPipe io.ReadCloser
+		if stdin != nil {
+			r, w := io.Pipe()
+			go func() {
+				defer w.Close()
+				defer logrus.Debugf("Closing buffered stdin pipe")
+				io.Copy(w, stdin)
+			}()
+			stdinPipe = r
+		}
+		<-c.Attach(stdinPipe, stdout, stderr)
+		// If we are in stdinonce mode, wait for the process to end
+		// otherwise, simply return
+		if c.Config.StdinOnce && !c.Config.Tty {
+			c.WaitStop(-1 * time.Second)
 		}
 		}
 	}
 	}
-	return env, nil
+	return nil
 }
 }
 
 
-func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
-	// if a domain name was specified, append it to the hostname (see #7851)
-	fullHostname := container.Config.Hostname
-	if container.Config.Domainname != "" {
-		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
+func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+	var (
+		cStdout, cStderr io.ReadCloser
+		cStdin           io.WriteCloser
+		wg               sync.WaitGroup
+		errors           = make(chan error, 3)
+	)
+
+	if stdin != nil && openStdin {
+		cStdin = streamConfig.StdinPipe()
+		wg.Add(1)
 	}
 	}
-	// Setup environment
-	env := []string{
-		"PATH=" + DefaultPathEnv,
-		"HOSTNAME=" + fullHostname,
-		// Note: we don't set HOME here because it'll get autoset intelligently
-		// based on the value of USER inside dockerinit, but only if it isn't
-		// set already (ie, that can be overridden by setting HOME via -e or ENV
-		// in a Dockerfile).
+
+	if stdout != nil {
+		cStdout = streamConfig.StdoutPipe()
+		wg.Add(1)
 	}
 	}
-	if container.Config.Tty {
-		env = append(env, "TERM=xterm")
+
+	if stderr != nil {
+		cStderr = streamConfig.StderrPipe()
+		wg.Add(1)
 	}
 	}
-	env = append(env, linkedEnv...)
-	// because the env on the container can override certain default values
-	// we need to replace the 'env' keys where they match and append anything
-	// else.
-	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
 
 
-	return env
-}
+	// Connect stdin of container to the http conn.
+	go func() {
+		if stdin == nil || !openStdin {
+			return
+		}
+		logrus.Debugf("attach: stdin: begin")
+		defer func() {
+			if stdinOnce && !tty {
+				cStdin.Close()
+			} else {
+				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
+				if cStdout != nil {
+					cStdout.Close()
+				}
+				if cStderr != nil {
+					cStderr.Close()
+				}
+			}
+			wg.Done()
+			logrus.Debugf("attach: stdin: end")
+		}()
 
 
-func (container *Container) setupWorkingDirectory() error {
-	if container.Config.WorkingDir != "" {
-		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
+		var err error
+		if tty {
+			_, err = copyEscapable(cStdin, stdin)
+		} else {
+			_, err = io.Copy(cStdin, stdin)
 
 
-		pth, err := container.GetResourcePath(container.Config.WorkingDir)
-		if err != nil {
-			return err
 		}
 		}
-
-		pthInfo, err := os.Stat(pth)
+		if err == io.ErrClosedPipe {
+			err = nil
+		}
 		if err != nil {
 		if err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
+			logrus.Errorf("attach: stdin: %s", err)
+			errors <- err
+			return
+		}
+	}()
 
 
-			if err := os.MkdirAll(pth, 0755); err != nil {
-				return err
+	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
+		if stream == nil {
+			return
+		}
+		defer func() {
+			// Make sure stdin gets closed
+			if stdin != nil {
+				stdin.Close()
 			}
 			}
+			streamPipe.Close()
+			wg.Done()
+			logrus.Debugf("attach: %s: end", name)
+		}()
+
+		logrus.Debugf("attach: %s: begin", name)
+		_, err := io.Copy(stream, streamPipe)
+		if err == io.ErrClosedPipe {
+			err = nil
 		}
 		}
-		if pthInfo != nil && !pthInfo.IsDir() {
-			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
+		if err != nil {
+			logrus.Errorf("attach: %s: %v", name, err)
+			errors <- err
 		}
 		}
 	}
 	}
-	return nil
-}
 
 
-func (container *Container) startLogging() error {
-	cfg := container.hostConfig.LogConfig
-	if cfg.Type == "" {
-		cfg = container.daemon.defaultLogConfig
-	}
-	var l logger.Logger
-	switch cfg.Type {
-	case "json-file":
-		pth, err := container.logPath("json")
-		if err != nil {
-			return err
+	go attachStream("stdout", stdout, cStdout)
+	go attachStream("stderr", stderr, cStderr)
+
+	return promise.Go(func() error {
+		wg.Wait()
+		close(errors)
+		for err := range errors {
+			if err != nil {
+				return err
+			}
 		}
 		}
-		container.LogPath = pth
+		return nil
+	})
+}
 
 
-		dl, err := jsonfilelog.New(pth)
-		if err != nil {
-			return err
+// Code c/c from io.Copy() modified to handle escape sequence
+func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
+	buf := make([]byte, 32*1024)
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			// ---- Docker addition
+			// char 16 is C-p
+			if nr == 1 && buf[0] == 16 {
+				nr, er = src.Read(buf)
+				// char 17 is C-q
+				if nr == 1 && buf[0] == 17 {
+					if err := src.Close(); err != nil {
+						return 0, err
+					}
+					return 0, nil
+				}
+			}
+			// ---- End of docker
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
 		}
 		}
-		l = dl
-	case "syslog":
-		dl, err := syslog.New(container.ID[:12])
-		if err != nil {
-			return err
+		if er == io.EOF {
+			break
 		}
 		}
-		l = dl
-	case "journald":
-		dl, err := journald.New(container.ID[:12])
-		if err != nil {
-			return err
+		if er != nil {
+			err = er
+			break
 		}
 		}
-		l = dl
-	case "none":
-		return nil
-	default:
-		return fmt.Errorf("Unknown logging driver: %s", cfg.Type)
 	}
 	}
+	return written, err
+}
 
 
-	copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
-	if err != nil {
-		return err
+func (container *Container) networkMounts() []execdriver.Mount {
+	var mounts []execdriver.Mount
+	if container.ResolvConfPath != "" {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.ResolvConfPath,
+			Destination: "/etc/resolv.conf",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
 	}
 	}
-	container.logCopier = copier
-	copier.Run()
-	container.logDriver = l
-
-	return nil
+	if container.HostnamePath != "" {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.HostnamePath,
+			Destination: "/etc/hostname",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	if container.HostsPath != "" {
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.HostsPath,
+			Destination: "/etc/hosts",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	return mounts
 }
 }
 
 
-func (container *Container) waitForStart() error {
-	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
-
-	// block until we either receive an error from the initial start of the container's
-	// process or until the process is running in the container
-	select {
-	case <-container.monitor.startSignal:
-	case err := <-promise.Go(container.monitor.Start):
-		return err
+func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
+	container.MountPoints[destination] = &mountPoint{
+		Name:        name,
+		Driver:      volume.DefaultDriverName,
+		Destination: destination,
+		RW:          rw,
 	}
 	}
-
-	return nil
 }
 }
 
 
-func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error {
-	binding := bindings[port]
-	if container.hostConfig.PublishAllPorts && len(binding) == 0 {
-		binding = append(binding, nat.PortBinding{})
+func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+	container.MountPoints[destination] = &mountPoint{
+		Name:        vol.Name(),
+		Driver:      vol.DriverName(),
+		Destination: destination,
+		RW:          rw,
+		Volume:      vol,
 	}
 	}
+}
 
 
-	for i := 0; i < len(binding); i++ {
-		b, err := bridge.AllocatePort(container.ID, port, binding[i])
-		if err != nil {
-			return err
+func (container *Container) isDestinationMounted(destination string) bool {
+	return container.MountPoints[destination] != nil
+}
+
+func (container *Container) prepareMountPoints() error {
+	for _, config := range container.MountPoints {
+		if len(config.Driver) > 0 {
+			v, err := createVolume(config.Name, config.Driver)
+			if err != nil {
+				return err
+			}
+			config.Volume = v
 		}
 		}
-		binding[i] = b
 	}
 	}
-	bindings[port] = binding
 	return nil
 	return nil
 }
 }
 
 
-func (container *Container) GetProcessLabel() string {
-	// even if we have a process label return "" if we are running
-	// in privileged mode
-	if container.hostConfig.Privileged {
-		return ""
+func (container *Container) removeMountPoints() error {
+	for _, m := range container.MountPoints {
+		if m.Volume != nil {
+			if err := removeVolume(m.Volume); err != nil {
+				return err
+			}
+		}
 	}
 	}
-	return container.ProcessLabel
+	return nil
 }
 }
 
 
-func (container *Container) GetMountLabel() string {
-	if container.hostConfig.Privileged {
-		return ""
-	}
-	return container.MountLabel
+func (container *Container) shouldRestart() bool {
+	return container.hostConfig.RestartPolicy.Name == "always" ||
+		(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
 }
 }
 
 
-func (container *Container) getIpcContainer() (*Container, error) {
-	containerID := container.hostConfig.IpcMode.Container()
-	c, err := container.daemon.Get(containerID)
+func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
+	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
 	if err != nil {
 	if err != nil {
-		return nil, err
-	}
-	if !c.IsRunning() {
-		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
+		return err
 	}
 	}
-	return c, nil
-}
 
 
-func (container *Container) getNetworkedContainer() (*Container, error) {
-	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
-	switch parts[0] {
-	case "container":
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("no container specified to join network")
-		}
-		nc, err := container.daemon.Get(parts[1])
-		if err != nil {
-			return nil, err
-		}
-		if container == nc {
-			return nil, fmt.Errorf("cannot join own network")
-		}
-		if !nc.IsRunning() {
-			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
+	if _, err = ioutil.ReadDir(rootfs); err != nil {
+		if os.IsNotExist(err) {
+			return nil
 		}
 		}
-		return nc, nil
-	default:
-		return nil, fmt.Errorf("network mode not set to container")
+		return err
 	}
 	}
-}
 
 
-func (container *Container) Stats() (*execdriver.ResourceStats, error) {
-	return container.daemon.Stats(container)
-}
+	path, err := v.Mount()
+	if err != nil {
+		return err
+	}
 
-func (c *Container) LogDriverType() string {
-	c.Lock()
-	defer c.Unlock()
-	if c.hostConfig.LogConfig.Type == "" {
-		return c.daemon.defaultLogConfig.Type
+	if err := copyExistingContents(rootfs, path); err != nil {
+		return err
 	}
-	return c.hostConfig.LogConfig.Type
+
+	return v.Unmount()
 }
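One of the larger changes above is that startLogging no longer switches over hard-coded driver names (json-file, syslog, journald); it asks a factory, logger.GetLogDriver, for a constructor and hands it a logger.Context. The registry pattern behind that, reduced to a minimal stand-alone sketch with invented names:

package main

import (
	"errors"
	"fmt"
)

// Logger is a minimal stand-in for the daemon's logger.Logger interface.
type Logger interface {
	Log(msg string) error
}

// Creator builds a Logger from per-container context; here just the container ID.
type Creator func(containerID string) (Logger, error)

var drivers = map[string]Creator{}

func registerLogDriver(name string, c Creator) { drivers[name] = c }

func getLogDriver(name string) (Creator, error) {
	if c, ok := drivers[name]; ok {
		return c, nil
	}
	return nil, errors.New("logger: no such driver: " + name)
}

// stdoutLogger is a toy driver that just prints; real drivers write JSON files,
// syslog, journald, and so on.
type stdoutLogger struct{ id string }

func (l stdoutLogger) Log(msg string) error {
	_, err := fmt.Printf("[%s] %s\n", l.id, msg)
	return err
}

func main() {
	registerLogDriver("stdout", func(id string) (Logger, error) { return stdoutLogger{id: id}, nil })

	create, err := getLogDriver("stdout")
	if err != nil {
		panic(err)
	}
	l, _ := create("abc123")
	l.Log("container started")
}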

+ 979 - 0
daemon/container_linux.go

@@ -0,0 +1,979 @@
+// +build linux
+
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/network"
+	"github.com/docker/docker/links"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/ulimit"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer/configs"
+	"github.com/docker/libcontainer/devices"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/types"
+)
+
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+type Container struct {
+	CommonContainer
+
+	// Fields below here are platform specific.
+
+	AppArmorProfile string
+	activeLinks     map[string]*links.Link
+}
+
+func killProcessDirectly(container *Container) error {
+	if _, err := container.WaitStop(10 * time.Second); err != nil {
+		// Ensure that we don't kill ourselves
+		if pid := container.GetPid(); pid != 0 {
+			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
+			if err := syscall.Kill(pid, 9); err != nil {
+				if err != syscall.ESRCH {
+					return err
+				}
+				logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
+			}
+		}
+	}
+	return nil
+}
+
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	var (
+		env    []string
+		daemon = container.daemon
+	)
+	children, err := daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(children) > 0 {
+		container.activeLinks = make(map[string]*links.Link, len(children))
+
+		// If we encounter an error make sure that we rollback any network
+		// config and iptables changes
+		rollback := func() {
+			for _, link := range container.activeLinks {
+				link.Disable()
+			}
+			container.activeLinks = nil
+		}
+
+		for linkAlias, child := range children {
+			if !child.IsRunning() {
+				return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
+			}
+
+			link, err := links.NewLink(
+				container.NetworkSettings.IPAddress,
+				child.NetworkSettings.IPAddress,
+				linkAlias,
+				child.Config.Env,
+				child.Config.ExposedPorts,
+			)
+
+			if err != nil {
+				rollback()
+				return nil, err
+			}
+
+			container.activeLinks[link.Alias()] = link
+			if err := link.Enable(); err != nil {
+				rollback()
+				return nil, err
+			}
+
+			for _, envVar := range link.ToEnv() {
+				env = append(env, envVar)
+			}
+		}
+	}
+	return env, nil
+}
+
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	// if a domain name was specified, append it to the hostname (see #7851)
+	fullHostname := container.Config.Hostname
+	if container.Config.Domainname != "" {
+		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
+	}
+	// Setup environment
+	env := []string{
+		"PATH=" + DefaultPathEnv,
+		"HOSTNAME=" + fullHostname,
+		// Note: we don't set HOME here because it'll get autoset intelligently
+		// based on the value of USER inside dockerinit, but only if it isn't
+		// set already (ie, that can be overridden by setting HOME via -e or ENV
+		// in a Dockerfile).
+	}
+	if container.Config.Tty {
+		env = append(env, "TERM=xterm")
+	}
+	env = append(env, linkedEnv...)
+	// because the env on the container can override certain default values
+	// we need to replace the 'env' keys where they match and append anything
+	// else.
+	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
+
+	return env
+}
+
+func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
+	device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+	// if there was no error, return the device
+	if err == nil {
+		device.Path = deviceMapping.PathInContainer
+		return append(devs, device), nil
+	}
+
+	// if the device is not a device node
+	// try to see if it's a directory holding many devices
+	if err == devices.ErrNotADevice {
+
+		// check if it is a directory
+		if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
+
+			// mount the internal devices recursively
+			filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
+				childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
+				if e != nil {
+					// ignore the device
+					return nil
+				}
+
+				// add the device to userSpecified devices
+				childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
+				devs = append(devs, childDevice)
+
+				return nil
+			})
+		}
+	}
+
+	if len(devs) > 0 {
+		return devs, nil
+	}
+
+	return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
+}
+
+func populateCommand(c *Container, env []string) error {
+	var en *execdriver.Network
+	if !c.daemon.config.DisableNetwork {
+		en = &execdriver.Network{
+			NamespacePath: c.NetworkSettings.SandboxKey,
+		}
+
+		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+		if parts[0] == "container" {
+			nc, err := c.getNetworkedContainer()
+			if err != nil {
+				return err
+			}
+			en.ContainerID = nc.ID
+		}
+	}
+
+	ipc := &execdriver.Ipc{}
+
+	if c.hostConfig.IpcMode.IsContainer() {
+		ic, err := c.getIpcContainer()
+		if err != nil {
+			return err
+		}
+		ipc.ContainerID = ic.ID
+	} else {
+		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
+	}
+
+	pid := &execdriver.Pid{}
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
+	uts := &execdriver.UTS{
+		HostUTS: c.hostConfig.UTSMode.IsHost(),
+	}
+
+	// Build lists of devices allowed and created within the container.
+	var userSpecifiedDevices []*configs.Device
+	for _, deviceMapping := range c.hostConfig.Devices {
+		devs, err := getDevicesFromPath(deviceMapping)
+		if err != nil {
+			return err
+		}
+
+		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
+	}
+	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
+
+	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
+
+	// TODO: this can be removed after lxc-conf is fully deprecated
+	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
+	if err != nil {
+		return err
+	}
+
+	var rlimits []*ulimit.Rlimit
+	ulimits := c.hostConfig.Ulimits
+
+	// Merge ulimits with daemon defaults
+	ulIdx := make(map[string]*ulimit.Ulimit)
+	for _, ul := range ulimits {
+		ulIdx[ul.Name] = ul
+	}
+	for name, ul := range c.daemon.config.Ulimits {
+		if _, exists := ulIdx[name]; !exists {
+			ulimits = append(ulimits, ul)
+		}
+	}
+
+	for _, limit := range ulimits {
+		rl, err := limit.GetRlimit()
+		if err != nil {
+			return err
+		}
+		rlimits = append(rlimits, rl)
+	}
+
+	resources := &execdriver.Resources{
+		Memory:         c.hostConfig.Memory,
+		MemorySwap:     c.hostConfig.MemorySwap,
+		CpuShares:      c.hostConfig.CpuShares,
+		CpusetCpus:     c.hostConfig.CpusetCpus,
+		CpusetMems:     c.hostConfig.CpusetMems,
+		CpuPeriod:      c.hostConfig.CpuPeriod,
+		CpuQuota:       c.hostConfig.CpuQuota,
+		BlkioWeight:    c.hostConfig.BlkioWeight,
+		Rlimits:        rlimits,
+		OomKillDisable: c.hostConfig.OomKillDisable,
+	}
+
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+
+	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
+	processConfig.Env = env
+
+	c.command = &execdriver.Command{
+		ID:                 c.ID,
+		Rootfs:             c.RootfsPath(),
+		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
+		InitPath:           "/.dockerinit",
+		WorkingDir:         c.Config.WorkingDir,
+		Network:            en,
+		Ipc:                ipc,
+		Pid:                pid,
+		UTS:                uts,
+		Resources:          resources,
+		AllowedDevices:     allowedDevices,
+		AutoCreatedDevices: autoCreatedDevices,
+		CapAdd:             c.hostConfig.CapAdd,
+		CapDrop:            c.hostConfig.CapDrop,
+		ProcessConfig:      processConfig,
+		ProcessLabel:       c.GetProcessLabel(),
+		MountLabel:         c.GetMountLabel(),
+		LxcConfig:          lxcConfig,
+		AppArmorProfile:    c.AppArmorProfile,
+		CgroupParent:       c.hostConfig.CgroupParent,
+	}
+
+	return nil
+}
+
+// GetSize, return real size, virtual size
+func (container *Container) GetSize() (int64, int64) {
+	var (
+		sizeRw, sizeRootfs int64
+		err                error
+		driver             = container.daemon.driver
+	)
+
+	if err := container.Mount(); err != nil {
+		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
+		return sizeRw, sizeRootfs
+	}
+	defer container.Unmount()
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	sizeRw, err = driver.DiffSize(container.ID, initID)
+	if err != nil {
+		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+		// FIXME: GetSize should return an error. Not changing it now in case
+		// there is a side-effect.
+		sizeRw = -1
+	}
+
+	if _, err = os.Stat(container.basefs); err == nil {
+		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
+			sizeRootfs = -1
+		}
+	}
+	return sizeRw, sizeRootfs
+}
+
+func (container *Container) buildHostnameFile() error {
+	hostnamePath, err := container.GetRootResourcePath("hostname")
+	if err != nil {
+		return err
+	}
+	container.HostnamePath = hostnamePath
+
+	if container.Config.Domainname != "" {
+		return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
+	}
+	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+}
+
+func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, error) {
+	var (
+		joinOptions []libnetwork.EndpointOption
+		err         error
+		dns         []string
+		dnsSearch   []string
+	)
+
+	joinOptions = append(joinOptions, libnetwork.JoinOptionHostname(container.Config.Hostname),
+		libnetwork.JoinOptionDomainname(container.Config.Domainname))
+
+	if container.hostConfig.NetworkMode.IsHost() {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox())
+	}
+
+	container.HostsPath, err = container.GetRootResourcePath("hosts")
+	if err != nil {
+		return nil, err
+	}
+	joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath))
+
+	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+	if err != nil {
+		return nil, err
+	}
+	joinOptions = append(joinOptions, libnetwork.JoinOptionResolvConfPath(container.ResolvConfPath))
+
+	if len(container.hostConfig.Dns) > 0 {
+		dns = container.hostConfig.Dns
+	} else if len(container.daemon.config.Dns) > 0 {
+		dns = container.daemon.config.Dns
+	}
+
+	for _, d := range dns {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionDNS(d))
+	}
+
+	if len(container.hostConfig.DnsSearch) > 0 {
+		dnsSearch = container.hostConfig.DnsSearch
+	} else if len(container.daemon.config.DnsSearch) > 0 {
+		dnsSearch = container.daemon.config.DnsSearch
+	}
+
+	for _, ds := range dnsSearch {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionDNSSearch(ds))
+	}
+
+	if container.NetworkSettings.SecondaryIPAddresses != nil {
+		name := container.Config.Hostname
+		if container.Config.Domainname != "" {
+			name = name + "." + container.Config.Domainname
+		}
+
+		for _, a := range container.NetworkSettings.SecondaryIPAddresses {
+			joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(name, a.Addr))
+		}
+	}
+
+	var childEndpoints, parentEndpoints []string
+
+	children, err := container.daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	for linkAlias, child := range children {
+		_, alias := path.Split(linkAlias)
+		// allow access to the linked container via the alias, real name, and container hostname
+		aliasList := alias + " " + child.Config.Hostname
+		// only add the name if alias isn't equal to the name
+		if alias != child.Name[1:] {
+			aliasList = aliasList + " " + child.Name[1:]
+		}
+		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(aliasList, child.NetworkSettings.IPAddress))
+		if child.NetworkSettings.EndpointID != "" {
+			childEndpoints = append(childEndpoints, child.NetworkSettings.EndpointID)
+		}
+	}
+
+	for _, extraHost := range container.hostConfig.ExtraHosts {
+		// allow IPv6 addresses in extra hosts; only split on first ":"
+		parts := strings.SplitN(extraHost, ":", 2)
+		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1]))
+	}
+
+	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
+	for _, ref := range refs {
+		if ref.ParentID == "0" {
+			continue
+		}
+
+		c, err := container.daemon.Get(ref.ParentID)
+		if err != nil {
+			logrus.Error(err)
+		}
+
+		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
+			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
+			joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress))
+			if c.NetworkSettings.EndpointID != "" {
+				parentEndpoints = append(parentEndpoints, c.NetworkSettings.EndpointID)
+			}
+		}
+	}
+
+	linkOptions := options.Generic{
+		netlabel.GenericData: options.Generic{
+			"ParentEndpoints": parentEndpoints,
+			"ChildEndpoints":  childEndpoints,
+		},
+	}
+
+	joinOptions = append(joinOptions, libnetwork.JoinOptionGeneric(linkOptions))
+
+	return joinOptions, nil
+}
+
+func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
+	if ep == nil {
+		return nil, fmt.Errorf("invalid endpoint while building port map info")
+	}
+
+	if networkSettings == nil {
+		return nil, fmt.Errorf("invalid networksettings while building port map info")
+	}
+
+	driverInfo, err := ep.DriverInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	if driverInfo == nil {
+		// It is not an error for epInfo to be nil
+		return networkSettings, nil
+	}
+
+	if mac, ok := driverInfo[netlabel.MacAddress]; ok {
+		networkSettings.MacAddress = mac.(net.HardwareAddr).String()
+	}
+
+	mapData, ok := driverInfo[netlabel.PortMap]
+	if !ok {
+		return networkSettings, nil
+	}
+
+	if portMapping, ok := mapData.([]types.PortBinding); ok {
+		networkSettings.Ports = nat.PortMap{}
+		for _, pp := range portMapping {
+			natPort := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
+			natBndg := nat.PortBinding{HostIp: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
+			networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg)
+		}
+	}
+
+	return networkSettings, nil
+}
+
+func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
+	if ep == nil {
+		return nil, fmt.Errorf("invalid endpoint while building port map info")
+	}
+
+	if networkSettings == nil {
+		return nil, fmt.Errorf("invalid networksettings while building port map info")
+	}
+
+	epInfo := ep.Info()
+	if epInfo == nil {
+		// It is not an error to get an empty endpoint info
+		return networkSettings, nil
+	}
+
+	ifaceList := epInfo.InterfaceList()
+	if len(ifaceList) == 0 {
+		return networkSettings, nil
+	}
+
+	iface := ifaceList[0]
+
+	ones, _ := iface.Address().Mask.Size()
+	networkSettings.IPAddress = iface.Address().IP.String()
+	networkSettings.IPPrefixLen = ones
+
+	if iface.AddressIPv6().IP.To16() != nil {
+		onesv6, _ := iface.AddressIPv6().Mask.Size()
+		networkSettings.GlobalIPv6Address = iface.AddressIPv6().IP.String()
+		networkSettings.GlobalIPv6PrefixLen = onesv6
+	}
+
+	if len(ifaceList) == 1 {
+		return networkSettings, nil
+	}
+
+	networkSettings.SecondaryIPAddresses = make([]network.Address, 0, len(ifaceList)-1)
+	networkSettings.SecondaryIPv6Addresses = make([]network.Address, 0, len(ifaceList)-1)
+	for _, iface := range ifaceList[1:] {
+		ones, _ := iface.Address().Mask.Size()
+		addr := network.Address{Addr: iface.Address().IP.String(), PrefixLen: ones}
+		networkSettings.SecondaryIPAddresses = append(networkSettings.SecondaryIPAddresses, addr)
+
+		if iface.AddressIPv6().IP.To16() != nil {
+			onesv6, _ := iface.AddressIPv6().Mask.Size()
+			addrv6 := network.Address{Addr: iface.AddressIPv6().IP.String(), PrefixLen: onesv6}
+			networkSettings.SecondaryIPv6Addresses = append(networkSettings.SecondaryIPv6Addresses, addrv6)
+		}
+	}
+
+	return networkSettings, nil
+}
+
+func (container *Container) updateJoinInfo(ep libnetwork.Endpoint) error {
+	epInfo := ep.Info()
+	if epInfo == nil {
+		// It is not an error to get an empty endpoint info
+		return nil
+	}
+
+	container.NetworkSettings.Gateway = epInfo.Gateway().String()
+	if epInfo.GatewayIPv6().To16() != nil {
+		container.NetworkSettings.IPv6Gateway = epInfo.GatewayIPv6().String()
+	}
+
+	container.NetworkSettings.SandboxKey = epInfo.SandboxKey()
+
+	return nil
+}
+
+func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error {
+	networkSettings := &network.Settings{NetworkID: n.ID(), EndpointID: ep.ID()}
+
+	networkSettings, err := container.buildPortMapInfo(n, ep, networkSettings)
+	if err != nil {
+		return err
+	}
+
+	networkSettings, err = container.buildEndpointInfo(n, ep, networkSettings)
+	if err != nil {
+		return err
+	}
+
+	if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
+		networkSettings.Bridge = container.daemon.config.Bridge.Iface
+	}
+
+	container.NetworkSettings = networkSettings
+	return nil
+}
+
+func (container *Container) UpdateNetwork() error {
+	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
+	if err != nil {
+		return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
+	}
+
+	ep, err := n.EndpointByID(container.NetworkSettings.EndpointID)
+	if err != nil {
+		return fmt.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err)
+	}
+
+	if err := ep.Leave(container.ID); err != nil {
+		return fmt.Errorf("endpoint leave failed: %v", err)
+
+	}
+
+	joinOptions, err := container.buildJoinOptions()
+	if err != nil {
+		return fmt.Errorf("Update network failed: %v", err)
+	}
+
+	if _, err := ep.Join(container.ID, joinOptions...); err != nil {
+		return fmt.Errorf("endpoint join failed: %v", err)
+	}
+
+	if err := container.updateJoinInfo(ep); err != nil {
+		return fmt.Errorf("Updating join info failed: %v", err)
+	}
+
+	return nil
+}
+
+func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointOption, error) {
+	var (
+		portSpecs     = make(nat.PortSet)
+		bindings      = make(nat.PortMap)
+		pbList        []types.PortBinding
+		exposeList    []types.TransportPort
+		createOptions []libnetwork.EndpointOption
+	)
+
+	if container.Config.PortSpecs != nil {
+		if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
+			return nil, err
+		}
+		container.Config.PortSpecs = nil
+		if err := container.WriteHostConfig(); err != nil {
+			return nil, err
+		}
+	}
+
+	if container.Config.ExposedPorts != nil {
+		portSpecs = container.Config.ExposedPorts
+	}
+
+	if container.hostConfig.PortBindings != nil {
+		for p, b := range container.hostConfig.PortBindings {
+			bindings[p] = []nat.PortBinding{}
+			for _, bb := range b {
+				bindings[p] = append(bindings[p], nat.PortBinding{
+					HostIp:   bb.HostIp,
+					HostPort: bb.HostPort,
+				})
+			}
+		}
+	}
+
+	container.NetworkSettings.PortMapping = nil
+
+	ports := make([]nat.Port, len(portSpecs))
+	var i int
+	for p := range portSpecs {
+		ports[i] = p
+		i++
+	}
+	nat.SortPortMap(ports, bindings)
+	for _, port := range ports {
+		expose := types.TransportPort{}
+		expose.Proto = types.ParseProtocol(port.Proto())
+		expose.Port = uint16(port.Int())
+		exposeList = append(exposeList, expose)
+
+		pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
+		binding := bindings[port]
+		for i := 0; i < len(binding); i++ {
+			pbCopy := pb.GetCopy()
+			pbCopy.HostPort = uint16(nat.Port(binding[i].HostPort).Int())
+			pbCopy.HostIP = net.ParseIP(binding[i].HostIp)
+			pbList = append(pbList, pbCopy)
+		}
+
+		if container.hostConfig.PublishAllPorts && len(binding) == 0 {
+			pbList = append(pbList, pb)
+		}
+	}
+
+	createOptions = append(createOptions,
+		libnetwork.CreateOptionPortMapping(pbList),
+		libnetwork.CreateOptionExposedPorts(exposeList))
+
+	if container.Config.MacAddress != "" {
+		mac, err := net.ParseMAC(container.Config.MacAddress)
+		if err != nil {
+			return nil, err
+		}
+
+		genericOption := options.Generic{
+			netlabel.MacAddress: mac,
+		}
+
+		createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
+	}
+
+	return createOptions, nil
+}
+
+func (container *Container) AllocateNetwork() error {
+	mode := container.hostConfig.NetworkMode
+	if container.Config.NetworkDisabled || mode.IsContainer() {
+		return nil
+	}
+
+	var err error
+
+	n, err := container.daemon.netController.NetworkByName(string(mode))
+	if err != nil {
+		return fmt.Errorf("error locating network with name %s: %v", string(mode), err)
+	}
+
+	createOptions, err := container.buildCreateEndpointOptions()
+	if err != nil {
+		return err
+	}
+
+	ep, err := n.CreateEndpoint(container.Name, createOptions...)
+	if err != nil {
+		return err
+	}
+
+	if err := container.updateNetworkSettings(n, ep); err != nil {
+		return err
+	}
+
+	joinOptions, err := container.buildJoinOptions()
+	if err != nil {
+		return err
+	}
+
+	if _, err := ep.Join(container.ID, joinOptions...); err != nil {
+		return err
+	}
+
+	if err := container.updateJoinInfo(ep); err != nil {
+		return fmt.Errorf("Updating join info failed: %v", err)
+	}
+
+	if err := container.WriteHostConfig(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (container *Container) initializeNetworking() error {
+	var err error
+
+	// Make sure NetworkMode has an acceptable value before
+	// initializing networking.
+	if container.hostConfig.NetworkMode == runconfig.NetworkMode("") {
+		container.hostConfig.NetworkMode = runconfig.NetworkMode("bridge")
+	}
+
+	if container.hostConfig.NetworkMode.IsContainer() {
+		// we need to get the hosts files from the container to join
+		nc, err := container.getNetworkedContainer()
+		if err != nil {
+			return err
+		}
+		container.HostnamePath = nc.HostnamePath
+		container.HostsPath = nc.HostsPath
+		container.ResolvConfPath = nc.ResolvConfPath
+		container.Config.Hostname = nc.Config.Hostname
+		container.Config.Domainname = nc.Config.Domainname
+		return nil
+	}
+
+	if container.daemon.config.DisableNetwork {
+		container.Config.NetworkDisabled = true
+	}
+
+	if container.hostConfig.NetworkMode.IsHost() {
+		container.Config.Hostname, err = os.Hostname()
+		if err != nil {
+			return err
+		}
+
+		parts := strings.SplitN(container.Config.Hostname, ".", 2)
+		if len(parts) > 1 {
+			container.Config.Hostname = parts[0]
+			container.Config.Domainname = parts[1]
+		}
+
+	}
+
+	if err := container.AllocateNetwork(); err != nil {
+		return err
+	}
+
+	return container.buildHostnameFile()
+}
+
+// Make sure the config is compatible with the current kernel
+func (container *Container) verifyDaemonSettings() {
+	if container.hostConfig.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
+		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
+		container.hostConfig.Memory = 0
+	}
+	if container.hostConfig.Memory > 0 && container.hostConfig.MemorySwap != -1 && !container.daemon.sysInfo.SwapLimit {
+		logrus.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
+		container.hostConfig.MemorySwap = -1
+	}
+	if container.daemon.sysInfo.IPv4ForwardingDisabled {
+		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
+	}
+}
+
+func (container *Container) ExportRw() (archive.Archive, error) {
+	if err := container.Mount(); err != nil {
+		return nil, err
+	}
+	if container.daemon == nil {
+		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
+	}
+	archive, err := container.daemon.Diff(container)
+	if err != nil {
+		container.Unmount()
+		return nil, err
+	}
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+			err := archive.Close()
+			container.Unmount()
+			return err
+		}),
+		nil
+}
+
+func (container *Container) getIpcContainer() (*Container, error) {
+	containerID := container.hostConfig.IpcMode.Container()
+	c, err := container.daemon.Get(containerID)
+	if err != nil {
+		return nil, err
+	}
+	if !c.IsRunning() {
+		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
+	}
+	return c, nil
+}
+
+func (container *Container) setupWorkingDirectory() error {
+	if container.Config.WorkingDir != "" {
+		container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
+
+		pth, err := container.GetResourcePath(container.Config.WorkingDir)
+		if err != nil {
+			return err
+		}
+
+		pthInfo, err := os.Stat(pth)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			if err := os.MkdirAll(pth, 0755); err != nil {
+				return err
+			}
+		}
+		if pthInfo != nil && !pthInfo.IsDir() {
+			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
+		}
+	}
+	return nil
+}
+
+func (container *Container) getNetworkedContainer() (*Container, error) {
+	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "container":
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("no container specified to join network")
+		}
+		nc, err := container.daemon.Get(parts[1])
+		if err != nil {
+			return nil, err
+		}
+		if container == nc {
+			return nil, fmt.Errorf("cannot join own network")
+		}
+		if !nc.IsRunning() {
+			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
+		}
+		return nc, nil
+	default:
+		return nil, fmt.Errorf("network mode not set to container")
+	}
+}
+
+func (container *Container) ReleaseNetwork() {
+	if container.hostConfig.NetworkMode.IsContainer() || container.daemon.config.DisableNetwork {
+		return
+	}
+
+	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
+	if err != nil {
+		logrus.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
+		return
+	}
+
+	ep, err := n.EndpointByID(container.NetworkSettings.EndpointID)
+	if err != nil {
+		logrus.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err)
+		return
+	}
+
+	if err := ep.Leave(container.ID); err != nil {
+		logrus.Errorf("leaving endpoint failed: %v", err)
+	}
+
+	if err := ep.Delete(); err != nil {
+		logrus.Errorf("deleting endpoint failed: %v", err)
+	}
+
+	container.NetworkSettings = &network.Settings{}
+}
+
+func disableAllActiveLinks(container *Container) {
+	if container.activeLinks != nil {
+		for _, link := range container.activeLinks {
+			link.Disable()
+		}
+	}
+}
+
+func (container *Container) DisableLink(name string) {
+	if container.activeLinks != nil {
+		if link, exists := container.activeLinks[name]; exists {
+			link.Disable()
+			delete(container.activeLinks, name)
+			if err := container.UpdateNetwork(); err != nil {
+				logrus.Debugf("Could not update network to remove link: %v", err)
+			}
+		} else {
+			logrus.Debugf("Could not find active link for %s", name)
+		}
+	}
+}
+
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
+	for _, m := range container.MountPoints {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			return err
+		}
+
+		if forceSyscall {
+			syscall.Unmount(dest, 0)
+		}
+
+		if m.Volume != nil {
+			if err := m.Volume.Unmount(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
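
Note (not part of the diff): getDevicesFromPath above accepts either a single device node or a directory; for a directory it walks the tree, remaps every node it can resolve under PathInContainer, and only errors if nothing usable was found. A hedged sketch of the intended call, mirroring the loop in populateCommand (the /dev/snd paths and "rwm" permissions are illustrative):

// Sketch only: expands a --device directory mapping into one configs.Device per node.
mapping := runconfig.DeviceMapping{
	PathOnHost:        "/dev/snd",
	PathInContainer:   "/dev/snd",
	CgroupPermissions: "rwm",
}
devs, err := getDevicesFromPath(mapping)
if err != nil {
	return err // no device node could be resolved under the path
}
userSpecifiedDevices = append(userSpecifiedDevices, devs...)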

+ 171 - 0
daemon/container_windows.go

@@ -0,0 +1,171 @@
+// +build windows
+
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/archive"
+)
+
+// TODO Windows. A reasonable default at the moment.
+const DefaultPathEnv = `c:\windows\system32;c:\windows\system32\WindowsPowerShell\v1.0`
+
+type Container struct {
+	CommonContainer
+
+	// Fields below here are platform specific.
+
+	// TODO Windows. Further factoring out of unused fields will be necessary.
+
+	// ---- START OF TEMPORARY DECLARATION ----
+	// TODO Windows. Temporarily keeping fields in to assist in compilation
+	// of the daemon on Windows without affecting many other files in a single
+	// PR, thus making code review significantly harder. These lines will be
+	// removed in subsequent PRs.
+
+	AppArmorProfile string
+	// ---- END OF TEMPORARY DECLARATION ----
+
+}
+
+func killProcessDirectly(container *Container) error {
+	return nil
+}
+
+func (container *Container) setupContainerDns() error {
+	return nil
+}
+
+func (container *Container) updateParentsHosts() error {
+	return nil
+}
+
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	return nil, nil
+}
+
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	return nil
+}
+
+func (container *Container) initializeNetworking() error {
+	return nil
+}
+
+func (container *Container) setupWorkingDirectory() error {
+	return nil
+}
+
+func (container *Container) verifyDaemonSettings() {
+}
+
+func populateCommand(c *Container, env []string) error {
+	en := &execdriver.Network{
+		Mtu:       c.daemon.config.Mtu,
+		Interface: nil,
+	}
+
+	// TODO Windows. Appropriate network mode (will refactor as part of
+	// libnetwork. For now, even through bridge not used, let it succeed to
+	// allow the Windows daemon to limp during its bring-up
+	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "none":
+	case "bridge", "": // empty string to support existing containers
+		if !c.Config.NetworkDisabled {
+			network := c.NetworkSettings
+			en.Interface = &execdriver.NetworkInterface{
+				Bridge:     network.Bridge,
+				MacAddress: network.MacAddress,
+			}
+		}
+	case "host", "container":
+		return fmt.Errorf("unsupported network mode: %s", c.hostConfig.NetworkMode)
+	default:
+		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
+	}
+
+	pid := &execdriver.Pid{}
+
+	// TODO Windows. This can probably be factored out.
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
+	// TODO Windows. Resource controls to be implemented later.
+	resources := &execdriver.Resources{}
+
+	// TODO Windows. Further refactoring required (privileged/user)
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+
+	processConfig.Env = env
+
+	// TODO Windows: Factor out remainder of unused fields.
+	c.command = &execdriver.Command{
+		ID:             c.ID,
+		Rootfs:         c.RootfsPath(),
+		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
+		InitPath:       "/.dockerinit",
+		WorkingDir:     c.Config.WorkingDir,
+		Network:        en,
+		Pid:            pid,
+		Resources:      resources,
+		CapAdd:         c.hostConfig.CapAdd,
+		CapDrop:        c.hostConfig.CapDrop,
+		ProcessConfig:  processConfig,
+		ProcessLabel:   c.GetProcessLabel(),
+		MountLabel:     c.GetMountLabel(),
+	}
+
+	return nil
+}
+
+// GetSize, return real size, virtual size
+func (container *Container) GetSize() (int64, int64) {
+	// TODO Windows
+	return 0, 0
+}
+
+func (container *Container) AllocateNetwork() error {
+
+	// TODO Windows. This needs reworking with libnetwork. In the
+	// proof-of-concept for //build conference, the Windows daemon
+	// invoked eng.Job("allocate_interface) passing through
+	// RequestedMac.
+
+	return nil
+}
+
+func (container *Container) ExportRw() (archive.Archive, error) {
+	if container.IsRunning() {
+		return nil, fmt.Errorf("Cannot export a running container.")
+	}
+	// TODO Windows. Implementation (different to Linux)
+	return nil, nil
+}
+
+func (container *Container) ReleaseNetwork() {
+	// TODO Windows. Rework with libnetwork
+}
+
+func (container *Container) RestoreNetwork() error {
+	// TODO Windows. Rework with libnetwork
+	return nil
+}
+
+func disableAllActiveLinks(container *Container) {
+}
+
+func (container *Container) DisableLink(name string) {
+}
+
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
+	return nil
+}

+ 16 - 0
daemon/copy.go

@@ -0,0 +1,16 @@
+package daemon
+
+import "io"
+
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if res[0] == '/' {
+		res = res[1:]
+	}
+
+	return container.Copy(res)
+}
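
Note (not part of the diff): ContainerCopy strips at most one leading '/' before delegating to container.Copy, so an absolute and a relative resource path resolve to the same content. A hedged usage sketch (the container name and path are illustrative; the returned stream is assumed to be whatever archive container.Copy produces):

// Sketch only: both calls return the same stream.
rc, err := daemon.ContainerCopy("web", "/etc/hostname")
// equivalent: daemon.ContainerCopy("web", "etc/hostname")
if err != nil {
	return err
}
defer rc.Close()
_, err = io.Copy(os.Stdout, rc)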

+ 51 - 9
daemon/create.go

@@ -2,10 +2,14 @@ package daemon
 
 import (
 	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
 
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/libcontainer/label"
 )
@@ -16,6 +20,13 @@ func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hos
 		return "", warnings, err
 	}
 
+	// The check for a valid workdir path is made on the server rather than in the
+	// client. This is because we don't know the type of path (Linux or Windows)
+	// to validate on the client.
+	if config.WorkingDir != "" && !filepath.IsAbs(config.WorkingDir) {
+		return "", warnings, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir)
+	}
+
 	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
 	if err != nil {
 		if daemon.Graph().IsNotExist(err, config.Image) {
@@ -79,17 +90,51 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 	if err := daemon.createRootfs(container); err != nil {
 		return nil, nil, err
 	}
-	if hostConfig != nil {
-		if err := daemon.setHostConfig(container, hostConfig); err != nil {
-			return nil, nil, err
-		}
+	if err := daemon.setHostConfig(container, hostConfig); err != nil {
+		return nil, nil, err
 	}
 	if err := container.Mount(); err != nil {
 		return nil, nil, err
 	}
 	defer container.Unmount()
-	if err := container.prepareVolumes(); err != nil {
-		return nil, nil, err
+
+	for spec := range config.Volumes {
+		var (
+			name, destination string
+			parts             = strings.Split(spec, ":")
+		)
+		switch len(parts) {
+		case 2:
+			name, destination = parts[0], filepath.Clean(parts[1])
+		default:
+			name = stringid.GenerateRandomID()
+			destination = filepath.Clean(parts[0])
+		}
+		// Skip volumes for which we already have something mounted on that
+		// destination because of a --volume-from.
+		if container.isDestinationMounted(destination) {
+			continue
+		}
+		path, err := container.GetResourcePath(destination)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		stat, err := os.Stat(path)
+		if err == nil && !stat.IsDir() {
+			return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
+		}
+
+		v, err := createVolume(name, config.VolumeDriver)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if err := container.copyImagePathContent(v, destination); err != nil {
+			return nil, nil, err
+		}
+
+		container.addMountPointWithVolume(destination, v, true)
 	}
 	if err := container.ToDisk(); err != nil {
 		return nil, nil, err
@@ -106,9 +151,6 @@ func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run
 		if err != nil {
 			return nil, err
 		}
-		if !c.IsRunning() {
-			return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer)
-		}
 
 		return label.DupSecOpt(c.ProcessLabel), nil
 	}
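
Note (not part of the diff): the volume handling moved into daemon.Create above splits each entry of config.Volumes on ':'; two parts mean a named volume ("name:/path"), while a single part is an anonymous volume whose name comes from stringid.GenerateRandomID(). A hedged sketch of that parsing in isolation (parseVolumeSpec is an illustrative name, not part of this commit):

// Sketch only: mirrors the spec handling added to daemon.Create.
func parseVolumeSpec(spec string) (name, destination string) {
	parts := strings.Split(spec, ":")
	if len(parts) == 2 {
		return parts[0], filepath.Clean(parts[1]) // named volume: "data:/var/lib/app"
	}
	// anonymous volume: "/var/lib/app" gets a generated name
	return stringid.GenerateRandomID(), filepath.Clean(parts[0])
}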

+ 270 - 286
daemon/daemon.go

@@ -1,10 +1,10 @@
 package daemon
 
 import (
-	"bytes"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net"
 	"os"
 	"path"
 	"path/filepath"
@@ -15,6 +15,9 @@ import (
 	"time"
 
 	"github.com/docker/libcontainer/label"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
@@ -22,12 +25,10 @@ import (
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
-	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/daemon/graphdriver"
 	_ "github.com/docker/docker/daemon/graphdriver/vfs"
+	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/network"
-	"github.com/docker/docker/daemon/networkdriver/bridge"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
@@ -38,8 +39,6 @@ import (
 	"github.com/docker/docker/pkg/namesgenerator"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/docker/docker/pkg/pidfile"
-	"github.com/docker/docker/pkg/resolvconf"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/docker/docker/pkg/truncindex"
@@ -47,11 +46,12 @@ import (
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
-	"github.com/docker/docker/volumes"
-
-	"github.com/go-fsnotify/fsnotify"
+	volumedrivers "github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
 )
 
+const defaultVolumesPathName = "volumes"
+
 var (
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
@@ -102,8 +102,6 @@ type Daemon struct {
 	repositories     *graph.TagStore
 	idIndex          *truncindex.TruncIndex
 	sysInfo          *sysinfo.SysInfo
-	volumes          *volumes.Repository
-	eng              *engine.Engine
 	config           *Config
 	containerGraph   *graphdb.Database
 	driver           graphdriver.Driver
@@ -112,14 +110,8 @@ type Daemon struct {
 	defaultLogConfig runconfig.LogConfig
 	RegistryService  *registry.Service
 	EventsService    *events.Events
-}
-
-// Install installs daemon capabilities to eng.
-func (daemon *Daemon) Install(eng *engine.Engine) error {
-	// FIXME: this hack is necessary for legacy integration tests to access
-	// the daemon object.
-	eng.HackSetGlobalVar("httpapi.daemon", daemon)
-	return nil
+	netController    libnetwork.NetworkController
+	root             string
 }
 
 // Get looks for a container using the provided information, which could be
@@ -166,10 +158,14 @@ func (daemon *Daemon) containerRoot(id string) string {
 // This is typically done at startup.
 func (daemon *Daemon) load(id string) (*Container, error) {
 	container := &Container{
-		root:         daemon.containerRoot(id),
-		State:        NewState(),
-		execCommands: newExecStore(),
+		CommonContainer: CommonContainer{
+			State:        NewState(),
+			root:         daemon.containerRoot(id),
+			MountPoints:  make(map[string]*mountPoint),
+			execCommands: newExecStore(),
+		},
 	}
+
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
@@ -217,27 +213,24 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 	// we'll waste time if we update it for every container
 	daemon.idIndex.Add(container.ID)
 
-	container.registerVolumes()
+	if err := daemon.verifyOldVolumesInfo(container); err != nil {
+		return err
+	}
+
+	if err := container.prepareMountPoints(); err != nil {
+		return err
+	}
 
-	// FIXME: if the container is supposed to be running but is not, auto restart it?
-	//        if so, then we need to restart monitor and init a new lock
-	// If the container is supposed to be running, make sure of it
 	if container.IsRunning() {
 		logrus.Debugf("killing old running container %s", container.ID)
 
 		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
 
-		// We only have to handle this for lxc because the other drivers will ensure that
-		// no processes are left when docker dies
-		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
-			lxc.KillLxc(container.ID, 9)
-		} else {
-			// use the current driver and ensure that the container is dead x.x
-			cmd := &execdriver.Command{
-				ID: container.ID,
-			}
-			daemon.execDriver.Terminate(cmd)
+		// use the current driver and ensure that the container is dead x.x
+		cmd := &execdriver.Command{
+			ID: container.ID,
 		}
+		daemon.execDriver.Terminate(cmd)
 
 		if err := container.Unmount(); err != nil {
 			logrus.Debugf("unmount error %s", err)
@@ -266,10 +259,15 @@ func (daemon *Daemon) ensureName(container *Container) error {
 }
 
 func (daemon *Daemon) restore() error {
+	type cr struct {
+		container  *Container
+		registered bool
+	}
+
 	var (
 		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
-		containers    = make(map[string]*Container)
 		currentDriver = daemon.driver.String()
+		containers    = make(map[string]*cr)
 	)
 
 	if !debug {
@@ -295,14 +293,12 @@ func (daemon *Daemon) restore() error {
 		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
 			logrus.Debugf("Loaded container %v", container.ID)
 
-			containers[container.ID] = container
+			containers[container.ID] = &cr{container: container}
 		} else {
 			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
-	registeredContainers := []*Container{}
-
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 			if !debug && logrus.GetLevel() == logrus.InfoLevel {
@@ -311,50 +307,43 @@ func (daemon *Daemon) restore() error {
 
 			e := entities[p]
 
-			if container, ok := containers[e.ID()]; ok {
-				if err := daemon.register(container, false); err != nil {
-					logrus.Debugf("Failed to register container %s: %s", container.ID, err)
-				}
-
-				registeredContainers = append(registeredContainers, container)
-
-				// delete from the map so that a new name is not automatically generated
-				delete(containers, e.ID())
+			if c, ok := containers[e.ID()]; ok {
+				c.registered = true
 			}
 		}
 	}
 
-	// Any containers that are left over do not exist in the graph
-	for _, container := range containers {
-		// Try to set the default name for a container if it exists prior to links
-		container.Name, err = daemon.generateNewName(container.ID)
-		if err != nil {
-			logrus.Debugf("Setting default id - %s", err)
-		}
+	group := sync.WaitGroup{}
+	for _, c := range containers {
+		group.Add(1)
 
-		if err := daemon.register(container, false); err != nil {
-			logrus.Debugf("Failed to register container %s: %s", container.ID, err)
-		}
+		go func(container *Container, registered bool) {
+			defer group.Done()
 
-		registeredContainers = append(registeredContainers, container)
-	}
+			if !registered {
+				// Try to set the default name for a container if it exists prior to links
+				container.Name, err = daemon.generateNewName(container.ID)
+				if err != nil {
+					logrus.Debugf("Setting default id - %s", err)
+				}
+			}
 
-	// check the restart policy on the containers and restart any container with
-	// the restart policy of "always"
-	if daemon.config.AutoRestart {
-		logrus.Debug("Restarting containers...")
+			if err := daemon.register(container, false); err != nil {
+				logrus.Debugf("Failed to register container %s: %s", container.ID, err)
+			}
 
-		for _, container := range registeredContainers {
-			if container.hostConfig.RestartPolicy.Name == "always" ||
-				(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) {
+			// check the restart policy on the containers and restart any container with
+			// the restart policy of "always"
+			if daemon.config.AutoRestart && container.shouldRestart() {
 				logrus.Debugf("Starting container %s", container.ID)
 
 				if err := container.Start(); err != nil {
 					logrus.Debugf("Failed to start container %s: %s", container.ID, err)
 				}
 			}
-		}
+		}(c.container, c.registered)
 	}
+	group.Wait()
 
 	if !debug {
 		if logrus.GetLevel() == logrus.InfoLevel {
@@ -366,61 +355,6 @@ func (daemon *Daemon) restore() error {
 	return nil
 }
 
-// set up the watch on the host's /etc/resolv.conf so that we can update container's
-// live resolv.conf when the network changes on the host
-func (daemon *Daemon) setupResolvconfWatcher() error {
-
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		return err
-	}
-
-	//this goroutine listens for the events on the watch we add
-	//on the resolv.conf file on the host
-	go func() {
-		for {
-			select {
-			case event := <-watcher.Events:
-				if event.Name == "/etc/resolv.conf" &&
-					(event.Op&(fsnotify.Write|fsnotify.Create) != 0) {
-					// verify a real change happened before we go further--a file write may have happened
-					// without an actual change to the file
-					updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged()
-					if err != nil {
-						logrus.Debugf("Error retrieving updated host resolv.conf: %v", err)
-					} else if updatedResolvConf != nil {
-						// because the new host resolv.conf might have localhost nameservers..
-						updatedResolvConf, modified := resolvconf.FilterResolvDns(updatedResolvConf, daemon.config.Bridge.EnableIPv6)
-						if modified {
-							// changes have occurred during localhost cleanup: generate an updated hash
-							newHash, err := ioutils.HashData(bytes.NewReader(updatedResolvConf))
-							if err != nil {
-								logrus.Debugf("Error generating hash of new resolv.conf: %v", err)
-							} else {
-								newResolvConfHash = newHash
-							}
-						}
-						logrus.Debug("host network resolv.conf changed--walking container list for updates")
-						contList := daemon.containers.List()
-						for _, container := range contList {
-							if err := container.updateResolvConf(updatedResolvConf, newResolvConfHash); err != nil {
-								logrus.Debugf("Error on resolv.conf update check for container ID: %s: %v", container.ID, err)
-							}
-						}
-					}
-				}
-			case err := <-watcher.Errors:
-				logrus.Debugf("host resolv.conf notify error: %v", err)
-			}
-		}
-	}()
-
-	if err := watcher.Add("/etc"); err != nil {
-		return err
-	}
-	return nil
-}
-
 func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 	if config != nil {
 		if config.PortSpecs != nil {
@@ -593,22 +527,25 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
 	entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
 
 	container := &Container{
-		ID:              id,
-		Created:         time.Now().UTC(),
-		Path:            entrypoint,
-		Args:            args, //FIXME: de-duplicate from config
-		Config:          config,
-		hostConfig:      &runconfig.HostConfig{},
-		ImageID:         imgID,
-		NetworkSettings: &network.Settings{},
-		Name:            name,
-		Driver:          daemon.driver.String(),
-		ExecDriver:      daemon.execDriver.Name(),
-		State:           NewState(),
-		execCommands:    newExecStore(),
+		CommonContainer: CommonContainer{
+			ID:              id, // FIXME: we should generate the ID here instead of receiving it as an argument
+			Created:         time.Now().UTC(),
+			Path:            entrypoint,
+			Args:            args, //FIXME: de-duplicate from config
+			Config:          config,
+			hostConfig:      &runconfig.HostConfig{},
+			ImageID:         imgID,
+			NetworkSettings: &network.Settings{},
+			Name:            name,
+			Driver:          daemon.driver.String(),
+			ExecDriver:      daemon.execDriver.Name(),
+			State:           NewState(),
+			execCommands:    newExecStore(),
+			MountPoints:     map[string]*mountPoint{},
+		},
 	}
 	container.root = daemon.containerRoot(container.ID)
+
 	return container, err
 }
 
@@ -707,14 +644,14 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error
 func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
-			parts, err := parsers.PartParser("name:alias", l)
+			name, alias, err := parsers.ParseLink(l)
 			if err != nil {
 				return err
 			}
-			child, err := daemon.Get(parts["name"])
+			child, err := daemon.Get(name)
 			if err != nil {
 				//An error from daemon.Get() means this name could not be found
-				return fmt.Errorf("Could not get container for %s", parts["name"])
+				return fmt.Errorf("Could not get container for %s", name)
 			}
 			for child.hostConfig.NetworkMode.IsContainer() {
 				parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
@@ -726,7 +663,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 			if child.hostConfig.NetworkMode.IsHost() {
 				return runconfig.ErrConflictHostNetworkAndLinks
 			}
-			if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
+			if err := daemon.RegisterLink(container, child, alias); err != nil {
 				return err
 			}
 		}
@@ -741,44 +678,19 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 	return nil
 }
 
-// FIXME: harmonize with NewGraph()
-func NewDaemon(config *Config, eng *engine.Engine, registryService *registry.Service) (*Daemon, error) {
-	daemon, err := NewDaemonFromDirectory(config, eng, registryService)
-	if err != nil {
-		return nil, err
-	}
-	return daemon, nil
-}
-
-func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService *registry.Service) (*Daemon, error) {
-	if config.Mtu == 0 {
-		config.Mtu = getDefaultNetworkMtu()
-	}
+func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
 	// Check for mutually incompatible config options
 	// Check for mutually incompatible config options
 	if config.Bridge.Iface != "" && config.Bridge.IP != "" {
 	if config.Bridge.Iface != "" && config.Bridge.IP != "" {
 		return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
 		return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
 	}
 	}
-	if !config.Bridge.EnableIptables && !config.Bridge.InterContainerCommunication {
+	if !config.Bridge.EnableIPTables && !config.Bridge.InterContainerCommunication {
 		return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
 		return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
 	}
 	}
-	if !config.Bridge.EnableIptables && config.Bridge.EnableIpMasq {
-		config.Bridge.EnableIpMasq = false
+	if !config.Bridge.EnableIPTables && config.Bridge.EnableIPMasq {
+		config.Bridge.EnableIPMasq = false
 	}
 	config.DisableNetwork = config.Bridge.Iface == disableNetworkBridge
 
-	// Claim the pidfile first, to avoid any and all unexpected race conditions.
-	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
-	if config.Pidfile != "" {
-		file, err := pidfile.New(config.Pidfile)
-		if err != nil {
-			return nil, err
-		}
-		eng.OnShutdown(func() {
-			// Always release the pidfile last, just in case
-			file.Remove()
-		})
-	}
-
 	// Check that the system is supported and we have sufficient privileges
 	if runtime.GOOS != "linux" {
 		return nil, fmt.Errorf("The Docker daemon is only supported on linux")
@@ -790,6 +702,9 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
 		return nil, err
 	}
 
+	// set up SIGUSR1 handler to dump Go routine stacks
+	setupSigusr1Trap()
+
 	// set up the tmpDir to use a canonical path
 	tmp, err := tempDir(config.Root)
 	if err != nil {
@@ -826,17 +741,30 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
 		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
 		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
 	}
 	}
 	logrus.Debugf("Using graph driver %s", driver)
 	logrus.Debugf("Using graph driver %s", driver)
-	// register cleanup for graph driver
-	eng.OnShutdown(func() {
-		if err := driver.Cleanup(); err != nil {
-			logrus.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+
+	d := &Daemon{}
+	d.driver = driver
+
+	defer func() {
+		if err != nil {
+			if err := d.Shutdown(); err != nil {
+				logrus.Error(err)
+			}
+		}
+	}()
+
+	// Verify logging driver type
+	if config.LogConfig.Type != "none" {
+		if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil {
+			return nil, fmt.Errorf("error finding the logging driver: %v", err)
 		}
-	})
+	}
+	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
 
 	if config.EnableSelinuxSupport {
 		if selinuxEnabled() {
 			// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
-			if driver.String() == "btrfs" {
+			if d.driver.String() == "btrfs" {
 				return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver")
 				return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver")
 			}
 			}
 			logrus.Debug("SELinux enabled successfully")
 			logrus.Debug("SELinux enabled successfully")
@@ -854,25 +782,21 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
 	}
 
 	// Migrate the container if it is aufs and aufs is enabled
-	if err = migrateIfAufs(driver, config.Root); err != nil {
+	if err := migrateIfAufs(d.driver, config.Root); err != nil {
 		return nil, err
 	}
 
 	logrus.Debug("Creating images graph")
-	g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
+	g, err := graph.NewGraph(path.Join(config.Root, "graph"), d.driver)
 	if err != nil {
 		return nil, err
 	}
 
-	volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
-	if err != nil {
-		return nil, err
-	}
-
-	volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
+	volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
 	if err != nil {
 		return nil, err
 	}
+	volumedrivers.Register(volumesDriver, volumesDriver.Name())
 
 	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
 	if err != nil {
@@ -897,14 +821,15 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
 		Events:   eventsService,
 		Trust:    trustService,
 	}
-	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), tagCfg)
+	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+d.driver.String()), tagCfg)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
 	}
 
 	if !config.DisableNetwork {
-		if err := bridge.InitDriver(&config.Bridge); err != nil {
-			return nil, fmt.Errorf("Error initializing Bridge: %v", err)
+		d.netController, err = initNetworkController(config)
+		if err != nil {
+			return nil, fmt.Errorf("Error initializing network controller: %v", err)
 		}
 	}
 
@@ -913,12 +838,8 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
 	if err != nil {
 		return nil, err
 	}
-	// register graph close on shutdown
-	eng.OnShutdown(func() {
-		if err := graph.Close(); err != nil {
-			logrus.Errorf("Error during container graph.Close(): %v", err)
-		}
-	})
+
+	d.containerGraph = graph
 
 	localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
 	sysInitPath := utils.DockerInitPath(localCopy)
@@ -941,72 +862,157 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
 	}
 
 	sysInfo := sysinfo.New(false)
-	const runDir = "/var/run/docker"
-	ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, runDir, config.Root, sysInitPath, sysInfo)
+	ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, config.ExecRoot, config.Root, sysInitPath, sysInfo)
 	if err != nil {
 		return nil, err
 	}
 
-	daemon := &Daemon{
-		ID:               trustKey.PublicKey().KeyID(),
-		repository:       daemonRepo,
-		containers:       &contStore{s: make(map[string]*Container)},
-		execCommands:     newExecStore(),
-		graph:            g,
-		repositories:     repositories,
-		idIndex:          truncindex.NewTruncIndex([]string{}),
-		sysInfo:          sysInfo,
-		volumes:          volumes,
-		config:           config,
-		containerGraph:   graph,
-		driver:           driver,
-		sysInitPath:      sysInitPath,
-		execDriver:       ed,
-		eng:              eng,
-		statsCollector:   newStatsCollector(1 * time.Second),
-		defaultLogConfig: config.LogConfig,
-		RegistryService:  registryService,
-		EventsService:    eventsService,
-	}
-
-	eng.OnShutdown(func() {
-		if err := daemon.shutdown(); err != nil {
-			logrus.Errorf("Error during daemon.shutdown(): %v", err)
+	d.ID = trustKey.PublicKey().KeyID()
+	d.repository = daemonRepo
+	d.containers = &contStore{s: make(map[string]*Container)}
+	d.execCommands = newExecStore()
+	d.graph = g
+	d.repositories = repositories
+	d.idIndex = truncindex.NewTruncIndex([]string{})
+	d.sysInfo = sysInfo
+	d.config = config
+	d.sysInitPath = sysInitPath
+	d.execDriver = ed
+	d.statsCollector = newStatsCollector(1 * time.Second)
+	d.defaultLogConfig = config.LogConfig
+	d.RegistryService = registryService
+	d.EventsService = eventsService
+	d.root = config.Root
+
+	if err := d.restore(); err != nil {
+		return nil, err
+	}
+
+	return d, nil
+}
+
+func initNetworkController(config *Config) (libnetwork.NetworkController, error) {
+	controller, err := libnetwork.New()
+	if err != nil {
+		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
+	}
+
+	// Initialize default driver "null"
+
+	if err := controller.ConfigureNetworkDriver("null", options.Generic{}); err != nil {
+		return nil, fmt.Errorf("Error initializing null driver: %v", err)
+	}
+
+	// Initialize default network on "null"
+	if _, err := controller.NewNetwork("null", "none"); err != nil {
+		return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
+	}
+
+	// Initialize default driver "host"
+	if err := controller.ConfigureNetworkDriver("host", options.Generic{}); err != nil {
+		return nil, fmt.Errorf("Error initializing host driver: %v", err)
+	}
+
+	// Initialize default network on "host"
+	if _, err := controller.NewNetwork("host", "host"); err != nil {
+		return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
+	}
+
+	// Initialize default driver "bridge"
+	option := options.Generic{
+		"EnableIPForwarding": config.Bridge.EnableIPForward}
+
+	if err := controller.ConfigureNetworkDriver("bridge", options.Generic{netlabel.GenericData: option}); err != nil {
+		return nil, fmt.Errorf("Error initializing bridge driver: %v", err)
+	}
+
+	netOption := options.Generic{
+		"BridgeName":          config.Bridge.Iface,
+		"Mtu":                 config.Mtu,
+		"EnableIPTables":      config.Bridge.EnableIPTables,
+		"EnableIPMasquerade":  config.Bridge.EnableIPMasq,
+		"EnableICC":           config.Bridge.InterContainerCommunication,
+		"EnableUserlandProxy": config.Bridge.EnableUserlandProxy,
+	}
+
+	if config.Bridge.IP != "" {
+		ip, bipNet, err := net.ParseCIDR(config.Bridge.IP)
+		if err != nil {
+			return nil, err
 		}
-	})
 
-	if err := daemon.restore(); err != nil {
-		return nil, err
+		bipNet.IP = ip
+		netOption["AddressIPv4"] = bipNet
 	}
 
-	// set up filesystem watch on resolv.conf for network changes
-	if err := daemon.setupResolvconfWatcher(); err != nil {
-		return nil, err
+	if config.Bridge.FixedCIDR != "" {
+		_, fCIDR, err := net.ParseCIDR(config.Bridge.FixedCIDR)
+		if err != nil {
+			return nil, err
+		}
+
+		netOption["FixedCIDR"] = fCIDR
 	}
 
-	return daemon, nil
+	if config.Bridge.FixedCIDRv6 != "" {
+		_, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6)
+		if err != nil {
+			return nil, err
+		}
+
+		netOption["FixedCIDRv6"] = fCIDRv6
+	}
+
+	// --ip processing
+	if config.Bridge.DefaultIP != nil {
+		netOption["DefaultBindingIP"] = config.Bridge.DefaultIP
+	}
+
+	// Initialize default network on "bridge" with the same name
+	_, err = controller.NewNetwork("bridge", "bridge",
+		libnetwork.NetworkOptionGeneric(options.Generic{
+			netlabel.GenericData: netOption,
+			netlabel.EnableIPv6:  config.Bridge.EnableIPv6,
+		}))
+	if err != nil {
+		return nil, fmt.Errorf("Error creating default \"bridge\" network: %v", err)
+	}
+
+	return controller, nil
 }
 
-func (daemon *Daemon) shutdown() error {
-	group := sync.WaitGroup{}
-	logrus.Debug("starting clean shutdown of all containers...")
-	for _, container := range daemon.List() {
-		c := container
-		if c.IsRunning() {
-			logrus.Debugf("stopping %s", c.ID)
-			group.Add(1)
-
-			go func() {
-				defer group.Done()
-				if err := c.KillSig(15); err != nil {
-					logrus.Debugf("kill 15 error for %s - %s", c.ID, err)
-				}
-				c.WaitStop(-1 * time.Second)
-				logrus.Debugf("container stopped %s", c.ID)
-			}()
+func (daemon *Daemon) Shutdown() error {
+	if daemon.containerGraph != nil {
+		if err := daemon.containerGraph.Close(); err != nil {
+			logrus.Errorf("Error during container graph.Close(): %v", err)
 		}
 	}
-	group.Wait()
+	if daemon.driver != nil {
+		if err := daemon.driver.Cleanup(); err != nil {
+			logrus.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+		}
+	}
+	if daemon.containers != nil {
+		group := sync.WaitGroup{}
+		logrus.Debug("starting clean shutdown of all containers...")
+		for _, container := range daemon.List() {
+			c := container
+			if c.IsRunning() {
+				logrus.Debugf("stopping %s", c.ID)
+				group.Add(1)
+
+				go func() {
+					defer group.Done()
+					if err := c.KillSig(15); err != nil {
+						logrus.Debugf("kill 15 error for %s - %s", c.ID, err)
+					}
+					c.WaitStop(-1 * time.Second)
+					logrus.Debugf("container stopped %s", c.ID)
+				}()
+			}
+		}
+		group.Wait()
+	}
 
 	return nil
 }
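A rough sketch (not part of the diff) of how a caller drives the reworked lifecycle now that the *engine.Engine parameter and its OnShutdown hooks are gone; cfg and svc are assumed to be a populated *Config and *registry.Service:

```go
// runDaemon is illustrative only: NewDaemon no longer registers shutdown
// hooks on an engine, so the caller defers Shutdown itself.
func runDaemon(cfg *daemon.Config, svc *registry.Service) error {
	d, err := daemon.NewDaemon(cfg, svc)
	if err != nil {
		return err
	}
	defer func() {
		if err := d.Shutdown(); err != nil {
			logrus.Error(err)
		}
	}()
	// ... serve API requests until asked to stop ...
	return nil
}
```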
@@ -1045,22 +1051,6 @@ func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback e
 	return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 
-func (daemon *Daemon) Pause(c *Container) error {
-	if err := daemon.execDriver.Pause(c.command); err != nil {
-		return err
-	}
-	c.SetPaused()
-	return nil
-}
-
-func (daemon *Daemon) Unpause(c *Container) error {
-	if err := daemon.execDriver.Unpause(c.command); err != nil {
-		return err
-	}
-	c.SetUnpaused()
-	return nil
-}
-
 func (daemon *Daemon) Kill(c *Container, sig int) error {
 	return daemon.execDriver.Kill(c.command, sig)
 }
@@ -1087,26 +1077,6 @@ func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface
 	return nil
 }
 
-// Nuke kills all containers then removes all content
-// from the content root, including images, volumes and
-// container filesystems.
-// Again: this will remove your entire docker daemon!
-// FIXME: this is deprecated, and only used in legacy
-// tests. Please remove.
-func (daemon *Daemon) Nuke() error {
-	var wg sync.WaitGroup
-	for _, container := range daemon.List() {
-		wg.Add(1)
-		go func(c *Container) {
-			c.Kill()
-			wg.Done()
-		}(container)
-	}
-	wg.Wait()
-
-	return os.RemoveAll(daemon.config.Root)
-}
-
 // FIXME: this is a convenience function for integration tests
 // which need direct access to daemon.graph.
 // Once the tests switch to using engine and jobs, this method
@@ -1190,14 +1160,14 @@ func checkKernel() error {
 	// test for specific functionalities.
 	// Unfortunately we can't test for the feature "does not cause a kernel panic"
 	// without actually causing a kernel panic, so we need this workaround until
-	// the circumstances of pre-3.8 crashes are clearer.
+	// the circumstances of pre-3.10 crashes are clearer.
 	// For details see https://github.com/docker/docker/issues/407
 	if k, err := kernel.GetKernelVersion(); err != nil {
 		logrus.Warnf("%s", err)
 	} else {
-		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 {
 			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
 			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
-				logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+				logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.10.0.", k.String())
 			}
 		}
 	}
@@ -1231,15 +1201,30 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
 	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
 		return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
 	}
+	if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
+		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
+		hostConfig.CpuPeriod = 0
+	}
 	if hostConfig.CpuQuota > 0 && !daemon.SystemConfig().CpuCfsQuota {
 		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
 		hostConfig.CpuQuota = 0
 	}
+	if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
+		return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
+	}
+	if hostConfig.OomKillDisable && !daemon.SystemConfig().OomKillDisable {
+		hostConfig.OomKillDisable = false
+		return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
+	}
 
 	return warnings, nil
 }
 
 func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
+		return err
+	}
+
 	container.Lock()
 	defer container.Unlock()
 	if err := parseSecurityOpt(container, hostConfig); err != nil {
@@ -1253,6 +1238,5 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
 
 	container.hostConfig = hostConfig
 	container.toDisk()
-
 	return nil
 }

+ 118 - 12
daemon/daemon_test.go

@@ -1,11 +1,17 @@
 package daemon
 
 import (
-	"github.com/docker/docker/pkg/graphdb"
-	"github.com/docker/docker/pkg/truncindex"
+	"fmt"
+	"io/ioutil"
 	"os"
 	"os"
 	"path"
 	"path"
+	"path/filepath"
 	"testing"
 	"testing"
+
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/truncindex"
+	"github.com/docker/docker/volume"
 )
 
 //
@@ -14,24 +20,38 @@ import (
 
 func TestGet(t *testing.T) {
 	c1 := &Container{
-		ID:   "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
-		Name: "tender_bardeen",
+		CommonContainer: CommonContainer{
+			ID:   "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+			Name: "tender_bardeen",
+		},
 	}
+
 	c2 := &Container{
-		ID:   "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
-		Name: "drunk_hawking",
+		CommonContainer: CommonContainer{
+			ID:   "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
+			Name: "drunk_hawking",
+		},
 	}
+
 	c3 := &Container{
-		ID:   "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
-		Name: "3cdbd1aa",
+		CommonContainer: CommonContainer{
+			ID:   "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
+			Name: "3cdbd1aa",
+		},
 	}
+
 	c4 := &Container{
-		ID:   "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
-		Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+		CommonContainer: CommonContainer{
+			ID:   "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
+			Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+		},
 	}
+
 	c5 := &Container{
-		ID:   "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
-		Name: "d22d69a2b896",
+		CommonContainer: CommonContainer{
+			ID:   "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
+			Name: "d22d69a2b896",
+		},
 	}
 
 	store := &contStore{
@@ -99,3 +119,89 @@ func TestGet(t *testing.T) {
 
 	os.Remove(daemonTestDbPath)
 }
+
+func TestLoadWithVolume(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerId)
+	if err = os.MkdirAll(containerPath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	hostVolumeId := stringid.GenerateRandomID()
+	volumePath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
+
+	config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
+"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
+"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
+"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
+"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
+"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
+"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
+"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
+"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}},
+"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
+"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
+"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
+"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
+"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
+"UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}`
+
+	cfg := fmt.Sprintf(config, volumePath)
+	if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(cfg), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
+"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
+"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
+"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
+	if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = os.MkdirAll(volumePath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	daemon := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+
+	c, err := daemon.load(containerId)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = daemon.verifyOldVolumesInfo(c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(c.MountPoints) != 1 {
+		t.Fatalf("Expected 1 volume mounted, was 0\n")
+	}
+
+	m := c.MountPoints["/vol1"]
+	if m.Name != hostVolumeId {
+		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
+	}
+
+	if m.Destination != "/vol1" {
+		t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination)
+	}
+
+	if !m.RW {
+		t.Fatalf("Expected mount point to be RW but it was not\n")
+	}
+
+	if m.Driver != volume.DefaultDriverName {
+		t.Fatalf("Expected mount driver local, was %s\n", m.Driver)
+	}
+}

+ 7 - 0
daemon/daemon_zfs.go

@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_zfs,linux
+
+package daemon
+
+import (
+	_ "github.com/docker/docker/daemon/graphdriver/zfs"
+)

+ 21 - 0
daemon/debugtrap.go

@@ -0,0 +1,21 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"os/signal"
+	"syscall"
+
+	psignal "github.com/docker/docker/pkg/signal"
+)
+
+func setupSigusr1Trap() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGUSR1)
+	go func() {
+		for range c {
+			psignal.DumpStacks()
+		}
+	}()
+}
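With the trap above installed, sending SIGUSR1 to the daemon process dumps all goroutine stacks to its log (the pid below is a placeholder; `kill -USR1 <pid>` from a shell does the same):

```go
package main

import (
	"os"
	"syscall"
)

func main() {
	daemonPid := 12345 // placeholder: pid of the running docker daemon
	p, err := os.FindProcess(daemonPid)
	if err != nil {
		panic(err)
	}
	// Delivers SIGUSR1; the handler in debugtrap.go calls psignal.DumpStacks().
	if err := p.Signal(syscall.SIGUSR1); err != nil {
		panic(err)
	}
}
```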

+ 7 - 0
daemon/debugtrap_unsupported.go

@@ -0,0 +1,7 @@
+// +build !linux,!darwin,!freebsd
+
+package daemon
+
+func setupSigusr1Trap() {
+	return
+}

+ 26 - 51
daemon/delete.go

@@ -22,8 +22,6 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
 		name, err := GetFullContainerName(name)
 		if err != nil {
 			return err
-			// TODO: why was just job.Error(err) without return if the function cannot continue w/o container name?
-			//job.Error(err)
 		}
 		parent, n := path.Split(name)
 		if parent == "/" {
@@ -35,68 +33,42 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
 		}
 		parentContainer, _ := daemon.Get(pe.ID())
 
-		if parentContainer != nil {
-			parentContainer.DisableLink(n)
-		}
-
 		if err := daemon.ContainerGraph().Delete(name); err != nil {
 			return err
 		}
-		return nil
-	}
 
-	if container != nil {
-		// stop collection of stats for the container regardless
-		// if stats are currently getting collected.
-		daemon.statsCollector.stopCollection(container)
-		if container.IsRunning() {
-			if config.ForceRemove {
-				if err := container.Kill(); err != nil {
-					return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
-				}
-			} else {
-				return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
-			}
+		if parentContainer != nil {
+			parentContainer.DisableLink(n)
 		}
 
-		if config.ForceRemove {
-			if err := daemon.ForceRm(container); err != nil {
-				logrus.Errorf("Cannot destroy container %s: %v", name, err)
-			}
-		} else {
-			if err := daemon.Rm(container); err != nil {
-				return fmt.Errorf("Cannot destroy container %s: %v", name, err)
-			}
-		}
-		container.LogEvent("destroy")
-		if config.RemoveVolume {
-			daemon.DeleteVolumes(container.VolumePaths())
-		}
+		return nil
 	}
-	return nil
-}
 
-func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
-	for id := range volumeIDs {
-		if err := daemon.volumes.Delete(id); err != nil {
-			logrus.Infof("%s", err)
-			continue
-		}
+	if err := daemon.rm(container, config.ForceRemove); err != nil {
+		return fmt.Errorf("Cannot destroy container %s: %v", name, err)
 	}
-}
 
-func (daemon *Daemon) Rm(container *Container) (err error) {
-	return daemon.commonRm(container, false)
-}
+	container.LogEvent("destroy")
 
 
-func (daemon *Daemon) ForceRm(container *Container) (err error) {
-	return daemon.commonRm(container, true)
+	if config.RemoveVolume {
+		container.removeMountPoints()
+	}
+	return nil
 }
 
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err error) {
-	if container == nil {
-		return fmt.Errorf("The given container is <nil>")
+func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
+	// stop collection of stats for the container regardless
+	// if stats are currently getting collected.
+	daemon.statsCollector.stopCollection(container)
+
+	if container.IsRunning() {
+		if !forceRemove {
+			return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
+		}
+		if err := container.Kill(); err != nil {
+			return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
+		}
 	}
 
 	element := daemon.containers.Get(container.ID)
@@ -133,7 +105,6 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
 		}
 	}()
 
-	container.derefVolumes()
 	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
 		logrus.Debugf("Unable to remove container from link graph: %s", err)
 	}
@@ -161,3 +132,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
 
 	return nil
 }
+
+func (daemon *Daemon) DeleteVolumes(c *Container) error {
+	return c.removeMountPoints()
+}
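An illustrative call site for the consolidated removal path (not part of the diff); ContainerRmConfig's ForceRemove and RemoveVolume fields are the ones consulted above:

```go
// removeForcefully is a hypothetical helper showing the flags ContainerRm
// checks before delegating to daemon.rm and removeMountPoints.
func removeForcefully(d *Daemon, name string) error {
	return d.ContainerRm(name, &ContainerRmConfig{
		ForceRemove:  true, // kill the container if it is still running
		RemoveVolume: true, // also drop its registered mount points
	})
}
```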

+ 3 - 74
daemon/exec.go

@@ -9,10 +9,8 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
 )
 )
@@ -112,8 +110,9 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
 
 func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
 
-	if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) {
-		return "", lxc.ErrExec
+	// Not all drivers support Exec (LXC for example)
+	if err := checkExecSupport(d.execDriver.Name()); err != nil {
+		return "", err
 	}
 
 	container, err := d.getActiveContainer(config.Container)
@@ -129,7 +128,6 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
 		Entrypoint: entrypoint,
 		Arguments:  args,
 		User:       config.User,
-		Privileged: config.Privileged,
 	}
 
 	execConfig := &execConfig{
@@ -245,72 +243,3 @@ func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pi
 
 	return exitStatus, err
 }
-
-func (container *Container) GetExecIDs() []string {
-	return container.execCommands.List()
-}
-
-func (container *Container) Exec(execConfig *execConfig) error {
-	container.Lock()
-	defer container.Unlock()
-
-	waitStart := make(chan struct{})
-
-	callback := func(processConfig *execdriver.ProcessConfig, pid int) {
-		if processConfig.Tty {
-			// The callback is called after the process Start()
-			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
-			// which we close here.
-			if c, ok := processConfig.Stdout.(io.Closer); ok {
-				c.Close()
-			}
-		}
-		close(waitStart)
-	}
-
-	// We use a callback here instead of a goroutine and an chan for
-	// syncronization purposes
-	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
-
-	// Exec should not return until the process is actually running
-	select {
-	case <-waitStart:
-	case err := <-cErr:
-		return err
-	}
-
-	return nil
-}
-
-func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
-	var (
-		err      error
-		exitCode int
-	)
-
-	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
-	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
-	if err != nil {
-		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
-	}
-
-	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
-	if execConfig.OpenStdin {
-		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
-			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
-		}
-	}
-	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
-		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
-	}
-	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
-		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
-	}
-	if execConfig.ProcessConfig.Terminal != nil {
-		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
-			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
-		}
-	}
-
-	return err
-}

+ 18 - 0
daemon/exec_linux.go

@@ -0,0 +1,18 @@
+// +build linux
+
+package daemon
+
+import (
+	"strings"
+
+	"github.com/docker/docker/daemon/execdriver/lxc"
+)
+
+// checkExecSupport returns an error if the exec driver does not support exec,
+// or nil if it is supported.
+func checkExecSupport(drivername string) error {
+	if strings.HasPrefix(drivername, lxc.DriverName) {
+		return lxc.ErrExec
+	}
+	return nil
+}

+ 9 - 0
daemon/exec_windows.go

@@ -0,0 +1,9 @@
+// +build windows
+
+package daemon
+
+// checkExecSupport returns an error if the exec driver does not support exec,
+// or nil if it is supported.
+func checkExecSupport(DriverName string) error {
+	return nil
+}

+ 23 - 155
daemon/execdriver/driver.go

@@ -1,21 +1,14 @@
 package execdriver
 
 import (
-	"encoding/json"
 	"errors"
 	"errors"
 	"io"
 	"io"
-	"io/ioutil"
-	"os"
 	"os/exec"
 	"os/exec"
-	"path/filepath"
-	"strconv"
-	"strings"
 	"time"
 	"time"
 
 
-	"github.com/docker/docker/daemon/execdriver/native/template"
+	// TODO Windows: Factor out ulimit
 	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/configs"
 )
 )
 
 
@@ -79,6 +72,7 @@ type Network struct {
 	Interface      *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
 	Mtu            int               `json:"mtu"`
 	ContainerID    string            `json:"container_id"` // id of the container to join network.
+	NamespacePath  string            `json:"namespace_path"`
 	HostNetworking bool              `json:"host_networking"`
 }
 
@@ -93,6 +87,11 @@ type Pid struct {
 	HostPid bool `json:"host_pid"`
 }
 
+// UTS settings of the container
+type UTS struct {
+	HostUTS bool `json:"host_uts"`
+}
+
 type NetworkInterface struct {
 	Gateway              string `json:"gateway"`
 	IPAddress            string `json:"ip"`
@@ -103,16 +102,21 @@ type NetworkInterface struct {
 	LinkLocalIPv6Address string `json:"link_local_ipv6"`
 	GlobalIPv6PrefixLen  int    `json:"global_ipv6_prefix_len"`
 	IPv6Gateway          string `json:"ipv6_gateway"`
+	HairpinMode          bool   `json:"hairpin_mode"`
 }
 
+// TODO Windows: Factor out ulimit.Rlimit
 type Resources struct {
-	Memory     int64            `json:"memory"`
-	MemorySwap int64            `json:"memory_swap"`
-	CpuShares  int64            `json:"cpu_shares"`
-	CpusetCpus string           `json:"cpuset_cpus"`
-	CpusetMems string           `json:"cpuset_mems"`
-	CpuQuota   int64            `json:"cpu_quota"`
-	Rlimits    []*ulimit.Rlimit `json:"rlimits"`
+	Memory         int64            `json:"memory"`
+	MemorySwap     int64            `json:"memory_swap"`
+	CpuShares      int64            `json:"cpu_shares"`
+	CpusetCpus     string           `json:"cpuset_cpus"`
+	CpusetMems     string           `json:"cpuset_mems"`
+	CpuPeriod      int64            `json:"cpu_period"`
+	CpuQuota       int64            `json:"cpu_quota"`
+	BlkioWeight    int64            `json:"blkio_weight"`
+	Rlimits        []*ulimit.Rlimit `json:"rlimits"`
+	OomKillDisable bool             `json:"oom_kill_disable"`
 }
 
 type ResourceStats struct {
@@ -143,6 +147,9 @@ type ProcessConfig struct {
 	Console    string   `json:"-"` // dev/console path
 }
 
+// TODO Windows: Factor out unused fields such as LxcConfig, AppArmorProfile,
+// and CgroupParent.
+//
 // Process wrapps an os/exec.Cmd to add more metadata
 type Command struct {
 	ID                 string            `json:"id"`
@@ -154,6 +161,7 @@ type Command struct {
 	Network            *Network          `json:"network"`
 	Ipc                *Ipc              `json:"ipc"`
 	Pid                *Pid              `json:"pid"`
+	UTS                *UTS              `json:"uts"`
 	Resources          *Resources        `json:"resources"`
 	Mounts             []Mount           `json:"mounts"`
 	AllowedDevices     []*configs.Device `json:"allowed_devices"`
@@ -168,143 +176,3 @@ type Command struct {
 	AppArmorProfile    string            `json:"apparmor_profile"`
 	CgroupParent       string            `json:"cgroup_parent"` // The parent cgroup for this command.
 }
-
-func InitContainer(c *Command) *configs.Config {
-	container := template.New()
-
-	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
-	container.Cgroups.Name = c.ID
-	container.Cgroups.AllowedDevices = c.AllowedDevices
-	container.Devices = c.AutoCreatedDevices
-	container.Rootfs = c.Rootfs
-	container.Readonlyfs = c.ReadonlyRootfs
-
-	// check to see if we are running in ramdisk to disable pivot root
-	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
-
-	// Default parent cgroup is "docker". Override if required.
-	if c.CgroupParent != "" {
-		container.Cgroups.Parent = c.CgroupParent
-	}
-	return container
-}
-
-func getEnv(key string, env []string) string {
-	for _, pair := range env {
-		parts := strings.Split(pair, "=")
-		if parts[0] == key {
-			return parts[1]
-		}
-	}
-	return ""
-}
-
-func SetupCgroups(container *configs.Config, c *Command) error {
-	if c.Resources != nil {
-		container.Cgroups.CpuShares = c.Resources.CpuShares
-		container.Cgroups.Memory = c.Resources.Memory
-		container.Cgroups.MemoryReservation = c.Resources.Memory
-		container.Cgroups.MemorySwap = c.Resources.MemorySwap
-		container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
-		container.Cgroups.CpusetMems = c.Resources.CpusetMems
-		container.Cgroups.CpuQuota = c.Resources.CpuQuota
-	}
-
-	return nil
-}
-
-// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
-func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
-	out := &libcontainer.NetworkInterface{Name: interfaceName}
-	// This can happen if the network runtime information is missing - possible if the
-	// container was created by an old version of libcontainer.
-	if interfaceName == "" {
-		return out, nil
-	}
-	type netStatsPair struct {
-		// Where to write the output.
-		Out *uint64
-		// The network stats file to read.
-		File string
-	}
-	// Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
-	netStats := []netStatsPair{
-		{Out: &out.RxBytes, File: "tx_bytes"},
-		{Out: &out.RxPackets, File: "tx_packets"},
-		{Out: &out.RxErrors, File: "tx_errors"},
-		{Out: &out.RxDropped, File: "tx_dropped"},
-
-		{Out: &out.TxBytes, File: "rx_bytes"},
-		{Out: &out.TxPackets, File: "rx_packets"},
-		{Out: &out.TxErrors, File: "rx_errors"},
-		{Out: &out.TxDropped, File: "rx_dropped"},
-	}
-	for _, netStat := range netStats {
-		data, err := readSysfsNetworkStats(interfaceName, netStat.File)
-		if err != nil {
-			return nil, err
-		}
-		*(netStat.Out) = data
-	}
-	return out, nil
-}
-
-// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
-func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
-	data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
-	if err != nil {
-		return 0, err
-	}
-	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
-	f, err := os.Open(filepath.Join(containerDir, "state.json"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	type network struct {
-		Type              string
-		HostInterfaceName string
-	}
-
-	state := struct {
-		CgroupPaths map[string]string `json:"cgroup_paths"`
-		Networks    []network
-	}{}
-
-	if err := json.NewDecoder(f).Decode(&state); err != nil {
-		return nil, err
-	}
-	now := time.Now()
-
-	mgr := fs.Manager{Paths: state.CgroupPaths}
-	cstats, err := mgr.GetStats()
-	if err != nil {
-		return nil, err
-	}
-	stats := &libcontainer.Stats{CgroupStats: cstats}
-	// if the container does not have any memory limit specified set the
-	// limit to the machines memory
-	memoryLimit := containerMemoryLimit
-	if memoryLimit == 0 {
-		memoryLimit = machineMemory
-	}
-	for _, iface := range state.Networks {
-		switch iface.Type {
-		case "veth":
-			istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
-			if err != nil {
-				return nil, err
-			}
-			stats.Interfaces = append(stats.Interfaces, istats)
-		}
-	}
-	return &ResourceStats{
-		Stats:       stats,
-		Read:        now,
-		MemoryLimit: memoryLimit,
-	}, nil
-}
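For orientation, a hedged sketch of the extended Resources struct being filled in (values are arbitrary); on Linux the new CpuPeriod, BlkioWeight and OomKillDisable fields are copied onto the cgroup configuration by SetupCgroups in driver_linux.go below:

```go
// Illustrative only; field names match the Resources struct above.
res := &execdriver.Resources{
	Memory:         512 * 1024 * 1024, // bytes
	MemorySwap:     1024 * 1024 * 1024,
	CpuShares:      512,
	CpuPeriod:      100000, // CFS period in microseconds
	CpuQuota:       50000,  // half a CPU per period
	BlkioWeight:    500,    // verifyHostConfig requires 10-1000
	OomKillDisable: false,
}
_ = res
```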

+ 159 - 0
daemon/execdriver/driver_linux.go

@@ -0,0 +1,159 @@
+package execdriver
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/docker/libcontainer/configs"
+)
+
+func InitContainer(c *Command) *configs.Config {
+	container := template.New()
+
+	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
+	container.Cgroups.Name = c.ID
+	container.Cgroups.AllowedDevices = c.AllowedDevices
+	container.Devices = c.AutoCreatedDevices
+	container.Rootfs = c.Rootfs
+	container.Readonlyfs = c.ReadonlyRootfs
+
+	// check to see if we are running in ramdisk to disable pivot root
+	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+
+	// Default parent cgroup is "docker". Override if required.
+	if c.CgroupParent != "" {
+		container.Cgroups.Parent = c.CgroupParent
+	}
+	return container
+}
+
+func getEnv(key string, env []string) string {
+	for _, pair := range env {
+		parts := strings.Split(pair, "=")
+		if parts[0] == key {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+func SetupCgroups(container *configs.Config, c *Command) error {
+	if c.Resources != nil {
+		container.Cgroups.CpuShares = c.Resources.CpuShares
+		container.Cgroups.Memory = c.Resources.Memory
+		container.Cgroups.MemoryReservation = c.Resources.Memory
+		container.Cgroups.MemorySwap = c.Resources.MemorySwap
+		container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
+		container.Cgroups.CpusetMems = c.Resources.CpusetMems
+		container.Cgroups.CpuPeriod = c.Resources.CpuPeriod
+		container.Cgroups.CpuQuota = c.Resources.CpuQuota
+		container.Cgroups.BlkioWeight = c.Resources.BlkioWeight
+		container.Cgroups.OomKillDisable = c.Resources.OomKillDisable
+	}
+
+	return nil
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
+	out := &libcontainer.NetworkInterface{Name: interfaceName}
+	// This can happen if the network runtime information is missing - possible if the
+	// container was created by an old version of libcontainer.
+	if interfaceName == "" {
+		return out, nil
+	}
+	type netStatsPair struct {
+		// Where to write the output.
+		Out *uint64
+		// The network stats file to read.
+		File string
+	}
+	// Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
+	netStats := []netStatsPair{
+		{Out: &out.RxBytes, File: "tx_bytes"},
+		{Out: &out.RxPackets, File: "tx_packets"},
+		{Out: &out.RxErrors, File: "tx_errors"},
+		{Out: &out.RxDropped, File: "tx_dropped"},
+
+		{Out: &out.TxBytes, File: "rx_bytes"},
+		{Out: &out.TxPackets, File: "rx_packets"},
+		{Out: &out.TxErrors, File: "rx_errors"},
+		{Out: &out.TxDropped, File: "rx_dropped"},
+	}
+	for _, netStat := range netStats {
+		data, err := readSysfsNetworkStats(interfaceName, netStat.File)
+		if err != nil {
+			return nil, err
+		}
+		*(netStat.Out) = data
+	}
+	return out, nil
+}
+
+// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
+func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
+	data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
+	f, err := os.Open(filepath.Join(containerDir, "state.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	type network struct {
+		Type              string
+		HostInterfaceName string
+	}
+
+	state := struct {
+		CgroupPaths map[string]string `json:"cgroup_paths"`
+		Networks    []network
+	}{}
+
+	if err := json.NewDecoder(f).Decode(&state); err != nil {
+		return nil, err
+	}
+	now := time.Now()
+
+	mgr := fs.Manager{Paths: state.CgroupPaths}
+	cstats, err := mgr.GetStats()
+	if err != nil {
+		return nil, err
+	}
+	stats := &libcontainer.Stats{CgroupStats: cstats}
+	// if the container does not have any memory limit specified set the
+	// limit to the machines memory
+	memoryLimit := containerMemoryLimit
+	if memoryLimit == 0 {
+		memoryLimit = machineMemory
+	}
+	for _, iface := range state.Networks {
+		switch iface.Type {
+		case "veth":
+			istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
+			if err != nil {
+				return nil, err
+			}
+			stats.Interfaces = append(stats.Interfaces, istats)
+		}
+	}
+	return &ResourceStats{
+		Stats:       stats,
+		Read:        now,
+		MemoryLimit: memoryLimit,
+	}, nil
+}

+ 2 - 0
daemon/execdriver/execdrivers/execdrivers.go → daemon/execdriver/execdrivers/execdrivers_linux.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package execdrivers
 
 import (

Some files were not shown because too many files changed in this diff