瀏覽代碼

Merge origin/master into origin/release

Signed-off-by: Jessica Frazelle <jess@docker.com>

Docker-DCO-1.1-Signed-off-by: Jessica Frazelle <jess@docker.com> (github: jfrazelle)
Jessica Frazelle 10 年之前
父節點
當前提交
81b4691406
共有 100 個文件被更改,包括 3427 次插入1467 次删除
  1. 0 14
      .drone.yml
  2. 2 0
      .gitignore
  3. 1 1
      .mailmap
  4. 1 1
      AUTHORS
  5. 187 156
      CONTRIBUTING.md
  6. 20 15
      Dockerfile
  7. 34 0
      Dockerfile.simple
  8. 57 6
      MAINTAINERS
  9. 9 6
      Makefile
  10. 1 1
      NOTICE
  11. 11 7
      README.md
  12. 1 1
      VERSION
  13. 0 2
      api/MAINTAINERS
  14. 10 13
      api/client/cli.go
  15. 282 113
      api/client/commands.go
  16. 1 1
      api/client/hijack.go
  17. 1 1
      api/client/utils.go
  18. 66 9
      api/common.go
  19. 0 2
      api/server/MAINTAINERS
  20. 84 122
      api/server/server.go
  21. 103 0
      api/server/server_linux.go
  22. 31 0
      api/server/server_windows.go
  23. 1 1
      api/types/stats.go
  24. 11 0
      api/types/types.go
  25. 0 3
      builder/MAINTAINERS
  26. 39 0
      builder/command/command.go
  27. 59 8
      builder/dispatchers.go
  28. 74 29
      builder/evaluator.go
  29. 73 32
      builder/internals.go
  30. 79 4
      builder/job.go
  31. 28 9
      builder/parser/line_parsers.go
  32. 18 20
      builder/parser/parser.go
  33. 11 11
      builder/parser/parser_test.go
  34. 0 8
      builder/parser/testfiles-negative/empty-instruction/Dockerfile
  35. 0 2
      builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile
  36. 8 6
      builder/parser/utils.go
  37. 1 1
      builtins/builtins.go
  38. 0 0
      contrib/REVIEWERS
  39. 9 4
      contrib/check-config.sh
  40. 0 0
      contrib/completion/REVIEWERS
  41. 301 96
      contrib/completion/bash/docker
  42. 4 4
      contrib/completion/fish/docker.fish
  43. 3 0
      contrib/completion/zsh/REVIEWERS
  44. 44 23
      contrib/completion/zsh/_docker
  45. 104 0
      contrib/download-frozen-image.sh
  46. 4 0
      contrib/httpserver/Dockerfile
  47. 12 0
      contrib/httpserver/server.go
  48. 0 0
      contrib/init/systemd/REVIEWERS
  49. 1 0
      contrib/init/systemd/docker.service
  50. 9 0
      contrib/init/sysvinit-redhat/docker
  51. 0 0
      contrib/init/upstart/REVIEWERS
  52. 1 1
      contrib/mkimage-alpine.sh
  53. 1 1
      contrib/mkimage-arch.sh
  54. 13 3
      contrib/mkimage-yum.sh
  55. 57 26
      contrib/mkimage/debootstrap
  56. 0 0
      contrib/project-stats.sh
  57. 0 0
      contrib/report-issue.sh
  58. 1 0
      contrib/syntax/kate/Dockerfile.xml
  59. 1 1
      contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
  60. 0 0
      contrib/syntax/textmate/REVIEWERS
  61. 1 1
      contrib/syntax/vim/syntax/dockerfile.vim
  62. 0 7
      daemon/MAINTAINERS
  63. 5 5
      daemon/attach.go
  64. 21 16
      daemon/changes.go
  65. 20 7
      daemon/commit.go
  66. 30 17
      daemon/config.go
  67. 129 42
      daemon/container.go
  68. 12 11
      daemon/copy.go
  69. 22 17
      daemon/create.go
  70. 133 94
      daemon/daemon.go
  71. 101 0
      daemon/daemon_test.go
  72. 6 7
      daemon/delete.go
  73. 7 8
      daemon/exec.go
  74. 0 2
      daemon/execdriver/MAINTAINERS
  75. 158 9
      daemon/execdriver/driver.go
  76. 0 2
      daemon/execdriver/lxc/MAINTAINERS
  77. 283 30
      daemon/execdriver/lxc/driver.go
  78. 1 5
      daemon/execdriver/lxc/lxc_init_linux.go
  79. 12 13
      daemon/execdriver/lxc/lxc_template.go
  80. 8 8
      daemon/execdriver/lxc/lxc_template_unit_test.go
  81. 116 82
      daemon/execdriver/native/create.go
  82. 168 147
      daemon/execdriver/native/driver.go
  83. 50 40
      daemon/execdriver/native/exec.go
  84. 2 16
      daemon/execdriver/native/info.go
  85. 15 30
      daemon/execdriver/native/init.go
  86. 56 7
      daemon/execdriver/native/template/default_template.go
  87. 17 24
      daemon/execdriver/native/utils.go
  88. 73 3
      daemon/execdriver/utils.go
  89. 17 14
      daemon/export.go
  90. 8 7
      daemon/graphdriver/aufs/aufs.go
  91. 1 1
      daemon/graphdriver/aufs/mount.go
  92. 0 1
      daemon/graphdriver/btrfs/MAINTAINERS
  93. 0 2
      daemon/graphdriver/devmapper/MAINTAINERS
  94. 5 1
      daemon/graphdriver/devmapper/README.md
  95. 65 21
      daemon/graphdriver/devmapper/deviceset.go
  96. 3 1
      daemon/graphdriver/devmapper/driver.go
  97. 2 1
      daemon/graphdriver/devmapper/mount.go
  98. 5 1
      daemon/graphdriver/driver.go
  99. 2 2
      daemon/graphdriver/graphtest/graphtest.go
  100. 4 1
      daemon/graphdriver/overlay/overlay.go

+ 0 - 14
.drone.yml

@@ -1,14 +0,0 @@
-image: dockercore/docker
-env:
-  - AUTO_GOPATH=1
-  - DOCKER_GRAPHDRIVER=vfs
-  - DOCKER_EXECDRIVER=native
-script:
-# Setup the DockerInDocker environment.
-  - hack/dind
-# Tests relying on StartWithBusybox make Drone time out.
-  - rm integration-cli/docker_cli_daemon_test.go
-  - rm integration-cli/docker_cli_exec_test.go 
-# Validate and test.
-  - hack/make.sh validate-dco validate-gofmt validate-toml
-  - hack/make.sh binary cross test-unit test-integration-cli test-integration test-docker-py

+ 2 - 0
.gitignore

@@ -29,3 +29,5 @@ docs/GIT_BRANCH
 docs/VERSION
 docs/VERSION
 docs/GITCOMMIT
 docs/GITCOMMIT
 docs/changed-files
 docs/changed-files
+autogen/
+.bashrc

+ 1 - 1
.mailmap

@@ -1,4 +1,4 @@
-# Generate AUTHORS: project/generate-authors.sh
+# Generate AUTHORS: hack/generate-authors.sh
 
 
 # Tip for finding duplicates (besides scanning the output of AUTHORS for name
 # Tip for finding duplicates (besides scanning the output of AUTHORS for name
 # duplicates that aren't also email duplicates): scan the output of:
 # duplicates that aren't also email duplicates): scan the output of:

+ 1 - 1
AUTHORS

@@ -1,5 +1,5 @@
 # This file lists all individuals having contributed content to the repository.
 # This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `project/generate-authors.sh`.
+# For how it is generated, see `hack/generate-authors.sh`.
 
 
 Aanand Prasad <aanand.prasad@gmail.com>
 Aanand Prasad <aanand.prasad@gmail.com>
 Aaron Feng <aaron.feng@gmail.com>
 Aaron Feng <aaron.feng@gmail.com>

+ 187 - 156
CONTRIBUTING.md

@@ -1,70 +1,60 @@
 # Contributing to Docker
 # Contributing to Docker
 
 
-Want to hack on Docker? Awesome! Here are instructions to get you
-started. They are probably not perfect, please let us know if anything
-feels wrong or incomplete.
+Want to hack on Docker? Awesome!  We have a contributor's guide that explains
+[setting up a Docker development environment and the contribution
+process](https://docs.docker.com/project/who-written-for/). 
+
+![Contributors guide](docs/sources/static_files/contributors.png)
+
+This page contains information about reporting issues as well as some tips and
+guidelines useful to experienced open source contributors. Finally, make sure
+you read our [community guidelines](#docker-community-guidelines) before you
+start participating.
 
 
 ## Topics
 ## Topics
 
 
 * [Reporting Security Issues](#reporting-security-issues)
 * [Reporting Security Issues](#reporting-security-issues)
 * [Design and Cleanup Proposals](#design-and-cleanup-proposals)
 * [Design and Cleanup Proposals](#design-and-cleanup-proposals)
-* [Reporting Issues](#reporting-issues)
-* [Build Environment](#build-environment)
-* [Contribution Guidelines](#contribution-guidelines)
+* [Reporting Issues](#reporting-other-issues)
+* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
 * [Community Guidelines](#docker-community-guidelines)
 * [Community Guidelines](#docker-community-guidelines)
 
 
-## Reporting Security Issues
-
-The Docker maintainers take security very seriously. If you discover a security issue,
-please bring it to their attention right away!
-
-Please send your report privately to [security@docker.com](mailto:security@docker.com),
-please **DO NOT** file a public issue.
-
-Security reports are greatly appreciated and we will publicly thank you for it. We also
-like to send gifts - if you're into Docker shwag make sure to let us know :)
-We currently do not offer a paid security bounty program, but are not ruling it out in
-the future.
+## Reporting security issues
 
 
-## Design and Cleanup Proposals
+The Docker maintainers take security seriously. If you discover a security
+issue, please bring it to their attention right away!
 
 
-When considering a design proposal, we are looking for:
+Please **DO NOT** file a public issue, instead send your report privately to
+[security@docker.com](mailto:security@docker.com).
 
 
-* A description of the problem this design proposal solves
-* A pull request, not an issue, that modifies the documentation describing
-  the feature you are proposing, adding new documentation if necessary.
-  * Please prefix your issue with `Proposal:` in the title
-* Please review [the existing Proposals](https://github.com/docker/docker/pulls?q=is%3Aopen+is%3Apr+label%3AProposal)
-  before reporting a new one. You can always pair with someone if you both
-  have the same idea.
+Security reports are greatly appreciated and we will publicly thank you for it.
+We also like to send gifts&mdash;if you're into Docker schwag make sure to let
+us know. We currently do not offer a paid security bounty program, but are not
+ruling it out in the future.
 
 
-When considering a cleanup task, we are looking for:
 
 
-* A description of the refactors made
-  * Please note any logic changes if necessary
-* A pull request with the code
-  * Please prefix your PR's title with `Cleanup:` so we can quickly address it.
-  * Your pull request must remain up to date with master, so rebase as necessary.
-
-## Reporting Issues
+## Reporting other issues
 
 
 A great way to contribute to the project is to send a detailed report when you
 A great way to contribute to the project is to send a detailed report when you
 encounter an issue. We always appreciate a well-written, thorough bug report,
 encounter an issue. We always appreciate a well-written, thorough bug report,
 and will thank you for it!
 and will thank you for it!
 
 
-When reporting [issues](https://github.com/docker/docker/issues) on
-GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc).
-Please include:
+Check that [our issue database](https://github.com/docker/docker/issues)
+doesn't already include that problem or suggestion before submitting an issue.
+If you find a match, add a quick "+1" or "I have this problem too." Doing this
+helps prioritize the most common problems and requests.
+
+When reporting issues, please include your host OS (Ubuntu 12.04, Fedora 19,
+etc). Please include:
 
 
 * The output of `uname -a`.
 * The output of `uname -a`.
 * The output of `docker version`.
 * The output of `docker version`.
 * The output of `docker -D info`.
 * The output of `docker -D info`.
 
 
-Please also include the steps required to reproduce the problem if
-possible and applicable.  This information will help us review and fix
-your issue faster.
+Please also include the steps required to reproduce the problem if possible and
+applicable. This information will help us review and fix your issue faster.
 
 
-### Template
+**Issue Report Template**:
 
 
 ```
 ```
 Description of problem:
 Description of problem:
@@ -103,123 +93,165 @@ Additional info:
 
 
 ```
 ```
 
 
-## Build Environment
-
-For instructions on setting up your development environment, please
-see our dedicated [dev environment setup
-docs](http://docs.docker.com/contributing/devenvironment/).
-
-## Contribution guidelines
 
 
-### Pull requests are always welcome
+## Quick contribution tips and guidelines
+
+This section gives the experienced contributor some tips and guidelines.
+
+### Pull requests are always welcome
+
+Not sure if that typo is worth a pull request? Found a bug and know how to fix
+it? Do it! We will appreciate it. Any significant improvement should be
+documented as [a GitHub issue](https://github.com/docker/docker/issues) before
+anybody starts working on it.
+
+We are always thrilled to receive pull requests. We do our best to process them
+quickly. If your pull request is not accepted on the first try,
+don't get discouraged! Our contributor's guide explains [the review process we
+use for simple changes](https://docs.docker.com/project/make-a-contribution/).
+
+### Design and cleanup proposals
+
+You can propose new designs for existing Docker features. You can also design
+entirely new features. We really appreciate contributors who want to refactor or
+otherwise cleanup our project. For information on making these types of
+contributions, see [the advanced contribution
+section](https://docs.docker.com/project/advanced-contributing/) in the
+contributors guide.
+
+We try hard to keep Docker lean and focused. Docker can't do everything for
+everybody. This means that we might decide against incorporating a new feature.
+However, there might be a way to implement that feature *on top of* Docker.
+
+### Talking to other Docker users and contributors
+
+<table class="tg">
+  <col width="45%">
+  <col width="65%">
+  <tr>
+    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</th>
+    <td>
+      <p>
+        IRC is a direct line to our most knowledgeable Docker users; we have
+        both the  <code>#docker</code> and <code>#docker-dev</code> group on 
+        <strong>irc.freenode.net</strong>.  
+        IRC is a rich chat protocol but it can overwhelm new users. You can search
+        <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
+      </p>
+      Read our <a href="https://docs.docker.com/project/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
+    </td>
+  </tr>
+  <tr>
+    <td>Google Groups</td>
+    <td>
+      There are two groups.
+      <a href="https://groups.google.com/forum/#!forum/docker-user" target="_blank">Docker-user</a>
+      is for people using Docker containers. 
+      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a> 
+      group is for contributors and other people contributing to the Docker 
+      project.
+    </td>
+  </tr>
+  <tr>
+    <td>Twitter</td>
+    <td>
+      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
+      to get updates on our products. You can also tweet us questions or just 
+      share blogs or stories.
+    </td>
+  </tr>
+  <tr>
+    <td>Stack Overflow</td>
+    <td>
+      Stack Overflow has over 7,000 Docker questions listed. We regularly 
+      monitor <a href="http://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
+      and so do many other knowledgeable Docker users.
+    </td>
+  </tr>
+</table>
 
 
-We are always thrilled to receive pull requests, and do our best to
-process them as quickly as possible. Not sure if that typo is worth a pull
-request? Do it! We will appreciate it.
-
-If your pull request is not accepted on the first try, don't be
-discouraged! If there's a problem with the implementation, hopefully you
-received feedback on what to improve.
-
-We're trying very hard to keep Docker lean and focused. We don't want it
-to do everything for everybody. This means that we might decide against
-incorporating a new feature. However, there might be a way to implement
-that feature *on top of* Docker.
-
-### Discuss your design on the mailing list
-
-We recommend discussing your plans [on the mailing
-list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev)
-before starting to code - especially for more ambitious contributions.
-This gives other contributors a chance to point you in the right
-direction, give feedback on your design, and maybe point out if someone
-else is working on the same thing.
-
-### Create issues...
-
-Any significant improvement should be documented as [a GitHub
-issue](https://github.com/docker/docker/issues) before anybody
-starts working on it.
-
-### ...but check for existing issues first!
-
-Please take a moment to check that an issue doesn't already exist
-documenting your bug report or improvement proposal. If it does, it
-never hurts to add a quick "+1" or "I have this problem too". This will
-help prioritize the most common problems and requests.
 
 
 ### Conventions
 ### Conventions
 
 
 Fork the repository and make changes on your fork in a feature branch:
 Fork the repository and make changes on your fork in a feature branch:
 
 
-- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the
-  issue.
-- If it's a feature branch, create an enhancement issue to announce your
-  intentions, and name it XXXX-something where XXXX is the number of the issue.
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
+	the issue. 
+- If it's a feature branch, create an enhancement issue to announce
+	your intentions, and name it XXXX-something where XXXX is the number of the
+	issue.
 
 
-Submit unit tests for your changes.  Go has a great test framework built in; use
-it! Take a look at existing tests for inspiration. Run the full test suite on
-your branch before submitting a pull request.
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. [Run the full test
+suite](https://docs.docker.com/project/test-and-docs/) on your branch before
+submitting a pull request.
 
 
-Update the documentation when creating or modifying features. Test
-your documentation changes for clarity, concision, and correctness, as
-well as a clean documentation build. See `docs/README.md` for more
-information on building the docs and how they get released.
+Update the documentation when creating or modifying features. Test your
+documentation changes for clarity, concision, and correctness, as well as a
+clean documentation build. See our contributors guide for [our style
+guide](https://docs.docker.com/project/doc-style) and instructions on [building
+the documentation](https://docs.docker.com/project/test-and-docs/#build-and-test-the-documentation).
 
 
 Write clean code. Universally formatted code promotes ease of writing, reading,
 Write clean code. Universally formatted code promotes ease of writing, reading,
 and maintenance. Always run `gofmt -s -w file.go` on each changed file before
 and maintenance. Always run `gofmt -s -w file.go` on each changed file before
 committing your changes. Most editors have plug-ins that do this automatically.
 committing your changes. Most editors have plug-ins that do this automatically.
 
 
-Pull requests descriptions should be as clear as possible and include a
-reference to all the issues that they address.
+Pull request descriptions should be as clear as possible and include a reference
+to all the issues that they address.
 
 
-Commit messages must start with a capitalized and short summary (max. 50
-chars) written in the imperative, followed by an optional, more detailed
-explanatory text which is separated from the summary by an empty line.
+Commit messages must start with a capitalized and short summary (max. 50 chars)
+written in the imperative, followed by an optional, more detailed explanatory
+text which is separated from the summary by an empty line.
 
 
 Code review comments may be added to your pull request. Discuss, then make the
 Code review comments may be added to your pull request. Discuss, then make the
-suggested modifications and push additional commits to your feature branch. Be
-sure to post a comment after pushing. The new commits will show up in the pull
-request automatically, but the reviewers will not be notified unless you
-comment.
+suggested modifications and push additional commits to your feature branch. Post
+a comment after pushing. New commits show up in the pull request automatically,
+but the reviewers are notified only when you comment.
 
 
-Pull requests must be cleanly rebased ontop of master without multiple branches
+Pull requests must be cleanly rebased on top of master without multiple branches
 mixed into the PR.
 mixed into the PR.
 
 
 **Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
 **Git tip**: If your PR no longer merges cleanly, use `rebase master` in your
 feature branch to update your pull request rather than `merge master`.
 feature branch to update your pull request rather than `merge master`.
 
 
-Before the pull request is merged, make sure that you squash your commits into
-logical units of work using `git rebase -i` and `git push -f`. After every
-commit the test suite should be passing. Include documentation changes in the
-same commit so that a revert would remove all traces of the feature or fix.
+Before you make a pull request, squash your commits into logical units of work
+using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
+set of patches that should be reviewed together: for example, upgrading the
+version of a vendored dependency and taking advantage of its now available new
+feature constitute two separate units of work. Implementing a new function and
+calling it in another file constitute a single logical unit of work. The very
+high majority of submissions should have a single commit, so if in doubt: squash
+down to one.
+
+After every commit, [make sure the test suite passes]
+(https://docs.docker.com/project/test-and-docs/). Include documentation
+changes in the same pull request so that a revert would remove all traces of
+the feature or fix.
 
 
-Commits that fix or close an issue should include a reference like
-`Closes #XXXX` or `Fixes #XXXX`, which will automatically close the
-issue when merged.
+Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
+close an issue. Including references automatically closes the issue on a merge.
 
 
-Please do not add yourself to the `AUTHORS` file, as it is regenerated
-regularly from the Git history.
+Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
+from the Git history.
 
 
 ### Merge approval
 ### Merge approval
 
 
-Docker maintainers use LGTM (Looks Good To Me) in comments on the code review
-to indicate acceptance.
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance.
 
 
 A change requires LGTMs from an absolute majority of the maintainers of each
 A change requires LGTMs from an absolute majority of the maintainers of each
 component affected. For example, if a change affects `docs/` and `registry/`, it
 component affected. For example, if a change affects `docs/` and `registry/`, it
 needs an absolute majority from the maintainers of `docs/` AND, separately, an
 needs an absolute majority from the maintainers of `docs/` AND, separately, an
 absolute majority of the maintainers of `registry/`.
 absolute majority of the maintainers of `registry/`.
 
 
-For more details see [MAINTAINERS](MAINTAINERS)
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
 
 
 ### Sign your work
 ### Sign your work
 
 
-The sign-off is a simple line at the end of the explanation for the
-patch, which certifies that you wrote it or otherwise have the right to
-pass it on as an open-source patch.  The rules are pretty simple: if you
-can certify the below (from
-[developercertificate.org](http://developercertificate.org/)):
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
 
 
 ```
 ```
 Developer Certificate of Origin
 Developer Certificate of Origin
@@ -263,7 +295,7 @@ Then you just add a line to every git commit message:
 
 
     Signed-off-by: Joe Smith <joe.smith@email.com>
     Signed-off-by: Joe Smith <joe.smith@email.com>
 
 
-Using your real name (sorry, no pseudonyms or anonymous contributions.)
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
 
 
 If you set your `user.name` and `user.email` git configs, you can sign your
 If you set your `user.name` and `user.email` git configs, you can sign your
 commit automatically with `git commit -s`.
 commit automatically with `git commit -s`.
@@ -280,45 +312,45 @@ format right away, but please do adjust your processes for future contributions.
 * Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
 * Step 4: Propose yourself at a scheduled docker meeting in #docker-dev
 
 
 Don't forget: being a maintainer is a time investment. Make sure you
 Don't forget: being a maintainer is a time investment. Make sure you
-will have time to make yourself available.  You don't have to be a
+will have time to make yourself available. You don't have to be a
 maintainer to make a difference on the project!
 maintainer to make a difference on the project!
 
 
-### IRC Meetings
+### IRC meetings
 
 
-There are two monthly meetings taking place on #docker-dev IRC to accomodate all timezones.
-Anybody can ask for a topic to be discussed prior to the meeting.
+There are two monthly meetings taking place on #docker-dev IRC to accommodate all
+timezones. Anybody can propose a topic for discussion prior to the meeting.
 
 
 If you feel the conversation is going off-topic, feel free to point it out.
 If you feel the conversation is going off-topic, feel free to point it out.
 
 
-For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes).
-They also contain all the notes from previous meetings.
+For the exact dates and times, have a look at [the irc-minutes
+repo](https://github.com/docker/irc-minutes). The minutes also contain all the
+notes from previous meetings.
 
 
-## Docker Community Guidelines
+## Docker community guidelines
 
 
-We want to keep the Docker community awesome, growing and collaborative. We
-need your help to keep it that way. To help with this we've come up with some
-general guidelines for the community as a whole:
+We want to keep the Docker community awesome, growing and collaborative. We need
+your help to keep it that way. To help with this we've come up with some general
+guidelines for the community as a whole:
 
 
-* Be nice: Be courteous, respectful and polite to fellow community members: no
-  regional, racial, gender, or other abuse will be tolerated. We like nice people
-  way better than mean ones!
+* Be nice: Be courteous, respectful and polite to fellow community members:
+  no regional, racial, gender, or other abuse will be tolerated. We like
+  nice people way better than mean ones!
 
 
-* Encourage diversity and participation: Make everyone in our community
-  feel welcome, regardless of their background and the extent of their
+* Encourage diversity and participation: Make everyone in our community feel
+  welcome, regardless of their background and the extent of their
   contributions, and do everything possible to encourage participation in
   contributions, and do everything possible to encourage participation in
   our community.
   our community.
 
 
 * Keep it legal: Basically, don't get us in trouble. Share only content that
 * Keep it legal: Basically, don't get us in trouble. Share only content that
-  you own, do not share private or sensitive information, and don't break the
-  law.
+  you own, do not share private or sensitive information, and don't break
+  the law.
 
 
-* Stay on topic: Make sure that you are posting to the correct channel
-  and avoid off-topic discussions. Remember when you update an issue or
-  respond to an email you are potentially sending to a large number of
-  people.  Please consider this before you update.  Also remember that
-  nobody likes spam.
+* Stay on topic: Make sure that you are posting to the correct channel and
+  avoid off-topic discussions. Remember when you update an issue or respond
+  to an email you are potentially sending to a large number of people. Please
+  consider this before you update. Also remember that nobody likes spam.
 
 
-### Guideline Violations — 3 Strikes Method
+### Guideline violations — 3 strikes method
 
 
 The point of this section is not to find opportunities to punish people, but we
 The point of this section is not to find opportunities to punish people, but we
 do need a fair way to deal with people who are making our community suck.
 do need a fair way to deal with people who are making our community suck.
@@ -337,20 +369,19 @@ do need a fair way to deal with people who are making our community suck.
 * Obvious spammers are banned on first occurrence. If we don't do this, we'll
 * Obvious spammers are banned on first occurrence. If we don't do this, we'll
   have spam all over the place.
   have spam all over the place.
 
 
-* Violations are forgiven after 6 months of good behavior, and we won't
-  hold a grudge.
+* Violations are forgiven after 6 months of good behavior, and we won't hold a
+  grudge.
 
 
-* People who commit minor infractions will get some education,
-  rather than hammering them in the 3 strikes process.
+* People who commit minor infractions will get some education, rather than
+  hammering them in the 3 strikes process.
 
 
-* The rules apply equally to everyone in the community, no matter how
-  much you've contributed.
+* The rules apply equally to everyone in the community, no matter how much
+	you've contributed.
 
 
 * Extreme violations of a threatening, abusive, destructive or illegal nature
 * Extreme violations of a threatening, abusive, destructive or illegal nature
-  will be addressed immediately and are not subject to 3 strikes or
-  forgiveness.
+	will be addressed immediately and are not subject to 3 strikes or forgiveness.
 
 
 * Contact abuse@docker.com to report abuse or appeal violations. In the case of
 * Contact abuse@docker.com to report abuse or appeal violations. In the case of
-  appeals, we know that mistakes happen, and we'll work with you to come up with
-  a fair solution if there has been a misunderstanding.
+	appeals, we know that mistakes happen, and we'll work with you to come up with a
+	fair solution if there has been a misunderstanding.
 
 

+ 20 - 15
Dockerfile

@@ -73,7 +73,7 @@ RUN cd /usr/src/lxc \
 	&& ldconfig
 	&& ldconfig
 
 
 # Install Go
 # Install Go
-ENV GO_VERSION 1.4.1
+ENV GO_VERSION 1.4.2
 RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \
 RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \
 	&& mkdir -p /go/bin
 	&& mkdir -p /go/bin
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
@@ -84,10 +84,8 @@ RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
 ENV DOCKER_CROSSPLATFORMS \
 ENV DOCKER_CROSSPLATFORMS \
 	linux/386 linux/arm \
 	linux/386 linux/arm \
 	darwin/amd64 darwin/386 \
 	darwin/amd64 darwin/386 \
-	freebsd/amd64 freebsd/386 freebsd/arm
-
-# TODO when https://jenkins.dockerproject.com/job/Windows/ is green, add windows back to the list above
-#	windows/amd64 windows/386
+	freebsd/amd64 freebsd/386 freebsd/arm \
+	windows/amd64 windows/386
 
 
 # (set an explicit GOARM of 5 for maximum compatibility)
 # (set an explicit GOARM of 5 for maximum compatibility)
 ENV GOARM 5
 ENV GOARM 5
@@ -109,14 +107,8 @@ RUN go get golang.org/x/tools/cmd/cover
 # TODO replace FPM with some very minimal debhelper stuff
 # TODO replace FPM with some very minimal debhelper stuff
 RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
 RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
 
 
-# Get the "busybox" image source so we can build locally instead of pulling
-RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
-
-# Get the "cirros" image source so we can import it instead of fetching it during tests
-RUN curl -sSL -o /cirros.tar.gz https://github.com/ewindisch/docker-cirros/raw/1cded459668e8b9dbf4ef976c94c05add9bbd8e9/cirros-0.3.0-x86_64-lxc.tar.gz
-
 # Install registry
 # Install registry
-ENV REGISTRY_COMMIT c448e0416925a9876d5576e412703c9b8b865e19
+ENV REGISTRY_COMMIT d957768537c5af40e4f4cd96871f7b2bde9e2923
 RUN set -x \
 RUN set -x \
 	&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
 	&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
 	&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
 	&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
@@ -124,7 +116,7 @@ RUN set -x \
 		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry
 		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry
 
 
 # Get the "docker-py" source so we can run their integration tests
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT aa19d7b6609c6676e8258f6b900dea2eda1dbe95
+ENV DOCKER_PY_COMMIT 91985b239764fe54714fa0a93d52aa362357d251
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT
 	&& git checkout -q $DOCKER_PY_COMMIT
@@ -147,6 +139,16 @@ VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
 WORKDIR /go/src/github.com/docker/docker
 ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion
 ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion
 
 
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+
+# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
+COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/
+RUN ./contrib/download-frozen-image.sh /docker-frozen-images \
+	busybox:latest@4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 \
+	hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5
+# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
+
 # Install man page generator
 # Install man page generator
 COPY vendor /go/src/github.com/docker/docker/vendor
 COPY vendor /go/src/github.com/docker/docker/vendor
 # (copy vendor/ because go-md2man needs golang.org/x/net)
 # (copy vendor/ because go-md2man needs golang.org/x/net)
@@ -156,8 +158,11 @@ RUN set -x \
 	&& go install -v github.com/cpuguy83/go-md2man
 	&& go install -v github.com/cpuguy83/go-md2man
 
 
 # install toml validator
 # install toml validator
-RUN git clone -b v0.1.0 https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
-    && go install -v github.com/BurntSushi/toml/cmd/tomlv
+ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
+RUN set -x \
+	&& git clone https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
+	&& (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT) \
+	&& go install -v github.com/BurntSushi/toml/cmd/tomlv
 
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
 ENTRYPOINT ["hack/dind"]

+ 34 - 0
Dockerfile.simple

@@ -0,0 +1,34 @@
+# docker build -t docker:simple -f Dockerfile.simple .
+# docker run --rm docker:simple hack/make.sh dynbinary
+# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit
+# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli
+
+# This represents the bare minimum required to build and test Docker.
+
+FROM debian:jessie
+
+# compile and runtime deps
+# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
+# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+		btrfs-tools \
+		curl \
+		gcc \
+		git \
+		golang \
+		libdevmapper-dev \
+		libsqlite3-dev \
+		\
+		ca-certificates \
+		e2fsprogs \
+		iptables \
+		procps \
+		xz-utils \
+		\
+		aufs-tools \
+		lxc \
+	&& rm -rf /var/lib/apt/lists/*
+
+ENV AUTO_GOPATH 1
+WORKDIR /usr/src/docker
+COPY . /usr/src/docker

+ 57 - 6
MAINTAINERS

@@ -12,9 +12,9 @@
 
 
 	[Rules.maintainers]
 	[Rules.maintainers]
 
 
-	title = "What is a maintainer?"
+		title = "What is a maintainer?"
 
 
-	text = """
+		text = """
 There are different types of maintainers, with different responsibilities, but
 There are different types of maintainers, with different responsibilities, but
 all maintainers have 3 things in common:
 all maintainers have 3 things in common:
 
 
@@ -193,13 +193,18 @@ for each.
 			# They should ask for any editorial change that makes the documentation more
 			# They should ask for any editorial change that makes the documentation more
 			# consistent and easier to understand.
 			# consistent and easier to understand.
 			#
 			#
-			# Once documentation is approved, a maintainer should make sure to remove this
+			# Once documentation is approved (see below), a maintainer should make sure to remove this
 			# label and add the next one.
 			# label and add the next one.
 
 
 			close = ""
 			close = ""
 			2-code-review = "requires more code changes"
 			2-code-review = "requires more code changes"
 			1-design-review = "raises design concerns"
 			1-design-review = "raises design concerns"
 			4-merge = "general case"
 			4-merge = "general case"
+			
+		# Docs approval
+		[Rules.review.docs-approval]
+			# Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers.
+			# If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR). 	
 
 
 		# Merge
 		# Merge
 		[Rules.review.states.4-merge]
 		[Rules.review.states.4-merge]
@@ -337,13 +342,14 @@ made through a pull request.
 			"unclejack",
 			"unclejack",
 			"crosbymichael",
 			"crosbymichael",
 			"erikh",
 			"erikh",
+			"estesp",
 			"icecrime",
 			"icecrime",
 			"jfrazelle",
 			"jfrazelle",
 			"lk4d4",
 			"lk4d4",
 			"tibor",
 			"tibor",
 			"vbatts",
 			"vbatts",
 			"vieux",
 			"vieux",
-			"vish"
+			"vishh"
 		]
 		]
 
 
 
 
@@ -403,6 +409,8 @@ made through a pull request.
 				"fredlf",
 				"fredlf",
 				"james",
 				"james",
 				"sven",
 				"sven",
+				"spf13",
+				"mary"
 			]
 			]
 
 
 		[Org.Subsystems.libcontainer]
 		[Org.Subsystems.libcontainer]
@@ -421,7 +429,10 @@ made through a pull request.
 				"dmp42",
 				"dmp42",
 				"vbatts",
 				"vbatts",
 				"joffrey",
 				"joffrey",
-				"samalba"
+				"samalba",
+				"sday",
+				"jlhawn",
+				"dmcg"
 			]
 			]
 
 
 		[Org.Subsystems."build tools"]
 		[Org.Subsystems."build tools"]
@@ -499,6 +510,16 @@ made through a pull request.
 	Email = "dug@us.ibm.com"
 	Email = "dug@us.ibm.com"
 	GitHub = "duglin"
 	GitHub = "duglin"
 
 
+	[people.dmcg]
+	Name = "Derek McGowan"
+	Email = "derek@docker.com"
+	Github = "dmcgowan"
+
+	[people.dmp42]
+	Name = "Olivier Gambier"
+	Email = "olivier@docker.com"
+	Github = "dmp42"
+
 	[people.ehazlett]
 	[people.ehazlett]
 	Name = "Evan Hazlett"
 	Name = "Evan Hazlett"
 	Email = "ejhazlett@gmail.com"
 	Email = "ejhazlett@gmail.com"
@@ -514,6 +535,16 @@ made through a pull request.
 	Email = "eric@windisch.us"
 	Email = "eric@windisch.us"
 	GitHub = "ewindisch"
 	GitHub = "ewindisch"
 
 
+	[people.estesp]
+	Name = "Phil Estes"
+	Email = "estesp@linux.vnet.ibm.com"
+	GitHub = "estesp"
+
+	[people.fredlf]
+	Name = "Fred Lifton"
+	Email = "fred.lifton@docker.com"
+	GitHub = "fredlf"
+
 	[people.icecrime]
 	[people.icecrime]
 	Name = "Arnaud Porterie"
 	Name = "Arnaud Porterie"
 	Email = "arnaud@docker.com"
 	Email = "arnaud@docker.com"
@@ -524,11 +555,31 @@ made through a pull request.
 	Email = "jess@docker.com"
 	Email = "jess@docker.com"
 	GitHub = "jfrazelle"
 	GitHub = "jfrazelle"
 
 
+	[people.jlhawn]
+	Name = "Josh Hawn"
+	Email = "josh.hawn@docker.com"
+	Github = "jlhawn"
+
+	[people.joffrey]
+	Name = "Joffrey Fuhrer"
+	Email = "joffrey@docker.com"
+	Github = "shin-"
+
 	[people.lk4d4]
 	[people.lk4d4]
 	Name = "Alexander Morozov"
 	Name = "Alexander Morozov"
 	Email = "lk4d4@docker.com"
 	Email = "lk4d4@docker.com"
 	GitHub = "lk4d4"
 	GitHub = "lk4d4"
 
 
+	[people.mary]
+	Name = "Mary Anthony"
+	Email = "mary.anthony@docker.com"
+	GitHub = "moxiegirl"
+
+	[people.sday]
+	Name = "Stephen Day"
+	Email = "stephen.day@docker.com"
+	Github = "stevvooe"
+
 	[people.shykes]
 	[people.shykes]
 	Name = "Solomon Hykes"
 	Name = "Solomon Hykes"
 	Email = "solomon@docker.com"
 	Email = "solomon@docker.com"
@@ -584,7 +635,7 @@ made through a pull request.
 	Email = "cristian.staretu@gmail.com"
 	Email = "cristian.staretu@gmail.com"
 	GitHub = "unclejack"
 	GitHub = "unclejack"
 
 
-	[people.vish]
+	[people.vishh]
 	Name = "Vishnu Kannan"
 	Name = "Vishnu Kannan"
 	Email = "vishnuk@google.com"
 	Email = "vishnuk@google.com"
 	GitHub = "vishh"
 	GitHub = "vishh"

+ 9 - 6
Makefile

@@ -13,10 +13,11 @@ DOCKER_ENVS := \
 	-e TIMEOUT
 	-e TIMEOUT
 # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
 # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
 
 
-# to allow `make BINDDIR=. shell` or `make BINDDIR= test`
+# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
 # (default to no bind mount if DOCKER_HOST is set)
 # (default to no bind mount if DOCKER_HOST is set)
-BINDDIR := $(if $(DOCKER_HOST),,bundles)
-DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/docker/docker/$(BINDDIR)")
+# note: BINDDIR is supported for backwards-compatibility here
+BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
+DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
 
 
 # to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
 # to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs)
 DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
 DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
@@ -53,7 +54,9 @@ docs-shell: docs-build
 	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
 	$(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
 
 
 docs-release: docs-build
 docs-release: docs-build
-	$(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT -e DISTRIBUTION_ID "$(DOCKER_DOCS_IMAGE)" ./release.sh
+	$(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT -e DISTRIBUTION_ID \
+		-v $(CURDIR)/docs/awsconfig:/docs/awsconfig \
+		"$(DOCKER_DOCS_IMAGE)" ./release.sh
 
 
 docs-test: docs-build
 docs-test: docs-build
 	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
 	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
@@ -83,11 +86,11 @@ build: bundles
 	docker build -t "$(DOCKER_IMAGE)" .
 	docker build -t "$(DOCKER_IMAGE)" .
 
 
 docs-build:
 docs-build:
-	( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs docs/ > docs/changed-files
 	cp ./VERSION docs/VERSION
 	cp ./VERSION docs/VERSION
 	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
 	echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
-	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
+#	echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
 	echo "$(GITCOMMIT)" > docs/GITCOMMIT
 	echo "$(GITCOMMIT)" > docs/GITCOMMIT
+	docker pull docs/base
 	docker build -t "$(DOCKER_DOCS_IMAGE)" docs
 	docker build -t "$(DOCKER_DOCS_IMAGE)" docs
 
 
 bundles:
 bundles:

+ 1 - 1
NOTICE

@@ -1,5 +1,5 @@
 Docker
 Docker
-Copyright 2012-2014 Docker, Inc.
+Copyright 2012-2015 Docker, Inc.
 
 
 This product includes software developed at Docker, Inc. (http://www.docker.com).
 This product includes software developed at Docker, Inc. (http://www.docker.com).
 
 

+ 11 - 7
README.md

@@ -18,7 +18,7 @@ It benefits directly from the experience accumulated over several years
 of large-scale operation and support of hundreds of thousands of
 of large-scale operation and support of hundreds of thousands of
 applications and databases.
 applications and databases.
 
 
-![Docker L](docs/theme/mkdocs/images/docker-logo-compressed.png "Docker")
+![Docker L](docs/sources/static_files/docker-logo-compressed.png "Docker")
 
 
 ## Security Disclosure
 ## Security Disclosure
 
 
@@ -173,6 +173,8 @@ Under the hood, Docker is built on the following components:
   [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part)
   [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part)
   capabilities of the Linux kernel;
   capabilities of the Linux kernel;
 * The [Go](http://golang.org) programming language.
 * The [Go](http://golang.org) programming language.
+* The [Docker Image Specification] (https://github.com/docker/docker/blob/master/image/spec/v1.md)
+* The [Libcontainer Specification] (https://github.com/docker/libcontainer/blob/master/SPEC.md)
 
 
 Contributing to Docker
 Contributing to Docker
 ======================
 ======================
@@ -181,12 +183,14 @@ Contributing to Docker
 [![Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.com/job/Docker%20Master/)
 [![Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.com/job/Docker%20Master/)
 
 
 Want to hack on Docker? Awesome! We have [instructions to help you get
 Want to hack on Docker? Awesome! We have [instructions to help you get
-started](CONTRIBUTING.md). If you'd like to contribute to the
-documentation, please take a look at this [README.md](https://github.com/docker/docker/blob/master/docs/README.md).
+started contributing code or documentation.](https://docs.docker.com/project/who-written-for/).
 
 
 These instructions are probably not perfect, please let us know if anything
 These instructions are probably not perfect, please let us know if anything
 feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
 feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
 
 
+Getting the development builds
+==============================
+
 Want to run Docker from a master build? You can download 
 Want to run Docker from a master build? You can download 
 master builds at [master.dockerproject.com](https://master.dockerproject.com). 
 master builds at [master.dockerproject.com](https://master.dockerproject.com). 
 They are updated with each commit merged into the master branch.
 They are updated with each commit merged into the master branch.
@@ -231,12 +235,12 @@ Docker platform to broaden its application and utility.
 If you know of another project underway that should be listed here, please help
 If you know of another project underway that should be listed here, please help
 us keep this list up-to-date by submitting a PR.
 us keep this list up-to-date by submitting a PR.
 
 
-* [Docker Registry](https://github.com/docker/docker-registry): Registry 
-server for Docker (hosting/delivering of repositories and images) 
+* [Docker Registry](https://github.com/docker/distribution): Registry 
+server for Docker (hosting/delivery of repositories and images)
 * [Docker Machine](https://github.com/docker/machine): Machine management 
 * [Docker Machine](https://github.com/docker/machine): Machine management 
 for a container-centric world 
 for a container-centric world 
 * [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering 
 * [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering 
 system 
 system 
-* [Docker Compose, aka Fig](https://github.com/docker/fig): 
-Multi-container application management
+* [Docker Compose](https://github.com/docker/compose) (formerly Fig): 
+Define and run multi-container apps
 
 

+ 1 - 1
VERSION

@@ -1 +1 @@
-1.5.0
+1.5.0-dev

+ 0 - 2
api/MAINTAINERS

@@ -1,2 +0,0 @@
-Victor Vieux <vieux@docker.com> (@vieux)
-Jessie Frazelle <jess@docker.com> (@jfrazelle)

+ 10 - 13
api/client/cli.go

@@ -14,6 +14,7 @@ import (
 	"text/template"
 	"text/template"
 	"time"
 	"time"
 
 
+	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
@@ -92,10 +93,13 @@ func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bo
 	flags := flag.NewFlagSet(name, errorHandling)
 	flags := flag.NewFlagSet(name, errorHandling)
 	flags.Usage = func() {
 	flags.Usage = func() {
 		options := ""
 		options := ""
+		if signature != "" {
+			signature = " " + signature
+		}
 		if flags.FlagCountUndeprecated() > 0 {
 		if flags.FlagCountUndeprecated() > 0 {
-			options = "[OPTIONS] "
+			options = " [OPTIONS]"
 		}
 		}
-		fmt.Fprintf(cli.out, "\nUsage: docker %s %s%s\n\n%s\n\n", name, options, signature, description)
+		fmt.Fprintf(cli.out, "\nUsage: docker %s%s%s\n\n%s\n\n", name, options, signature, description)
 		flags.SetOutput(cli.out)
 		flags.SetOutput(cli.out)
 		flags.PrintDefaults()
 		flags.PrintDefaults()
 		os.Exit(0)
 		os.Exit(0)
@@ -104,7 +108,7 @@ func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bo
 }
 }
 
 
 func (cli *DockerCli) LoadConfigFile() (err error) {
 func (cli *DockerCli) LoadConfigFile() (err error) {
-	cli.configFile, err = registry.LoadConfig(os.Getenv("HOME"))
+	cli.configFile, err = registry.LoadConfig(homedir.Get())
 	if err != nil {
 	if err != nil {
 		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
 		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
 	}
 	}
@@ -133,19 +137,12 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
 	if tlsConfig != nil {
 	if tlsConfig != nil {
 		scheme = "https"
 		scheme = "https"
 	}
 	}
-
 	if in != nil {
 	if in != nil {
-		if file, ok := in.(*os.File); ok {
-			inFd = file.Fd()
-			isTerminalIn = term.IsTerminal(inFd)
-		}
+		inFd, isTerminalIn = term.GetFdInfo(in)
 	}
 	}
 
 
 	if out != nil {
 	if out != nil {
-		if file, ok := out.(*os.File); ok {
-			outFd = file.Fd()
-			isTerminalOut = term.IsTerminal(outFd)
-		}
+		outFd, isTerminalOut = term.GetFdInfo(out)
 	}
 	}
 
 
 	if err == nil {
 	if err == nil {
@@ -154,7 +151,6 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
 
 
 	// The transport is created here for reuse during the client session
 	// The transport is created here for reuse during the client session
 	tr := &http.Transport{
 	tr := &http.Transport{
-		Proxy:           http.ProxyFromEnvironment,
 		TLSClientConfig: tlsConfig,
 		TLSClientConfig: tlsConfig,
 	}
 	}
 
 
@@ -167,6 +163,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
 			return net.DialTimeout(proto, addr, timeout)
 			return net.DialTimeout(proto, addr, timeout)
 		}
 		}
 	} else {
 	} else {
+		tr.Proxy = http.ProxyFromEnvironment
 		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
 		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
 	}
 	}
 
 

+ 282 - 113
api/client/commands.go

@@ -26,17 +26,21 @@ import (
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
-	"github.com/docker/docker/api/stats"
-	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
 	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/filters"
 	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/symlink"
@@ -79,13 +83,17 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 
 
 func (cli *DockerCli) CmdBuild(args ...string) error {
 func (cli *DockerCli) CmdBuild(args ...string) error {
 	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
 	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
-	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
+	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
 	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
 	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
 	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
 	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
 	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
 	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
-	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds")
+	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
 	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
 	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
-	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile(Default is 'Dockerfile' at context root)")
+	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
+	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
+	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
+	flCpuShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+	flCpuSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
 
 
 	cmd.Require(flag.Exact, 1)
 	cmd.Require(flag.Exact, 1)
 
 
@@ -112,9 +120,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 			if err != nil {
 			if err != nil {
 				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
 				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
 			}
 			}
-			if *dockerfileName == "" {
-				*dockerfileName = api.DefaultDockerfileName
-			}
+
+			// -f option has no meaning when we're reading it from stdin,
+			// so just use our default Dockerfile name
+			*dockerfileName = api.DefaultDockerfileName
 			context, err = archive.Generate(*dockerfileName, string(dockerfile))
 			context, err = archive.Generate(*dockerfileName, string(dockerfile))
 		} else {
 		} else {
 			context = ioutil.NopCloser(buf)
 			context = ioutil.NopCloser(buf)
@@ -153,11 +162,20 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		if *dockerfileName == "" {
 		if *dockerfileName == "" {
 			// No -f/--file was specified so use the default
 			// No -f/--file was specified so use the default
 			*dockerfileName = api.DefaultDockerfileName
 			*dockerfileName = api.DefaultDockerfileName
-			filename = path.Join(absRoot, *dockerfileName)
+			filename = filepath.Join(absRoot, *dockerfileName)
+
+			// Just to be nice ;-) look for 'dockerfile' too but only
+			// use it if we found it, otherwise ignore this check
+			if _, err = os.Lstat(filename); os.IsNotExist(err) {
+				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
+				if _, err = os.Lstat(tmpFN); err == nil {
+					*dockerfileName = strings.ToLower(*dockerfileName)
+					filename = tmpFN
+				}
+			}
 		}
 		}
 
 
 		origDockerfile := *dockerfileName // used for error msg
 		origDockerfile := *dockerfileName // used for error msg
-
 		if filename, err = filepath.Abs(filename); err != nil {
 		if filename, err = filepath.Abs(filename); err != nil {
 			return err
 			return err
 		}
 		}
@@ -173,6 +191,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
+		// And canonicalize dockerfile name to a platform-independent one
+		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
+		if err != nil {
+			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err)
+		}
 
 
 		if _, err = os.Lstat(filename); os.IsNotExist(err) {
 		if _, err = os.Lstat(filename); os.IsNotExist(err) {
 			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
 			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
@@ -209,12 +232,48 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 			return err
 			return err
 		}
 		}
 	}
 	}
+
+	// windows: show error message about modified file permissions
+	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
+	if runtime.GOOS == "windows" {
+		log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+	}
+
 	var body io.Reader
 	var body io.Reader
 	// Setup an upload progress bar
 	// Setup an upload progress bar
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	if context != nil {
 	if context != nil {
 		sf := utils.NewStreamFormatter(false)
 		sf := utils.NewStreamFormatter(false)
-		body = utils.ProgressReader(context, 0, cli.out, sf, true, "", "Sending build context to Docker daemon")
+		body = progressreader.New(progressreader.Config{
+			In:        context,
+			Out:       cli.out,
+			Formatter: sf,
+			NewLines:  true,
+			ID:        "",
+			Action:    "Sending build context to Docker daemon",
+		})
+	}
+
+	var memory int64
+	if *flMemoryString != "" {
+		parsedMemory, err := units.RAMInBytes(*flMemoryString)
+		if err != nil {
+			return err
+		}
+		memory = parsedMemory
+	}
+
+	var memorySwap int64
+	if *flMemorySwap != "" {
+		if *flMemorySwap == "-1" {
+			memorySwap = -1
+		} else {
+			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
+			if err != nil {
+				return err
+			}
+			memorySwap = parsedMemorySwap
+		}
 	}
 	}
 	// Send the build context
 	// Send the build context
 	v := &url.Values{}
 	v := &url.Values{}
@@ -257,6 +316,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		v.Set("pull", "1")
 		v.Set("pull", "1")
 	}
 	}
 
 
+	v.Set("cpusetcpus", *flCpuSetCpus)
+	v.Set("cpushares", strconv.FormatInt(*flCpuShares, 10))
+	v.Set("memory", strconv.FormatInt(memory, 10))
+	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
+
 	v.Set("dockerfile", *dockerfileName)
 	v.Set("dockerfile", *dockerfileName)
 
 
 	cli.LoadConfigFile()
 	cli.LoadConfigFile()
@@ -284,7 +348,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 
 
 // 'docker login': login / register a user to registry service.
 // 'docker login': login / register a user to registry service.
 func (cli *DockerCli) CmdLogin(args ...string) error {
 func (cli *DockerCli) CmdLogin(args ...string) error {
-	cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.", true)
+	cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
 	cmd.Require(flag.Max, 1)
 	cmd.Require(flag.Max, 1)
 
 
 	var username, password, email string
 	var username, password, email string
@@ -327,6 +391,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	if username == "" {
 	if username == "" {
 		promptDefault("Username", authconfig.Username)
 		promptDefault("Username", authconfig.Username)
 		username = readInput(cli.in, cli.out)
 		username = readInput(cli.in, cli.out)
+		username = strings.Trim(username, " ")
 		if username == "" {
 		if username == "" {
 			username = authconfig.Username
 			username = authconfig.Username
 		}
 		}
@@ -361,7 +426,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	} else {
 	} else {
 		// However, if they don't override the username use the
 		// However, if they don't override the username use the
 		// password or email from the cmd line if specified. IOW, allow
 		// password or email from the cmd line if specified. IOW, allow
-		// then to change/overide them.  And if not specified, just
+		// then to change/override them.  And if not specified, just
 		// use what's in the config file
 		// use what's in the config file
 		if password == "" {
 		if password == "" {
 			password = authconfig.Password
 			password = authconfig.Password
@@ -388,10 +453,12 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	var out2 engine.Env
 	var out2 engine.Env
 	err = out2.Decode(stream)
 	err = out2.Decode(stream)
 	if err != nil {
 	if err != nil {
-		cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME"))
+		cli.configFile, _ = registry.LoadConfig(homedir.Get())
 		return err
 		return err
 	}
 	}
 	registry.SaveConfig(cli.configFile)
 	registry.SaveConfig(cli.configFile)
+	fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE))
+
 	if out2.Get("Status") != "" {
 	if out2.Get("Status") != "" {
 		fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
 		fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
 	}
 	}
@@ -400,7 +467,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 
 
 // log out from a Docker registry
 // log out from a Docker registry
 func (cli *DockerCli) CmdLogout(args ...string) error {
 func (cli *DockerCli) CmdLogout(args ...string) error {
-	cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.", true)
+	cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
 	cmd.Require(flag.Max, 1)
 	cmd.Require(flag.Max, 1)
 
 
 	utils.ParseFlags(cmd, args, false)
 	utils.ParseFlags(cmd, args, false)
@@ -482,6 +549,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	}
 	}
 	fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
 	fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
 	fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
 	fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
+	fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", remoteVersion.Get("Os"), remoteVersion.Get("Arch"))
 	return nil
 	return nil
 }
 }
 
 
@@ -559,6 +627,14 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 		if remoteInfo.Exists("NGoroutines") {
 		if remoteInfo.Exists("NGoroutines") {
 			fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
 			fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
 		}
 		}
+		if remoteInfo.Exists("SystemTime") {
+			t, err := remoteInfo.GetTime("SystemTime")
+			if err != nil {
+				log.Errorf("Error reading system time: %v", err)
+			} else {
+				fmt.Fprintf(cli.out, "System Time: %s\n", t.Format(time.UnixDate))
+			}
+		}
 		if remoteInfo.Exists("NEventsListener") {
 		if remoteInfo.Exists("NEventsListener") {
 			fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
 			fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
 		}
 		}
@@ -572,7 +648,15 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 			fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root)
 			fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root)
 		}
 		}
 	}
 	}
-
+	if remoteInfo.Exists("HttpProxy") {
+		fmt.Fprintf(cli.out, "Http Proxy: %s\n", remoteInfo.Get("HttpProxy"))
+	}
+	if remoteInfo.Exists("HttpsProxy") {
+		fmt.Fprintf(cli.out, "Https Proxy: %s\n", remoteInfo.Get("HttpsProxy"))
+	}
+	if remoteInfo.Exists("NoProxy") {
+		fmt.Fprintf(cli.out, "No Proxy: %s\n", remoteInfo.Get("NoProxy"))
+	}
 	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
 	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
 		cli.LoadConfigFile()
 		cli.LoadConfigFile()
 		u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
 		u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
@@ -601,8 +685,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdStop(args ...string) error {
 func (cli *DockerCli) CmdStop(args ...string) error {
-	cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a grace period", true)
-	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.")
+	cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true)
+	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it")
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -625,7 +709,7 @@ func (cli *DockerCli) CmdStop(args ...string) error {
 
 
 func (cli *DockerCli) CmdRestart(args ...string) error {
 func (cli *DockerCli) CmdRestart(args ...string) error {
 	cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container", true)
 	cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container", true)
-	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.")
+	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container")
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -677,8 +761,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		cErr chan error
 		cErr chan error
 		tty  bool
 		tty  bool
 
 
-		cmd       = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container", true)
-		attach    = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's STDOUT and STDERR and forward all signals to the process")
+		cmd       = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Start one or more stopped containers", true)
+		attach    = cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals")
 		openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
 		openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
 	)
 	)
 
 
@@ -686,6 +770,16 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
 
 
 	hijacked := make(chan io.Closer)
 	hijacked := make(chan io.Closer)
+	// Block the return until the chan gets closed
+	defer func() {
+		log.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
+		if _, ok := <-hijacked; ok {
+			log.Errorf("Hijack did not finish (chan still open)")
+		}
+		if *openStdin || *attach {
+			cli.in.Close()
+		}
+	}()
 
 
 	if *attach || *openStdin {
 	if *attach || *openStdin {
 		if cmd.NArg() > 1 {
 		if cmd.NArg() > 1 {
@@ -742,25 +836,26 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 			return err
 			return err
 		}
 		}
 	}
 	}
-
 	var encounteredError error
 	var encounteredError error
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
 		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
 		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
 		if err != nil {
 		if err != nil {
 			if !*attach && !*openStdin {
 			if !*attach && !*openStdin {
+				// attach and openStdin is false means it could be starting multiple containers
+				// when a container start failed, show the error message and start next
 				fmt.Fprintf(cli.err, "%s\n", err)
 				fmt.Fprintf(cli.err, "%s\n", err)
+				encounteredError = fmt.Errorf("Error: failed to start one or more containers")
+			} else {
+				encounteredError = err
 			}
 			}
-			encounteredError = fmt.Errorf("Error: failed to start one or more containers")
 		} else {
 		} else {
 			if !*attach && !*openStdin {
 			if !*attach && !*openStdin {
 				fmt.Fprintf(cli.out, "%s\n", name)
 				fmt.Fprintf(cli.out, "%s\n", name)
 			}
 			}
 		}
 		}
 	}
 	}
+
 	if encounteredError != nil {
 	if encounteredError != nil {
-		if *openStdin || *attach {
-			cli.in.Close()
-		}
 		return encounteredError
 		return encounteredError
 	}
 	}
 
 
@@ -785,8 +880,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdUnpause(args ...string) error {
 func (cli *DockerCli) CmdUnpause(args ...string) error {
-	cmd := cli.Subcmd("unpause", "CONTAINER", "Unpause all processes within a container", true)
-	cmd.Require(flag.Exact, 1)
+	cmd := cli.Subcmd("unpause", "CONTAINER [CONTAINER...]", "Unpause all processes within a container", true)
+	cmd.Require(flag.Min, 1)
 	utils.ParseFlags(cmd, args, false)
 	utils.ParseFlags(cmd, args, false)
 
 
 	var encounteredError error
 	var encounteredError error
@@ -802,8 +897,8 @@ func (cli *DockerCli) CmdUnpause(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdPause(args ...string) error {
 func (cli *DockerCli) CmdPause(args ...string) error {
-	cmd := cli.Subcmd("pause", "CONTAINER", "Pause all processes within a container", true)
-	cmd.Require(flag.Exact, 1)
+	cmd := cli.Subcmd("pause", "CONTAINER [CONTAINER...]", "Pause all processes within a container", true)
+	cmd.Require(flag.Min, 1)
 	utils.ParseFlags(cmd, args, false)
 	utils.ParseFlags(cmd, args, false)
 
 
 	var encounteredError error
 	var encounteredError error
@@ -840,7 +935,7 @@ func (cli *DockerCli) CmdRename(args ...string) error {
 
 
 func (cli *DockerCli) CmdInspect(args ...string) error {
 func (cli *DockerCli) CmdInspect(args ...string) error {
 	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true)
 	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true)
-	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
+	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -862,6 +957,12 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 	for _, name := range cmd.Args() {
 	for _, name := range cmd.Args() {
 		obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
 		obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
 		if err != nil {
 		if err != nil {
+			if strings.Contains(err.Error(), "Too many") {
+				fmt.Fprintf(cli.err, "Error: %v", err)
+				status = 1
+				continue
+			}
+
 			obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false))
 			obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false))
 			if err != nil {
 			if err != nil {
 				if strings.Contains(err.Error(), "No such") {
 				if strings.Contains(err.Error(), "No such") {
@@ -947,7 +1048,7 @@ func (cli *DockerCli) CmdTop(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdPort(args ...string) error {
 func (cli *DockerCli) CmdPort(args ...string) error {
-	cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT", true)
+	cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true)
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
 
 
@@ -1068,7 +1169,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
 			if *noTrunc {
 			if *noTrunc {
 				fmt.Fprintf(w, "%s\t", outID)
 				fmt.Fprintf(w, "%s\t", outID)
 			} else {
 			} else {
-				fmt.Fprintf(w, "%s\t", utils.TruncateID(outID))
+				fmt.Fprintf(w, "%s\t", common.TruncateID(outID))
 			}
 			}
 
 
 			fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
 			fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
@@ -1083,7 +1184,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
 			if *noTrunc {
 			if *noTrunc {
 				fmt.Fprintln(w, outID)
 				fmt.Fprintln(w, outID)
 			} else {
 			} else {
-				fmt.Fprintln(w, utils.TruncateID(outID))
+				fmt.Fprintln(w, common.TruncateID(outID))
 			}
 			}
 		}
 		}
 	}
 	}
@@ -1094,7 +1195,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
 func (cli *DockerCli) CmdRm(args ...string) error {
 func (cli *DockerCli) CmdRm(args ...string) error {
 	cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true)
 	cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true)
 	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
 	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
-	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
+	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link")
 	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
 	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 
 
@@ -1146,7 +1247,9 @@ func (cli *DockerCli) CmdKill(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdImport(args ...string) error {
 func (cli *DockerCli) CmdImport(args ...string) error {
-	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.", true)
+	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true)
+	flChanges := opts.NewListOpts(nil)
+	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -1159,7 +1262,9 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 
 
 	v.Set("fromSrc", src)
 	v.Set("fromSrc", src)
 	v.Set("repo", repository)
 	v.Set("repo", repository)
-
+	for _, change := range flChanges.GetAll() {
+		v.Add("changes", change)
+	}
 	if cmd.NArg() == 3 {
 	if cmd.NArg() == 3 {
 		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
 		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
 		v.Set("tag", cmd.Arg(2))
 		v.Set("tag", cmd.Arg(2))
@@ -1245,7 +1350,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdPull(args ...string) error {
 func (cli *DockerCli) CmdPull(args ...string) error {
-	cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry", true)
+	cmd := cli.Subcmd("pull", "NAME[:TAG|@DIGEST]", "Pull an image or a repository from the registry", true)
 	allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
 	allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
 	cmd.Require(flag.Exact, 1)
 	cmd.Require(flag.Exact, 1)
 
 
@@ -1258,7 +1363,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 	)
 	)
 	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
 	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
 	if tag == "" && !*allTags {
 	if tag == "" && !*allTags {
-		newRemote = taglessRemote + ":" + graph.DEFAULTTAG
+		newRemote = utils.ImageReference(taglessRemote, graph.DEFAULTTAG)
 	}
 	}
 	if tag != "" && *allTags {
 	if tag != "" && *allTags {
 		return fmt.Errorf("tag can't be used with --all-tags/-a")
 		return fmt.Errorf("tag can't be used with --all-tags/-a")
@@ -1309,14 +1414,15 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 func (cli *DockerCli) CmdImages(args ...string) error {
 func (cli *DockerCli) CmdImages(args ...string) error {
 	cmd := cli.Subcmd("images", "[REPOSITORY]", "List images", true)
 	cmd := cli.Subcmd("images", "[REPOSITORY]", "List images", true)
 	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
 	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
-	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)")
+	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
 	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
 	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
+	showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")
 	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 	flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
 	flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
 	flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
 	flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
 
 
 	flFilter := opts.NewListOpts(nil)
 	flFilter := opts.NewListOpts(nil)
-	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e., 'dangling=true')")
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
 	cmd.Require(flag.Max, 1)
 	cmd.Require(flag.Max, 1)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -1377,7 +1483,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 			}
 			}
 
 
 			if matchName != "" {
 			if matchName != "" {
-				if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) {
+				if matchName == image.Get("Id") || matchName == common.TruncateID(image.Get("Id")) {
 					startImage = image
 					startImage = image
 				}
 				}
 
 
@@ -1437,20 +1543,46 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 
 
 		w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 		w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 		if !*quiet {
 		if !*quiet {
-			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
+			if *showDigests {
+				fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
+			} else {
+				fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
+			}
 		}
 		}
 
 
 		for _, out := range outs.Data {
 		for _, out := range outs.Data {
-			for _, repotag := range out.GetList("RepoTags") {
+			outID := out.Get("Id")
+			if !*noTrunc {
+				outID = common.TruncateID(outID)
+			}
+
+			repoTags := out.GetList("RepoTags")
+			repoDigests := out.GetList("RepoDigests")
+
+			if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
+				// dangling image - clear out either repoTags or repoDigsts so we only show it once below
+				repoDigests = []string{}
+			}
 
 
-				repo, tag := parsers.ParseRepositoryTag(repotag)
-				outID := out.Get("Id")
-				if !*noTrunc {
-					outID = utils.TruncateID(outID)
+			// combine the tags and digests lists
+			tagsAndDigests := append(repoTags, repoDigests...)
+			for _, repoAndRef := range tagsAndDigests {
+				repo, ref := parsers.ParseRepositoryTag(repoAndRef)
+				// default tag and digest to none - if there's a value, it'll be set below
+				tag := "<none>"
+				digest := "<none>"
+				if utils.DigestReference(ref) {
+					digest = ref
+				} else {
+					tag = ref
 				}
 				}
 
 
 				if !*quiet {
 				if !*quiet {
-					fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize"))))
+					if *showDigests {
+						fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize"))))
+					} else {
+						fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize"))))
+					}
 				} else {
 				} else {
 					fmt.Fprintln(w, outID)
 					fmt.Fprintln(w, outID)
 				}
 				}
@@ -1501,8 +1633,8 @@ func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix strin
 		imageID = image.Get("Id")
 		imageID = image.Get("Id")
 		parentID = image.Get("ParentId")
 		parentID = image.Get("ParentId")
 	} else {
 	} else {
-		imageID = utils.TruncateID(image.Get("Id"))
-		parentID = utils.TruncateID(image.Get("ParentId"))
+		imageID = common.TruncateID(image.Get("Id"))
+		parentID = common.TruncateID(image.Get("ParentId"))
 	}
 	}
 	if parentID == "" {
 	if parentID == "" {
 		fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
 		fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
@@ -1521,7 +1653,7 @@ func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix stri
 	if noTrunc {
 	if noTrunc {
 		imageID = image.Get("Id")
 		imageID = image.Get("Id")
 	} else {
 	} else {
-		imageID = utils.TruncateID(image.Get("Id"))
+		imageID = common.TruncateID(image.Get("Id"))
 	}
 	}
 
 
 	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
 	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
@@ -1542,17 +1674,17 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 		cmd      = cli.Subcmd("ps", "", "List containers", true)
 		cmd      = cli.Subcmd("ps", "", "List containers", true)
 		quiet    = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
 		quiet    = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
 		size     = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
 		size     = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
-		all      = cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
+		all      = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
 		noTrunc  = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
 		noTrunc  = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-		nLatest  = cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
-		since    = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.")
-		before   = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.")
-		last     = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
+		nLatest  = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running")
+		since    = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running")
+		before   = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name")
+		last     = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running")
 		flFilter = opts.NewListOpts(nil)
 		flFilter = opts.NewListOpts(nil)
 	)
 	)
 	cmd.Require(flag.Exact, 0)
 	cmd.Require(flag.Exact, 0)
 
 
-	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. Valid filters:\nexited=<int> - containers with exit code of <int>\nstatus=(restarting|running|paused|exited)")
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
 	if *last == -1 && *nLatest {
 	if *last == -1 && *nLatest {
@@ -1629,7 +1761,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 		outID := out.Get("Id")
 		outID := out.Get("Id")
 
 
 		if !*noTrunc {
 		if !*noTrunc {
-			outID = utils.TruncateID(outID)
+			outID = common.TruncateID(outID)
 		}
 		}
 
 
 		if *quiet {
 		if *quiet {
@@ -1693,6 +1825,8 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
 	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
 	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
 	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
 	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
 	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
+	flChanges := opts.NewListOpts(nil)
+	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
 	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
 	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
 	flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
 	flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
 	cmd.Require(flag.Max, 2)
 	cmd.Require(flag.Max, 2)
@@ -1717,6 +1851,9 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 	v.Set("tag", tag)
 	v.Set("tag", tag)
 	v.Set("comment", *flComment)
 	v.Set("comment", *flComment)
 	v.Set("author", *flAuthor)
 	v.Set("author", *flAuthor)
+	for _, change := range flChanges.GetAll() {
+		v.Add("changes", change)
+	}
 
 
 	if *flPause != true {
 	if *flPause != true {
 		v.Set("pause", "0")
 		v.Set("pause", "0")
@@ -1749,7 +1886,7 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 	since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
 	since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
 	until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
 	until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
 	flFilter := opts.NewListOpts(nil)
 	flFilter := opts.NewListOpts(nil)
-	cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e., 'event=stop')")
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
 	cmd.Require(flag.Exact, 0)
 	cmd.Require(flag.Exact, 0)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -1800,14 +1937,40 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdExport(args ...string) error {
 func (cli *DockerCli) CmdExport(args ...string) error {
-	cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT", true)
+	cmd := cli.Subcmd("export", "CONTAINER", "Export a filesystem as a tar archive (streamed to STDOUT by default)", true)
+	outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
 	cmd.Require(flag.Exact, 1)
 	cmd.Require(flag.Exact, 1)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
 
 
-	if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
-		return err
+	var (
+		output io.Writer = cli.out
+		err    error
+	)
+	if *outfile != "" {
+		output, err = os.Create(*outfile)
+		if err != nil {
+			return err
+		}
+	} else if cli.isTerminalOut {
+		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
+	}
+
+	if len(cmd.Args()) == 1 {
+		image := cmd.Arg(0)
+		if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil {
+			return err
+		}
+	} else {
+		v := url.Values{}
+		for _, arg := range cmd.Args() {
+			v.Add("names", arg)
+		}
+		if err := cli.stream("GET", "/containers/get?"+v.Encode(), nil, output, nil); err != nil {
+			return err
+		}
 	}
 	}
+
 	return nil
 	return nil
 }
 }
 
 
@@ -1847,7 +2010,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		cmd    = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
 		cmd    = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
 		follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
 		follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
 		times  = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
 		times  = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
-		tail   = cmd.String([]string{"-tail"}, "all", "Output the specified number of lines at the end of logs (defaults to all logs)")
+		tail   = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
 	)
 	)
 	cmd.Require(flag.Exact, 1)
 	cmd.Require(flag.Exact, 1)
 
 
@@ -1865,6 +2028,10 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		return err
 		return err
 	}
 	}
 
 
+	if env.GetSubEnv("HostConfig").GetSubEnv("LogConfig").Get("Type") != "json-file" {
+		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver")
+	}
+
 	v := url.Values{}
 	v := url.Values{}
 	v.Set("stdout", "1")
 	v.Set("stdout", "1")
 	v.Set("stderr", "1")
 	v.Set("stderr", "1")
@@ -1885,7 +2052,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 	var (
 	var (
 		cmd     = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true)
 		cmd     = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true)
 		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
 		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
-		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.")
+		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process")
 	)
 	)
 	cmd.Require(flag.Exact, 1)
 	cmd.Require(flag.Exact, 1)
 
 
@@ -2111,7 +2278,7 @@ func (cid *cidFile) Write(id string) error {
 	return nil
 	return nil
 }
 }
 
 
-func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (engine.Env, error) {
+func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
 	containerValues := url.Values{}
 	containerValues := url.Values{}
 	if name != "" {
 	if name != "" {
 		containerValues.Set("name", name)
 		containerValues.Set("name", name)
@@ -2136,7 +2303,7 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
 		if tag == "" {
 		if tag == "" {
 			tag = graph.DEFAULTTAG
 			tag = graph.DEFAULTTAG
 		}
 		}
-		fmt.Fprintf(cli.err, "Unable to find image '%s:%s' locally\n", repo, tag)
+		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag))
 
 
 		// we don't want to write to stdout anything apart from container.ID
 		// we don't want to write to stdout anything apart from container.ID
 		if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
 		if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
@@ -2150,23 +2317,19 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	var result engine.Env
-	if err := result.Decode(stream); err != nil {
+	var response types.ContainerCreateResponse
+	if err := json.NewDecoder(stream).Decode(&response); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-
-	for _, warning := range result.GetList("Warnings") {
+	for _, warning := range response.Warnings {
 		fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
 		fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
 	}
 	}
-
 	if containerIDFile != nil {
 	if containerIDFile != nil {
-		if err = containerIDFile.Write(result.Get("Id")); err != nil {
+		if err = containerIDFile.Write(response.ID); err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 	}
 	}
-
-	return result, nil
-
+	return &response, nil
 }
 }
 
 
 func (cli *DockerCli) CmdCreate(args ...string) error {
 func (cli *DockerCli) CmdCreate(args ...string) error {
@@ -2185,14 +2348,11 @@ func (cli *DockerCli) CmdCreate(args ...string) error {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
 	}
 	}
-
-	createResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
+	response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-
-	fmt.Fprintf(cli.out, "%s\n", createResult.Get("Id"))
-
+	fmt.Fprintf(cli.out, "%s\n", response.ID)
 	return nil
 	return nil
 }
 }
 
 
@@ -2202,9 +2362,9 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 
 
 	// These are flags not stored in Config/HostConfig
 	// These are flags not stored in Config/HostConfig
 	var (
 	var (
-		flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
-		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID")
-		flSigProxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
+		flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits")
+		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID")
+		flSigProxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process")
 		flName       = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
 		flName       = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
 		flAttach     *opts.ListOpts
 		flAttach     *opts.ListOpts
 
 
@@ -2218,6 +2378,18 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	if err != nil {
 	if err != nil {
 		utils.ReportError(cmd, err.Error(), true)
 		utils.ReportError(cmd, err.Error(), true)
 	}
 	}
+
+	if len(hostConfig.Dns) > 0 {
+		// check the DNS settings passed via --dns against
+		// localhost regexp to warn if they are trying to
+		// set a DNS to a localhost address
+		for _, dnsIP := range hostConfig.Dns {
+			if resolvconf.IsLocalhost(dnsIP) {
+				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
+				break
+			}
+		}
+	}
 	if config.Image == "" {
 	if config.Image == "" {
 		cmd.Usage()
 		cmd.Usage()
 		return nil
 		return nil
@@ -2228,7 +2400,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			return err
 			return err
 		}
 		}
 	} else {
 	} else {
-		if fl := cmd.Lookup("attach"); fl != nil {
+		if fl := cmd.Lookup("-attach"); fl != nil {
 			flAttach = fl.Value.(*opts.ListOpts)
 			flAttach = fl.Value.(*opts.ListOpts)
 			if flAttach.Len() != 0 {
 			if flAttach.Len() != 0 {
 				return ErrConflictAttachDetach
 				return ErrConflictAttachDetach
@@ -2250,38 +2422,32 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		sigProxy = false
 		sigProxy = false
 	}
 	}
 
 
-	runResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
+	createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-
 	if sigProxy {
 	if sigProxy {
-		sigc := cli.forwardAllSignals(runResult.Get("Id"))
+		sigc := cli.forwardAllSignals(createResponse.ID)
 		defer signal.StopCatch(sigc)
 		defer signal.StopCatch(sigc)
 	}
 	}
-
 	var (
 	var (
 		waitDisplayId chan struct{}
 		waitDisplayId chan struct{}
 		errCh         chan error
 		errCh         chan error
 	)
 	)
-
 	if !config.AttachStdout && !config.AttachStderr {
 	if !config.AttachStdout && !config.AttachStderr {
 		// Make this asynchronous to allow the client to write to stdin before having to read the ID
 		// Make this asynchronous to allow the client to write to stdin before having to read the ID
 		waitDisplayId = make(chan struct{})
 		waitDisplayId = make(chan struct{})
 		go func() {
 		go func() {
 			defer close(waitDisplayId)
 			defer close(waitDisplayId)
-			fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id"))
+			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
 		}()
 		}()
 	}
 	}
-
 	if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
 	if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
 		return ErrConflictRestartPolicyAndAutoRemove
 		return ErrConflictRestartPolicyAndAutoRemove
 	}
 	}
-
 	// We need to instantiate the chan because the select needs it. It can
 	// We need to instantiate the chan because the select needs it. It can
 	// be closed but can't be uninitialized.
 	// be closed but can't be uninitialized.
 	hijacked := make(chan io.Closer)
 	hijacked := make(chan io.Closer)
-
 	// Block the return until the chan gets closed
 	// Block the return until the chan gets closed
 	defer func() {
 	defer func() {
 		log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
 		log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
@@ -2289,7 +2455,6 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			log.Errorf("Hijack did not finish (chan still open)")
 			log.Errorf("Hijack did not finish (chan still open)")
 		}
 		}
 	}()
 	}()
-
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
 		var (
 		var (
 			out, stderr io.Writer
 			out, stderr io.Writer
@@ -2297,7 +2462,6 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			v           = url.Values{}
 			v           = url.Values{}
 		)
 		)
 		v.Set("stream", "1")
 		v.Set("stream", "1")
-
 		if config.AttachStdin {
 		if config.AttachStdin {
 			v.Set("stdin", "1")
 			v.Set("stdin", "1")
 			in = cli.in
 			in = cli.in
@@ -2314,14 +2478,12 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 				stderr = cli.err
 				stderr = cli.err
 			}
 			}
 		}
 		}
-
 		errCh = promise.Go(func() error {
 		errCh = promise.Go(func() error {
-			return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
+			return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
 		})
 		})
 	} else {
 	} else {
 		close(hijacked)
 		close(hijacked)
 	}
 	}
-
 	// Acknowledge the hijack before starting
 	// Acknowledge the hijack before starting
 	select {
 	select {
 	case closer := <-hijacked:
 	case closer := <-hijacked:
@@ -2338,12 +2500,12 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	}
 	}
 
 
 	//start the container
 	//start the container
-	if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", nil, false)); err != nil {
+	if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, false)); err != nil {
 		return err
 		return err
 	}
 	}
 
 
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
 	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
-		if err := cli.monitorTtySize(runResult.Get("Id"), false); err != nil {
+		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
 			log.Errorf("Error monitoring TTY size: %s", err)
 			log.Errorf("Error monitoring TTY size: %s", err)
 		}
 		}
 	}
 	}
@@ -2368,26 +2530,26 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	if *flAutoRemove {
 	if *flAutoRemove {
 		// Autoremove: wait for the container to finish, retrieve
 		// Autoremove: wait for the container to finish, retrieve
 		// the exit code and remove the container
 		// the exit code and remove the container
-		if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil {
+		if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, false)); err != nil {
 			return err
 			return err
 		}
 		}
-		if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
+		if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
 			return err
 			return err
 		}
 		}
-		if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil {
+		if _, _, err := readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, false)); err != nil {
 			return err
 			return err
 		}
 		}
 	} else {
 	} else {
 		// No Autoremove: Simply retrieve the exit code
 		// No Autoremove: Simply retrieve the exit code
 		if !config.Tty {
 		if !config.Tty {
 			// In non-TTY mode, we can't detach, so we must wait for container exit
 			// In non-TTY mode, we can't detach, so we must wait for container exit
-			if status, err = waitForExit(cli, runResult.Get("Id")); err != nil {
+			if status, err = waitForExit(cli, createResponse.ID); err != nil {
 				return err
 				return err
 			}
 			}
 		} else {
 		} else {
 			// In TTY mode, there is a race: if the process dies too slowly, the state could
 			// In TTY mode, there is a race: if the process dies too slowly, the state could
 			// be updated after the getExitCode call and result in the wrong exit code being reported
 			// be updated after the getExitCode call and result in the wrong exit code being reported
-			if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
+			if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
 				return err
 				return err
 			}
 			}
 		}
 		}
@@ -2399,7 +2561,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdCp(args ...string) error {
 func (cli *DockerCli) CmdCp(args ...string) error {
-	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH", true)
+	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true)
 	cmd.Require(flag.Exact, 2)
 	cmd.Require(flag.Exact, 2)
 
 
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
@@ -2426,7 +2588,14 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 	}
 	}
 
 
 	if statusCode == 200 {
 	if statusCode == 200 {
-		if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil {
+		dest := copyData.Get("HostPath")
+
+		if dest == "-" {
+			_, err = io.Copy(cli.out, stream)
+		} else {
+			err = archive.Untar(stream, dest, &archive.TarOptions{NoLchown: true})
+		}
+		if err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}
@@ -2631,7 +2800,7 @@ func (s *containerStats) Collect(cli *DockerCli) {
 	)
 	)
 	go func() {
 	go func() {
 		for {
 		for {
-			var v *stats.Stats
+			var v *types.Stats
 			if err := dec.Decode(&v); err != nil {
 			if err := dec.Decode(&v); err != nil {
 				u <- err
 				u <- err
 				return
 				return
@@ -2641,7 +2810,7 @@ func (s *containerStats) Collect(cli *DockerCli) {
 				cpuPercent = 0.0
 				cpuPercent = 0.0
 			)
 			)
 			if !start {
 			if !start {
-				cpuPercent = calcuateCpuPercent(previousCpu, previousSystem, v)
+				cpuPercent = calculateCpuPercent(previousCpu, previousSystem, v)
 			}
 			}
 			start = false
 			start = false
 			s.mu.Lock()
 			s.mu.Lock()
@@ -2694,7 +2863,7 @@ func (s *containerStats) Display(w io.Writer) error {
 }
 }
 
 
 func (cli *DockerCli) CmdStats(args ...string) error {
 func (cli *DockerCli) CmdStats(args ...string) error {
-	cmd := cli.Subcmd("stats", "CONTAINER", "Display a live stream of one or more containers' resource usage statistics", true)
+	cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true)
 	cmd.Require(flag.Min, 1)
 	cmd.Require(flag.Min, 1)
 	utils.ParseFlags(cmd, args, true)
 	utils.ParseFlags(cmd, args, true)
 
 
@@ -2721,7 +2890,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
 	for _, c := range cStats {
 	for _, c := range cStats {
 		c.mu.Lock()
 		c.mu.Lock()
 		if c.err != nil {
 		if c.err != nil {
-			errs = append(errs, fmt.Sprintf("%s: %s", c.Name, c.err.Error()))
+			errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
 		}
 		}
 		c.mu.Unlock()
 		c.mu.Unlock()
 	}
 	}
@@ -2748,7 +2917,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func calcuateCpuPercent(previousCpu, previousSystem uint64, v *stats.Stats) float64 {
+func calculateCpuPercent(previousCpu, previousSystem uint64, v *types.Stats) float64 {
 	var (
 	var (
 		cpuPercent = 0.0
 		cpuPercent = 0.0
 		// calculate the change for the cpu usage of the container in between readings
 		// calculate the change for the cpu usage of the container in between readings

+ 1 - 1
api/client/hijack.go

@@ -15,7 +15,7 @@ import (
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
-	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/term"

+ 1 - 1
api/client/utils.go

@@ -17,7 +17,7 @@ import (
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
-	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/stdcopy"

+ 66 - 9
api/common.go

@@ -15,7 +15,7 @@ import (
 )
 )
 
 
 const (
 const (
-	APIVERSION            version.Version = "1.17"
+	APIVERSION            version.Version = "1.18"
 	DEFAULTHTTPHOST                       = "127.0.0.1"
 	DEFAULTHTTPHOST                       = "127.0.0.1"
 	DEFAULTUNIXSOCKET                     = "/var/run/docker.sock"
 	DEFAULTUNIXSOCKET                     = "/var/run/docker.sock"
 	DefaultDockerfileName string          = "Dockerfile"
 	DefaultDockerfileName string          = "Dockerfile"
@@ -29,25 +29,82 @@ func ValidateHost(val string) (string, error) {
 	return host, nil
 	return host, nil
 }
 }
 
 
-//TODO remove, used on < 1.5 in getContainersJSON
+// TODO remove, used on < 1.5 in getContainersJSON
 func DisplayablePorts(ports *engine.Table) string {
 func DisplayablePorts(ports *engine.Table) string {
-	result := []string{}
-	ports.SetKey("PublicPort")
+	var (
+		result          = []string{}
+		hostMappings    = []string{}
+		firstInGroupMap map[string]int
+		lastInGroupMap  map[string]int
+	)
+	firstInGroupMap = make(map[string]int)
+	lastInGroupMap = make(map[string]int)
+	ports.SetKey("PrivatePort")
 	ports.Sort()
 	ports.Sort()
 	for _, port := range ports.Data {
 	for _, port := range ports.Data {
-		if port.Get("IP") == "" {
-			result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PrivatePort"), port.Get("Type")))
-		} else {
-			result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
+		var (
+			current      = port.GetInt("PrivatePort")
+			portKey      = port.Get("Type")
+			firstInGroup int
+			lastInGroup  int
+		)
+		if port.Get("IP") != "" {
+			if port.GetInt("PublicPort") != current {
+				hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
+				continue
+			}
+			portKey = fmt.Sprintf("%s/%s", port.Get("IP"), port.Get("Type"))
 		}
 		}
+		firstInGroup = firstInGroupMap[portKey]
+		lastInGroup = lastInGroupMap[portKey]
+
+		if firstInGroup == 0 {
+			firstInGroupMap[portKey] = current
+			lastInGroupMap[portKey] = current
+			continue
+		}
+
+		if current == (lastInGroup + 1) {
+			lastInGroupMap[portKey] = current
+			continue
+		}
+		result = append(result, FormGroup(portKey, firstInGroup, lastInGroup))
+		firstInGroupMap[portKey] = current
+		lastInGroupMap[portKey] = current
+	}
+	for portKey, firstInGroup := range firstInGroupMap {
+		result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey]))
 	}
 	}
+	result = append(result, hostMappings...)
 	return strings.Join(result, ", ")
 	return strings.Join(result, ", ")
 }
 }
 
 
+func FormGroup(key string, start, last int) string {
+	var (
+		group     string
+		parts     = strings.Split(key, "/")
+		groupType = parts[0]
+		ip        = ""
+	)
+	if len(parts) > 1 {
+		ip = parts[0]
+		groupType = parts[1]
+	}
+	if start == last {
+		group = fmt.Sprintf("%d", start)
+	} else {
+		group = fmt.Sprintf("%d-%d", start, last)
+	}
+	if ip != "" {
+		group = fmt.Sprintf("%s:%s->%s", ip, group, group)
+	}
+	return fmt.Sprintf("%s/%s", group, groupType)
+}
+
 func MatchesContentType(contentType, expectedType string) bool {
 func MatchesContentType(contentType, expectedType string) bool {
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
 	if err != nil {
-		log.Errorf("Error parsing media type: %s error: %s", contentType, err.Error())
+		log.Errorf("Error parsing media type: %s error: %v", contentType, err)
 	}
 	}
 	return err == nil && mimetype == expectedType
 	return err == nil && mimetype == expectedType
 }
 }

+ 0 - 2
api/server/MAINTAINERS

@@ -1,2 +0,0 @@
-Victor Vieux <vieux@docker.com> (@vieux)
-# Johan Euphrosine <proppy@google.com> (@proppy)

+ 84 - 122
api/server/server.go

@@ -16,7 +16,6 @@ import (
 	"os"
 	"os"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
-	"syscall"
 
 
 	"crypto/tls"
 	"crypto/tls"
 	"crypto/x509"
 	"crypto/x509"
@@ -27,12 +26,12 @@ import (
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/daemon/networkdriver/portallocator"
 	"github.com/docker/docker/daemon/networkdriver/portallocator"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/listenbuffer"
 	"github.com/docker/docker/pkg/listenbuffer"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/stdcopy"
-	"github.com/docker/docker/pkg/systemd"
 	"github.com/docker/docker/pkg/version"
 	"github.com/docker/docker/pkg/version"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
@@ -135,17 +134,27 @@ func httpError(w http.ResponseWriter, err error) {
 	}
 	}
 
 
 	if err != nil {
 	if err != nil {
-		log.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error())
+		log.Errorf("HTTP Error: statusCode=%d %v", statusCode, err)
 		http.Error(w, err.Error(), statusCode)
 		http.Error(w, err.Error(), statusCode)
 	}
 	}
 }
 }
 
 
-func writeJSON(w http.ResponseWriter, code int, v engine.Env) error {
+// writeJSONEnv writes the engine.Env values to the http response stream as a
+// json encoded body.
+func writeJSONEnv(w http.ResponseWriter, code int, v engine.Env) error {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Content-Type", "application/json")
 	w.WriteHeader(code)
 	w.WriteHeader(code)
 	return v.Encode(w)
 	return v.Encode(w)
 }
 }
 
 
+// writeJSON writes the value v to the http response stream as json with standard
+// json encoding.
+func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(code)
+	return json.NewEncoder(w).Encode(v)
+}
+
 func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
 func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
 	w.Header().Set("Content-Type", "application/json")
 	w.Header().Set("Content-Type", "application/json")
 	if flush {
 	if flush {
@@ -183,7 +192,7 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter
 	if status := engine.Tail(stdoutBuffer, 1); status != "" {
 	if status := engine.Tail(stdoutBuffer, 1); status != "" {
 		var env engine.Env
 		var env engine.Env
 		env.Set("Status", status)
 		env.Set("Status", status)
-		return writeJSON(w, http.StatusOK, env)
+		return writeJSONEnv(w, http.StatusOK, env)
 	}
 	}
 	w.WriteHeader(http.StatusNoContent)
 	w.WriteHeader(http.StatusNoContent)
 	return nil
 	return nil
@@ -518,6 +527,7 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
 	job.Setenv("tag", r.Form.Get("tag"))
 	job.Setenv("tag", r.Form.Get("tag"))
 	job.Setenv("author", r.Form.Get("author"))
 	job.Setenv("author", r.Form.Get("author"))
 	job.Setenv("comment", r.Form.Get("comment"))
 	job.Setenv("comment", r.Form.Get("comment"))
+	job.SetenvList("changes", r.Form["changes"])
 	job.SetenvSubEnv("config", &config)
 	job.SetenvSubEnv("config", &config)
 
 
 	job.Stdout.Add(stdoutBuffer)
 	job.Stdout.Add(stdoutBuffer)
@@ -525,7 +535,7 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
 		return err
 		return err
 	}
 	}
 	env.Set("Id", engine.Tail(stdoutBuffer, 1))
 	env.Set("Id", engine.Tail(stdoutBuffer, 1))
-	return writeJSON(w, http.StatusCreated, env)
+	return writeJSONEnv(w, http.StatusCreated, env)
 }
 }
 
 
 // Creates an image from Pull or from Import
 // Creates an image from Pull or from Import
@@ -570,6 +580,7 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon
 		}
 		}
 		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
 		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
 		job.Stdin.Add(r.Body)
 		job.Stdin.Add(r.Body)
+		job.SetenvList("changes", r.Form["changes"])
 	}
 	}
 
 
 	if version.GreaterThan("1.0") {
 	if version.GreaterThan("1.0") {
@@ -703,18 +714,16 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re
 	if err := parseForm(r); err != nil {
 	if err := parseForm(r); err != nil {
 		return nil
 		return nil
 	}
 	}
+	if err := checkForJson(r); err != nil {
+		return err
+	}
 	var (
 	var (
-		out          engine.Env
 		job          = eng.Job("create", r.Form.Get("name"))
 		job          = eng.Job("create", r.Form.Get("name"))
 		outWarnings  []string
 		outWarnings  []string
 		stdoutBuffer = bytes.NewBuffer(nil)
 		stdoutBuffer = bytes.NewBuffer(nil)
 		warnings     = bytes.NewBuffer(nil)
 		warnings     = bytes.NewBuffer(nil)
 	)
 	)
 
 
-	if err := checkForJson(r); err != nil {
-		return err
-	}
-
 	if err := job.DecodeEnv(r.Body); err != nil {
 	if err := job.DecodeEnv(r.Body); err != nil {
 		return err
 		return err
 	}
 	}
@@ -730,10 +739,10 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re
 	for scanner.Scan() {
 	for scanner.Scan() {
 		outWarnings = append(outWarnings, scanner.Text())
 		outWarnings = append(outWarnings, scanner.Text())
 	}
 	}
-	out.Set("Id", engine.Tail(stdoutBuffer, 1))
-	out.SetList("Warnings", outWarnings)
-
-	return writeJSON(w, http.StatusCreated, out)
+	return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{
+		ID:       engine.Tail(stdoutBuffer, 1),
+		Warnings: outWarnings,
+	})
 }
 }
 
 
 func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -876,7 +885,7 @@ func postContainersWait(eng *engine.Engine, version version.Version, w http.Resp
 	}
 	}
 
 
 	env.Set("StatusCode", engine.Tail(stdoutBuffer, 1))
 	env.Set("StatusCode", engine.Tail(stdoutBuffer, 1))
-	return writeJSON(w, http.StatusOK, env)
+	return writeJSONEnv(w, http.StatusOK, env)
 }
 }
 
 
 func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -1073,6 +1082,24 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 	job.Setenv("forcerm", r.FormValue("forcerm"))
 	job.Setenv("forcerm", r.FormValue("forcerm"))
 	job.SetenvJson("authConfig", authConfig)
 	job.SetenvJson("authConfig", authConfig)
 	job.SetenvJson("configFile", configFile)
 	job.SetenvJson("configFile", configFile)
+	job.Setenv("memswap", r.FormValue("memswap"))
+	job.Setenv("memory", r.FormValue("memory"))
+	job.Setenv("cpusetcpus", r.FormValue("cpusetcpus"))
+	job.Setenv("cpushares", r.FormValue("cpushares"))
+
+	// Job cancellation. Note: not all job types support this.
+	if closeNotifier, ok := w.(http.CloseNotifier); ok {
+		finished := make(chan struct{})
+		defer close(finished)
+		go func() {
+			select {
+			case <-finished:
+			case <-closeNotifier.CloseNotify():
+				log.Infof("Client disconnected, cancelling job: %v", job)
+				job.Cancel()
+			}
+		}()
+	}
 
 
 	if err := job.Run(); err != nil {
 	if err := job.Run(); err != nil {
 		if !job.Stdout.Used() {
 		if !job.Stdout.Used() {
@@ -1113,8 +1140,8 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
 	job.Stdout.Add(w)
 	job.Stdout.Add(w)
 	w.Header().Set("Content-Type", "application/x-tar")
 	w.Header().Set("Content-Type", "application/x-tar")
 	if err := job.Run(); err != nil {
 	if err := job.Run(); err != nil {
-		log.Errorf("%s", err.Error())
-		if strings.Contains(strings.ToLower(err.Error()), "no such container") {
+		log.Errorf("%v", err)
+		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
 			w.WriteHeader(http.StatusNotFound)
 			w.WriteHeader(http.StatusNotFound)
 		} else if strings.Contains(err.Error(), "no such file or directory") {
 		} else if strings.Contains(err.Error(), "no such file or directory") {
 			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
 			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
@@ -1147,7 +1174,7 @@ func postContainerExecCreate(eng *engine.Engine, version version.Version, w http
 	// Return the ID
 	// Return the ID
 	out.Set("Id", engine.Tail(stdoutBuffer, 1))
 	out.Set("Id", engine.Tail(stdoutBuffer, 1))
 
 
-	return writeJSON(w, http.StatusCreated, out)
+	return writeJSONEnv(w, http.StatusCreated, out)
 }
 }
 
 
 // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
 // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
@@ -1219,8 +1246,9 @@ func optionsHandler(eng *engine.Engine, version version.Version, w http.Response
 	w.WriteHeader(http.StatusOK)
 	w.WriteHeader(http.StatusOK)
 	return nil
 	return nil
 }
 }
-func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
-	w.Header().Add("Access-Control-Allow-Origin", "*")
+func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) {
+	log.Debugf("CORS header is enabled and set to: %s", corsHeaders)
+	w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
 	w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
 	w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
 	w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
 	w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
 }
 }
@@ -1230,7 +1258,7 @@ func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r
 	return err
 	return err
 }
 }
 
 
-func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc {
+func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the request
 		// log the request
 		log.Debugf("Calling %s %s", localMethod, localRoute)
 		log.Debugf("Calling %s %s", localMethod, localRoute)
@@ -1249,8 +1277,8 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
 		if version == "" {
 		if version == "" {
 			version = api.APIVERSION
 			version = api.APIVERSION
 		}
 		}
-		if enableCors {
-			writeCorsHeaders(w, r)
+		if corsHeaders != "" {
+			writeCorsHeaders(w, r, corsHeaders)
 		}
 		}
 
 
 		if version.GreaterThan(api.APIVERSION) {
 		if version.GreaterThan(api.APIVERSION) {
@@ -1292,7 +1320,8 @@ func AttachProfiler(router *mux.Router) {
 	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
 	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
 }
 }
 
 
-func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) *mux.Router {
+// we keep enableCors just for legacy usage, need to be removed in the future
+func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders string, dockerVersion string) *mux.Router {
 	r := mux.NewRouter()
 	r := mux.NewRouter()
 	if os.Getenv("DEBUG") != "" {
 	if os.Getenv("DEBUG") != "" {
 		AttachProfiler(r)
 		AttachProfiler(r)
@@ -1354,6 +1383,12 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 		},
 		},
 	}
 	}
 
 
+	// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*"
+	// otherwise, all head values will be passed to HTTP handler
+	if corsHeaders == "" && enableCors {
+		corsHeaders = "*"
+	}
+
 	for method, routes := range m {
 	for method, routes := range m {
 		for route, fct := range routes {
 		for route, fct := range routes {
 			log.Debugf("Registering %s, %s", method, route)
 			log.Debugf("Registering %s, %s", method, route)
@@ -1363,7 +1398,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 			localMethod := method
 			localMethod := method
 
 
 			// build the handler function
 			// build the handler function
-			f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion))
+			f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, corsHeaders, version.Version(dockerVersion))
 
 
 			// add the new route
 			// add the new route
 			if localRoute == "" {
 			if localRoute == "" {
@@ -1382,49 +1417,12 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
 // FIXME: refactor this to be part of Server and not require re-creating a new
 // FIXME: refactor this to be part of Server and not require re-creating a new
 // router each time. This requires first moving ListenAndServe into Server.
 // router each time. This requires first moving ListenAndServe into Server.
 func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) {
 func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) {
-	router := createRouter(eng, false, true, "")
+	router := createRouter(eng, false, true, "", "")
 	// Insert APIVERSION into the request as a convenience
 	// Insert APIVERSION into the request as a convenience
 	req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path)
 	req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path)
 	router.ServeHTTP(w, req)
 	router.ServeHTTP(w, req)
 }
 }
 
 
-// serveFd creates an http.Server and sets it up to serve given a socket activated
-// argument.
-func serveFd(addr string, job *engine.Job) error {
-	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
-
-	ls, e := systemd.ListenFD(addr)
-	if e != nil {
-		return e
-	}
-
-	chErrors := make(chan error, len(ls))
-
-	// We don't want to start serving on these sockets until the
-	// daemon is initialized and installed. Otherwise required handlers
-	// won't be ready.
-	<-activationLock
-
-	// Since ListenFD will return one or more sockets we have
-	// to create a go func to spawn off multiple serves
-	for i := range ls {
-		listener := ls[i]
-		go func() {
-			httpSrv := http.Server{Handler: r}
-			chErrors <- httpSrv.Serve(listener)
-		}()
-	}
-
-	for i := 0; i < len(ls); i++ {
-		err := <-chErrors
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 func lookupGidByName(nameOrGid string) (int, error) {
 func lookupGidByName(nameOrGid string) (int, error) {
 	groupFile, err := user.GetGroupPath()
 	groupFile, err := user.GetGroupPath()
 	if err != nil {
 	if err != nil {
@@ -1439,13 +1437,21 @@ func lookupGidByName(nameOrGid string) (int, error) {
 	if groups != nil && len(groups) > 0 {
 	if groups != nil && len(groups) > 0 {
 		return groups[0].Gid, nil
 		return groups[0].Gid, nil
 	}
 	}
+	gid, err := strconv.Atoi(nameOrGid)
+	if err == nil {
+		log.Warnf("Could not find GID %d", gid)
+		return gid, nil
+	}
 	return -1, fmt.Errorf("Group %s not found", nameOrGid)
 	return -1, fmt.Errorf("Group %s not found", nameOrGid)
 }
 }
 
 
 func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) {
 func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) {
 	tlsCert, err := tls.LoadX509KeyPair(cert, key)
 	tlsCert, err := tls.LoadX509KeyPair(cert, key)
 	if err != nil {
 	if err != nil {
-		return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?",
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", cert, key, err)
+		}
+		return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %q. Make sure the key is encrypted.",
 			cert, key, err)
 			cert, key, err)
 	}
 	}
 	tlsConfig := &tls.Config{
 	tlsConfig := &tls.Config{
@@ -1459,7 +1465,7 @@ func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) {
 		certPool := x509.NewCertPool()
 		certPool := x509.NewCertPool()
 		file, err := ioutil.ReadFile(ca)
 		file, err := ioutil.ReadFile(ca)
 		if err != nil {
 		if err != nil {
-			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+			return nil, fmt.Errorf("Could not read CA certificate: %v", err)
 		}
 		}
 		certPool.AppendCertsFromPEM(file)
 		certPool.AppendCertsFromPEM(file)
 		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
 		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
@@ -1502,31 +1508,6 @@ func setSocketGroup(addr, group string) error {
 	return nil
 	return nil
 }
 }
 
 
-func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
-	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
-
-	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
-		return nil, err
-	}
-	mask := syscall.Umask(0777)
-	defer syscall.Umask(mask)
-
-	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
-	if err != nil {
-		return nil, err
-	}
-
-	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
-		return nil, err
-	}
-
-	if err := os.Chmod(addr, 0660); err != nil {
-		return nil, err
-	}
-
-	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
-}
-
 func allocateDaemonPort(addr string) error {
 func allocateDaemonPort(addr string) error {
 	host, port, err := net.SplitHostPort(addr)
 	host, port, err := net.SplitHostPort(addr)
 	if err != nil {
 	if err != nil {
@@ -1554,11 +1535,11 @@ func allocateDaemonPort(addr string) error {
 }
 }
 
 
 func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
 func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
-	if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
-		log.Infof("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+	if !job.GetenvBool("TlsVerify") {
+		log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 	}
 	}
 
 
-	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
+	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))
 
 
 	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
 	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
 	if err != nil {
 	if err != nil {
@@ -1582,21 +1563,6 @@ func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
 	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
 	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
 }
 }
 
 
-// NewServer sets up the required Server and does protocol specific checking.
-func NewServer(proto, addr string, job *engine.Job) (Server, error) {
-	// Basic error and sanity checking
-	switch proto {
-	case "fd":
-		return nil, serveFd(addr, job)
-	case "tcp":
-		return setupTcpHttp(addr, job)
-	case "unix":
-		return setupUnixHttp(addr, job)
-	default:
-		return nil, fmt.Errorf("Invalid protocol format.")
-	}
-}
-
 type Server interface {
 type Server interface {
 	Serve() error
 	Serve() error
 	Close() error
 	Close() error
@@ -1626,7 +1592,15 @@ func ServeApi(job *engine.Job) engine.Status {
 				chErrors <- err
 				chErrors <- err
 				return
 				return
 			}
 			}
-			chErrors <- srv.Serve()
+			job.Eng.OnShutdown(func() {
+				if err := srv.Close(); err != nil {
+					log.Error(err)
+				}
+			})
+			if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+				err = nil
+			}
+			chErrors <- err
 		}()
 		}()
 	}
 	}
 
 
@@ -1639,15 +1613,3 @@ func ServeApi(job *engine.Job) engine.Status {
 
 
 	return engine.StatusOK
 	return engine.StatusOK
 }
 }
-
-func AcceptConnections(job *engine.Job) engine.Status {
-	// Tell the init daemon we are accepting requests
-	go systemd.SdNotify("READY=1")
-
-	// close the lock so the listeners start accepting connections
-	if activationLock != nil {
-		close(activationLock)
-	}
-
-	return engine.StatusOK
-}

+ 103 - 0
api/server/server_linux.go

@@ -0,0 +1,103 @@
+// +build linux
+
+package server
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/systemd"
+)
+
+// NewServer sets up the required Server and does protocol specific checking.
+func NewServer(proto, addr string, job *engine.Job) (Server, error) {
+	// Basic error and sanity checking
+	switch proto {
+	case "fd":
+		return nil, serveFd(addr, job)
+	case "tcp":
+		return setupTcpHttp(addr, job)
+	case "unix":
+		return setupUnixHttp(addr, job)
+	default:
+		return nil, fmt.Errorf("Invalid protocol format.")
+	}
+}
+
// setupUnixHttp builds an HttpServer listening on the unix socket at addr.
// The socket file is recreated with no permissions, chowned to the configured
// socket group, and only then opened up to 0660.
func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	// Remove any stale socket left by a previous run; a missing file is fine.
	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	// Create the socket with all permission bits masked off so nobody can
	// connect before group ownership and mode are set below.
	mask := syscall.Umask(0777)
	defer syscall.Umask(mask)

	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
	if err != nil {
		return nil, err
	}

	// Hand the socket to the configured group before relaxing permissions.
	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
		return nil, err
	}

	if err := os.Chmod(addr, 0660); err != nil {
		return nil, err
	}

	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
}
+
// serveFd creates an http.Server and sets it up to serve given a socket activated
// argument. It blocks until one listener fails or all of them return.
func serveFd(addr string, job *engine.Job) error {
	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))

	ls, e := systemd.ListenFD(addr)
	if e != nil {
		return e
	}

	// One buffered slot per listener so serving goroutines never block on send.
	chErrors := make(chan error, len(ls))

	// We don't want to start serving on these sockets until the
	// daemon is initialized and installed. Otherwise required handlers
	// won't be ready.
	<-activationLock

	// Since ListenFD will return one or more sockets we have
	// to create a go func to spawn off multiple serves
	for i := range ls {
		// Capture the listener in a per-iteration variable for the closure.
		listener := ls[i]
		go func() {
			httpSrv := http.Server{Handler: r}
			chErrors <- httpSrv.Serve(listener)
		}()
	}

	// Wait for every listener; bail out on the first error seen.
	for i := 0; i < len(ls); i++ {
		err := <-chErrors
		if err != nil {
			return err
		}
	}

	return nil
}
+
+// Called through eng.Job("acceptconnections")
+func AcceptConnections(job *engine.Job) engine.Status {
+	// Tell the init daemon we are accepting requests
+	go systemd.SdNotify("READY=1")
+
+	// close the lock so the listeners start accepting connections
+	if activationLock != nil {
+		close(activationLock)
+	}
+
+	return engine.StatusOK
+}

+ 31 - 0
api/server/server_windows.go

@@ -0,0 +1,31 @@
+// +build windows
+
+package server
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/engine"
+)
+
+// NewServer sets up the required Server and does protocol specific checking.
+func NewServer(proto, addr string, job *engine.Job) (Server, error) {
+	// Basic error and sanity checking
+	switch proto {
+	case "tcp":
+		return setupTcpHttp(addr, job)
+	default:
+		return nil, errors.New("Invalid protocol format. Windows only supports tcp.")
+	}
+}
+
+// Called through eng.Job("acceptconnections")
+func AcceptConnections(job *engine.Job) engine.Status {
+
+	// close the lock so the listeners start accepting connections
+	if activationLock != nil {
+		close(activationLock)
+	}
+
+	return engine.StatusOK
+}

+ 1 - 1
api/stats/stats.go → api/types/stats.go

@@ -1,6 +1,6 @@
 // This package is used for API stability in the types and response to the
 // This package is used for API stability in the types and response to the
 // consumers of the API stats endpoint.
 // consumers of the API stats endpoint.
-package stats
+package types
 
 
 import "time"
 import "time"
 
 

+ 11 - 0
api/types/types.go

@@ -0,0 +1,11 @@
+package types
+
+// ContainerCreateResponse contains the information returned to a client on the
+// creation of a new container.
+type ContainerCreateResponse struct {
+	// ID is the ID of the created container.
+	ID string `json:"Id"`
+
+	// Warnings are any warnings encountered during the creation of the container.
+	Warnings []string `json:"Warnings"`
+}

+ 0 - 3
builder/MAINTAINERS

@@ -1,3 +0,0 @@
-Tibor Vass <teabee89@gmail.com> (@tiborvass)
-Erik Hollensbe <github@hollensbe.org> (@erikh)
-Doug Davis <dug@us.ibm.com> (@duglin)

+ 39 - 0
builder/command/command.go

@@ -0,0 +1,39 @@
+// This package contains the set of Dockerfile commands.
+package command
+
// Names of every instruction the Dockerfile parser understands, kept in
// alphabetical order. Each value is the lowercase instruction keyword.
const (
	Add        = "add"
	Cmd        = "cmd"
	Copy       = "copy"
	Entrypoint = "entrypoint"
	Env        = "env"
	Expose     = "expose"
	From       = "from"
	Insert     = "insert"
	Label      = "label"
	Maintainer = "maintainer"
	Onbuild    = "onbuild"
	Run        = "run"
	User       = "user"
	Volume     = "volume"
	Workdir    = "workdir"
)

// Commands is the set of all Dockerfile commands, used for membership tests.
var Commands = map[string]struct{}{
	Add:        {},
	Cmd:        {},
	Copy:       {},
	Entrypoint: {},
	Env:        {},
	Expose:     {},
	From:       {},
	Insert:     {},
	Label:      {},
	Maintainer: {},
	Onbuild:    {},
	Run:        {},
	User:       {},
	Volume:     {},
	Workdir:    {},
}

+ 59 - 8
builder/dispatchers.go

@@ -39,7 +39,7 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
 //
 //
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 	if len(args) == 0 {
-		return fmt.Errorf("ENV is missing arguments")
+		return fmt.Errorf("ENV requires at least one argument")
 	}
 	}
 
 
 	if len(args)%2 != 0 {
 	if len(args)%2 != 0 {
@@ -78,13 +78,44 @@ func env(b *Builder, args []string, attributes map[string]bool, original string)
 // Sets the maintainer metadata.
 // Sets the maintainer metadata.
 func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
 func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) != 1 {
 	if len(args) != 1 {
-		return fmt.Errorf("MAINTAINER requires only one argument")
+		return fmt.Errorf("MAINTAINER requires exactly one argument")
 	}
 	}
 
 
 	b.maintainer = args[0]
 	b.maintainer = args[0]
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
 }
 
 
+// LABEL some json data describing the image
+//
+// Sets the Label variable foo to bar,
+//
+func label(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) == 0 {
+		return fmt.Errorf("LABEL requires at least one argument")
+	}
+	if len(args)%2 != 0 {
+		// should never get here, but just in case
+		return fmt.Errorf("Bad input to LABEL, too many args")
+	}
+
+	commitStr := "LABEL"
+
+	if b.Config.Labels == nil {
+		b.Config.Labels = map[string]string{}
+	}
+
+	for j := 0; j < len(args); j++ {
+		// name  ==> args[j]
+		// value ==> args[j+1]
+		newVar := args[j] + "=" + args[j+1] + ""
+		commitStr += " " + newVar
+
+		b.Config.Labels[args[j]] = args[j+1]
+		j++
+	}
+	return b.commit("", b.Config.Cmd, commitStr)
+}
+
 // ADD foo /path
 // ADD foo /path
 //
 //
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
@@ -159,6 +190,10 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 // cases.
 // cases.
 //
 //
 func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error {
 func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if len(args) == 0 {
+		return fmt.Errorf("ONBUILD requires at least one argument")
+	}
+
 	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
 	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
 	switch triggerInstruction {
 	switch triggerInstruction {
 	case "ONBUILD":
 	case "ONBUILD":
@@ -209,8 +244,8 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
 
 
 	args = handleJsonArgs(args, attributes)
 	args = handleJsonArgs(args, attributes)
 
 
-	if len(args) == 1 {
-		args = append([]string{"/bin/sh", "-c"}, args[0])
+	if !attributes["json"] {
+		args = append([]string{"/bin/sh", "-c"}, args...)
 	}
 	}
 
 
 	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
 	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
@@ -272,7 +307,7 @@ func cmd(b *Builder, args []string, attributes map[string]bool, original string)
 		b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...)
 		b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...)
 	}
 	}
 
 
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", b.Config.Cmd)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", b.Config.Cmd)); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -312,7 +347,7 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original
 		b.Config.Cmd = nil
 		b.Config.Cmd = nil
 	}
 	}
 
 
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", b.Config.Entrypoint)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -327,15 +362,27 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original
 func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
 func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
 	portsTab := args
 	portsTab := args
 
 
+	if len(args) == 0 {
+		return fmt.Errorf("EXPOSE requires at least one argument")
+	}
+
 	if b.Config.ExposedPorts == nil {
 	if b.Config.ExposedPorts == nil {
 		b.Config.ExposedPorts = make(nat.PortSet)
 		b.Config.ExposedPorts = make(nat.PortSet)
 	}
 	}
 
 
-	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
+	ports, bindingMap, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
+	for _, bindings := range bindingMap {
+		if bindings[0].HostIp != "" || bindings[0].HostPort != "" {
+			fmt.Fprintf(b.ErrStream, " ---> Using Dockerfile's EXPOSE instruction"+
+				"      to map host ports to container ports (ip:hostPort:containerPort) is deprecated.\n"+
+				"      Please use -p to publish the ports.\n")
+		}
+	}
+
 	// instead of using ports directly, we build a list of ports and sort it so
 	// instead of using ports directly, we build a list of ports and sort it so
 	// the order is consistent. This prevents cache burst where map ordering
 	// the order is consistent. This prevents cache burst where map ordering
 	// changes between builds
 	// changes between builds
@@ -373,13 +420,17 @@ func user(b *Builder, args []string, attributes map[string]bool, original string
 //
 //
 func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
 func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
 	if len(args) == 0 {
 	if len(args) == 0 {
-		return fmt.Errorf("Volume cannot be empty")
+		return fmt.Errorf("VOLUME requires at least one argument")
 	}
 	}
 
 
 	if b.Config.Volumes == nil {
 	if b.Config.Volumes == nil {
 		b.Config.Volumes = map[string]struct{}{}
 		b.Config.Volumes = map[string]struct{}{}
 	}
 	}
 	for _, v := range args {
 	for _, v := range args {
+		v = strings.TrimSpace(v)
+		if v == "" {
+			return fmt.Errorf("Volume specified can not be an empty string")
+		}
 		b.Config.Volumes[v] = struct{}{}
 		b.Config.Volumes[v] = struct{}{}
 	}
 	}
 	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
 	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {

+ 74 - 29
builder/evaluator.go

@@ -28,9 +28,12 @@ import (
 	"strings"
 	"strings"
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/builder/command"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/pkg/tarsum"
@@ -45,33 +48,35 @@ var (
 
 
 // Environment variable interpolation will happen on these statements only.
 // Environment variable interpolation will happen on these statements only.
 var replaceEnvAllowed = map[string]struct{}{
 var replaceEnvAllowed = map[string]struct{}{
-	"env":     {},
-	"add":     {},
-	"copy":    {},
-	"workdir": {},
-	"expose":  {},
-	"volume":  {},
-	"user":    {},
+	command.Env:     {},
+	command.Label:   {},
+	command.Add:     {},
+	command.Copy:    {},
+	command.Workdir: {},
+	command.Expose:  {},
+	command.Volume:  {},
+	command.User:    {},
 }
 }
 
 
 var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error
 var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error
 
 
 func init() {
 func init() {
 	evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
 	evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
-		"env":        env,
-		"maintainer": maintainer,
-		"add":        add,
-		"copy":       dispatchCopy, // copy() is a go builtin
-		"from":       from,
-		"onbuild":    onbuild,
-		"workdir":    workdir,
-		"run":        run,
-		"cmd":        cmd,
-		"entrypoint": entrypoint,
-		"expose":     expose,
-		"volume":     volume,
-		"user":       user,
-		"insert":     insert,
+		command.Env:        env,
+		command.Label:      label,
+		command.Maintainer: maintainer,
+		command.Add:        add,
+		command.Copy:       dispatchCopy, // copy() is a go builtin
+		command.From:       from,
+		command.Onbuild:    onbuild,
+		command.Workdir:    workdir,
+		command.Run:        run,
+		command.Cmd:        cmd,
+		command.Entrypoint: entrypoint,
+		command.Expose:     expose,
+		command.Volume:     volume,
+		command.User:       user,
+		command.Insert:     insert,
 	}
 	}
 }
 }
 
 
@@ -88,12 +93,18 @@ type Builder struct {
 
 
 	Verbose      bool
 	Verbose      bool
 	UtilizeCache bool
 	UtilizeCache bool
+	cacheBusted  bool
 
 
 	// controls how images and containers are handled between steps.
 	// controls how images and containers are handled between steps.
 	Remove      bool
 	Remove      bool
 	ForceRemove bool
 	ForceRemove bool
 	Pull        bool
 	Pull        bool
 
 
+	// set this to true if we want the builder to not commit between steps.
+	// This is useful when we only want to use the evaluator table to generate
+	// the final configs of the Dockerfile but dont want the layers
+	disableCommit bool
+
 	AuthConfig     *registry.AuthConfig
 	AuthConfig     *registry.AuthConfig
 	AuthConfigFile *registry.ConfigFile
 	AuthConfigFile *registry.ConfigFile
 
 
@@ -114,6 +125,14 @@ type Builder struct {
 	context        tarsum.TarSum // the context is a tarball that is uploaded by the client
 	context        tarsum.TarSum // the context is a tarball that is uploaded by the client
 	contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
 	contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
 	noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.
 	noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.
+
+	// Set resource restrictions for build containers
+	cpuSetCpus string
+	cpuShares  int64
+	memory     int64
+	memorySwap int64
+
+	cancelled <-chan struct{} // When closed, job was cancelled.
 }
 }
 
 
 // Run the builder with the context. This is the lynchpin of this package. This
 // Run the builder with the context. This is the lynchpin of this package. This
@@ -139,38 +158,63 @@ func (b *Builder) Run(context io.Reader) (string, error) {
 		}
 		}
 	}()
 	}()
 
 
-	if err := b.readDockerfile(b.dockerfileName); err != nil {
+	if err := b.readDockerfile(); err != nil {
 		return "", err
 		return "", err
 	}
 	}
 
 
 	// some initializations that would not have been supplied by the caller.
 	// some initializations that would not have been supplied by the caller.
 	b.Config = &runconfig.Config{}
 	b.Config = &runconfig.Config{}
+
 	b.TmpContainers = map[string]struct{}{}
 	b.TmpContainers = map[string]struct{}{}
 
 
 	for i, n := range b.dockerfile.Children {
 	for i, n := range b.dockerfile.Children {
+		select {
+		case <-b.cancelled:
+			log.Debug("Builder: build cancelled!")
+			fmt.Fprintf(b.OutStream, "Build cancelled")
+			return "", fmt.Errorf("Build cancelled")
+		default:
+			// Not cancelled yet, keep going...
+		}
 		if err := b.dispatch(i, n); err != nil {
 		if err := b.dispatch(i, n); err != nil {
 			if b.ForceRemove {
 			if b.ForceRemove {
 				b.clearTmp()
 				b.clearTmp()
 			}
 			}
 			return "", err
 			return "", err
 		}
 		}
-		fmt.Fprintf(b.OutStream, " ---> %s\n", utils.TruncateID(b.image))
+		fmt.Fprintf(b.OutStream, " ---> %s\n", common.TruncateID(b.image))
 		if b.Remove {
 		if b.Remove {
 			b.clearTmp()
 			b.clearTmp()
 		}
 		}
 	}
 	}
 
 
 	if b.image == "" {
 	if b.image == "" {
-		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n")
+		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
 	}
 	}
 
 
-	fmt.Fprintf(b.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
+	fmt.Fprintf(b.OutStream, "Successfully built %s\n", common.TruncateID(b.image))
 	return b.image, nil
 	return b.image, nil
 }
 }
 
 
 // Reads a Dockerfile from the current context. It assumes that the
 // Reads a Dockerfile from the current context. It assumes that the
 // 'filename' is a relative path from the root of the context
 // 'filename' is a relative path from the root of the context
-func (b *Builder) readDockerfile(origFile string) error {
+func (b *Builder) readDockerfile() error {
+	// If no -f was specified then look for 'Dockerfile'. If we can't find
+	// that then look for 'dockerfile'.  If neither are found then default
+	// back to 'Dockerfile' and use that in the error message.
+	if b.dockerfileName == "" {
+		b.dockerfileName = api.DefaultDockerfileName
+		tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName)
+		if _, err := os.Lstat(tmpFN); err != nil {
+			tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName))
+			if _, err := os.Lstat(tmpFN); err == nil {
+				b.dockerfileName = strings.ToLower(api.DefaultDockerfileName)
+			}
+		}
+	}
+
+	origFile := b.dockerfileName
+
 	filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath)
 	filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath)
 	if err != nil {
 	if err != nil {
 		return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
 		return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
@@ -240,6 +284,9 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
 	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
 
 
 	if cmd == "onbuild" {
 	if cmd == "onbuild" {
+		if ast.Next == nil {
+			return fmt.Errorf("ONBUILD requires at least one argument")
+		}
 		ast = ast.Next.Children[0]
 		ast = ast.Next.Children[0]
 		strs = append(strs, ast.Value)
 		strs = append(strs, ast.Value)
 		msg += " " + ast.Value
 		msg += " " + ast.Value
@@ -281,7 +328,5 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
 		return f(b, strList, attrs, original)
 		return f(b, strList, attrs, original)
 	}
 	}
 
 
-	fmt.Fprintf(b.ErrStream, "# Skipping unknown instruction %s\n", strings.ToUpper(cmd))
-
-	return nil
+	return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
 }
 }

+ 73 - 32
builder/internals.go

@@ -25,13 +25,16 @@ import (
 	imagepkg "github.com/docker/docker/image"
 	imagepkg "github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
 )
 )
 
 
@@ -59,6 +62,9 @@ func (b *Builder) readContext(context io.Reader) error {
 }
 }
 
 
 func (b *Builder) commit(id string, autoCmd []string, comment string) error {
 func (b *Builder) commit(id string, autoCmd []string, comment string) error {
+	if b.disableCommit {
+		return nil
+	}
 	if b.image == "" && !b.noBaseImage {
 	if b.image == "" && !b.noBaseImage {
 		return fmt.Errorf("Please provide a source image with `from` prior to commit")
 		return fmt.Errorf("Please provide a source image with `from` prior to commit")
 	}
 	}
@@ -87,9 +93,9 @@ func (b *Builder) commit(id string, autoCmd []string, comment string) error {
 		}
 		}
 		defer container.Unmount()
 		defer container.Unmount()
 	}
 	}
-	container := b.Daemon.Get(id)
-	if container == nil {
-		return fmt.Errorf("An error occured while creating the container")
+	container, err := b.Daemon.Get(id)
+	if err != nil {
+		return err
 	}
 	}
 
 
 	// Note: Actually copy the struct
 	// Note: Actually copy the struct
@@ -183,8 +189,8 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	// If we do not have at least one hash, never use the cache
-	if hit && b.UtilizeCache {
+
+	if hit {
 		return nil
 		return nil
 	}
 	}
 
 
@@ -264,7 +270,15 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 		}
 		}
 
 
 		// Download and dump result to tmp file
 		// Download and dump result to tmp file
-		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
+		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
+			In:        resp.Body,
+			Out:       b.OutOld,
+			Formatter: b.StreamFormatter,
+			Size:      int(resp.ContentLength),
+			NewLines:  true,
+			ID:        "",
+			Action:    "Downloading",
+		})); err != nil {
 			tmpFile.Close()
 			tmpFile.Close()
 			return err
 			return err
 		}
 		}
@@ -498,19 +512,24 @@ func (b *Builder) processImageFrom(img *imagepkg.Image) error {
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // is any error, it returns `(false, err)`.
 // is any error, it returns `(false, err)`.
 func (b *Builder) probeCache() (bool, error) {
 func (b *Builder) probeCache() (bool, error) {
-	if b.UtilizeCache {
-		if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
-			return false, err
-		} else if cache != nil {
-			fmt.Fprintf(b.OutStream, " ---> Using cache\n")
-			log.Debugf("[BUILDER] Use cached version")
-			b.image = cache.ID
-			return true, nil
-		} else {
-			log.Debugf("[BUILDER] Cache miss")
-		}
-	}
-	return false, nil
+	if !b.UtilizeCache || b.cacheBusted {
+		return false, nil
+	}
+
+	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
+	if err != nil {
+		return false, err
+	}
+	if cache == nil {
+		log.Debugf("[BUILDER] Cache miss")
+		b.cacheBusted = true
+		return false, nil
+	}
+
+	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
+	log.Debugf("[BUILDER] Use cached version")
+	b.image = cache.ID
+	return true, nil
 }
 }
 
 
 func (b *Builder) create() (*daemon.Container, error) {
 func (b *Builder) create() (*daemon.Container, error) {
@@ -519,10 +538,17 @@ func (b *Builder) create() (*daemon.Container, error) {
 	}
 	}
 	b.Config.Image = b.image
 	b.Config.Image = b.image
 
 
+	hostConfig := &runconfig.HostConfig{
+		CpuShares:  b.cpuShares,
+		CpusetCpus: b.cpuSetCpus,
+		Memory:     b.memory,
+		MemorySwap: b.memorySwap,
+	}
+
 	config := *b.Config
 	config := *b.Config
 
 
 	// Create the container
 	// Create the container
-	c, warnings, err := b.Daemon.Create(b.Config, nil, "")
+	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -531,7 +557,7 @@ func (b *Builder) create() (*daemon.Container, error) {
 	}
 	}
 
 
 	b.TmpContainers[c.ID] = struct{}{}
 	b.TmpContainers[c.ID] = struct{}{}
-	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
+	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID))
 
 
 	if len(config.Cmd) > 0 {
 	if len(config.Cmd) > 0 {
 		// override the entry point that may have been picked up from the base image
 		// override the entry point that may have been picked up from the base image
@@ -545,19 +571,30 @@ func (b *Builder) create() (*daemon.Container, error) {
 }
 }
 
 
 func (b *Builder) run(c *daemon.Container) error {
 func (b *Builder) run(c *daemon.Container) error {
+	var errCh chan error
+	if b.Verbose {
+		errCh = b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, b.OutStream, b.ErrStream)
+	}
+
 	//start the container
 	//start the container
 	if err := c.Start(); err != nil {
 	if err := c.Start(); err != nil {
 		return err
 		return err
 	}
 	}
 
 
+	finished := make(chan struct{})
+	defer close(finished)
+	go func() {
+		select {
+		case <-b.cancelled:
+			log.Debugln("Build cancelled, killing container:", c.ID)
+			c.Kill()
+		case <-finished:
+		}
+	}()
+
 	if b.Verbose {
 	if b.Verbose {
-		logsJob := b.Engine.Job("logs", c.ID)
-		logsJob.Setenv("follow", "1")
-		logsJob.Setenv("stdout", "1")
-		logsJob.Setenv("stderr", "1")
-		logsJob.Stdout.Add(b.OutStream)
-		logsJob.Stderr.Set(b.ErrStream)
-		if err := logsJob.Run(); err != nil {
+		// Block on reading output from container, stop on err or chan closed
+		if err := <-errCh; err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}
@@ -710,13 +747,17 @@ func fixPermissions(source, destination string, uid, gid int, destExisted bool)
 
 
 func (b *Builder) clearTmp() {
 func (b *Builder) clearTmp() {
 	for c := range b.TmpContainers {
 	for c := range b.TmpContainers {
-		tmp := b.Daemon.Get(c)
-		if err := b.Daemon.Destroy(tmp); err != nil {
-			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
+		tmp, err := b.Daemon.Get(c)
+		if err != nil {
+			fmt.Fprint(b.OutStream, err.Error())
+		}
+
+		if err := b.Daemon.Rm(tmp); err != nil {
+			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", common.TruncateID(c), err)
 			return
 			return
 		}
 		}
 		b.Daemon.DeleteVolumes(tmp.VolumePaths())
 		b.Daemon.DeleteVolumes(tmp.VolumePaths())
 		delete(b.TmpContainers, c)
 		delete(b.TmpContainers, c)
-		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
+		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c))
 	}
 	}
 }
 }

+ 79 - 4
builder/job.go

@@ -1,12 +1,16 @@
 package builder
 package builder
 
 
 import (
 import (
+	"bytes"
+	"encoding/json"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
+	"strings"
 
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/graph"
@@ -14,9 +18,22 @@ import (
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
 )
 )
 
 
+// whitelist of commands allowed for a commit/import
+var validCommitCommands = map[string]bool{
+	"entrypoint": true,
+	"cmd":        true,
+	"user":       true,
+	"workdir":    true,
+	"env":        true,
+	"volume":     true,
+	"expose":     true,
+	"onbuild":    true,
+}
+
 type BuilderJob struct {
 type BuilderJob struct {
 	Engine *engine.Engine
 	Engine *engine.Engine
 	Daemon *daemon.Daemon
 	Daemon *daemon.Daemon
@@ -24,6 +41,7 @@ type BuilderJob struct {
 
 
 func (b *BuilderJob) Install() {
 func (b *BuilderJob) Install() {
 	b.Engine.Register("build", b.CmdBuild)
 	b.Engine.Register("build", b.CmdBuild)
+	b.Engine.Register("build_config", b.CmdBuildConfig)
 }
 }
 
 
 func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
@@ -39,6 +57,10 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		rm             = job.GetenvBool("rm")
 		rm             = job.GetenvBool("rm")
 		forceRm        = job.GetenvBool("forcerm")
 		forceRm        = job.GetenvBool("forcerm")
 		pull           = job.GetenvBool("pull")
 		pull           = job.GetenvBool("pull")
+		memory         = job.GetenvInt64("memory")
+		memorySwap     = job.GetenvInt64("memswap")
+		cpuShares      = job.GetenvInt64("cpushares")
+		cpuSetCpus     = job.Getenv("cpusetcpus")
 		authConfig     = &registry.AuthConfig{}
 		authConfig     = &registry.AuthConfig{}
 		configFile     = &registry.ConfigFile{}
 		configFile     = &registry.ConfigFile{}
 		tag            string
 		tag            string
@@ -60,10 +82,6 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		}
 		}
 	}
 	}
 
 
-	if dockerfileName == "" {
-		dockerfileName = api.DefaultDockerfileName
-	}
-
 	if remoteURL == "" {
 	if remoteURL == "" {
 		context = ioutil.NopCloser(job.Stdin)
 		context = ioutil.NopCloser(job.Stdin)
 	} else if urlutil.IsGitURL(remoteURL) {
 	} else if urlutil.IsGitURL(remoteURL) {
@@ -95,6 +113,11 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		if err != nil {
 		if err != nil {
 			return job.Error(err)
 			return job.Error(err)
 		}
 		}
+
+		// When we're downloading just a Dockerfile put it in
+		// the default name - don't allow the client to move/specify it
+		dockerfileName = api.DefaultDockerfileName
+
 		c, err := archive.Generate(dockerfileName, string(dockerFile))
 		c, err := archive.Generate(dockerfileName, string(dockerFile))
 		if err != nil {
 		if err != nil {
 			return job.Error(err)
 			return job.Error(err)
@@ -126,6 +149,11 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		AuthConfig:      authConfig,
 		AuthConfig:      authConfig,
 		AuthConfigFile:  configFile,
 		AuthConfigFile:  configFile,
 		dockerfileName:  dockerfileName,
 		dockerfileName:  dockerfileName,
+		cpuShares:       cpuShares,
+		cpuSetCpus:      cpuSetCpus,
+		memory:          memory,
+		memorySwap:      memorySwap,
+		cancelled:       job.WaitCancelled(),
 	}
 	}
 
 
 	id, err := builder.Run(context)
 	id, err := builder.Run(context)
@@ -138,3 +166,50 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 	}
 	}
 	return engine.StatusOK
 	return engine.StatusOK
 }
 }
+
+func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
+	if len(job.Args) != 0 {
+		return job.Errorf("Usage: %s\n", job.Name)
+	}
+
+	var (
+		changes   = job.GetenvList("changes")
+		newConfig runconfig.Config
+	)
+
+	if err := job.GetenvJson("config", &newConfig); err != nil {
+		return job.Error(err)
+	}
+
+	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
+	if err != nil {
+		return job.Error(err)
+	}
+
+	// ensure that the commands are valid
+	for _, n := range ast.Children {
+		if !validCommitCommands[n.Value] {
+			return job.Errorf("%s is not a valid change command", n.Value)
+		}
+	}
+
+	builder := &Builder{
+		Daemon:        b.Daemon,
+		Engine:        b.Engine,
+		Config:        &newConfig,
+		OutStream:     ioutil.Discard,
+		ErrStream:     ioutil.Discard,
+		disableCommit: true,
+	}
+
+	for i, n := range ast.Children {
+		if err := builder.dispatch(i, n); err != nil {
+			return job.Error(err)
+		}
+	}
+
+	if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}

+ 28 - 9
builder/parser/line_parsers.go

@@ -30,6 +30,10 @@ func parseIgnore(rest string) (*Node, map[string]bool, error) {
 // ONBUILD RUN foo bar -> (onbuild (run foo bar))
 // ONBUILD RUN foo bar -> (onbuild (run foo bar))
 //
 //
 func parseSubCommand(rest string) (*Node, map[string]bool, error) {
 func parseSubCommand(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
 	_, child, err := parseLine(rest)
 	_, child, err := parseLine(rest)
 	if err != nil {
 	if err != nil {
 		return nil, nil, err
 		return nil, nil, err
@@ -40,10 +44,10 @@ func parseSubCommand(rest string) (*Node, map[string]bool, error) {
 
 
 // parse environment like statements. Note that this does *not* handle
 // parse environment like statements. Note that this does *not* handle
 // variable interpolation, which will be handled in the evaluator.
 // variable interpolation, which will be handled in the evaluator.
-func parseEnv(rest string) (*Node, map[string]bool, error) {
+func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
 	// This is kind of tricky because we need to support the old
 	// This is kind of tricky because we need to support the old
-	// variant:   ENV name value
-	// as well as the new one:    ENV name=value ...
+	// variant:   KEY name value
+	// as well as the new one:    KEY name=value ...
 	// The trigger to know which one is being used will be whether we hit
 	// The trigger to know which one is being used will be whether we hit
 	// a space or = first.  space ==> old, "=" ==> new
 	// a space or = first.  space ==> old, "=" ==> new
 
 
@@ -133,10 +137,10 @@ func parseEnv(rest string) (*Node, map[string]bool, error) {
 	}
 	}
 
 
 	if len(words) == 0 {
 	if len(words) == 0 {
-		return nil, nil, fmt.Errorf("ENV must have some arguments")
+		return nil, nil, nil
 	}
 	}
 
 
-	// Old format (ENV name value)
+	// Old format (KEY name value)
 	var rootnode *Node
 	var rootnode *Node
 
 
 	if !strings.Contains(words[0], "=") {
 	if !strings.Contains(words[0], "=") {
@@ -145,7 +149,7 @@ func parseEnv(rest string) (*Node, map[string]bool, error) {
 		strs := TOKEN_WHITESPACE.Split(rest, 2)
 		strs := TOKEN_WHITESPACE.Split(rest, 2)
 
 
 		if len(strs) < 2 {
 		if len(strs) < 2 {
-			return nil, nil, fmt.Errorf("ENV must have two arguments")
+			return nil, nil, fmt.Errorf(key + " must have two arguments")
 		}
 		}
 
 
 		node.Value = strs[0]
 		node.Value = strs[0]
@@ -178,9 +182,21 @@ func parseEnv(rest string) (*Node, map[string]bool, error) {
 	return rootnode, nil, nil
 	return rootnode, nil, nil
 }
 }
 
 
+func parseEnv(rest string) (*Node, map[string]bool, error) {
+	return parseNameVal(rest, "ENV")
+}
+
+func parseLabel(rest string) (*Node, map[string]bool, error) {
+	return parseNameVal(rest, "LABEL")
+}
+
 // parses a whitespace-delimited set of arguments. The result is effectively a
 // parses a whitespace-delimited set of arguments. The result is effectively a
 // linked list of string arguments.
 // linked list of string arguments.
 func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
 func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
+
 	node := &Node{}
 	node := &Node{}
 	rootnode := node
 	rootnode := node
 	prevnode := node
 	prevnode := node
@@ -201,6 +217,9 @@ func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error
 
 
 // parsestring just wraps the string in quotes and returns a working node.
 // parsestring just wraps the string in quotes and returns a working node.
 func parseString(rest string) (*Node, map[string]bool, error) {
 func parseString(rest string) (*Node, map[string]bool, error) {
+	if rest == "" {
+		return nil, nil, nil
+	}
 	n := &Node{}
 	n := &Node{}
 	n.Value = rest
 	n.Value = rest
 	return n, nil, nil
 	return n, nil, nil
@@ -235,7 +254,9 @@ func parseJSON(rest string) (*Node, map[string]bool, error) {
 // so, passes to parseJSON; if not, quotes the result and returns a single
 // so, passes to parseJSON; if not, quotes the result and returns a single
 // node.
 // node.
 func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
 func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
-	rest = strings.TrimSpace(rest)
+	if rest == "" {
+		return nil, nil, nil
+	}
 
 
 	node, attrs, err := parseJSON(rest)
 	node, attrs, err := parseJSON(rest)
 
 
@@ -255,8 +276,6 @@ func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
 // so, passes to parseJSON; if not, attmpts to parse it as a whitespace
 // so, passes to parseJSON; if not, attmpts to parse it as a whitespace
 // delimited string.
 // delimited string.
 func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
 func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
-	rest = strings.TrimSpace(rest)
-
 	node, attrs, err := parseJSON(rest)
 	node, attrs, err := parseJSON(rest)
 
 
 	if err == nil {
 	if err == nil {

+ 18 - 20
builder/parser/parser.go

@@ -3,11 +3,12 @@ package parser
 
 
 import (
 import (
 	"bufio"
 	"bufio"
-	"fmt"
 	"io"
 	"io"
 	"regexp"
 	"regexp"
 	"strings"
 	"strings"
 	"unicode"
 	"unicode"
+
+	"github.com/docker/docker/builder/command"
 )
 )
 
 
 // Node is a structure used to represent a parse tree.
 // Node is a structure used to represent a parse tree.
@@ -42,23 +43,24 @@ func init() {
 	// The command is parsed and mapped to the line parser. The line parser
 	// The command is parsed and mapped to the line parser. The line parser
 	// recieves the arguments but not the command, and returns an AST after
 	// recieves the arguments but not the command, and returns an AST after
 	// reformulating the arguments according to the rules in the parser
 	// reformulating the arguments according to the rules in the parser
-	// functions. Errors are propogated up by Parse() and the resulting AST can
+	// functions. Errors are propagated up by Parse() and the resulting AST can
 	// be incorporated directly into the existing AST as a next.
 	// be incorporated directly into the existing AST as a next.
 	dispatch = map[string]func(string) (*Node, map[string]bool, error){
 	dispatch = map[string]func(string) (*Node, map[string]bool, error){
-		"user":       parseString,
-		"onbuild":    parseSubCommand,
-		"workdir":    parseString,
-		"env":        parseEnv,
-		"maintainer": parseString,
-		"from":       parseString,
-		"add":        parseMaybeJSONToList,
-		"copy":       parseMaybeJSONToList,
-		"run":        parseMaybeJSON,
-		"cmd":        parseMaybeJSON,
-		"entrypoint": parseMaybeJSON,
-		"expose":     parseStringsWhitespaceDelimited,
-		"volume":     parseMaybeJSONToList,
-		"insert":     parseIgnore,
+		command.User:       parseString,
+		command.Onbuild:    parseSubCommand,
+		command.Workdir:    parseString,
+		command.Env:        parseEnv,
+		command.Label:      parseLabel,
+		command.Maintainer: parseString,
+		command.From:       parseString,
+		command.Add:        parseMaybeJSONToList,
+		command.Copy:       parseMaybeJSONToList,
+		command.Run:        parseMaybeJSON,
+		command.Cmd:        parseMaybeJSON,
+		command.Entrypoint: parseMaybeJSON,
+		command.Expose:     parseStringsWhitespaceDelimited,
+		command.Volume:     parseMaybeJSONToList,
+		command.Insert:     parseIgnore,
 	}
 	}
 }
 }
 
 
@@ -78,10 +80,6 @@ func parseLine(line string) (string, *Node, error) {
 		return "", nil, err
 		return "", nil, err
 	}
 	}
 
 
-	if len(args) == 0 {
-		return "", nil, fmt.Errorf("Instruction %q is empty; cannot continue", cmd)
-	}
-
 	node := &Node{}
 	node := &Node{}
 	node.Value = cmd
 	node.Value = cmd
 
 

+ 11 - 11
builder/parser/parser_test.go

@@ -11,7 +11,7 @@ import (
 const testDir = "testfiles"
 const testDir = "testfiles"
 const negativeTestDir = "testfiles-negative"
 const negativeTestDir = "testfiles-negative"
 
 
-func getDirs(t *testing.T, dir string) []os.FileInfo {
+func getDirs(t *testing.T, dir string) []string {
 	f, err := os.Open(dir)
 	f, err := os.Open(dir)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -19,7 +19,7 @@ func getDirs(t *testing.T, dir string) []os.FileInfo {
 
 
 	defer f.Close()
 	defer f.Close()
 
 
-	dirs, err := f.Readdir(0)
+	dirs, err := f.Readdirnames(0)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -29,16 +29,16 @@ func getDirs(t *testing.T, dir string) []os.FileInfo {
 
 
 func TestTestNegative(t *testing.T) {
 func TestTestNegative(t *testing.T) {
 	for _, dir := range getDirs(t, negativeTestDir) {
 	for _, dir := range getDirs(t, negativeTestDir) {
-		dockerfile := filepath.Join(negativeTestDir, dir.Name(), "Dockerfile")
+		dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile")
 
 
 		df, err := os.Open(dockerfile)
 		df, err := os.Open(dockerfile)
 		if err != nil {
 		if err != nil {
-			t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
+			t.Fatalf("Dockerfile missing for %s: %v", dir, err)
 		}
 		}
 
 
 		_, err = Parse(df)
 		_, err = Parse(df)
 		if err == nil {
 		if err == nil {
-			t.Fatalf("No error parsing broken dockerfile for %s", dir.Name())
+			t.Fatalf("No error parsing broken dockerfile for %s", dir)
 		}
 		}
 
 
 		df.Close()
 		df.Close()
@@ -47,29 +47,29 @@ func TestTestNegative(t *testing.T) {
 
 
 func TestTestData(t *testing.T) {
 func TestTestData(t *testing.T) {
 	for _, dir := range getDirs(t, testDir) {
 	for _, dir := range getDirs(t, testDir) {
-		dockerfile := filepath.Join(testDir, dir.Name(), "Dockerfile")
-		resultfile := filepath.Join(testDir, dir.Name(), "result")
+		dockerfile := filepath.Join(testDir, dir, "Dockerfile")
+		resultfile := filepath.Join(testDir, dir, "result")
 
 
 		df, err := os.Open(dockerfile)
 		df, err := os.Open(dockerfile)
 		if err != nil {
 		if err != nil {
-			t.Fatalf("Dockerfile missing for %s: %s", dir.Name(), err.Error())
+			t.Fatalf("Dockerfile missing for %s: %v", dir, err)
 		}
 		}
 		defer df.Close()
 		defer df.Close()
 
 
 		ast, err := Parse(df)
 		ast, err := Parse(df)
 		if err != nil {
 		if err != nil {
-			t.Fatalf("Error parsing %s's dockerfile: %s", dir.Name(), err.Error())
+			t.Fatalf("Error parsing %s's dockerfile: %v", dir, err)
 		}
 		}
 
 
 		content, err := ioutil.ReadFile(resultfile)
 		content, err := ioutil.ReadFile(resultfile)
 		if err != nil {
 		if err != nil {
-			t.Fatalf("Error reading %s's result file: %s", dir.Name(), err.Error())
+			t.Fatalf("Error reading %s's result file: %v", dir, err)
 		}
 		}
 
 
 		if ast.Dump()+"\n" != string(content) {
 		if ast.Dump()+"\n" != string(content) {
 			fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump())
 			fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump())
 			fmt.Fprintln(os.Stderr, "Expected:\n"+string(content))
 			fmt.Fprintln(os.Stderr, "Expected:\n"+string(content))
-			t.Fatalf("%s: AST dump of dockerfile does not match result", dir.Name())
+			t.Fatalf("%s: AST dump of dockerfile does not match result", dir)
 		}
 		}
 	}
 	}
 }
 }

+ 0 - 8
builder/parser/testfiles-negative/empty-instruction/Dockerfile

@@ -1,8 +0,0 @@
-FROM dockerfile/rabbitmq
-
-RUN
-  rabbitmq-plugins enable \
-   rabbitmq_shovel \
-   rabbitmq_shovel_management \
-   rabbitmq_federation \
-   rabbitmq_federation_management

+ 0 - 2
builder/parser/testfiles-negative/html-page-yes-really-thanks-lk4d4/Dockerfile

@@ -1,2 +0,0 @@
-<html>
-</html>

+ 8 - 6
builder/parser/utils.go

@@ -1,7 +1,6 @@
 package parser
 package parser
 
 
 import (
 import (
-	"fmt"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 )
 )
@@ -50,16 +49,19 @@ func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
 // splitCommand takes a single line of text and parses out the cmd and args,
 // splitCommand takes a single line of text and parses out the cmd and args,
 // which are used for dispatching to more exact parsing functions.
 // which are used for dispatching to more exact parsing functions.
 func splitCommand(line string) (string, string, error) {
 func splitCommand(line string) (string, string, error) {
-	cmdline := TOKEN_WHITESPACE.Split(line, 2)
+	var args string
 
 
-	if len(cmdline) != 2 {
-		return "", "", fmt.Errorf("We do not understand this file. Please ensure it is a valid Dockerfile. Parser error at %q", line)
+	// Make sure we get the same results irrespective of leading/trailing spaces
+	cmdline := TOKEN_WHITESPACE.Split(strings.TrimSpace(line), 2)
+	cmd := strings.ToLower(cmdline[0])
+
+	if len(cmdline) == 2 {
+		args = strings.TrimSpace(cmdline[1])
 	}
 	}
 
 
-	cmd := strings.ToLower(cmdline[0])
 	// the cmd should never have whitespace, but it's possible for the args to
 	// the cmd should never have whitespace, but it's possible for the args to
 	// have trailing whitespace.
 	// have trailing whitespace.
-	return cmd, strings.TrimSpace(cmdline[1]), nil
+	return cmd, args, nil
 }
 }
 
 
 // covers comments and empty lines. Lines should be trimmed before passing to
 // covers comments and empty lines. Lines should be trimmed before passing to

+ 1 - 1
builtins/builtins.go

@@ -5,8 +5,8 @@ import (
 
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
 	apiserver "github.com/docker/docker/api/server"
 	apiserver "github.com/docker/docker/api/server"
+	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/daemon/networkdriver/bridge"
 	"github.com/docker/docker/daemon/networkdriver/bridge"
-	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/events"
 	"github.com/docker/docker/events"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/parsers/kernel"

+ 0 - 0
contrib/MAINTAINERS → contrib/REVIEWERS


+ 9 - 4
contrib/check-config.sh

@@ -10,7 +10,12 @@ possibleConfigs=(
 	"/usr/src/linux-$(uname -r)/.config"
 	"/usr/src/linux-$(uname -r)/.config"
 	'/usr/src/linux/.config'
 	'/usr/src/linux/.config'
 )
 )
-: ${CONFIG:="${possibleConfigs[0]}"}
+
+if [ $# -gt 0 ]; then
+	CONFIG="$1"
+else
+	: ${CONFIG:="${possibleConfigs[0]}"}
+fi
 
 
 if ! command -v zgrep &> /dev/null; then
 if ! command -v zgrep &> /dev/null; then
 	zgrep() {
 	zgrep() {
@@ -89,7 +94,7 @@ if [ ! -e "$CONFIG" ]; then
 	if [ ! -e "$CONFIG" ]; then
 	if [ ! -e "$CONFIG" ]; then
 		wrap_warning "error: cannot find kernel config"
 		wrap_warning "error: cannot find kernel config"
 		wrap_warning "  try running this script again, specifying the kernel config:"
 		wrap_warning "  try running this script again, specifying the kernel config:"
-		wrap_warning "    CONFIG=/path/to/kernel/.config $0"
+		wrap_warning "    CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
 		exit 1
 		exit 1
 	fi
 	fi
 fi
 fi
@@ -133,7 +138,7 @@ fi
 flags=(
 flags=(
 	NAMESPACES {NET,PID,IPC,UTS}_NS
 	NAMESPACES {NET,PID,IPC,UTS}_NS
 	DEVPTS_MULTIPLE_INSTANCES
 	DEVPTS_MULTIPLE_INSTANCES
-	CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED
+	CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS
 	MACVLAN VETH BRIDGE
 	MACVLAN VETH BRIDGE
 	NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
@@ -169,7 +174,7 @@ echo '- Storage Drivers:'
 	check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/  /'
 	check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/  /'
 
 
 	echo '- "'$(wrap_color 'overlay' blue)'":'
 	echo '- "'$(wrap_color 'overlay' blue)'":'
-	check_flags OVERLAY_FS | sed 's/^/  /'
+	check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/  /'
 } | sed 's/^/  /'
 } | sed 's/^/  /'
 echo
 echo
 
 

+ 0 - 0
contrib/completion/MAINTAINERS → contrib/completion/REVIEWERS


+ 301 - 96
contrib/completion/bash/docker

@@ -131,6 +131,7 @@ __docker_capabilities() {
 		ALL
 		ALL
 		AUDIT_CONTROL
 		AUDIT_CONTROL
 		AUDIT_WRITE
 		AUDIT_WRITE
+		AUDIT_READ
 		BLOCK_SUSPEND
 		BLOCK_SUSPEND
 		CHOWN
 		CHOWN
 		DAC_OVERRIDE
 		DAC_OVERRIDE
@@ -169,9 +170,25 @@ __docker_capabilities() {
 	" -- "$cur" ) )
 	" -- "$cur" ) )
 }
 }
 
 
+# a selection of the available signals that is most likely of interest in the
+# context of docker containers.
+__docker_signals() {
+	local signals=(
+		SIGCONT
+		SIGHUP
+		SIGINT
+		SIGKILL
+		SIGQUIT
+		SIGSTOP
+		SIGTERM
+		SIGUSR1
+		SIGUSR2
+	)
+	COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) )
+}
+
 _docker_docker() {
 _docker_docker() {
 	local boolean_options="
 	local boolean_options="
-		--api-enable-cors
 		--daemon -d
 		--daemon -d
 		--debug -D
 		--debug -D
 		--help -h
 		--help -h
@@ -221,7 +238,7 @@ _docker_docker() {
 _docker_attach() {
 _docker_attach() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --no-stdin --sig-proxy" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter="$(__docker_pos_first_nonflag)"
 			local counter="$(__docker_pos_first_nonflag)"
@@ -238,11 +255,15 @@ _docker_build() {
 			__docker_image_repos_and_tags
 			__docker_image_repos_and_tags
 			return
 			return
 			;;
 			;;
+		--file|-f)
+			_filedir
+			return	
+			;;	
 	esac
 	esac
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--force-rm --no-cache --quiet -q --rm --tag -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--file -f --force-rm --help --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter="$(__docker_pos_first_nonflag '--tag|-t')"
 			local counter="$(__docker_pos_first_nonflag '--tag|-t')"
@@ -255,17 +276,17 @@ _docker_build() {
 
 
 _docker_commit() {
 _docker_commit() {
 	case "$prev" in
 	case "$prev" in
-		--author|-a|--message|-m|--run)
+		--author|-a|--change|-c|--message|-m)
 			return
 			return
 			;;
 			;;
 	esac
 	esac
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--author -a --message -m --run" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause -p" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
-			local counter=$(__docker_pos_first_nonflag '--author|-a|--message|-m|--run')
+			local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m')
 
 
 			if [ $cword -eq $counter ]; then
 			if [ $cword -eq $counter ]; then
 				__docker_containers_all
 				__docker_containers_all
@@ -282,26 +303,33 @@ _docker_commit() {
 }
 }
 
 
 _docker_cp() {
 _docker_cp() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		case "$cur" in
-			*:)
-				return
-				;;
-			*)
-				__docker_containers_all
-				COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
-				compopt -o nospace
-				return
-				;;
-		esac
-	fi
-	(( counter++ ))
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				case "$cur" in
+					*:)
+						return
+						;;
+					*)
+						__docker_containers_all
+						COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+						compopt -o nospace
+						return
+						;;
+				esac
+			fi
+			(( counter++ ))
 
 
-	if [ $cword -eq $counter ]; then
-		_filedir
-		return
-	fi
+			if [ $cword -eq $counter ]; then
+				_filedir
+				return
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_create() {
 _docker_create() {
@@ -309,22 +337,53 @@ _docker_create() {
 }
 }
 
 
 _docker_diff() {
 _docker_diff() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_containers_all
-	fi
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_all
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_events() {
 _docker_events() {
 	case "$prev" in
 	case "$prev" in
-		--since)
+		--filter|-f)
+			COMPREPLY=( $( compgen -S = -W "container event image" -- "$cur" ) )
+			compopt -o nospace
+			return
+			;;
+		--since|--until)
+			return
+			;;
+	esac
+
+	# "=" gets parsed to a word and assigned to either $cur or $prev depending on whether
+	# it is the last character or not. So we search for "xxx=" in the the last two words.
+	case "${words[$cword-2]}$prev=" in
+		*container=*)
+			cur="${cur#=}"
+			__docker_containers_all
+			return
+			;;
+		*event=*)
+			COMPREPLY=( $( compgen -W "create destroy die export kill pause restart start stop unpause" -- "${cur#=}" ) )
+			return
+			;;
+		*image=*)
+			cur="${cur#=}"
+			__docker_image_repos_and_tags_and_ids
 			return
 			return
 			;;
 			;;
 	esac
 	esac
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--since" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--filter -f --help --since --until" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
@@ -332,7 +391,7 @@ _docker_events() {
 _docker_exec() {
 _docker_exec() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--detach -d --interactive -i -t --tty" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_running
 			__docker_containers_running
@@ -341,10 +400,17 @@ _docker_exec() {
 }
 }
 
 
 _docker_export() {
 _docker_export() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_containers_all
-	fi
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_all
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_help() {
 _docker_help() {
@@ -357,7 +423,7 @@ _docker_help() {
 _docker_history() {
 _docker_history() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--no-trunc --quiet -q" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --no-trunc --quiet -q" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter=$(__docker_pos_first_nonflag)
 			local counter=$(__docker_pos_first_nonflag)
@@ -369,9 +435,23 @@ _docker_history() {
 }
 }
 
 
 _docker_images() {
 _docker_images() {
+	case "$prev" in
+		--filter|-f)
+			COMPREPLY=( $( compgen -W "dangling=true" -- "$cur" ) )
+			return
+			;;
+	esac
+
+	case "${words[$cword-2]}$prev=" in
+		*dangling=*)
+			COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) )
+			return
+			;;
+	esac
+
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--all -a --no-trunc --quiet -q" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-trunc --quiet -q" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter=$(__docker_pos_first_nonflag)
 			local counter=$(__docker_pos_first_nonflag)
@@ -383,20 +463,31 @@ _docker_images() {
 }
 }
 
 
 _docker_import() {
 _docker_import() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		return
-	fi
-	(( counter++ ))
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				return
+			fi
+			(( counter++ ))
 
 
-	if [ $cword -eq $counter ]; then
-		__docker_image_repos_and_tags
-		return
-	fi
+			if [ $cword -eq $counter ]; then
+				__docker_image_repos_and_tags
+				return
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_info() {
 _docker_info() {
-	return
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+	esac
 }
 }
 
 
 _docker_inspect() {
 _docker_inspect() {
@@ -408,7 +499,7 @@ _docker_inspect() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--format -f" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_and_images
 			__docker_containers_and_images
@@ -417,7 +508,21 @@ _docker_inspect() {
 }
 }
 
 
 _docker_kill() {
 _docker_kill() {
-	__docker_containers_running
+	case "$prev" in
+		--signal|-s)
+			__docker_signals
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers_running
+			;;
+	esac
 }
 }
 
 
 _docker_load() {
 _docker_load() {
@@ -430,7 +535,7 @@ _docker_load() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--input -i" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --input -i" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
@@ -444,18 +549,32 @@ _docker_login() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--email -e --password -p --username -u" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--email -e --help --password -p --username -u" -- "$cur" ) )
+			;;
+	esac
+}
+
+_docker_logout() {
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
 
 
 _docker_logs() {
 _docker_logs() {
+	case "$prev" in
+		--tail)
+			return
+			;;
+	esac
+
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--follow -f" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--follow -f --help --tail --timestamps -t" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
-			local counter=$(__docker_pos_first_nonflag)
+			local counter=$(__docker_pos_first_nonflag '--tail')
 			if [ $cword -eq $counter ]; then
 			if [ $cword -eq $counter ]; then
 				__docker_containers_all
 				__docker_containers_all
 			fi
 			fi
@@ -464,17 +583,31 @@ _docker_logs() {
 }
 }
 
 
 _docker_pause() {
 _docker_pause() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_containers_pauseable
-	fi
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_pauseable
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_port() {
 _docker_port() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_containers_all
-	fi
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_all
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_ps() {
 _docker_ps() {
@@ -482,31 +615,51 @@ _docker_ps() {
 		--before|--since)
 		--before|--since)
 			__docker_containers_all
 			__docker_containers_all
 			;;
 			;;
+		--filter|-f)
+			COMPREPLY=( $( compgen -S = -W "exited status" -- "$cur" ) )
+			compopt -o nospace
+			return
+			;;
 		-n)
 		-n)
 			return
 			return
 			;;
 			;;
 	esac
 	esac
 
 
+	case "${words[$cword-2]}$prev=" in
+		*status=*)
+			COMPREPLY=( $( compgen -W "exited paused restarting running" -- "${cur#=}" ) )
+			return
+			;;
+	esac
+
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--all -a --before --latest -l --no-trunc -n --quiet -q --size -s --since" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --before --filter -f --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
 
 
 _docker_pull() {
 _docker_pull() {
-	case "$prev" in
-		--tag|-t)
-			return
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--all-tags -a --help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_image_repos_and_tags
+			fi
 			;;
 			;;
 	esac
 	esac
+}
 
 
+_docker_push() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--tag -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
-			local counter=$(__docker_pos_first_nonflag '--tag|-t')
+			local counter=$(__docker_pos_first_nonflag)
 			if [ $cword -eq $counter ]; then
 			if [ $cword -eq $counter ]; then
 				__docker_image_repos_and_tags
 				__docker_image_repos_and_tags
 			fi
 			fi
@@ -514,11 +667,18 @@ _docker_pull() {
 	esac
 	esac
 }
 }
 
 
-_docker_push() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_image_repos_and_tags
-	fi
+_docker_rename() {
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_all
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_restart() {
 _docker_restart() {
@@ -530,7 +690,7 @@ _docker_restart() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--time -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_all
 			__docker_containers_all
@@ -541,8 +701,7 @@ _docker_restart() {
 _docker_rm() {
 _docker_rm() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--force -f --link -l --volumes -v" -- "$cur" ) )
-			return
+			COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			for arg in "${COMP_WORDS[@]}"; do
 			for arg in "${COMP_WORDS[@]}"; do
@@ -554,13 +713,19 @@ _docker_rm() {
 				esac
 				esac
 			done
 			done
 			__docker_containers_stopped
 			__docker_containers_stopped
-			return
 			;;
 			;;
 	esac
 	esac
 }
 }
 
 
 _docker_rmi() {
 _docker_rmi() {
-	__docker_image_repos_and_tags_and_ids
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) )
+			;;
+		*)
+			__docker_image_repos_and_tags_and_ids
+			;;
+	esac
 }
 }
 
 
 _docker_run() {
 _docker_run() {
@@ -585,21 +750,26 @@ _docker_run() {
 		--lxc-conf
 		--lxc-conf
 		--mac-address
 		--mac-address
 		--memory -m
 		--memory -m
+		--memory-swap
 		--name
 		--name
 		--net
 		--net
+		--pid
 		--publish -p
 		--publish -p
 		--restart
 		--restart
 		--security-opt
 		--security-opt
 		--user -u
 		--user -u
+		--ulimit
 		--volumes-from
 		--volumes-from
 		--volume -v
 		--volume -v
 		--workdir -w
 		--workdir -w
 	"
 	"
 
 
 	local all_options="$options_with_args
 	local all_options="$options_with_args
+		--help
 		--interactive -i
 		--interactive -i
 		--privileged
 		--privileged
 		--publish-all -P
 		--publish-all -P
+		--read-only
 		--tty -t
 		--tty -t
 	"
 	"
 
 
@@ -632,7 +802,7 @@ _docker_run() {
 			_filedir
 			_filedir
 			return
 			return
 			;;
 			;;
-		--device|-d|--volume)
+		--device|--volume|-v)
 			case "$cur" in
 			case "$cur" in
 				*:*)
 				*:*)
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
 					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
@@ -756,7 +926,7 @@ _docker_save() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "-o --output" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_image_repos_and_tags_and_ids
 			__docker_image_repos_and_tags_and_ids
@@ -773,7 +943,7 @@ _docker_search() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--automated --no-trunc --stars -s" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--automated --help --no-trunc --stars -s" -- "$cur" ) )
 			;;
 			;;
 	esac
 	esac
 }
 }
@@ -781,7 +951,7 @@ _docker_search() {
 _docker_start() {
 _docker_start() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--attach -a --interactive -i" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--attach -a --help --interactive -i" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_stopped
 			__docker_containers_stopped
@@ -790,7 +960,14 @@ _docker_start() {
 }
 }
 
 
 _docker_stats() {
 _docker_stats() {
-	__docker_containers_running
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers_running
+			;;
+	esac
 }
 }
 
 
 _docker_stop() {
 _docker_stop() {
@@ -802,7 +979,7 @@ _docker_stop() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--time -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_containers_running
 			__docker_containers_running
@@ -813,7 +990,7 @@ _docker_stop() {
 _docker_tag() {
 _docker_tag() {
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--force -f" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			local counter=$(__docker_pos_first_nonflag)
 			local counter=$(__docker_pos_first_nonflag)
@@ -833,25 +1010,50 @@ _docker_tag() {
 }
 }
 
 
 _docker_unpause() {
 _docker_unpause() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_containers_unpauseable
-	fi
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_unpauseable
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_top() {
 _docker_top() {
-	local counter=$(__docker_pos_first_nonflag)
-	if [ $cword -eq $counter ]; then
-		__docker_containers_running
-	fi
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			local counter=$(__docker_pos_first_nonflag)
+			if [ $cword -eq $counter ]; then
+				__docker_containers_running
+			fi
+			;;
+	esac
 }
 }
 
 
 _docker_version() {
 _docker_version() {
-	return
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+	esac
 }
 }
 
 
 _docker_wait() {
 _docker_wait() {
-	__docker_containers_all
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers_all
+			;;
+	esac
 }
 }
 
 
 _docker() {
 _docker() {
@@ -872,17 +1074,18 @@ _docker() {
 		images
 		images
 		import
 		import
 		info
 		info
-		insert
 		inspect
 		inspect
 		kill
 		kill
 		load
 		load
 		login
 		login
+		logout
 		logs
 		logs
 		pause
 		pause
 		port
 		port
 		ps
 		ps
 		pull
 		pull
 		push
 		push
+		rename
 		restart
 		restart
 		rm
 		rm
 		rmi
 		rmi
@@ -900,8 +1103,10 @@ _docker() {
 	)
 	)
 
 
 	local main_options_with_args="
 	local main_options_with_args="
+		--api-cors-header
 		--bip
 		--bip
 		--bridge -b
 		--bridge -b
+		--default-ulimit
 		--dns
 		--dns
 		--dns-search
 		--dns-search
 		--exec-driver -e
 		--exec-driver -e

+ 4 - 4
contrib/completion/fish/docker.fish

@@ -16,7 +16,7 @@
 
 
 function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
 function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
     for i in (commandline -opc)
     for i in (commandline -opc)
-        if contains -- $i attach build commit cp create diff events exec export history images import info insert inspect kill load login logout logs pause port ps pull push restart rm rmi run save search start stop tag top unpause version wait
+        if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait
             return 1
             return 1
         end
         end
     end
     end
@@ -43,7 +43,7 @@ function __fish_print_docker_repositories --description 'Print a list of docker
 end
 end
 
 
 # common options
 # common options
-complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the remote API. Default is cors disabled"
 complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
 complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
 complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
@@ -72,7 +72,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Sp
 complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify flag'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file'
@@ -345,7 +345,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d
 complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
 complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
 
 
 # search
 # search
-complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the Docker Hub'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
 complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"

+ 3 - 0
contrib/completion/zsh/REVIEWERS

@@ -0,0 +1,3 @@
+Tianon Gravi <admwiggin@gmail.com> (@tianon)
+Jessie Frazelle <jess@docker.com> (@jfrazelle)
+Vincent Bernat <vincent@bernat.im> (@vincentbernat)

+ 44 - 23
contrib/completion/zsh/_docker

@@ -197,8 +197,10 @@ __docker_subcommand () {
             ;;
             ;;
         (build)
         (build)
             _arguments \
             _arguments \
+                {-f,--file=-}'[Dockerfile to use]:Dockerfile:_files' \
                 '--force-rm[Always remove intermediate containers]' \
                 '--force-rm[Always remove intermediate containers]' \
                 '--no-cache[Do not use cache when building the image]' \
                 '--no-cache[Do not use cache when building the image]' \
+                '--pull[Attempt to pull a newer version of the image]' \
                 {-q,--quiet}'[Suppress verbose build output]' \
                 {-q,--quiet}'[Suppress verbose build output]' \
                 '--rm[Remove intermediate containers after a successful build]' \
                 '--rm[Remove intermediate containers after a successful build]' \
                 {-t,--tag=-}'[Repository, name and tag to be applied]:repository:__docker_repositories_with_tags' \
                 {-t,--tag=-}'[Repository, name and tag to be applied]:repository:__docker_repositories_with_tags' \
@@ -209,7 +211,6 @@ __docker_subcommand () {
                 {-a,--author=-}'[Author]:author: ' \
                 {-a,--author=-}'[Author]:author: ' \
                 {-m,--message=-}'[Commit message]:message: ' \
                 {-m,--message=-}'[Commit message]:message: ' \
                 {-p,--pause}'[Pause container during commit]' \
                 {-p,--pause}'[Pause container during commit]' \
-                '--run=-[Configuration automatically applied when the image is run]:configuration: ' \
                 ':container:__docker_containers' \
                 ':container:__docker_containers' \
                 ':repository:__docker_repositories_with_tags'
                 ':repository:__docker_repositories_with_tags'
             ;;
             ;;
@@ -232,15 +233,28 @@ __docker_subcommand () {
             ;;
             ;;
         (events)
         (events)
             _arguments \
             _arguments \
+                '*'{-f,--filter=-}'[Filter values]:filter: ' \
                 '--since=-[Events created since this timestamp]:timestamp: ' \
                 '--since=-[Events created since this timestamp]:timestamp: ' \
                 '--until=-[Events created until this timestamp]:timestamp: '
                 '--until=-[Events created until this timestamp]:timestamp: '
             ;;
             ;;
         (exec)
         (exec)
+            local state ret
             _arguments \
             _arguments \
                 {-d,--detach}'[Detached mode: leave the container running in the background]' \
                 {-d,--detach}'[Detached mode: leave the container running in the background]' \
                 {-i,--interactive}'[Keep stdin open even if not attached]' \
                 {-i,--interactive}'[Keep stdin open even if not attached]' \
                 {-t,--tty}'[Allocate a pseudo-tty]' \
                 {-t,--tty}'[Allocate a pseudo-tty]' \
-                ':containers:__docker_runningcontainers'
+                ':containers:__docker_runningcontainers' \
+                '*::command:->anycommand' && ret=0
+
+            case $state in
+                (anycommand)
+                    shift 1 words
+                    (( CURRENT-- ))
+                    _normal
+                    ;;
+            esac
+
+            return ret
             ;;
             ;;
         (history)
         (history)
             _arguments \
             _arguments \
@@ -254,15 +268,8 @@ __docker_subcommand () {
                 '*'{-f,--filter=-}'[Filter values]:filter: ' \
                 '*'{-f,--filter=-}'[Filter values]:filter: ' \
                 '--no-trunc[Do not truncate output]' \
                 '--no-trunc[Do not truncate output]' \
                 {-q,--quiet}'[Only show numeric IDs]' \
                 {-q,--quiet}'[Only show numeric IDs]' \
-                '--tree[Output graph in tree format]' \
-                '--viz[Output graph in graphviz format]' \
                 ':repository:__docker_repositories'
                 ':repository:__docker_repositories'
             ;;
             ;;
-        (inspect)
-            _arguments \
-                {-f,--format=-}'[Format the output using the given go template]:template: ' \
-                '*:containers:__docker_containers'
-            ;;
         (import)
         (import)
             _arguments \
             _arguments \
                 ':URL:(- http:// file://)' \
                 ':URL:(- http:// file://)' \
@@ -270,15 +277,10 @@ __docker_subcommand () {
             ;;
             ;;
         (info)
         (info)
             ;;
             ;;
-        (import)
+        (inspect)
             _arguments \
             _arguments \
-                ':URL:(- http:// file://)' \
-                ':repository:__docker_repositories_with_tags'
-            ;;
-        (insert)
-            _arguments '1:containers:__docker_containers' \
-                       '2:URL:(http:// file://)' \
-                       '3:file:_files'
+                {-f,--format=-}'[Format the output using the given go template]:template: ' \
+                '*:containers:__docker_containers'
             ;;
             ;;
         (kill)
         (kill)
             _arguments \
             _arguments \
@@ -287,7 +289,7 @@ __docker_subcommand () {
             ;;
             ;;
         (load)
         (load)
             _arguments \
             _arguments \
-                {-i,--input=-}'[Read from tar archive file]:tar:_files'
+                {-i,--input=-}'[Read from tar archive file]:archive file:_files -g "*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)"'
             ;;
             ;;
         (login)
         (login)
             _arguments \
             _arguments \
@@ -304,6 +306,7 @@ __docker_subcommand () {
             _arguments \
             _arguments \
                 {-f,--follow}'[Follow log output]' \
                 {-f,--follow}'[Follow log output]' \
                 {-t,--timestamps}'[Show timestamps]' \
                 {-t,--timestamps}'[Show timestamps]' \
+                '--tail=-[Output the last K lines]:lines:(1 10 20 50 all)' \
                 '*:containers:__docker_containers'
                 '*:containers:__docker_containers'
             ;;
             ;;
         (port)
         (port)
@@ -321,6 +324,10 @@ __docker_subcommand () {
                 {-i,--interactive}'[Attach container'"'"'s stding]' \
                 {-i,--interactive}'[Attach container'"'"'s stding]' \
                 '*:containers:__docker_stoppedcontainers'
                 '*:containers:__docker_stoppedcontainers'
             ;;
             ;;
+        (stats)
+            _arguments \
+                '*:containers:__docker_runningcontainers'
+            ;;
         (rm)
         (rm)
             _arguments \
             _arguments \
                 {-f,--force}'[Force removal]' \
                 {-f,--force}'[Force removal]' \
@@ -391,7 +398,7 @@ __docker_subcommand () {
                 '*--lxc-conf=-[Add custom lxc options]:lxc options: ' \
                 '*--lxc-conf=-[Add custom lxc options]:lxc options: ' \
                 '-m[Memory limit (in bytes)]:limit: ' \
                 '-m[Memory limit (in bytes)]:limit: ' \
                 '--name=-[Container name]:name: ' \
                 '--name=-[Container name]:name: ' \
-                '--net=-[Network mode]:network mode:(bridge none container: host)' \
+                '--net=-[Network mode]:network mode:(bridge none container host)' \
                 {-P,--publish-all}'[Publish all exposed ports]' \
                 {-P,--publish-all}'[Publish all exposed ports]' \
                 '*'{-p,--publish=-}'[Expose a container'"'"'s port to the host]:port:_ports' \
                 '*'{-p,--publish=-}'[Expose a container'"'"'s port to the host]:port:_ports' \
                 '--privileged[Give extended privileges to this container]' \
                 '--privileged[Give extended privileges to this container]' \
@@ -419,19 +426,33 @@ __docker_subcommand () {
             esac
             esac
 
 
             ;;
             ;;
-        (pull|search)
-            _arguments ':name:__docker_search'
+        (pull)
+            _arguments \
+                {-a,--all-tags}'[Download all tagged images]' \
+                ':name:__docker_search'
             ;;
             ;;
         (push)
         (push)
             _arguments ':images:__docker_images'
             _arguments ':images:__docker_images'
             ;;
             ;;
+        (rename)
+            _arguments \
+                ':old name:__docker_containers' \
+                ':new name: '
+            ;;
         (save)
         (save)
             _arguments \
             _arguments \
                 {-o,--output=-}'[Write to file]:file:_files' \
                 {-o,--output=-}'[Write to file]:file:_files' \
-                ':images:__docker_images'
+                '*:images:__docker_images'
+            ;;
+        (search)
+            _arguments \
+                '--automated[Only show automated builds]' \
+                '--no-trunc[Do not truncate output]' \
+                {-s,--stars=-}'[Only display with at least X stars]:stars:(0 10 100 1000)' \
+                ':term: '
             ;;
             ;;
         (wait)
         (wait)
-            _arguments ':containers:__docker_runningcontainers'
+            _arguments '*:containers:__docker_runningcontainers'
             ;;
             ;;
         (help)
         (help)
             _arguments ':subcommand:__docker_commands'
             _arguments ':subcommand:__docker_commands'

+ 104 - 0
contrib/download-frozen-image.sh

@@ -0,0 +1,104 @@
+#!/bin/bash
+set -e
+
+# hello-world                      latest              ef872312fe1b        3 months ago        910 B
+# hello-world                      latest              ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9   3 months ago        910 B
+
+# debian                           latest              f6fab3b798be        10 weeks ago        85.1 MB
+# debian                           latest              f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd   10 weeks ago        85.1 MB
+
+if ! command -v curl &> /dev/null; then
+	echo >&2 'error: "curl" not found!'
+	exit 1
+fi
+
+usage() {
+	echo "usage: $0 dir image[:tag][@image-id] ..."
+	echo "   ie: $0 /tmp/hello-world hello-world"
+	echo "       $0 /tmp/debian-jessie debian:jessie"
+	echo "       $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9"
+	echo "       $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd"
+	[ -z "$1" ] || exit "$1"
+}
+
+dir="$1" # dir for building tar in
+shift || usage 1 >&2
+
+[ $# -gt 0 -a "$dir" ] || usage 2 >&2
+mkdir -p "$dir"
+
+# hacky workarounds for Bash 3 support (no associative arrays)
+images=()
+rm -f "$dir"/tags-*.tmp
+# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."'
+
+while [ $# -gt 0 ]; do
+	imageTag="$1"
+	shift
+	image="${imageTag%%[:@]*}"
+	tag="${imageTag#*:}"
+	imageId="${tag##*@}"
+	[ "$imageId" != "$tag" ] || imageId=
+	[ "$tag" != "$imageTag" ] || tag='latest'
+	tag="${tag%@*}"
+	
+	token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"
+	
+	if [ -z "$imageId" ]; then
+		imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")"
+		imageId="${imageId//\"/}"
+	fi
+	
+	ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")"
+	if [ "${ancestryJson:0:1}" != '[' ]; then
+		echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:"
+		echo >&2 "  $ancestryJson"
+		exit 1
+	fi
+	
+	IFS=','
+	ancestry=( ${ancestryJson//[\[\] \"]/} )
+	unset IFS
+	
+	if [ -s "$dir/tags-$image.tmp" ]; then
+		echo -n ', ' >> "$dir/tags-$image.tmp"
+	else
+		images=( "${images[@]}" "$image" )
+	fi
+	echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp"
+	
+	echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
+	for imageId in "${ancestry[@]}"; do
+		mkdir -p "$dir/$imageId"
+		echo '1.0' > "$dir/$imageId/VERSION"
+		
+		curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json"
+		
+		# TODO figure out why "-C -" doesn't work here
+		# "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume."
+		# "HTTP/1.1 416 Requested Range Not Satisfiable"
+		if [ -f "$dir/$imageId/layer.tar" ]; then
+			# TODO hackpatch for no -C support :'(
+			echo "skipping existing ${imageId:0:12}"
+			continue
+		fi
+		curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C -
+	done
+	echo
+done
+
+echo -n '{' > "$dir/repositories"
+firstImage=1
+for image in "${images[@]}"; do
+	[ "$firstImage" ] || echo -n ',' >> "$dir/repositories"
+	firstImage=
+	echo -n $'\n\t' >> "$dir/repositories"
+	echo -n '"'"$image"'": { '"$(cat "$dir/tags-$image.tmp")"' }' >> "$dir/repositories"
+done
+echo -n $'\n}\n' >> "$dir/repositories"
+
+rm -f "$dir"/tags-*.tmp
+
+echo "Download of images into '$dir' complete."
+echo "Use something like the following to load the result into a Docker daemon:"
+echo "  tar -cC '$dir' . | docker load"

+ 4 - 0
contrib/httpserver/Dockerfile

@@ -0,0 +1,4 @@
+FROM busybox
+EXPOSE 80/tcp
+COPY httpserver .
+CMD ["./httpserver"]

+ 12 - 0
contrib/httpserver/server.go

@@ -0,0 +1,12 @@
+package main
+
+import (
+	"log"
+	"net/http"
+)
+
+func main() {
+	fs := http.FileServer(http.Dir("/static"))
+	http.Handle("/", fs)
+	log.Panic(http.ListenAndServe(":80", nil))
+}

+ 0 - 0
contrib/init/systemd/MAINTAINERS → contrib/init/systemd/REVIEWERS


+ 1 - 0
contrib/init/systemd/docker.service

@@ -9,6 +9,7 @@ ExecStart=/usr/bin/docker -d -H fd://
 MountFlags=slave
 MountFlags=slave
 LimitNOFILE=1048576
 LimitNOFILE=1048576
 LimitNPROC=1048576
 LimitNPROC=1048576
+LimitCORE=infinity
 
 
 [Install]
 [Install]
 WantedBy=multi-user.target
 WantedBy=multi-user.target

+ 9 - 0
contrib/init/sysvinit-redhat/docker

@@ -43,6 +43,8 @@ prestart() {
 start() {
 start() {
     [ -x $exec ] || exit 5
     [ -x $exec ] || exit 5
 
 
+    check_for_cleanup
+
     if ! [ -f $pidfile ]; then
     if ! [ -f $pidfile ]; then
         prestart
         prestart
         printf "Starting $prog:\t"
         printf "Starting $prog:\t"
@@ -97,6 +99,13 @@ rh_status_q() {
     rh_status >/dev/null 2>&1
     rh_status >/dev/null 2>&1
 }
 }
 
 
+
+check_for_cleanup() {
+    if [ -f ${pidfile} ]; then
+        /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile}
+    fi
+}
+
 case "$1" in
 case "$1" in
     start)
     start)
         rh_status_q && exit 0
         rh_status_q && exit 0

+ 0 - 0
contrib/init/upstart/MAINTAINERS → contrib/init/upstart/REVIEWERS


+ 1 - 1
contrib/mkimage-alpine.sh

@@ -72,7 +72,7 @@ REL=${REL:-edge}
 MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine}
 MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine}
 SAVE=${SAVE:-0}
 SAVE=${SAVE:-0}
 REPO=$MIRROR/$REL/main
 REPO=$MIRROR/$REL/main
-ARCH=$(uname -m)
+ARCH=${ARCH:-$(uname -m)}
 
 
 tmp
 tmp
 getapk
 getapk

+ 1 - 1
contrib/mkimage-arch.sh

@@ -87,5 +87,5 @@ mknod -m 666 $DEV/ptmx c 5 2
 ln -sf /proc/self/fd $DEV/fd
 ln -sf /proc/self/fd $DEV/fd
 
 
 tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux
 tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux
-docker run -i -t archlinux echo Success.
+docker run -t archlinux echo Success.
 rm -rf $ROOTFS
 rm -rf $ROOTFS

+ 13 - 3
contrib/mkimage-yum.sh

@@ -57,6 +57,12 @@ mknod -m 666 "$target"/dev/tty0 c 4 0
 mknod -m 666 "$target"/dev/urandom c 1 9
 mknod -m 666 "$target"/dev/urandom c 1 9
 mknod -m 666 "$target"/dev/zero c 1 5
 mknod -m 666 "$target"/dev/zero c 1 5
 
 
+# amazon linux yum will fail without vars set
+if [ -d /etc/yum/vars ]; then
+	mkdir -p -m 755 "$target"/etc/yum
+	cp -a /etc/yum/vars "$target"/etc/yum/
+fi
+
 yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
 yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
     --setopt=group_package_types=mandatory -y groupinstall Core
     --setopt=group_package_types=mandatory -y groupinstall Core
 yum -c "$yum_config" --installroot="$target" -y clean all
 yum -c "$yum_config" --installroot="$target" -y clean all
@@ -83,9 +89,13 @@ rm -rf "$target"/etc/ld.so.cache
 rm -rf "$target"/var/cache/ldconfig/*
 rm -rf "$target"/var/cache/ldconfig/*
 
 
 version=
 version=
-if [ -r "$target"/etc/redhat-release ]; then
-    version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$target"/etc/redhat-release)"
-fi
+for file in "$target"/etc/{redhat,system}-release
+do
+    if [ -r "$file" ]; then
+        version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$file")"
+        break
+    fi
+done
 
 
 if [ -z "$version" ]; then
 if [ -z "$version" ]; then
     echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
     echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"

+ 57 - 26
contrib/mkimage/debootstrap

@@ -15,6 +15,16 @@ done
 suite="$1"
 suite="$1"
 shift
 shift
 
 
+# get path to "chroot" in our current PATH
+chrootPath="$(type -P chroot)"
+rootfs_chroot() {
+	# "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately!
+	
+	# set PATH and chroot away!
+	PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \
+		"$chrootPath" "$rootfsDir" "$@"
+}
+
 # allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ...
 # allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ...
 : ${DEBOOTSTRAP:=debootstrap}
 : ${DEBOOTSTRAP:=debootstrap}
 
 
@@ -28,26 +38,26 @@ shift
 # prevent init scripts from running during install/update
 # prevent init scripts from running during install/update
 echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'"
 echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'"
 cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF'
 cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF'
-#!/bin/sh
+	#!/bin/sh
 
 
-# For most Docker users, "apt-get install" only happens during "docker build",
-# where starting services doesn't work and often fails in humorous ways. This
-# prevents those failures by stopping the services from attempting to start.
+	# For most Docker users, "apt-get install" only happens during "docker build",
+	# where starting services doesn't work and often fails in humorous ways. This
+	# prevents those failures by stopping the services from attempting to start.
 
 
-exit 101
+	exit 101
 EOF
 EOF
 chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
 chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
 
 
 # prevent upstart scripts from running during install/update
 # prevent upstart scripts from running during install/update
 (
 (
 	set -x
 	set -x
-	chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl
+	rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl
 	cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl"
 	cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl"
 	sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl"
 	sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl"
 )
 )
 
 
 # shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
 # shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
-( set -x; chroot "$rootfsDir" apt-get clean )
+( set -x; rootfs_chroot apt-get clean )
 
 
 # this file is one APT creates to make sure we don't "autoremove" our currently
 # this file is one APT creates to make sure we don't "autoremove" our currently
 # in-use kernel, which doesn't really apply to debootstraps/Docker images that
 # in-use kernel, which doesn't really apply to debootstraps/Docker images that
@@ -59,12 +69,12 @@ if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then
 	# force dpkg not to call sync() after package extraction (speeding up installs)
 	# force dpkg not to call sync() after package extraction (speeding up installs)
 	echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'"
 	echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'"
 	cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF'
 	cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF'
-	# For most Docker users, package installs happen during "docker build", which
-	# doesn't survive power loss and gets restarted clean afterwards anyhow, so
-	# this minor tweak gives us a nice speedup (much nicer on spinning disks,
-	# obviously).
+		# For most Docker users, package installs happen during "docker build", which
+		# doesn't survive power loss and gets restarted clean afterwards anyhow, so
+		# this minor tweak gives us a nice speedup (much nicer on spinning disks,
+		# obviously).
 
 
-	force-unsafe-io
+		force-unsafe-io
 	EOF
 	EOF
 fi
 fi
 
 
@@ -97,26 +107,47 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 	# remove apt-cache translations for fast "apt-get update"
 	# remove apt-cache translations for fast "apt-get update"
 	echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
 	echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
 	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF'
 	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF'
-	# In Docker, we don't often need the "Translations" files, so we're just wasting
-	# time and space by downloading them, and this inhibits that.  For users that do
-	# need them, it's a simple matter to delete this file and "apt-get update". :)
+		# In Docker, we don't often need the "Translations" files, so we're just wasting
+		# time and space by downloading them, and this inhibits that.  For users that do
+		# need them, it's a simple matter to delete this file and "apt-get update". :)
 
 
-	Acquire::Languages "none";
+		Acquire::Languages "none";
 	EOF
 	EOF
 
 
 	echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'"
 	echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'"
 	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF'
 	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF'
-	# Since Docker users using "RUN apt-get update && apt-get install -y ..." in
-	# their Dockerfiles don't go delete the lists files afterwards, we want them to
-	# be as small as possible on-disk, so we explicitly request "gz" versions and
-	# tell Apt to keep them gzipped on-disk.
+		# Since Docker users using "RUN apt-get update && apt-get install -y ..." in
+		# their Dockerfiles don't go delete the lists files afterwards, we want them to
+		# be as small as possible on-disk, so we explicitly request "gz" versions and
+		# tell Apt to keep them gzipped on-disk.
 
 
-	# For comparison, an "apt-get update" layer without this on a pristine
-	# "debian:wheezy" base image was "29.88 MB", where with this it was only
-	# "8.273 MB".
+		# For comparison, an "apt-get update" layer without this on a pristine
+		# "debian:wheezy" base image was "29.88 MB", where with this it was only
+		# "8.273 MB".
+
+		Acquire::GzipIndexes "true";
+		Acquire::CompressionTypes::Order:: "gz";
+	EOF
 
 
-	Acquire::GzipIndexes "true";
-	Acquire::CompressionTypes::Order:: "gz";
+	# update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed
+	echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'"
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF'
+		# Since Docker users are looking for the smallest possible final images, the
+		# following emerges as a very common pattern:
+
+		#   RUN apt-get update \
+		#       && apt-get install -y <packages> \
+		#       && <do some compilation work> \
+		#       && apt-get purge -y --auto-remove <packages>
+
+		# By default, APT will actually _keep_ packages installed via Recommends or
+		# Depends if another package Suggests them, even and including if the package
+		# that originally caused them to be installed is removed.  Setting this to
+		# "false" ensures that APT is appropriately aggressive about removing the
+		# packages it added.
+
+		# https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant
+		Apt::AutoRemove::SuggestsImportant "false";
 	EOF
 	EOF
 fi
 fi
 
 
@@ -191,7 +222,7 @@ fi
 	set -x
 	set -x
 	
 	
 	# make sure we're fully up-to-date
 	# make sure we're fully up-to-date
-	chroot "$rootfsDir" bash -c 'apt-get update && apt-get dist-upgrade -y'
+	rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y'
 	
 	
 	# delete all the apt list files since they're big and get stale quickly
 	# delete all the apt list files since they're big and get stale quickly
 	rm -rf "$rootfsDir/var/lib/apt/lists"/*
 	rm -rf "$rootfsDir/var/lib/apt/lists"/*

+ 0 - 0
project/stats.sh → contrib/project-stats.sh


+ 0 - 0
project/report-issue.sh → contrib/report-issue.sh


+ 1 - 0
contrib/syntax/kate/Dockerfile.xml

@@ -22,6 +22,7 @@
       <item> CMD </item>
       <item> CMD </item>
       <item> WORKDIR </item>
       <item> WORKDIR </item>
       <item> USER </item>
       <item> USER </item>
+      <item> LABEL </item>
     </list>
     </list>
 
 
     <contexts>
     <contexts>

+ 1 - 1
contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage

@@ -12,7 +12,7 @@
 	<array>
 	<array>
 		<dict>
 		<dict>
 			<key>match</key>
 			<key>match</key>
-			<string>^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR|COPY)\s</string>
+			<string>^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|LABEL|WORKDIR|COPY)\s</string>
 			<key>captures</key>
 			<key>captures</key>
 			<dict>
 			<dict>
 				<key>0</key>
 				<key>0</key>

+ 0 - 0
contrib/syntax/textmate/MAINTAINERS → contrib/syntax/textmate/REVIEWERS


+ 1 - 1
contrib/syntax/vim/syntax/dockerfile.vim

@@ -11,7 +11,7 @@ let b:current_syntax = "dockerfile"
 
 
 syntax case ignore
 syntax case ignore
 
 
-syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR|COPY)\s/
+syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY)\s/
 highlight link dockerfileKeyword Keyword
 highlight link dockerfileKeyword Keyword
 
 
 syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/
 syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/

+ 0 - 7
daemon/MAINTAINERS

@@ -1,7 +0,0 @@
-Solomon Hykes <solomon@docker.com> (@shykes)
-Victor Vieux <vieux@docker.com> (@vieux)
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
-Tibor Vass <teabee89@gmail.com> (@tiborvass)
-Vishnu Kannan <vishnuk@google.com> (@vishh)
-volumes.go: Brian Goff <cpuguy83@gmail.com> (@cpuguy83)

+ 5 - 5
daemon/attach.go

@@ -28,9 +28,9 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 		stderr = job.GetenvBool("stderr")
 		stderr = job.GetenvBool("stderr")
 	)
 	)
 
 
-	container := daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
+	container, err := daemon.Get(name)
+	if err != nil {
+		return job.Error(err)
 	}
 	}
 
 
 	//logs
 	//logs
@@ -101,7 +101,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 			cStderr = job.Stderr
 			cStderr = job.Stderr
 		}
 		}
 
 
-		<-daemon.attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr)
+		<-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr)
 		// If we are in stdinonce mode, wait for the process to end
 		// If we are in stdinonce mode, wait for the process to end
 		// otherwise, simply return
 		// otherwise, simply return
 		if container.Config.StdinOnce && !container.Config.Tty {
 		if container.Config.StdinOnce && !container.Config.Tty {
@@ -111,7 +111,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 	return engine.StatusOK
 	return engine.StatusOK
 }
 }
 
 
-func (daemon *Daemon) attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
 	var (
 	var (
 		cStdout, cStderr io.ReadCloser
 		cStdout, cStderr io.ReadCloser
 		cStdin           io.WriteCloser
 		cStdin           io.WriteCloser

+ 21 - 16
daemon/changes.go

@@ -9,24 +9,29 @@ func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s CONTAINER", job.Name)
 		return job.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
-	if container := daemon.Get(name); container != nil {
-		outs := engine.NewTable("", 0)
-		changes, err := container.Changes()
-		if err != nil {
-			return job.Error(err)
-		}
-		for _, change := range changes {
-			out := &engine.Env{}
-			if err := out.Import(change); err != nil {
-				return job.Error(err)
-			}
-			outs.Add(out)
-		}
-		if _, err := outs.WriteListTo(job.Stdout); err != nil {
+
+	container, error := daemon.Get(name)
+	if error != nil {
+		return job.Error(error)
+	}
+
+	outs := engine.NewTable("", 0)
+	changes, err := container.Changes()
+	if err != nil {
+		return job.Error(err)
+	}
+
+	for _, change := range changes {
+		out := &engine.Env{}
+		if err := out.Import(change); err != nil {
 			return job.Error(err)
 			return job.Error(err)
 		}
 		}
-	} else {
-		return job.Errorf("No such container: %s", name)
+		outs.Add(out)
 	}
 	}
+
+	if _, err := outs.WriteListTo(job.Stdout); err != nil {
+		return job.Error(err)
+	}
+
 	return engine.StatusOK
 	return engine.StatusOK
 }
 }

+ 20 - 7
daemon/commit.go

@@ -1,6 +1,9 @@
 package daemon
 package daemon
 
 
 import (
 import (
+	"bytes"
+	"encoding/json"
+
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
@@ -12,17 +15,27 @@ func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
 
 
-	container := daemon.Get(name)
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
+	container, err := daemon.Get(name)
+	if err != nil {
+		return job.Error(err)
 	}
 	}
 
 
 	var (
 	var (
-		config    = container.Config
-		newConfig runconfig.Config
+		config       = container.Config
+		stdoutBuffer = bytes.NewBuffer(nil)
+		newConfig    runconfig.Config
 	)
 	)
 
 
-	if err := job.GetenvJson("config", &newConfig); err != nil {
+	buildConfigJob := daemon.eng.Job("build_config")
+	buildConfigJob.Stdout.Add(stdoutBuffer)
+	buildConfigJob.Setenv("changes", job.Getenv("changes"))
+	// FIXME this should be remove when we remove deprecated config param
+	buildConfigJob.Setenv("config", job.Getenv("config"))
+
+	if err := buildConfigJob.Run(); err != nil {
+		return job.Error(err)
+	}
+	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
 		return job.Error(err)
 		return job.Error(err)
 	}
 	}
 
 
@@ -41,7 +54,7 @@ func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
 // Commit creates a new filesystem image from the current state of a container.
 // Commit creates a new filesystem image from the current state of a container.
 // The image can optionally be tagged into a repository
 // The image can optionally be tagged into a repository
 func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
 func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
-	if pause {
+	if pause && !container.IsPaused() {
 		container.Pause()
 		container.Pause()
 		defer container.Unpause()
 		defer container.Unpause()
 	}
 	}

+ 30 - 17
daemon/config.go

@@ -6,6 +6,8 @@ import (
 	"github.com/docker/docker/daemon/networkdriver"
 	"github.com/docker/docker/daemon/networkdriver"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
 	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/ulimit"
+	"github.com/docker/docker/runconfig"
 )
 )
 
 
 const (
 const (
@@ -37,11 +39,16 @@ type Config struct {
 	GraphOptions                []string
 	GraphOptions                []string
 	ExecDriver                  string
 	ExecDriver                  string
 	Mtu                         int
 	Mtu                         int
+	SocketGroup                 string
+	EnableCors                  bool
+	CorsHeaders                 string
 	DisableNetwork              bool
 	DisableNetwork              bool
 	EnableSelinuxSupport        bool
 	EnableSelinuxSupport        bool
 	Context                     map[string][]string
 	Context                     map[string][]string
 	TrustKeyPath                string
 	TrustKeyPath                string
 	Labels                      []string
 	Labels                      []string
+	Ulimits                     map[string]*ulimit.Ulimit
+	LogConfig                   runconfig.LogConfig
 }
 }
 
 
 // InstallFlags adds command-line options to the top-level flag parser for
 // InstallFlags adds command-line options to the top-level flag parser for
@@ -50,27 +57,33 @@ type Config struct {
 // from the command-line.
 // from the command-line.
 func (config *Config) InstallFlags() {
 func (config *Config) InstallFlags() {
 	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
 	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
-	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
+	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Root of the Docker runtime")
 	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
 	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
-	flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
-	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.")
-	flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading for bridge's IP range")
+	flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
+	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+	flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
 	flag.BoolVar(&config.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
 	flag.BoolVar(&config.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
-	flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
-	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
-	flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)\nthis subnet must be nested in the bridge subnet (which is defined by -b or --bip)")
-	flag.StringVar(&config.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)")
-	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Allow unrestricted inter-container and Docker daemon host communication")
-	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
-	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
-	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support. SELinux does not presently support the BTRFS storage driver")
-	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
-	opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
+	flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Specify network bridge IP")
+	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge")
+	flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs")
+	flag.StringVar(&config.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs")
+	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
+	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use")
+	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use")
+	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
+	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU")
+	flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
+	flag.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header")
+	flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API")
+	opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
 	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
 	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
-	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
-	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
-	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon (displayed in `docker info`)")
+	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use")
+	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use")
+	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon")
+	config.Ulimits = make(map[string]*ulimit.Ulimit)
+	opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
+	flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver(json-file/none)")
 }
 }
 
 
 func getDefaultNetworkMtu() int {
 func getDefaultNetworkMtu() int {

+ 129 - 42
daemon/container.go

@@ -14,22 +14,29 @@ import (
 	"syscall"
 	"syscall"
 	"time"
 	"time"
 
 
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libcontainer/label"
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/jsonfilelog"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/links"
 	"github.com/docker/docker/links"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/directory"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/networkfs/etchosts"
 	"github.com/docker/docker/pkg/networkfs/etchosts"
 	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
 )
 )
@@ -70,6 +77,7 @@ type Container struct {
 	ResolvConfPath string
 	ResolvConfPath string
 	HostnamePath   string
 	HostnamePath   string
 	HostsPath      string
 	HostsPath      string
+	LogPath        string
 	Name           string
 	Name           string
 	Driver         string
 	Driver         string
 	ExecDriver     string
 	ExecDriver     string
@@ -92,9 +100,12 @@ type Container struct {
 	VolumesRW  map[string]bool
 	VolumesRW  map[string]bool
 	hostConfig *runconfig.HostConfig
 	hostConfig *runconfig.HostConfig
 
 
-	activeLinks        map[string]*links.Link
-	monitor            *containerMonitor
-	execCommands       *execStore
+	activeLinks  map[string]*links.Link
+	monitor      *containerMonitor
+	execCommands *execStore
+	// logDriver for closing
+	logDriver          logger.Logger
+	logCopier          *logger.Copier
 	AppliedVolumesFrom map[string]struct{}
 	AppliedVolumesFrom map[string]struct{}
 }
 }
 
 
@@ -255,18 +266,18 @@ func populateCommand(c *Container, env []string) error {
 	pid.HostPid = c.hostConfig.PidMode.IsHost()
 	pid.HostPid = c.hostConfig.PidMode.IsHost()
 
 
 	// Build lists of devices allowed and created within the container.
 	// Build lists of devices allowed and created within the container.
-	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
+	userSpecifiedDevices := make([]*configs.Device, len(c.hostConfig.Devices))
 	for i, deviceMapping := range c.hostConfig.Devices {
 	for i, deviceMapping := range c.hostConfig.Devices {
-		device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+		device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
 		if err != nil {
 		if err != nil {
 			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
 			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
 		}
 		}
 		device.Path = deviceMapping.PathInContainer
 		device.Path = deviceMapping.PathInContainer
 		userSpecifiedDevices[i] = device
 		userSpecifiedDevices[i] = device
 	}
 	}
-	allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...)
+	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
 
 
-	autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)
+	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
 
 
 	// TODO: this can be removed after lxc-conf is fully deprecated
 	// TODO: this can be removed after lxc-conf is fully deprecated
 	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
 	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
@@ -274,11 +285,34 @@ func populateCommand(c *Container, env []string) error {
 		return err
 		return err
 	}
 	}
 
 
+	var rlimits []*ulimit.Rlimit
+	ulimits := c.hostConfig.Ulimits
+
+	// Merge ulimits with daemon defaults
+	ulIdx := make(map[string]*ulimit.Ulimit)
+	for _, ul := range ulimits {
+		ulIdx[ul.Name] = ul
+	}
+	for name, ul := range c.daemon.config.Ulimits {
+		if _, exists := ulIdx[name]; !exists {
+			ulimits = append(ulimits, ul)
+		}
+	}
+
+	for _, limit := range ulimits {
+		rl, err := limit.GetRlimit()
+		if err != nil {
+			return err
+		}
+		rlimits = append(rlimits, rl)
+	}
+
 	resources := &execdriver.Resources{
 	resources := &execdriver.Resources{
-		Memory:     c.Config.Memory,
-		MemorySwap: c.Config.MemorySwap,
-		CpuShares:  c.Config.CpuShares,
-		Cpuset:     c.Config.Cpuset,
+		Memory:     c.hostConfig.Memory,
+		MemorySwap: c.hostConfig.MemorySwap,
+		CpuShares:  c.hostConfig.CpuShares,
+		CpusetCpus: c.hostConfig.CpusetCpus,
+		Rlimits:    rlimits,
 	}
 	}
 
 
 	processConfig := execdriver.ProcessConfig{
 	processConfig := execdriver.ProcessConfig{
@@ -311,6 +345,7 @@ func populateCommand(c *Container, env []string) error {
 		MountLabel:         c.GetMountLabel(),
 		MountLabel:         c.GetMountLabel(),
 		LxcConfig:          lxcConfig,
 		LxcConfig:          lxcConfig,
 		AppArmorProfile:    c.AppArmorProfile,
 		AppArmorProfile:    c.AppArmorProfile,
+		CgroupParent:       c.hostConfig.CgroupParent,
 	}
 	}
 
 
 	return nil
 	return nil
@@ -324,7 +359,7 @@ func (container *Container) Start() (err error) {
 		return nil
 		return nil
 	}
 	}
 
 
-	// if we encounter and error during start we need to ensure that any other
+	// if we encounter an error during start we need to ensure that any other
 	// setup has been cleaned up properly
 	// setup has been cleaned up properly
 	defer func() {
 	defer func() {
 		if err != nil {
 		if err != nil {
@@ -457,11 +492,18 @@ func (container *Container) buildHostsFiles(IP string) error {
 
 
 	for linkAlias, child := range children {
 	for linkAlias, child := range children {
 		_, alias := path.Split(linkAlias)
 		_, alias := path.Split(linkAlias)
-		extraContent = append(extraContent, etchosts.Record{Hosts: alias, IP: child.NetworkSettings.IPAddress})
+		// allow access to the linked container via the alias, real name, and container hostname
+		aliasList := alias + " " + child.Config.Hostname
+		// only add the name if alias isn't equal to the name
+		if alias != child.Name[1:] {
+			aliasList = aliasList + " " + child.Name[1:]
+		}
+		extraContent = append(extraContent, etchosts.Record{Hosts: aliasList, IP: child.NetworkSettings.IPAddress})
 	}
 	}
 
 
 	for _, extraHost := range container.hostConfig.ExtraHosts {
 	for _, extraHost := range container.hostConfig.ExtraHosts {
-		parts := strings.Split(extraHost, ":")
+		// allow IPv6 addresses in extra hosts; only split on first ":"
+		parts := strings.SplitN(extraHost, ":", 2)
 		extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]})
 		extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]})
 	}
 	}
 
 
@@ -652,6 +694,16 @@ func (container *Container) KillSig(sig int) error {
 	return container.daemon.Kill(container, sig)
 	return container.daemon.Kill(container, sig)
 }
 }
 
 
+// Wrapper aroung KillSig() suppressing "no such process" error.
+func (container *Container) killPossiblyDeadProcess(sig int) error {
+	err := container.KillSig(sig)
+	if err == syscall.ESRCH {
+		log.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
+		return nil
+	}
+	return err
+}
+
 func (container *Container) Pause() error {
 func (container *Container) Pause() error {
 	if container.IsPaused() {
 	if container.IsPaused() {
 		return fmt.Errorf("Container %s is already paused", container.ID)
 		return fmt.Errorf("Container %s is already paused", container.ID)
@@ -678,7 +730,7 @@ func (container *Container) Kill() error {
 	}
 	}
 
 
 	// 1. Send SIGKILL
 	// 1. Send SIGKILL
-	if err := container.KillSig(9); err != nil {
+	if err := container.killPossiblyDeadProcess(9); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -686,9 +738,12 @@ func (container *Container) Kill() error {
 	if _, err := container.WaitStop(10 * time.Second); err != nil {
 	if _, err := container.WaitStop(10 * time.Second); err != nil {
 		// Ensure that we don't kill ourselves
 		// Ensure that we don't kill ourselves
 		if pid := container.GetPid(); pid != 0 {
 		if pid := container.GetPid(); pid != 0 {
-			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
+			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", common.TruncateID(container.ID))
 			if err := syscall.Kill(pid, 9); err != nil {
 			if err := syscall.Kill(pid, 9); err != nil {
-				return err
+				if err != syscall.ESRCH {
+					return err
+				}
+				log.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
 			}
 			}
 		}
 		}
 	}
 	}
@@ -703,9 +758,9 @@ func (container *Container) Stop(seconds int) error {
 	}
 	}
 
 
 	// 1. Send a SIGTERM
 	// 1. Send a SIGTERM
-	if err := container.KillSig(15); err != nil {
+	if err := container.killPossiblyDeadProcess(15); err != nil {
 		log.Infof("Failed to send SIGTERM to the process, force killing")
 		log.Infof("Failed to send SIGTERM to the process, force killing")
-		if err := container.KillSig(9); err != nil {
+		if err := container.killPossiblyDeadProcess(9); err != nil {
 			return err
 			return err
 		}
 		}
 	}
 	}
@@ -848,7 +903,7 @@ func (container *Container) GetSize() (int64, int64) {
 	)
 	)
 
 
 	if err := container.Mount(); err != nil {
 	if err := container.Mount(); err != nil {
-		log.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err)
+		log.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
 		return sizeRw, sizeRootfs
 		return sizeRw, sizeRootfs
 	}
 	}
 	defer container.Unmount()
 	defer container.Unmount()
@@ -856,14 +911,14 @@ func (container *Container) GetSize() (int64, int64) {
 	initID := fmt.Sprintf("%s-init", container.ID)
 	initID := fmt.Sprintf("%s-init", container.ID)
 	sizeRw, err = driver.DiffSize(container.ID, initID)
 	sizeRw, err = driver.DiffSize(container.ID, initID)
 	if err != nil {
 	if err != nil {
-		log.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+		log.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
 		// FIXME: GetSize should return an error. Not changing it now in case
 		// FIXME: GetSize should return an error. Not changing it now in case
 		// there is a side-effect.
 		// there is a side-effect.
 		sizeRw = -1
 		sizeRw = -1
 	}
 	}
 
 
 	if _, err = os.Stat(container.basefs); err != nil {
 	if _, err = os.Stat(container.basefs); err != nil {
-		if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil {
+		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
 			sizeRootfs = -1
 			sizeRootfs = -1
 		}
 		}
 	}
 	}
@@ -925,7 +980,7 @@ func (container *Container) Exposes(p nat.Port) bool {
 	return exists
 	return exists
 }
 }
 
 
-func (container *Container) GetPtyMaster() (*os.File, error) {
+func (container *Container) GetPtyMaster() (libcontainer.Console, error) {
 	ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal)
 	ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal)
 	if !ok {
 	if !ok {
 		return nil, ErrNoTTY
 		return nil, ErrNoTTY
@@ -1113,7 +1168,12 @@ func (container *Container) updateParentsHosts() error {
 		if ref.ParentID == "0" {
 		if ref.ParentID == "0" {
 			continue
 			continue
 		}
 		}
-		c := container.daemon.Get(ref.ParentID)
+
+		c, err := container.daemon.Get(ref.ParentID)
+		if err != nil {
+			log.Error(err)
+		}
+
 		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
 		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
 			log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
 			log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
 			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil {
 			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil {
@@ -1163,6 +1223,7 @@ func (container *Container) initializeNetworking() error {
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
+		container.HostnamePath = nc.HostnamePath
 		container.HostsPath = nc.HostsPath
 		container.HostsPath = nc.HostsPath
 		container.ResolvConfPath = nc.ResolvConfPath
 		container.ResolvConfPath = nc.ResolvConfPath
 		container.Config.Hostname = nc.Config.Hostname
 		container.Config.Hostname = nc.Config.Hostname
@@ -1182,15 +1243,15 @@ func (container *Container) initializeNetworking() error {
 // Make sure the config is compatible with the current kernel
 // Make sure the config is compatible with the current kernel
 func (container *Container) verifyDaemonSettings() {
 func (container *Container) verifyDaemonSettings() {
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
-		log.Infof("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
+		log.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
 		container.Config.Memory = 0
 		container.Config.Memory = 0
 	}
 	}
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
 	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
-		log.Infof("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.")
+		log.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
 		container.Config.MemorySwap = -1
 		container.Config.MemorySwap = -1
 	}
 	}
 	if container.daemon.sysInfo.IPv4ForwardingDisabled {
 	if container.daemon.sysInfo.IPv4ForwardingDisabled {
-		log.Infof("WARNING: IPv4 forwarding is disabled. Networking will not work")
+		log.Warnf("IPv4 forwarding is disabled. Networking will not work")
 	}
 	}
 }
 }
 
 
@@ -1208,7 +1269,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 		container.activeLinks = make(map[string]*links.Link, len(children))
 		container.activeLinks = make(map[string]*links.Link, len(children))
 
 
 		// If we encounter an error make sure that we rollback any network
 		// If we encounter an error make sure that we rollback any network
-		// config and ip table changes
+		// config and iptables changes
 		rollback := func() {
 		rollback := func() {
 			for _, link := range container.activeLinks {
 			for _, link := range container.activeLinks {
 				link.Disable()
 				link.Disable()
@@ -1301,20 +1362,37 @@ func (container *Container) setupWorkingDirectory() error {
 	return nil
 	return nil
 }
 }
 
 
-func (container *Container) startLoggingToDisk() error {
-	// Setup logging of stdout and stderr to disk
-	pth, err := container.logPath("json")
-	if err != nil {
-		return err
+func (container *Container) startLogging() error {
+	cfg := container.hostConfig.LogConfig
+	if cfg.Type == "" {
+		cfg = container.daemon.defaultLogConfig
 	}
 	}
+	var l logger.Logger
+	switch cfg.Type {
+	case "json-file":
+		pth, err := container.logPath("json")
+		if err != nil {
+			return err
+		}
 
 
-	if err := container.daemon.LogToDisk(container.stdout, pth, "stdout"); err != nil {
-		return err
+		dl, err := jsonfilelog.New(pth)
+		if err != nil {
+			return err
+		}
+		l = dl
+	case "none":
+		return nil
+	default:
+		return fmt.Errorf("Unknown logging driver: %s", cfg.Type)
 	}
 	}
 
 
-	if err := container.daemon.LogToDisk(container.stderr, pth, "stderr"); err != nil {
+	copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
+	if err != nil {
 		return err
 		return err
 	}
 	}
+	container.logCopier = copier
+	copier.Run()
+	container.logDriver = l
 
 
 	return nil
 	return nil
 }
 }
@@ -1382,9 +1460,9 @@ func (container *Container) GetMountLabel() string {
 
 
 func (container *Container) getIpcContainer() (*Container, error) {
 func (container *Container) getIpcContainer() (*Container, error) {
 	containerID := container.hostConfig.IpcMode.Container()
 	containerID := container.hostConfig.IpcMode.Container()
-	c := container.daemon.Get(containerID)
-	if c == nil {
-		return nil, fmt.Errorf("no such container to join IPC: %s", containerID)
+	c, err := container.daemon.Get(containerID)
+	if err != nil {
+		return nil, err
 	}
 	}
 	if !c.IsRunning() {
 	if !c.IsRunning() {
 		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
 		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
@@ -1399,9 +1477,9 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
 		if len(parts) != 2 {
 		if len(parts) != 2 {
 			return nil, fmt.Errorf("no container specified to join network")
 			return nil, fmt.Errorf("no container specified to join network")
 		}
 		}
-		nc := container.daemon.Get(parts[1])
-		if nc == nil {
-			return nil, fmt.Errorf("no such container to join network: %s", parts[1])
+		nc, err := container.daemon.Get(parts[1])
+		if err != nil {
+			return nil, err
 		}
 		}
 		if !nc.IsRunning() {
 		if !nc.IsRunning() {
 			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
 			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
@@ -1415,3 +1493,12 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
 func (container *Container) Stats() (*execdriver.ResourceStats, error) {
 func (container *Container) Stats() (*execdriver.ResourceStats, error) {
 	return container.daemon.Stats(container)
 	return container.daemon.Stats(container)
 }
 }
+
+func (c *Container) LogDriverType() string {
+	c.Lock()
+	defer c.Unlock()
+	if c.hostConfig.LogConfig.Type == "" {
+		return c.daemon.defaultLogConfig.Type
+	}
+	return c.hostConfig.LogConfig.Type
+}

+ 12 - 11
daemon/copy.go

@@ -16,18 +16,19 @@ func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
 		resource = job.Args[1]
 		resource = job.Args[1]
 	)
 	)
 
 
-	if container := daemon.Get(name); container != nil {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return job.Error(err)
+	}
 
 
-		data, err := container.Copy(resource)
-		if err != nil {
-			return job.Error(err)
-		}
-		defer data.Close()
+	data, err := container.Copy(resource)
+	if err != nil {
+		return job.Error(err)
+	}
+	defer data.Close()
 
 
-		if _, err := io.Copy(job.Stdout, data); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
+	if _, err := io.Copy(job.Stdout, data); err != nil {
+		return job.Error(err)
 	}
 	}
-	return job.Errorf("No such container: %s", name)
+	return engine.StatusOK
 }
 }

+ 22 - 17
daemon/create.go

@@ -2,6 +2,7 @@ package daemon
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"strings"
 
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/graph"
@@ -18,28 +19,29 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
 	} else if len(job.Args) > 1 {
 	} else if len(job.Args) > 1 {
 		return job.Errorf("Usage: %s", job.Name)
 		return job.Errorf("Usage: %s", job.Name)
 	}
 	}
+
 	config := runconfig.ContainerConfigFromJob(job)
 	config := runconfig.ContainerConfigFromJob(job)
-	if config.Memory != 0 && config.Memory < 4194304 {
+	hostConfig := runconfig.ContainerHostConfigFromJob(job)
+
+	if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
+		return job.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
+	}
+	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
 		return job.Errorf("Minimum memory limit allowed is 4MB")
 		return job.Errorf("Minimum memory limit allowed is 4MB")
 	}
 	}
-	if config.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
+	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
 		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		config.Memory = 0
+		hostConfig.Memory = 0
 	}
 	}
-	if config.Memory > 0 && !daemon.SystemConfig().SwapLimit {
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
 		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
-		config.MemorySwap = -1
+		hostConfig.MemorySwap = -1
 	}
 	}
-	if config.Memory > 0 && config.MemorySwap > 0 && config.MemorySwap < config.Memory {
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
 		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
 		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
 	}
 	}
-
-	var hostConfig *runconfig.HostConfig
-	if job.EnvExists("HostConfig") {
-		hostConfig = runconfig.ContainerHostConfigFromJob(job)
-	} else {
-		// Older versions of the API don't provide a HostConfig.
-		hostConfig = nil
+	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
+		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
 	}
 	}
 
 
 	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
 	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
@@ -91,7 +93,10 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
 	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 		return nil, nil, err
 		return nil, nil, err
 	}
 	}
-	if hostConfig != nil && hostConfig.SecurityOpt == nil {
+	if hostConfig == nil {
+		hostConfig = &runconfig.HostConfig{}
+	}
+	if hostConfig.SecurityOpt == nil {
 		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
 		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
 		if err != nil {
 		if err != nil {
 			return nil, nil, err
 			return nil, nil, err
@@ -129,9 +134,9 @@ func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run
 		return label.DisableSecOpt(), nil
 		return label.DisableSecOpt(), nil
 	}
 	}
 	if ipcContainer := ipcMode.Container(); ipcContainer != "" {
 	if ipcContainer := ipcMode.Container(); ipcContainer != "" {
-		c := daemon.Get(ipcContainer)
-		if c == nil {
-			return nil, fmt.Errorf("no such container to join IPC: %s", ipcContainer)
+		c, err := daemon.Get(ipcContainer)
+		if err != nil {
+			return nil, err
 		}
 		}
 		if !c.IsRunning() {
 		if !c.IsRunning() {
 			return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer)
 			return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer)

+ 133 - 94
daemon/daemon.go

@@ -18,6 +18,7 @@ import (
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
 	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/daemon/execdriver/lxc"
@@ -25,12 +26,12 @@ import (
 	_ "github.com/docker/docker/daemon/graphdriver/vfs"
 	_ "github.com/docker/docker/daemon/graphdriver/vfs"
 	_ "github.com/docker/docker/daemon/networkdriver/bridge"
 	_ "github.com/docker/docker/daemon/networkdriver/bridge"
 	"github.com/docker/docker/daemon/networkdriver/portallocator"
 	"github.com/docker/docker/daemon/networkdriver/portallocator"
-	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/namesgenerator"
 	"github.com/docker/docker/pkg/namesgenerator"
@@ -88,23 +89,24 @@ func (c *contStore) List() []*Container {
 }
 }
 
 
 type Daemon struct {
 type Daemon struct {
-	ID             string
-	repository     string
-	sysInitPath    string
-	containers     *contStore
-	execCommands   *execStore
-	graph          *graph.Graph
-	repositories   *graph.TagStore
-	idIndex        *truncindex.TruncIndex
-	sysInfo        *sysinfo.SysInfo
-	volumes        *volumes.Repository
-	eng            *engine.Engine
-	config         *Config
-	containerGraph *graphdb.Database
-	driver         graphdriver.Driver
-	execDriver     execdriver.Driver
-	trustStore     *trust.TrustStore
-	statsCollector *statsCollector
+	ID               string
+	repository       string
+	sysInitPath      string
+	containers       *contStore
+	execCommands     *execStore
+	graph            *graph.Graph
+	repositories     *graph.TagStore
+	idIndex          *truncindex.TruncIndex
+	sysInfo          *sysinfo.SysInfo
+	volumes          *volumes.Repository
+	eng              *engine.Engine
+	config           *Config
+	containerGraph   *graphdb.Database
+	driver           graphdriver.Driver
+	execDriver       execdriver.Driver
+	trustStore       *trust.TrustStore
+	statsCollector   *statsCollector
+	defaultLogConfig runconfig.LogConfig
 }
 }
 
 
 // Install installs daemon capabilities to eng.
 // Install installs daemon capabilities to eng.
@@ -155,28 +157,40 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
 	return nil
 	return nil
 }
 }
 
 
-// Get looks for a container by the specified ID or name, and returns it.
-// If the container is not found, or if an error occurs, nil is returned.
-func (daemon *Daemon) Get(name string) *Container {
-	id, err := daemon.idIndex.Get(name)
-	if err == nil {
-		return daemon.containers.Get(id)
+// Get looks for a container using the provided information, which could be
+// one of the following inputs from the caller:
+//  - A full container ID, which will exact match a container in daemon's list
+//  - A container name, which will only exact match via the GetByName() function
+//  - A partial container ID prefix (e.g. short ID) of any length that is
+//    unique enough to only return a single container object
+//  If none of these searches succeed, an error is returned
+func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
+	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
+		// prefix is an exact match to a full container ID
+		return containerByID, nil
 	}
 	}
 
 
-	if c, _ := daemon.GetByName(name); c != nil {
-		return c
+	// GetByName will match only an exact name provided; we ignore errors
+	containerByName, _ := daemon.GetByName(prefixOrName)
+	containerId, indexError := daemon.idIndex.Get(prefixOrName)
+
+	if containerByName != nil {
+		// prefix is an exact match to a full container Name
+		return containerByName, nil
 	}
 	}
 
 
-	if err == truncindex.ErrDuplicateID {
-		log.Errorf("Short ID %s is ambiguous: please retry with more characters or use the full ID.\n", name)
+	if containerId != "" {
+		// prefix is a fuzzy match to a container ID
+		return daemon.containers.Get(containerId), nil
 	}
 	}
-	return nil
+	return nil, indexError
 }
 }
 
 
 // Exists returns a true if a container of the specified ID or name exists,
 // Exists returns a true if a container of the specified ID or name exists,
 // false otherwise.
 // false otherwise.
 func (daemon *Daemon) Exists(id string) bool {
 func (daemon *Daemon) Exists(id string) bool {
-	return daemon.Get(id) != nil
+	c, _ := daemon.Get(id)
+	return c != nil
 }
 }
 
 
 func (daemon *Daemon) containerRoot(id string) string {
 func (daemon *Daemon) containerRoot(id string) string {
@@ -332,7 +346,7 @@ func (daemon *Daemon) restore() error {
 	for _, v := range dir {
 	for _, v := range dir {
 		id := v.Name()
 		id := v.Name()
 		container, err := daemon.load(id)
 		container, err := daemon.load(id)
-		if !debug {
+		if !debug && log.GetLevel() == log.InfoLevel {
 			fmt.Print(".")
 			fmt.Print(".")
 		}
 		}
 		if err != nil {
 		if err != nil {
@@ -354,7 +368,7 @@ func (daemon *Daemon) restore() error {
 
 
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
 		for _, p := range entities.Paths() {
-			if !debug {
+			if !debug && log.GetLevel() == log.InfoLevel {
 				fmt.Print(".")
 				fmt.Print(".")
 			}
 			}
 
 
@@ -406,7 +420,9 @@ func (daemon *Daemon) restore() error {
 	}
 	}
 
 
 	if !debug {
 	if !debug {
-		fmt.Println()
+		if log.GetLevel() == log.InfoLevel {
+			fmt.Println()
+		}
 		log.Infof("Loading containers: done.")
 		log.Infof("Loading containers: done.")
 	}
 	}
 
 
@@ -428,7 +444,9 @@ func (daemon *Daemon) setupResolvconfWatcher() error {
 		for {
 		for {
 			select {
 			select {
 			case event := <-watcher.Events:
 			case event := <-watcher.Events:
-				if event.Op&fsnotify.Write == fsnotify.Write {
+				if event.Name == "/etc/resolv.conf" &&
+					(event.Op&fsnotify.Write == fsnotify.Write ||
+						event.Op&fsnotify.Create == fsnotify.Create) {
 					// verify a real change happened before we go further--a file write may have happened
 					// verify a real change happened before we go further--a file write may have happened
 					// without an actual change to the file
 					// without an actual change to the file
 					updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged()
 					updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged()
@@ -461,7 +479,7 @@ func (daemon *Daemon) setupResolvconfWatcher() error {
 		}
 		}
 	}()
 	}()
 
 
-	if err := watcher.Add("/etc/resolv.conf"); err != nil {
+	if err := watcher.Add("/etc"); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
@@ -499,7 +517,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
 func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
 func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
 	var (
 	var (
 		err error
 		err error
-		id  = utils.GenerateRandomID()
+		id  = common.GenerateRandomID()
 	)
 	)
 
 
 	if name == "" {
 	if name == "" {
@@ -544,7 +562,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
 			nameAsKnownByUser := strings.TrimPrefix(name, "/")
 			nameAsKnownByUser := strings.TrimPrefix(name, "/")
 			return "", fmt.Errorf(
 			return "", fmt.Errorf(
 				"Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
 				"Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
-				utils.TruncateID(conflictingContainer.ID))
+				common.TruncateID(conflictingContainer.ID))
 		}
 		}
 	}
 	}
 	return name, nil
 	return name, nil
@@ -567,7 +585,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
 		return name, nil
 		return name, nil
 	}
 	}
 
 
-	name = "/" + utils.TruncateID(id)
+	name = "/" + common.TruncateID(id)
 	if _, err := daemon.containerGraph.Set(name, id); err != nil {
 	if _, err := daemon.containerGraph.Set(name, id); err != nil {
 		return "", err
 		return "", err
 	}
 	}
@@ -715,9 +733,9 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
 	children := make(map[string]*Container)
 	children := make(map[string]*Container)
 
 
 	err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
 	err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
-		c := daemon.Get(e.ID())
-		if c == nil {
-			return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p)
+		c, err := daemon.Get(e.ID())
+		if err != nil {
+			return err
 		}
 		}
 		children[p] = c
 		children[p] = c
 		return nil
 		return nil
@@ -754,10 +772,18 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
 			if err != nil {
 			if err != nil {
 				return err
 				return err
 			}
 			}
-			child := daemon.Get(parts["name"])
-			if child == nil {
+			child, err := daemon.Get(parts["name"])
+			if err != nil {
+				//An error from daemon.Get() means this name could not be found
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 				return fmt.Errorf("Could not get container for %s", parts["name"])
 			}
 			}
+			for child.hostConfig.NetworkMode.IsContainer() {
+				parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
+				child, err = daemon.Get(parts[1])
+				if err != nil {
+					return fmt.Errorf("Could not get container for %s", parts[1])
+				}
+			}
 			if child.hostConfig.NetworkMode.IsHost() {
 			if child.hostConfig.NetworkMode.IsHost() {
 				return runconfig.ErrConflictHostNetworkAndLinks
 				return runconfig.ErrConflictHostNetworkAndLinks
 			}
 			}
@@ -801,6 +827,12 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 	}
 	}
 	config.DisableNetwork = config.BridgeIface == disableNetworkBridge
 	config.DisableNetwork = config.BridgeIface == disableNetworkBridge
 
 
+	// register portallocator release on shutdown
+	eng.OnShutdown(func() {
+		if err := portallocator.ReleaseAll(); err != nil {
+			log.Errorf("portallocator.ReleaseAll(): %s", err)
+		}
+	})
 	// Claim the pidfile first, to avoid any and all unexpected race conditions.
 	// Claim the pidfile first, to avoid any and all unexpected race conditions.
 	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
 	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
 	if config.Pidfile != "" {
 	if config.Pidfile != "" {
@@ -834,9 +866,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
 		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
 	}
 	}
 	os.Setenv("TMPDIR", realTmp)
 	os.Setenv("TMPDIR", realTmp)
-	if !config.EnableSelinuxSupport {
-		selinuxSetDisabled()
-	}
 
 
 	// get the canonical path to the Docker root directory
 	// get the canonical path to the Docker root directory
 	var realRoot string
 	var realRoot string
@@ -860,13 +889,28 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 	// Load storage driver
 	// Load storage driver
 	driver, err := graphdriver.New(config.Root, config.GraphOptions)
 	driver, err := graphdriver.New(config.Root, config.GraphOptions)
 	if err != nil {
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("error intializing graphdriver: %v", err)
 	}
 	}
 	log.Debugf("Using graph driver %s", driver)
 	log.Debugf("Using graph driver %s", driver)
+	// register cleanup for graph driver
+	eng.OnShutdown(func() {
+		if err := driver.Cleanup(); err != nil {
+			log.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+		}
+	})
 
 
-	// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
-	if selinuxEnabled() && config.EnableSelinuxSupport && driver.String() == "btrfs" {
-		return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver!")
+	if config.EnableSelinuxSupport {
+		if selinuxEnabled() {
+			// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
+			if driver.String() == "btrfs" {
+				return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver")
+			}
+			log.Debug("SELinux enabled successfully")
+		} else {
+			log.Warn("Docker could not enable SELinux on the host system")
+		}
+	} else {
+		selinuxSetDisabled()
 	}
 	}
 
 
 	daemonRepo := path.Join(config.Root, "containers")
 	daemonRepo := path.Join(config.Root, "containers")
@@ -940,6 +984,12 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+	// register graph close on shutdown
+	eng.OnShutdown(func() {
+		if err := graph.Close(); err != nil {
+			log.Errorf("Error during container graph.Close(): %v", err)
+		}
+	})
 
 
 	localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
 	localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
 	sysInitPath := utils.DockerInitPath(localCopy)
 	sysInitPath := utils.DockerInitPath(localCopy)
@@ -968,24 +1018,32 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 	}
 	}
 
 
 	daemon := &Daemon{
 	daemon := &Daemon{
-		ID:             trustKey.PublicKey().KeyID(),
-		repository:     daemonRepo,
-		containers:     &contStore{s: make(map[string]*Container)},
-		execCommands:   newExecStore(),
-		graph:          g,
-		repositories:   repositories,
-		idIndex:        truncindex.NewTruncIndex([]string{}),
-		sysInfo:        sysInfo,
-		volumes:        volumes,
-		config:         config,
-		containerGraph: graph,
-		driver:         driver,
-		sysInitPath:    sysInitPath,
-		execDriver:     ed,
-		eng:            eng,
-		trustStore:     t,
-		statsCollector: newStatsCollector(1 * time.Second),
+		ID:               trustKey.PublicKey().KeyID(),
+		repository:       daemonRepo,
+		containers:       &contStore{s: make(map[string]*Container)},
+		execCommands:     newExecStore(),
+		graph:            g,
+		repositories:     repositories,
+		idIndex:          truncindex.NewTruncIndex([]string{}),
+		sysInfo:          sysInfo,
+		volumes:          volumes,
+		config:           config,
+		containerGraph:   graph,
+		driver:           driver,
+		sysInitPath:      sysInitPath,
+		execDriver:       ed,
+		eng:              eng,
+		trustStore:       t,
+		statsCollector:   newStatsCollector(1 * time.Second),
+		defaultLogConfig: config.LogConfig,
 	}
 	}
+
+	eng.OnShutdown(func() {
+		if err := daemon.shutdown(); err != nil {
+			log.Errorf("Error during daemon.shutdown(): %v", err)
+		}
+	})
+
 	if err := daemon.restore(); err != nil {
 	if err := daemon.restore(); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -995,25 +1053,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	// Setup shutdown handlers
-	// FIXME: can these shutdown handlers be registered closer to their source?
-	eng.OnShutdown(func() {
-		// FIXME: if these cleanup steps can be called concurrently, register
-		// them as separate handlers to speed up total shutdown time
-		if err := daemon.shutdown(); err != nil {
-			log.Errorf("daemon.shutdown(): %s", err)
-		}
-		if err := portallocator.ReleaseAll(); err != nil {
-			log.Errorf("portallocator.ReleaseAll(): %s", err)
-		}
-		if err := daemon.driver.Cleanup(); err != nil {
-			log.Errorf("daemon.driver.Cleanup(): %s", err.Error())
-		}
-		if err := daemon.containerGraph.Close(); err != nil {
-			log.Errorf("daemon.containerGraph.Close(): %s", err.Error())
-		}
-	})
-
 	return daemon, nil
 	return daemon, nil
 }
 }
 
 
@@ -1100,18 +1139,18 @@ func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) {
 }
 }
 
 
 func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) {
 func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) {
-	c := daemon.Get(name)
-	if c == nil {
-		return nil, fmt.Errorf("no such container")
+	c, err := daemon.Get(name)
+	if err != nil {
+		return nil, err
 	}
 	}
 	ch := daemon.statsCollector.collect(c)
 	ch := daemon.statsCollector.collect(c)
 	return ch, nil
 	return ch, nil
 }
 }
 
 
 func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error {
 func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error {
-	c := daemon.Get(name)
-	if c == nil {
-		return fmt.Errorf("no such container")
+	c, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 	}
 	daemon.statsCollector.unsubscribe(c, ch)
 	daemon.statsCollector.unsubscribe(c, ch)
 	return nil
 	return nil
@@ -1214,11 +1253,11 @@ func checkKernel() error {
 	// the circumstances of pre-3.8 crashes are clearer.
 	// the circumstances of pre-3.8 crashes are clearer.
 	// For details see http://github.com/docker/docker/issues/407
 	// For details see http://github.com/docker/docker/issues/407
 	if k, err := kernel.GetKernelVersion(); err != nil {
 	if k, err := kernel.GetKernelVersion(); err != nil {
-		log.Infof("WARNING: %s", err)
+		log.Warnf("%s", err)
 	} else {
 	} else {
 		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
 		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
 			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
 			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
-				log.Infof("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+				log.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
 			}
 			}
 		}
 		}
 	}
 	}

+ 101 - 0
daemon/daemon_test.go

@@ -0,0 +1,101 @@
+package daemon
+
+import (
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/truncindex"
+	"os"
+	"path"
+	"testing"
+)
+
+//
+// https://github.com/docker/docker/issues/8069
+//
+
+func TestGet(t *testing.T) {
+	c1 := &Container{
+		ID:   "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+		Name: "tender_bardeen",
+	}
+	c2 := &Container{
+		ID:   "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
+		Name: "drunk_hawking",
+	}
+	c3 := &Container{
+		ID:   "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
+		Name: "3cdbd1aa",
+	}
+	c4 := &Container{
+		ID:   "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
+		Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+	}
+	c5 := &Container{
+		ID:   "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
+		Name: "d22d69a2b896",
+	}
+
+	store := &contStore{
+		s: map[string]*Container{
+			c1.ID: c1,
+			c2.ID: c2,
+			c3.ID: c3,
+			c4.ID: c4,
+			c5.ID: c5,
+		},
+	}
+
+	index := truncindex.NewTruncIndex([]string{})
+	index.Add(c1.ID)
+	index.Add(c2.ID)
+	index.Add(c3.ID)
+	index.Add(c4.ID)
+	index.Add(c5.ID)
+
+	daemonTestDbPath := path.Join(os.TempDir(), "daemon_test.db")
+	graph, err := graphdb.NewSqliteConn(daemonTestDbPath)
+	if err != nil {
+		t.Fatalf("Failed to create daemon test sqlite database at %s", daemonTestDbPath)
+	}
+	graph.Set(c1.Name, c1.ID)
+	graph.Set(c2.Name, c2.ID)
+	graph.Set(c3.Name, c3.ID)
+	graph.Set(c4.Name, c4.ID)
+	graph.Set(c5.Name, c5.ID)
+
+	daemon := &Daemon{
+		containers:     store,
+		idIndex:        index,
+		containerGraph: graph,
+	}
+
+	if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
+		t.Fatal("Should explicitly match full container IDs")
+	}
+
+	if container, _ := daemon.Get("75fb0b8009"); container != c4 {
+		t.Fatal("Should match a partial ID")
+	}
+
+	if container, _ := daemon.Get("drunk_hawking"); container != c2 {
+		t.Fatal("Should match a full name")
+	}
+
+	// c3.Name is a partial match for both c3.ID and c2.ID
+	if c, _ := daemon.Get("3cdbd1aa"); c != c3 {
+		t.Fatal("Should match a full name even though it collides with another container's ID")
+	}
+
+	if container, _ := daemon.Get("d22d69a2b896"); container != c5 {
+		t.Fatal("Should match a container where the provided prefix is an exact match to the it's name, and is also a prefix for it's ID")
+	}
+
+	if _, err := daemon.Get("3cdbd1"); err == nil {
+		t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's")
+	}
+
+	if _, err := daemon.Get("nothing"); err == nil {
+		t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
+	}
+
+	os.Remove(daemonTestDbPath)
+}

+ 6 - 7
daemon/delete.go

@@ -17,10 +17,10 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 	removeVolume := job.GetenvBool("removeVolume")
 	removeVolume := job.GetenvBool("removeVolume")
 	removeLink := job.GetenvBool("removeLink")
 	removeLink := job.GetenvBool("removeLink")
 	forceRemove := job.GetenvBool("forceRemove")
 	forceRemove := job.GetenvBool("forceRemove")
-	container := daemon.Get(name)
 
 
-	if container == nil {
-		return job.Errorf("No such container: %s", name)
+	container, err := daemon.Get(name)
+	if err != nil {
+		return job.Error(err)
 	}
 	}
 
 
 	if removeLink {
 	if removeLink {
@@ -36,7 +36,7 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 		if pe == nil {
 		if pe == nil {
 			return job.Errorf("Cannot get parent %s for name %s", parent, name)
 			return job.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
 		}
-		parentContainer := daemon.Get(pe.ID())
+		parentContainer, _ := daemon.Get(pe.ID())
 
 
 		if parentContainer != nil {
 		if parentContainer != nil {
 			parentContainer.DisableLink(n)
 			parentContainer.DisableLink(n)
@@ -61,7 +61,7 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 				return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
 				return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
 			}
 			}
 		}
 		}
-		if err := daemon.Destroy(container); err != nil {
+		if err := daemon.Rm(container); err != nil {
 			return job.Errorf("Cannot destroy container %s: %s", name, err)
 			return job.Errorf("Cannot destroy container %s: %s", name, err)
 		}
 		}
 		container.LogEvent("destroy")
 		container.LogEvent("destroy")
@@ -82,8 +82,7 @@ func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
 }
 }
 
 
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-// FIXME: rename to Rm for consistency with the CLI command
-func (daemon *Daemon) Destroy(container *Container) error {
+func (daemon *Daemon) Rm(container *Container) error {
 	if container == nil {
 	if container == nil {
 		return fmt.Errorf("The given container is <nil>")
 		return fmt.Errorf("The given container is <nil>")
 	}
 	}

+ 7 - 8
daemon/exec.go

@@ -12,10 +12,10 @@ import (
 	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/broadcastwriter"
 	"github.com/docker/docker/pkg/broadcastwriter"
+	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
 )
 )
 
 
 type execConfig struct {
 type execConfig struct {
@@ -74,7 +74,7 @@ func (execConfig *execConfig) Resize(h, w int) error {
 }
 }
 
 
 func (d *Daemon) registerExecCommand(execConfig *execConfig) {
 func (d *Daemon) registerExecCommand(execConfig *execConfig) {
-	// Storing execs in container inorder to kill them gracefully whenever the container is stopped or removed.
+	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
 	execConfig.Container.execCommands.Add(execConfig.ID, execConfig)
 	execConfig.Container.execCommands.Add(execConfig.ID, execConfig)
 	// Storing execs in daemon for easy access via remote API.
 	// Storing execs in daemon for easy access via remote API.
 	d.execCommands.Add(execConfig.ID, execConfig)
 	d.execCommands.Add(execConfig.ID, execConfig)
@@ -97,10 +97,9 @@ func (d *Daemon) unregisterExecCommand(execConfig *execConfig) {
 }
 }
 
 
 func (d *Daemon) getActiveContainer(name string) (*Container, error) {
 func (d *Daemon) getActiveContainer(name string) (*Container, error) {
-	container := d.Get(name)
-
-	if container == nil {
-		return nil, fmt.Errorf("No such container: %s", name)
+	container, err := d.Get(name)
+	if err != nil {
+		return nil, err
 	}
 	}
 
 
 	if !container.IsRunning() {
 	if !container.IsRunning() {
@@ -142,7 +141,7 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
 	}
 	}
 
 
 	execConfig := &execConfig{
 	execConfig := &execConfig{
-		ID:            utils.GenerateRandomID(),
+		ID:            common.GenerateRandomID(),
 		OpenStdin:     config.AttachStdin,
 		OpenStdin:     config.AttachStdin,
 		OpenStdout:    config.AttachStdout,
 		OpenStdout:    config.AttachStdout,
 		OpenStderr:    config.AttachStderr,
 		OpenStderr:    config.AttachStderr,
@@ -219,7 +218,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 	}
 
 
-	attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
+	attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
 
 
 	execErr := make(chan error)
 	execErr := make(chan error)
 
 

+ 0 - 2
daemon/execdriver/MAINTAINERS

@@ -1,2 +0,0 @@
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Victor Vieux <vieux@docker.com> (@vieux)

+ 158 - 9
daemon/execdriver/driver.go

@@ -1,14 +1,22 @@
 package execdriver
 package execdriver
 
 
 import (
 import (
+	"encoding/json"
 	"errors"
 	"errors"
 	"io"
 	"io"
+	"io/ioutil"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
 	"time"
 	"time"
 
 
+	"github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/devices"
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/docker/libcontainer/configs"
 )
 )
 
 
 // Context is a generic key value pair that allows
 // Context is a generic key value pair that allows
@@ -39,7 +47,7 @@ type Terminal interface {
 }
 }
 
 
 type TtyTerminal interface {
 type TtyTerminal interface {
-	Master() *os.File
+	Master() libcontainer.Console
 }
 }
 
 
 // ExitStatus provides exit reasons for a container.
 // ExitStatus provides exit reasons for a container.
@@ -98,14 +106,15 @@ type NetworkInterface struct {
 }
 }
 
 
 type Resources struct {
 type Resources struct {
-	Memory     int64  `json:"memory"`
-	MemorySwap int64  `json:"memory_swap"`
-	CpuShares  int64  `json:"cpu_shares"`
-	Cpuset     string `json:"cpuset"`
+	Memory     int64            `json:"memory"`
+	MemorySwap int64            `json:"memory_swap"`
+	CpuShares  int64            `json:"cpu_shares"`
+	CpusetCpus string           `json:"cpuset_cpus"`
+	Rlimits    []*ulimit.Rlimit `json:"rlimits"`
 }
 }
 
 
 type ResourceStats struct {
 type ResourceStats struct {
-	*libcontainer.ContainerStats
+	*libcontainer.Stats
 	Read        time.Time `json:"read"`
 	Read        time.Time `json:"read"`
 	MemoryLimit int64     `json:"memory_limit"`
 	MemoryLimit int64     `json:"memory_limit"`
 	SystemUsage uint64    `json:"system_usage"`
 	SystemUsage uint64    `json:"system_usage"`
@@ -145,8 +154,8 @@ type Command struct {
 	Pid                *Pid              `json:"pid"`
 	Pid                *Pid              `json:"pid"`
 	Resources          *Resources        `json:"resources"`
 	Resources          *Resources        `json:"resources"`
 	Mounts             []Mount           `json:"mounts"`
 	Mounts             []Mount           `json:"mounts"`
-	AllowedDevices     []*devices.Device `json:"allowed_devices"`
-	AutoCreatedDevices []*devices.Device `json:"autocreated_devices"`
+	AllowedDevices     []*configs.Device `json:"allowed_devices"`
+	AutoCreatedDevices []*configs.Device `json:"autocreated_devices"`
 	CapAdd             []string          `json:"cap_add"`
 	CapAdd             []string          `json:"cap_add"`
 	CapDrop            []string          `json:"cap_drop"`
 	CapDrop            []string          `json:"cap_drop"`
 	ContainerPid       int               `json:"container_pid"`  // the pid for the process inside a container
 	ContainerPid       int               `json:"container_pid"`  // the pid for the process inside a container
@@ -155,4 +164,144 @@ type Command struct {
 	MountLabel         string            `json:"mount_label"`
 	MountLabel         string            `json:"mount_label"`
 	LxcConfig          []string          `json:"lxc_config"`
 	LxcConfig          []string          `json:"lxc_config"`
 	AppArmorProfile    string            `json:"apparmor_profile"`
 	AppArmorProfile    string            `json:"apparmor_profile"`
+	CgroupParent       string            `json:"cgroup_parent"` // The parent cgroup for this command.
+}
+
+func InitContainer(c *Command) *configs.Config {
+	container := template.New()
+
+	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
+	container.Cgroups.Name = c.ID
+	container.Cgroups.AllowedDevices = c.AllowedDevices
+	container.Readonlyfs = c.ReadonlyRootfs
+	container.Devices = c.AutoCreatedDevices
+	container.Rootfs = c.Rootfs
+	container.Readonlyfs = c.ReadonlyRootfs
+
+	// check to see if we are running in ramdisk to disable pivot root
+	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+
+	// Default parent cgroup is "docker". Override if required.
+	if c.CgroupParent != "" {
+		container.Cgroups.Parent = c.CgroupParent
+	}
+	return container
+}
+
+func getEnv(key string, env []string) string {
+	for _, pair := range env {
+		parts := strings.Split(pair, "=")
+		if parts[0] == key {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+func SetupCgroups(container *configs.Config, c *Command) error {
+	if c.Resources != nil {
+		container.Cgroups.CpuShares = c.Resources.CpuShares
+		container.Cgroups.Memory = c.Resources.Memory
+		container.Cgroups.MemoryReservation = c.Resources.Memory
+		container.Cgroups.MemorySwap = c.Resources.MemorySwap
+		container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
+	}
+
+	return nil
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
+	out := &libcontainer.NetworkInterface{Name: interfaceName}
+	// This can happen if the network runtime information is missing - possible if the
+	// container was created by an old version of libcontainer.
+	if interfaceName == "" {
+		return out, nil
+	}
+	type netStatsPair struct {
+		// Where to write the output.
+		Out *uint64
+		// The network stats file to read.
+		File string
+	}
+	// Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
+	netStats := []netStatsPair{
+		{Out: &out.RxBytes, File: "tx_bytes"},
+		{Out: &out.RxPackets, File: "tx_packets"},
+		{Out: &out.RxErrors, File: "tx_errors"},
+		{Out: &out.RxDropped, File: "tx_dropped"},
+
+		{Out: &out.TxBytes, File: "rx_bytes"},
+		{Out: &out.TxPackets, File: "rx_packets"},
+		{Out: &out.TxErrors, File: "rx_errors"},
+		{Out: &out.TxDropped, File: "rx_dropped"},
+	}
+	for _, netStat := range netStats {
+		data, err := readSysfsNetworkStats(interfaceName, netStat.File)
+		if err != nil {
+			return nil, err
+		}
+		*(netStat.Out) = data
+	}
+	return out, nil
+}
+
+// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
+func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
+	data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
+	f, err := os.Open(filepath.Join(containerDir, "state.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	type network struct {
+		Type              string
+		HostInterfaceName string
+	}
+
+	state := struct {
+		CgroupPaths map[string]string `json:"cgroup_paths"`
+		Networks    []network
+	}{}
+
+	if err := json.NewDecoder(f).Decode(&state); err != nil {
+		return nil, err
+	}
+	now := time.Now()
+
+	mgr := fs.Manager{Paths: state.CgroupPaths}
+	cstats, err := mgr.GetStats()
+	if err != nil {
+		return nil, err
+	}
+	stats := &libcontainer.Stats{CgroupStats: cstats}
+	// if the container does not have any memory limit specified set the
+	// limit to the machines memory
+	memoryLimit := containerMemoryLimit
+	if memoryLimit == 0 {
+		memoryLimit = machineMemory
+	}
+	for _, iface := range state.Networks {
+		switch iface.Type {
+		case "veth":
+			istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
+			if err != nil {
+				return nil, err
+			}
+			stats.Interfaces = append(stats.Interfaces, istats)
+		}
+	}
+	return &ResourceStats{
+		Stats:       stats,
+		Read:        now,
+		MemoryLimit: memoryLimit,
+	}, nil
 }
 }

+ 0 - 2
daemon/execdriver/lxc/MAINTAINERS

@@ -1,2 +0,0 @@
-# the LXC exec driver needs more maintainers and contributions
-Dinesh Subhraveti <dineshs@altiscale.com> (@dineshs-altiscale)

+ 283 - 30
daemon/execdriver/lxc/driver.go

@@ -12,17 +12,21 @@ import (
 	"path/filepath"
 	"path/filepath"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
+	"sync"
 	"syscall"
 	"syscall"
 	"time"
 	"time"
 
 
-	"github.com/kr/pty"
-
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver"
+	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/cgroups"
-	"github.com/docker/libcontainer/mount/nodes"
+	"github.com/docker/libcontainer/configs"
+	"github.com/docker/libcontainer/system"
+	"github.com/docker/libcontainer/user"
+	"github.com/kr/pty"
 )
 )
 
 
 const DriverName = "lxc"
 const DriverName = "lxc"
@@ -30,10 +34,18 @@ const DriverName = "lxc"
 var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver")
 var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver")
 
 
 type driver struct {
 type driver struct {
-	root       string // root path for the driver to use
-	initPath   string
-	apparmor   bool
-	sharedRoot bool
+	root             string // root path for the driver to use
+	initPath         string
+	apparmor         bool
+	sharedRoot       bool
+	activeContainers map[string]*activeContainer
+	machineMemory    int64
+	sync.Mutex
+}
+
+type activeContainer struct {
+	container *configs.Config
+	cmd       *exec.Cmd
 }
 }
 
 
 func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
 func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
@@ -41,12 +53,17 @@ func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
 	if err := linkLxcStart(root); err != nil {
 	if err := linkLxcStart(root); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-
+	meminfo, err := sysinfo.ReadMemInfo()
+	if err != nil {
+		return nil, err
+	}
 	return &driver{
 	return &driver{
-		apparmor:   apparmor,
-		root:       root,
-		initPath:   initPath,
-		sharedRoot: rootIsShared(),
+		apparmor:         apparmor,
+		root:             root,
+		initPath:         initPath,
+		sharedRoot:       rootIsShared(),
+		activeContainers: make(map[string]*activeContainer),
+		machineMemory:    meminfo.MemTotal,
 	}, nil
 	}, nil
 }
 }
 
 
@@ -57,8 +74,9 @@ func (d *driver) Name() string {
 
 
 func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	var (
 	var (
-		term execdriver.Terminal
-		err  error
+		term     execdriver.Terminal
+		err      error
+		dataPath = d.containerDir(c.ID)
 	)
 	)
 
 
 	if c.ProcessConfig.Tty {
 	if c.ProcessConfig.Tty {
@@ -67,6 +85,16 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
 	}
 	c.ProcessConfig.Terminal = term
 	c.ProcessConfig.Terminal = term
+	container, err := d.createContainer(c)
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+	d.Lock()
+	d.activeContainers[c.ID] = &activeContainer{
+		container: container,
+		cmd:       &c.ProcessConfig.Cmd,
+	}
+	d.Unlock()
 
 
 	c.Mounts = append(c.Mounts, execdriver.Mount{
 	c.Mounts = append(c.Mounts, execdriver.Mount{
 		Source:      d.initPath,
 		Source:      d.initPath,
@@ -92,6 +120,17 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 			"--share-net", c.Network.ContainerID,
 			"--share-net", c.Network.ContainerID,
 		)
 		)
 	}
 	}
+	if c.Ipc != nil {
+		if c.Ipc.ContainerID != "" {
+			params = append(params,
+				"--share-ipc", c.Ipc.ContainerID,
+			)
+		} else if c.Ipc.HostIpc {
+			params = append(params,
+				"--share-ipc", "1",
+			)
+		}
+	}
 
 
 	params = append(params,
 	params = append(params,
 		"--",
 		"--",
@@ -141,7 +180,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
 			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
 		}
 		}
 	}
 	}
-
+	log.Debugf("lxc params %s", params)
 	var (
 	var (
 		name = params[0]
 		name = params[0]
 		arg  = params[1:]
 		arg  = params[1:]
@@ -153,7 +192,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	c.ProcessConfig.Path = aname
 	c.ProcessConfig.Path = aname
 	c.ProcessConfig.Args = append([]string{name}, arg...)
 	c.ProcessConfig.Args = append([]string{name}, arg...)
 
 
-	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
+	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	}
 
 
@@ -175,25 +214,228 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		close(waitLock)
 		close(waitLock)
 	}()
 	}()
 
 
-	// Poll lxc for RUNNING status
-	pid, err := d.waitForStart(c, waitLock)
-	if err != nil {
+	terminate := func(terr error) (execdriver.ExitStatus, error) {
 		if c.ProcessConfig.Process != nil {
 		if c.ProcessConfig.Process != nil {
 			c.ProcessConfig.Process.Kill()
 			c.ProcessConfig.Process.Kill()
 			c.ProcessConfig.Wait()
 			c.ProcessConfig.Wait()
 		}
 		}
-		return execdriver.ExitStatus{ExitCode: -1}, err
+		return execdriver.ExitStatus{ExitCode: -1}, terr
+	}
+	// Poll lxc for RUNNING status
+	pid, err := d.waitForStart(c, waitLock)
+	if err != nil {
+		return terminate(err)
+	}
+
+	cgroupPaths, err := cgroupPaths(c.ID)
+	if err != nil {
+		return terminate(err)
+	}
+
+	state := &libcontainer.State{
+		InitProcessPid: pid,
+		CgroupPaths:    cgroupPaths,
+	}
+
+	f, err := os.Create(filepath.Join(dataPath, "state.json"))
+	if err != nil {
+		return terminate(err)
+	}
+	defer f.Close()
+
+	if err := json.NewEncoder(f).Encode(state); err != nil {
+		return terminate(err)
 	}
 	}
 
 
 	c.ContainerPid = pid
 	c.ContainerPid = pid
 
 
 	if startCallback != nil {
 	if startCallback != nil {
+		log.Debugf("Invoking startCallback")
 		startCallback(&c.ProcessConfig, pid)
 		startCallback(&c.ProcessConfig, pid)
 	}
 	}
 
 
+	oomKill := false
+	oomKillNotification, err := notifyOnOOM(cgroupPaths)
+
 	<-waitLock
 	<-waitLock
 
 
-	return execdriver.ExitStatus{ExitCode: getExitCode(c)}, waitErr
+	if err == nil {
+		_, oomKill = <-oomKillNotification
+		log.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
+	} else {
+		log.Warnf("Your kernel does not support OOM notifications: %s", err)
+	}
+
+	// check oom error
+	exitCode := getExitCode(c)
+	if oomKill {
+		exitCode = 137
+	}
+	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
+}
+
+// copy from libcontainer
+func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
+	dir := paths["memory"]
+	if dir == "" {
+		return nil, fmt.Errorf("There is no path for %q in state", "memory")
+	}
+	oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
+	if err != nil {
+		return nil, err
+	}
+	fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
+	if syserr != 0 {
+		oomControl.Close()
+		return nil, syserr
+	}
+
+	eventfd := os.NewFile(fd, "eventfd")
+
+	eventControlPath := filepath.Join(dir, "cgroup.event_control")
+	data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
+	if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
+		eventfd.Close()
+		oomControl.Close()
+		return nil, err
+	}
+	ch := make(chan struct{})
+	go func() {
+		defer func() {
+			close(ch)
+			eventfd.Close()
+			oomControl.Close()
+		}()
+		buf := make([]byte, 8)
+		for {
+			if _, err := eventfd.Read(buf); err != nil {
+				return
+			}
+			// When a cgroup is destroyed, an event is sent to eventfd.
+			// So if the control path is gone, return instead of notifying.
+			if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
+				return
+			}
+			ch <- struct{}{}
+		}
+	}()
+	return ch, nil
+}
+
+// createContainer populates and configures the container type with the
+// data provided by the execdriver.Command
+func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) {
+	container := execdriver.InitContainer(c)
+	if err := execdriver.SetupCgroups(container, c); err != nil {
+		return nil, err
+	}
+	return container, nil
+}
+
+// Return an map of susbystem -> container cgroup
+func cgroupPaths(containerId string) (map[string]string, error) {
+	subsystems, err := cgroups.GetAllSubsystems()
+	if err != nil {
+		return nil, err
+	}
+	log.Debugf("subsystems: %s", subsystems)
+	paths := make(map[string]string)
+	for _, subsystem := range subsystems {
+		cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
+		log.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
+		if err != nil {
+			//unsupported subystem
+			continue
+		}
+		path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId)
+		paths[subsystem] = path
+	}
+
+	return paths, nil
+}
+
+// this is copy from old libcontainer nodes.go
+func createDeviceNodes(rootfs string, nodesToCreate []*configs.Device) error {
+	oldMask := syscall.Umask(0000)
+	defer syscall.Umask(oldMask)
+
+	for _, node := range nodesToCreate {
+		if err := createDeviceNode(rootfs, node); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Creates the device node in the rootfs of the container.
+func createDeviceNode(rootfs string, node *configs.Device) error {
+	var (
+		dest   = filepath.Join(rootfs, node.Path)
+		parent = filepath.Dir(dest)
+	)
+
+	if err := os.MkdirAll(parent, 0755); err != nil {
+		return err
+	}
+
+	fileMode := node.FileMode
+	switch node.Type {
+	case 'c':
+		fileMode |= syscall.S_IFCHR
+	case 'b':
+		fileMode |= syscall.S_IFBLK
+	default:
+		return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
+	}
+
+	if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil && !os.IsExist(err) {
+		return fmt.Errorf("mknod %s %s", node.Path, err)
+	}
+
+	if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil {
+		return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid)
+	}
+
+	return nil
+}
+
+// setupUser changes the groups, gid, and uid for the user inside the container
+// copy from libcontainer, cause not it's private
+func setupUser(userSpec string) error {
+	// Set up defaults.
+	defaultExecUser := user.ExecUser{
+		Uid:  syscall.Getuid(),
+		Gid:  syscall.Getgid(),
+		Home: "/",
+	}
+	passwdPath, err := user.GetPasswdPath()
+	if err != nil {
+		return err
+	}
+	groupPath, err := user.GetGroupPath()
+	if err != nil {
+		return err
+	}
+	execUser, err := user.GetExecUserPath(userSpec, &defaultExecUser, passwdPath, groupPath)
+	if err != nil {
+		return err
+	}
+	if err := syscall.Setgroups(execUser.Sgids); err != nil {
+		return err
+	}
+	if err := system.Setgid(execUser.Gid); err != nil {
+		return err
+	}
+	if err := system.Setuid(execUser.Uid); err != nil {
+		return err
+	}
+	// if we didn't get HOME already, set it based on the user's HOME
+	if envHome := os.Getenv("HOME"); envHome == "" {
+		if err := os.Setenv("HOME", execUser.Home); err != nil {
+			return err
+		}
+	}
+	return nil
 }
 }
 
 
 /// Return the exit code of the process
 /// Return the exit code of the process
@@ -337,17 +579,25 @@ func (d *driver) Info(id string) execdriver.Info {
 	}
 	}
 }
 }
 
 
-func (d *driver) GetPidsForContainer(id string) ([]int, error) {
-	pids := []int{}
-
-	// cpu is chosen because it is the only non optional subsystem in cgroups
-	subsystem := "cpu"
+func findCgroupRootAndDir(subsystem string) (string, string, error) {
 	cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
 	cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
 	if err != nil {
 	if err != nil {
-		return pids, err
+		return "", "", err
 	}
 	}
 
 
 	cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
 	cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
+	if err != nil {
+		return "", "", err
+	}
+	return cgroupRoot, cgroupDir, nil
+}
+
+func (d *driver) GetPidsForContainer(id string) ([]int, error) {
+	pids := []int{}
+
+	// cpu is chosen because it is the only non optional subsystem in cgroups
+	subsystem := "cpu"
+	cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
 	if err != nil {
 	if err != nil {
 		return pids, err
 		return pids, err
 	}
 	}
@@ -407,8 +657,12 @@ func rootIsShared() bool {
 	return true
 	return true
 }
 }
 
 
+func (d *driver) containerDir(containerId string) string {
+	return path.Join(d.root, "containers", containerId)
+}
+
 func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
 func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
-	root := path.Join(d.root, "containers", c.ID, "config.lxc")
+	root := path.Join(d.containerDir(c.ID), "config.lxc")
 
 
 	fo, err := os.Create(root)
 	fo, err := os.Create(root)
 	if err != nil {
 	if err != nil {
@@ -526,6 +780,5 @@ func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
 }
 }
 
 
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
-	return nil, fmt.Errorf("container stats are not supported with LXC")
-
+	return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory)
 }
 }

+ 1 - 5
daemon/execdriver/lxc/lxc_init_linux.go

@@ -3,8 +3,6 @@ package lxc
 import (
 import (
 	"fmt"
 	"fmt"
 
 
-	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/namespaces"
 	"github.com/docker/libcontainer/utils"
 	"github.com/docker/libcontainer/utils"
 )
 )
 
 
@@ -12,9 +10,7 @@ func finalizeNamespace(args *InitArgs) error {
 	if err := utils.CloseExecFrom(3); err != nil {
 	if err := utils.CloseExecFrom(3); err != nil {
 		return err
 		return err
 	}
 	}
-	if err := namespaces.SetupUser(&libcontainer.Config{
-		User: args.User,
-	}); err != nil {
+	if err := setupUser(args.User); err != nil {
 		return fmt.Errorf("setup user %s", err)
 		return fmt.Errorf("setup user %s", err)
 	}
 	}
 	if err := setupWorkingDirectory(args); err != nil {
 	if err := setupWorkingDirectory(args); err != nil {

+ 12 - 13
daemon/execdriver/lxc/lxc_template.go

@@ -11,7 +11,6 @@ import (
 	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
 	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libcontainer/label"
-	"github.com/docker/libcontainer/security/capabilities"
 )
 )
 
 
 const LxcTemplate = `
 const LxcTemplate = `
@@ -52,7 +51,7 @@ lxc.cgroup.devices.allow = a
 lxc.cgroup.devices.deny = a
 lxc.cgroup.devices.deny = a
 #Allow the devices passed to us in the AllowedDevices list.
 #Allow the devices passed to us in the AllowedDevices list.
 {{range $allowedDevice := .AllowedDevices}}
 {{range $allowedDevice := .AllowedDevices}}
-lxc.cgroup.devices.allow = {{$allowedDevice.GetCgroupAllowString}}
+lxc.cgroup.devices.allow = {{$allowedDevice.CgroupString}}
 {{end}}
 {{end}}
 {{end}}
 {{end}}
 
 
@@ -108,8 +107,8 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
 {{if .Resources.CpuShares}}
 {{if .Resources.CpuShares}}
 lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
 lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
 {{end}}
 {{end}}
-{{if .Resources.Cpuset}}
-lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}}
+{{if .Resources.CpusetCpus}}
+lxc.cgroup.cpuset.cpus = {{.Resources.CpusetCpus}}
 {{end}}
 {{end}}
 {{end}}
 {{end}}
 
 
@@ -126,7 +125,9 @@ lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefi
 {{if .Network.Interface.Gateway}}
 {{if .Network.Interface.Gateway}}
 lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}}
 lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}}
 {{end}}
 {{end}}
-
+{{if .Network.Interface.MacAddress}}
+lxc.network.hwaddr = {{.Network.Interface.MacAddress}}
+{{end}}
 {{if .ProcessConfig.Env}}
 {{if .ProcessConfig.Env}}
 lxc.utsname = {{getHostname .ProcessConfig.Env}}
 lxc.utsname = {{getHostname .ProcessConfig.Env}}
 {{end}}
 {{end}}
@@ -167,7 +168,7 @@ func keepCapabilities(adds []string, drops []string) ([]string, error) {
 	var newCaps []string
 	var newCaps []string
 	for _, cap := range caps {
 	for _, cap := range caps {
 		log.Debugf("cap %s\n", cap)
 		log.Debugf("cap %s\n", cap)
-		realCap := capabilities.GetCapability(cap)
+		realCap := execdriver.GetCapability(cap)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		newCaps = append(newCaps, numCap)
 		newCaps = append(newCaps, numCap)
 	}
 	}
@@ -178,13 +179,10 @@ func keepCapabilities(adds []string, drops []string) ([]string, error) {
 func dropList(drops []string) ([]string, error) {
 func dropList(drops []string) ([]string, error) {
 	if utils.StringsContainsNoCase(drops, "all") {
 	if utils.StringsContainsNoCase(drops, "all") {
 		var newCaps []string
 		var newCaps []string
-		for _, cap := range capabilities.GetAllCapabilities() {
-			log.Debugf("drop cap %s\n", cap)
-			realCap := capabilities.GetCapability(cap)
-			if realCap == nil {
-				return nil, fmt.Errorf("Invalid capability '%s'", cap)
-			}
-			numCap := fmt.Sprintf("%d", realCap.Value)
+		for _, capName := range execdriver.GetAllCapabilities() {
+			cap := execdriver.GetCapability(capName)
+			log.Debugf("drop cap %s\n", cap.Key)
+			numCap := fmt.Sprintf("%d", cap.Value)
 			newCaps = append(newCaps, numCap)
 			newCaps = append(newCaps, numCap)
 		}
 		}
 		return newCaps, nil
 		return newCaps, nil
@@ -194,6 +192,7 @@ func dropList(drops []string) ([]string, error) {
 
 
 func isDirectory(source string) string {
 func isDirectory(source string) string {
 	f, err := os.Stat(source)
 	f, err := os.Stat(source)
+	log.Debugf("dir: %s\n", source)
 	if err != nil {
 	if err != nil {
 		if os.IsNotExist(err) {
 		if os.IsNotExist(err) {
 			return "dir"
 			return "dir"

+ 8 - 8
daemon/execdriver/lxc/lxc_template_unit_test.go

@@ -5,11 +5,6 @@ package lxc
 import (
 import (
 	"bufio"
 	"bufio"
 	"fmt"
 	"fmt"
-	"github.com/docker/docker/daemon/execdriver"
-	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
-	"github.com/docker/libcontainer/devices"
-	"github.com/docker/libcontainer/security/capabilities"
-	"github.com/syndtr/gocapability/capability"
 	"io/ioutil"
 	"io/ioutil"
 	"math/rand"
 	"math/rand"
 	"os"
 	"os"
@@ -17,6 +12,11 @@ import (
 	"strings"
 	"strings"
 	"testing"
 	"testing"
 	"time"
 	"time"
+
+	"github.com/docker/docker/daemon/execdriver"
+	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/libcontainer/configs"
+	"github.com/syndtr/gocapability/capability"
 )
 )
 
 
 func TestLXCConfig(t *testing.T) {
 func TestLXCConfig(t *testing.T) {
@@ -53,7 +53,7 @@ func TestLXCConfig(t *testing.T) {
 			Mtu:       1500,
 			Mtu:       1500,
 			Interface: nil,
 			Interface: nil,
 		},
 		},
-		AllowedDevices: make([]*devices.Device, 0),
+		AllowedDevices: make([]*configs.Device, 0),
 		ProcessConfig:  execdriver.ProcessConfig{},
 		ProcessConfig:  execdriver.ProcessConfig{},
 	}
 	}
 	p, err := driver.generateLXCConfig(command)
 	p, err := driver.generateLXCConfig(command)
@@ -295,7 +295,7 @@ func TestCustomLxcConfigMisc(t *testing.T) {
 	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
 	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
 	container := nativeTemplate.New()
 	container := nativeTemplate.New()
 	for _, cap := range container.Capabilities {
 	for _, cap := range container.Capabilities {
-		realCap := capabilities.GetCapability(cap)
+		realCap := execdriver.GetCapability(cap)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		if cap != "MKNOD" && cap != "KILL" {
 		if cap != "MKNOD" && cap != "KILL" {
 			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
 			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
@@ -359,7 +359,7 @@ func TestCustomLxcConfigMiscOverride(t *testing.T) {
 	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
 	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")
 	container := nativeTemplate.New()
 	container := nativeTemplate.New()
 	for _, cap := range container.Capabilities {
 	for _, cap := range container.Capabilities {
-		realCap := capabilities.GetCapability(cap)
+		realCap := execdriver.GetCapability(cap)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		if cap != "MKNOD" && cap != "KILL" {
 		if cap != "MKNOD" && cap != "KILL" {
 			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
 			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))

+ 116 - 82
daemon/execdriver/native/create.go

@@ -3,39 +3,25 @@
 package native
 package native
 
 
 import (
 import (
+	"errors"
 	"fmt"
 	"fmt"
-	"os"
-	"os/exec"
+	"net"
 	"path/filepath"
 	"path/filepath"
+	"strings"
+	"syscall"
 
 
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/daemon/execdriver/native/template"
-	"github.com/docker/libcontainer"
+	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/apparmor"
+	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/devices"
-	"github.com/docker/libcontainer/mount"
-	"github.com/docker/libcontainer/security/capabilities"
+	"github.com/docker/libcontainer/utils"
 )
 )
 
 
 // createContainer populates and configures the container type with the
 // createContainer populates and configures the container type with the
 // data provided by the execdriver.Command
 // data provided by the execdriver.Command
-func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
-	container := template.New()
-
-	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
-	container.Tty = c.ProcessConfig.Tty
-	container.User = c.ProcessConfig.User
-	container.WorkingDir = c.WorkingDir
-	container.Env = c.ProcessConfig.Env
-	container.Cgroups.Name = c.ID
-	container.Cgroups.AllowedDevices = c.AllowedDevices
-	container.MountConfig.DeviceNodes = c.AutoCreatedDevices
-	container.RootFs = c.Rootfs
-	container.MountConfig.ReadonlyFs = c.ReadonlyRootfs
-
-	// check to see if we are running in ramdisk to disable pivot root
-	container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
-	container.RestrictSys = true
+func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) {
+	container := execdriver.InitContainer(c)
 
 
 	if err := d.createIpc(container, c); err != nil {
 	if err := d.createIpc(container, c); err != nil {
 		return nil, err
 		return nil, err
@@ -50,6 +36,14 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 	}
 	}
 
 
 	if c.ProcessConfig.Privileged {
 	if c.ProcessConfig.Privileged {
+		// clear readonly for /sys
+		for i := range container.Mounts {
+			if container.Mounts[i].Destination == "/sys" {
+				container.Mounts[i].Flags &= ^syscall.MS_RDONLY
+			}
+		}
+		container.ReadonlyPaths = nil
+		container.MaskPaths = nil
 		if err := d.setPrivileged(container); err != nil {
 		if err := d.setPrivileged(container); err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
@@ -63,7 +57,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 		container.AppArmorProfile = c.AppArmorProfile
 		container.AppArmorProfile = c.AppArmorProfile
 	}
 	}
 
 
-	if err := d.setupCgroups(container, c); err != nil {
+	if err := execdriver.SetupCgroups(container, c); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
@@ -74,41 +68,52 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 	if err := d.setupLabels(container, c); err != nil {
 	if err := d.setupLabels(container, c); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+	d.setupRlimits(container, c)
+	return container, nil
+}
 
 
-	cmds := make(map[string]*exec.Cmd)
-	d.Lock()
-	for k, v := range d.activeContainers {
-		cmds[k] = v.cmd
+func generateIfaceName() (string, error) {
+	for i := 0; i < 10; i++ {
+		name, err := utils.GenerateRandomName("veth", 7)
+		if err != nil {
+			continue
+		}
+		if _, err := net.InterfaceByName(name); err != nil {
+			if strings.Contains(err.Error(), "no such") {
+				return name, nil
+			}
+			return "", err
+		}
 	}
 	}
-	d.Unlock()
-
-	return container, nil
+	return "", errors.New("Failed to find name for new interface")
 }
 }
 
 
-func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error {
+func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command) error {
 	if c.Network.HostNetworking {
 	if c.Network.HostNetworking {
-		container.Namespaces.Remove(libcontainer.NEWNET)
+		container.Namespaces.Remove(configs.NEWNET)
 		return nil
 		return nil
 	}
 	}
 
 
-	container.Networks = []*libcontainer.Network{
+	container.Networks = []*configs.Network{
 		{
 		{
-			Mtu:     c.Network.Mtu,
-			Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
-			Gateway: "localhost",
-			Type:    "loopback",
+			Type: "loopback",
 		},
 		},
 	}
 	}
 
 
+	iName, err := generateIfaceName()
+	if err != nil {
+		return err
+	}
 	if c.Network.Interface != nil {
 	if c.Network.Interface != nil {
-		vethNetwork := libcontainer.Network{
-			Mtu:        c.Network.Mtu,
-			Address:    fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
-			MacAddress: c.Network.Interface.MacAddress,
-			Gateway:    c.Network.Interface.Gateway,
-			Type:       "veth",
-			Bridge:     c.Network.Interface.Bridge,
-			VethPrefix: "veth",
+		vethNetwork := configs.Network{
+			Name:              "eth0",
+			HostInterfaceName: iName,
+			Mtu:               c.Network.Mtu,
+			Address:           fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
+			MacAddress:        c.Network.Interface.MacAddress,
+			Gateway:           c.Network.Interface.Gateway,
+			Type:              "veth",
+			Bridge:            c.Network.Interface.Bridge,
 		}
 		}
 		if c.Network.Interface.GlobalIPv6Address != "" {
 		if c.Network.Interface.GlobalIPv6Address != "" {
 			vethNetwork.IPv6Address = fmt.Sprintf("%s/%d", c.Network.Interface.GlobalIPv6Address, c.Network.Interface.GlobalIPv6PrefixLen)
 			vethNetwork.IPv6Address = fmt.Sprintf("%s/%d", c.Network.Interface.GlobalIPv6Address, c.Network.Interface.GlobalIPv6PrefixLen)
@@ -122,21 +127,24 @@ func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Com
 		active := d.activeContainers[c.Network.ContainerID]
 		active := d.activeContainers[c.Network.ContainerID]
 		d.Unlock()
 		d.Unlock()
 
 
-		if active == nil || active.cmd.Process == nil {
+		if active == nil {
 			return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
 			return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
 		}
 		}
-		cmd := active.cmd
 
 
-		nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
-		container.Namespaces.Add(libcontainer.NEWNET, nspath)
+		state, err := active.State()
+		if err != nil {
+			return err
+		}
+
+		container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET])
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command) error {
+func (d *driver) createIpc(container *configs.Config, c *execdriver.Command) error {
 	if c.Ipc.HostIpc {
 	if c.Ipc.HostIpc {
-		container.Namespaces.Remove(libcontainer.NEWIPC)
+		container.Namespaces.Remove(configs.NEWIPC)
 		return nil
 		return nil
 	}
 	}
 
 
@@ -145,37 +153,38 @@ func (d *driver) createIpc(container *libcontainer.Config, c *execdriver.Command
 		active := d.activeContainers[c.Ipc.ContainerID]
 		active := d.activeContainers[c.Ipc.ContainerID]
 		d.Unlock()
 		d.Unlock()
 
 
-		if active == nil || active.cmd.Process == nil {
+		if active == nil {
 			return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID)
 			return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID)
 		}
 		}
-		cmd := active.cmd
 
 
-		container.Namespaces.Add(libcontainer.NEWIPC, filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "ipc"))
+		state, err := active.State()
+		if err != nil {
+			return err
+		}
+		container.Namespaces.Add(configs.NEWIPC, state.NamespacePaths[configs.NEWIPC])
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func (d *driver) createPid(container *libcontainer.Config, c *execdriver.Command) error {
+func (d *driver) createPid(container *configs.Config, c *execdriver.Command) error {
 	if c.Pid.HostPid {
 	if c.Pid.HostPid {
-		container.Namespaces.Remove(libcontainer.NEWPID)
+		container.Namespaces.Remove(configs.NEWPID)
 		return nil
 		return nil
 	}
 	}
 
 
 	return nil
 	return nil
 }
 }
 
 
-func (d *driver) setPrivileged(container *libcontainer.Config) (err error) {
-	container.Capabilities = capabilities.GetAllCapabilities()
+func (d *driver) setPrivileged(container *configs.Config) (err error) {
+	container.Capabilities = execdriver.GetAllCapabilities()
 	container.Cgroups.AllowAllDevices = true
 	container.Cgroups.AllowAllDevices = true
 
 
-	hostDeviceNodes, err := devices.GetHostDeviceNodes()
+	hostDevices, err := devices.HostDevices()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	container.MountConfig.DeviceNodes = hostDeviceNodes
-
-	container.RestrictSys = false
+	container.Devices = hostDevices
 
 
 	if apparmor.IsEnabled() {
 	if apparmor.IsEnabled() {
 		container.AppArmorProfile = "unconfined"
 		container.AppArmorProfile = "unconfined"
@@ -184,41 +193,66 @@ func (d *driver) setPrivileged(container *libcontainer.Config) (err error) {
 	return nil
 	return nil
 }
 }
 
 
-func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.Command) (err error) {
+func (d *driver) setCapabilities(container *configs.Config, c *execdriver.Command) (err error) {
 	container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop)
 	container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop)
 	return err
 	return err
 }
 }
 
 
-func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
-	if c.Resources != nil {
-		container.Cgroups.CpuShares = c.Resources.CpuShares
-		container.Cgroups.Memory = c.Resources.Memory
-		container.Cgroups.MemoryReservation = c.Resources.Memory
-		container.Cgroups.MemorySwap = c.Resources.MemorySwap
-		container.Cgroups.CpusetCpus = c.Resources.Cpuset
+func (d *driver) setupRlimits(container *configs.Config, c *execdriver.Command) {
+	if c.Resources == nil {
+		return
 	}
 	}
 
 
-	return nil
+	for _, rlimit := range c.Resources.Rlimits {
+		container.Rlimits = append(container.Rlimits, configs.Rlimit{
+			Type: rlimit.Type,
+			Hard: rlimit.Hard,
+			Soft: rlimit.Soft,
+		})
+	}
 }
 }
 
 
-func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error {
+func (d *driver) setupMounts(container *configs.Config, c *execdriver.Command) error {
+	userMounts := make(map[string]struct{})
+	for _, m := range c.Mounts {
+		userMounts[m.Destination] = struct{}{}
+	}
+
+	// Filter out mounts that are overriden by user supplied mounts
+	var defaultMounts []*configs.Mount
+	for _, m := range container.Mounts {
+		if _, ok := userMounts[m.Destination]; !ok {
+			defaultMounts = append(defaultMounts, m)
+		}
+	}
+	container.Mounts = defaultMounts
+
 	for _, m := range c.Mounts {
 	for _, m := range c.Mounts {
-		container.MountConfig.Mounts = append(container.MountConfig.Mounts, &mount.Mount{
-			Type:        "bind",
+		dest, err := symlink.FollowSymlinkInScope(filepath.Join(c.Rootfs, m.Destination), c.Rootfs)
+		if err != nil {
+			return err
+		}
+		flags := syscall.MS_BIND | syscall.MS_REC
+		if !m.Writable {
+			flags |= syscall.MS_RDONLY
+		}
+		if m.Slave {
+			flags |= syscall.MS_SLAVE
+		}
+
+		container.Mounts = append(container.Mounts, &configs.Mount{
 			Source:      m.Source,
 			Source:      m.Source,
-			Destination: m.Destination,
-			Writable:    m.Writable,
-			Private:     m.Private,
-			Slave:       m.Slave,
+			Destination: dest,
+			Device:      "bind",
+			Flags:       flags,
 		})
 		})
 	}
 	}
-
 	return nil
 	return nil
 }
 }
 
 
-func (d *driver) setupLabels(container *libcontainer.Config, c *execdriver.Command) error {
+func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) error {
 	container.ProcessLabel = c.ProcessLabel
 	container.ProcessLabel = c.ProcessLabel
-	container.MountConfig.MountLabel = c.MountLabel
+	container.MountLabel = c.MountLabel
 
 
 	return nil
 	return nil
 }
 }

+ 168 - 147
daemon/execdriver/native/driver.go

@@ -17,16 +17,15 @@ import (
 
 
 	log "github.com/Sirupsen/logrus"
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/reexec"
 	sysinfo "github.com/docker/docker/pkg/system"
 	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/apparmor"
-	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/cgroups/systemd"
 	"github.com/docker/libcontainer/cgroups/systemd"
-	consolepkg "github.com/docker/libcontainer/console"
-	"github.com/docker/libcontainer/namespaces"
-	_ "github.com/docker/libcontainer/namespaces/nsenter"
+	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/system"
 	"github.com/docker/libcontainer/system"
+	"github.com/docker/libcontainer/utils"
 )
 )
 
 
 const (
 const (
@@ -34,16 +33,12 @@ const (
 	Version    = "0.2"
 	Version    = "0.2"
 )
 )
 
 
-type activeContainer struct {
-	container *libcontainer.Config
-	cmd       *exec.Cmd
-}
-
 type driver struct {
 type driver struct {
 	root             string
 	root             string
 	initPath         string
 	initPath         string
-	activeContainers map[string]*activeContainer
+	activeContainers map[string]libcontainer.Container
 	machineMemory    int64
 	machineMemory    int64
+	factory          libcontainer.Factory
 	sync.Mutex
 	sync.Mutex
 }
 }
 
 
@@ -60,11 +55,27 @@ func NewDriver(root, initPath string) (*driver, error) {
 	if err := apparmor.InstallDefaultProfile(); err != nil {
 	if err := apparmor.InstallDefaultProfile(); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+	cgm := libcontainer.Cgroupfs
+	if systemd.UseSystemd() {
+		cgm = libcontainer.SystemdCgroups
+	}
+
+	f, err := libcontainer.New(
+		root,
+		cgm,
+		libcontainer.InitPath(reexec.Self(), DriverName),
+		libcontainer.TmpfsRoot,
+	)
+	if err != nil {
+		return nil, err
+	}
+
 	return &driver{
 	return &driver{
 		root:             root,
 		root:             root,
 		initPath:         initPath,
 		initPath:         initPath,
-		activeContainers: make(map[string]*activeContainer),
+		activeContainers: make(map[string]libcontainer.Container),
 		machineMemory:    meminfo.MemTotal,
 		machineMemory:    meminfo.MemTotal,
+		factory:          f,
 	}, nil
 	}, nil
 }
 }
 
 
@@ -82,98 +93,141 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 
 
 	var term execdriver.Terminal
 	var term execdriver.Terminal
 
 
+	p := &libcontainer.Process{
+		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
+		Env:  c.ProcessConfig.Env,
+		Cwd:  c.WorkingDir,
+		User: c.ProcessConfig.User,
+	}
+
 	if c.ProcessConfig.Tty {
 	if c.ProcessConfig.Tty {
-		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
+		rootuid, err := container.HostUID()
+		if err != nil {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		cons, err := p.NewConsole(rootuid)
+		if err != nil {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		term, err = NewTtyConsole(cons, pipes, rootuid)
 	} else {
 	} else {
-		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
+		p.Stdout = pipes.Stdout
+		p.Stderr = pipes.Stderr
+		r, w, err := os.Pipe()
+		if err != nil {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		if pipes.Stdin != nil {
+			go func() {
+				io.Copy(w, pipes.Stdin)
+				w.Close()
+			}()
+			p.Stdin = r
+		}
+		term = &execdriver.StdConsole{}
 	}
 	}
 	if err != nil {
 	if err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	}
 	c.ProcessConfig.Terminal = term
 	c.ProcessConfig.Terminal = term
 
 
-	d.Lock()
-	d.activeContainers[c.ID] = &activeContainer{
-		container: container,
-		cmd:       &c.ProcessConfig.Cmd,
+	cont, err := d.factory.Create(c.ID, container)
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	}
+	d.Lock()
+	d.activeContainers[c.ID] = cont
 	d.Unlock()
 	d.Unlock()
+	defer func() {
+		cont.Destroy()
+		d.cleanContainer(c.ID)
+	}()
 
 
-	var (
-		dataPath = filepath.Join(d.root, c.ID)
-		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
-	)
-
-	if err := d.createContainerRoot(c.ID); err != nil {
+	if err := cont.Start(p); err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 	}
-	defer d.cleanContainer(c.ID)
 
 
-	if err := d.writeContainerFile(container, c.ID); err != nil {
-		return execdriver.ExitStatus{ExitCode: -1}, err
+	if startCallback != nil {
+		pid, err := p.Pid()
+		if err != nil {
+			p.Signal(os.Kill)
+			p.Wait()
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		startCallback(&c.ProcessConfig, pid)
 	}
 	}
 
 
-	execOutputChan := make(chan execOutput, 1)
-	waitForStart := make(chan struct{})
+	oomKillNotification, err := cont.NotifyOOM()
+	if err != nil {
+		oomKillNotification = nil
+		log.Warnf("Your kernel does not support OOM notifications: %s", err)
+	}
+	waitF := p.Wait
+	if nss := cont.Config().Namespaces; nss.Contains(configs.NEWPID) {
+		// we need such hack for tracking processes with inerited fds,
+		// because cmd.Wait() waiting for all streams to be copied
+		waitF = waitInPIDHost(p, cont)
+	}
+	ps, err := waitF()
+	if err != nil {
+		if err, ok := err.(*exec.ExitError); !ok {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		} else {
+			ps = err.ProcessState
+		}
+	}
+	cont.Destroy()
 
 
-	go func() {
-		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
-			c.ProcessConfig.Path = d.initPath
-			c.ProcessConfig.Args = append([]string{
-				DriverName,
-				"-console", console,
-				"-pipe", "3",
-				"-root", filepath.Join(d.root, c.ID),
-				"--",
-			}, args...)
-
-			// set this to nil so that when we set the clone flags anything else is reset
-			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
-				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
-			}
-			c.ProcessConfig.ExtraFiles = []*os.File{child}
+	_, oomKill := <-oomKillNotification
 
 
-			c.ProcessConfig.Env = container.Env
-			c.ProcessConfig.Dir = container.RootFs
+	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
+}
 
 
-			return &c.ProcessConfig.Cmd
-		}, func() {
-			close(waitForStart)
-			if startCallback != nil {
-				c.ContainerPid = c.ProcessConfig.Process.Pid
-				startCallback(&c.ProcessConfig, c.ContainerPid)
-			}
-		})
-		execOutputChan <- execOutput{exitCode, err}
-	}()
+func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) {
+	return func() (*os.ProcessState, error) {
+		pid, err := p.Pid()
+		if err != nil {
+			return nil, err
+		}
 
 
-	select {
-	case execOutput := <-execOutputChan:
-		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
-	case <-waitForStart:
-		break
-	}
+		process, err := os.FindProcess(pid)
+		s, err := process.Wait()
+		if err != nil {
+			if err, ok := err.(*exec.ExitError); !ok {
+				return s, err
+			} else {
+				s = err.ProcessState
+			}
+		}
+		processes, err := c.Processes()
+		if err != nil {
+			return s, err
+		}
 
 
-	oomKill := false
-	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
-	if err == nil {
-		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
-		if err == nil {
-			_, oomKill = <-oomKillNotification
-		} else {
-			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
+		for _, pid := range processes {
+			process, err := os.FindProcess(pid)
+			if err != nil {
+				log.Errorf("Failed to kill process: %d", pid)
+				continue
+			}
+			process.Kill()
 		}
 		}
-	} else {
-		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
-	}
-	// wait for the container to exit.
-	execOutput := <-execOutputChan
 
 
-	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
+		p.Wait()
+		return s, err
+	}
 }
 }
 
 
-func (d *driver) Kill(p *execdriver.Command, sig int) error {
-	return syscall.Kill(p.ProcessConfig.Process.Pid, syscall.Signal(sig))
+func (d *driver) Kill(c *execdriver.Command, sig int) error {
+	active := d.activeContainers[c.ID]
+	if active == nil {
+		return fmt.Errorf("active container for %s does not exist", c.ID)
+	}
+	state, err := active.State()
+	if err != nil {
+		return err
+	}
+	return syscall.Kill(state.InitProcessPid, syscall.Signal(sig))
 }
 }
 
 
 func (d *driver) Pause(c *execdriver.Command) error {
 func (d *driver) Pause(c *execdriver.Command) error {
@@ -181,11 +235,7 @@ func (d *driver) Pause(c *execdriver.Command) error {
 	if active == nil {
 	if active == nil {
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 	}
 	}
-	active.container.Cgroups.Freezer = "FROZEN"
-	if systemd.UseSystemd() {
-		return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
-	}
-	return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
+	return active.Pause()
 }
 }
 
 
 func (d *driver) Unpause(c *execdriver.Command) error {
 func (d *driver) Unpause(c *execdriver.Command) error {
@@ -193,44 +243,31 @@ func (d *driver) Unpause(c *execdriver.Command) error {
 	if active == nil {
 	if active == nil {
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 	}
 	}
-	active.container.Cgroups.Freezer = "THAWED"
-	if systemd.UseSystemd() {
-		return systemd.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
-	}
-	return fs.Freeze(active.container.Cgroups, active.container.Cgroups.Freezer)
+	return active.Resume()
 }
 }
 
 
-func (d *driver) Terminate(p *execdriver.Command) error {
+func (d *driver) Terminate(c *execdriver.Command) error {
+	defer d.cleanContainer(c.ID)
 	// lets check the start time for the process
 	// lets check the start time for the process
-	state, err := libcontainer.GetState(filepath.Join(d.root, p.ID))
+	active := d.activeContainers[c.ID]
+	if active == nil {
+		return fmt.Errorf("active container for %s does not exist", c.ID)
+	}
+	state, err := active.State()
 	if err != nil {
 	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		// TODO: Remove this part for version 1.2.0
-		// This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
-		data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
-		if err != nil {
-			// if we don't have the data on disk then we can assume the process is gone
-			// because this is only removed after we know the process has stopped
-			if os.IsNotExist(err) {
-				return nil
-			}
-			return err
-		}
-		state = &libcontainer.State{InitStartTime: string(data)}
+		return err
 	}
 	}
+	pid := state.InitProcessPid
 
 
-	currentStartTime, err := system.GetProcessStartTime(p.ProcessConfig.Process.Pid)
+	currentStartTime, err := system.GetProcessStartTime(pid)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	if state.InitStartTime == currentStartTime {
-		err = syscall.Kill(p.ProcessConfig.Process.Pid, 9)
-		syscall.Wait4(p.ProcessConfig.Process.Pid, nil, 0, nil)
+	if state.InitProcessStartTime == currentStartTime {
+		err = syscall.Kill(pid, 9)
+		syscall.Wait4(pid, nil, 0, nil)
 	}
 	}
-	d.cleanContainer(p.ID)
 
 
 	return err
 	return err
 
 
@@ -255,15 +292,10 @@ func (d *driver) GetPidsForContainer(id string) ([]int, error) {
 	if active == nil {
 	if active == nil {
 		return nil, fmt.Errorf("active container for %s does not exist", id)
 		return nil, fmt.Errorf("active container for %s does not exist", id)
 	}
 	}
-	c := active.container.Cgroups
-
-	if systemd.UseSystemd() {
-		return systemd.GetPids(c)
-	}
-	return fs.GetPids(c)
+	return active.Processes()
 }
 }
 
 
-func (d *driver) writeContainerFile(container *libcontainer.Config, id string) error {
+func (d *driver) writeContainerFile(container *configs.Config, id string) error {
 	data, err := json.Marshal(container)
 	data, err := json.Marshal(container)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -275,7 +307,7 @@ func (d *driver) cleanContainer(id string) error {
 	d.Lock()
 	d.Lock()
 	delete(d.activeContainers, id)
 	delete(d.activeContainers, id)
 	d.Unlock()
 	d.Unlock()
-	return os.RemoveAll(filepath.Join(d.root, id, "container.json"))
+	return os.RemoveAll(filepath.Join(d.root, id))
 }
 }
 
 
 func (d *driver) createContainerRoot(id string) error {
 func (d *driver) createContainerRoot(id string) error {
@@ -288,28 +320,24 @@ func (d *driver) Clean(id string) error {
 
 
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
 	c := d.activeContainers[id]
 	c := d.activeContainers[id]
-	state, err := libcontainer.GetState(filepath.Join(d.root, id))
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, execdriver.ErrNotRunning
-		}
-		return nil, err
+	if c == nil {
+		return nil, execdriver.ErrNotRunning
 	}
 	}
 	now := time.Now()
 	now := time.Now()
-	stats, err := libcontainer.GetStats(nil, state)
+	stats, err := c.Stats()
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	memoryLimit := c.container.Cgroups.Memory
+	memoryLimit := c.Config().Cgroups.Memory
 	// if the container does not have any memory limit specified set the
 	// if the container does not have any memory limit specified set the
 	// limit to the machines memory
 	// limit to the machines memory
 	if memoryLimit == 0 {
 	if memoryLimit == 0 {
 		memoryLimit = d.machineMemory
 		memoryLimit = d.machineMemory
 	}
 	}
 	return &execdriver.ResourceStats{
 	return &execdriver.ResourceStats{
-		Read:           now,
-		ContainerStats: stats,
-		MemoryLimit:    memoryLimit,
+		Stats:       stats,
+		Read:        now,
+		MemoryLimit: memoryLimit,
 	}, nil
 	}, nil
 }
 }
 
 
@@ -324,38 +352,31 @@ func getEnv(key string, env []string) string {
 }
 }
 
 
 type TtyConsole struct {
 type TtyConsole struct {
-	MasterPty *os.File
+	console libcontainer.Console
 }
 }
 
 
-func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
-	ptyMaster, console, err := consolepkg.CreateMasterAndConsole()
-	if err != nil {
-		return nil, err
-	}
-
+func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes, rootuid int) (*TtyConsole, error) {
 	tty := &TtyConsole{
 	tty := &TtyConsole{
-		MasterPty: ptyMaster,
+		console: console,
 	}
 	}
 
 
-	if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
+	if err := tty.AttachPipes(pipes); err != nil {
 		tty.Close()
 		tty.Close()
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	processConfig.Console = console
-
 	return tty, nil
 	return tty, nil
 }
 }
 
 
-func (t *TtyConsole) Master() *os.File {
-	return t.MasterPty
+func (t *TtyConsole) Master() libcontainer.Console {
+	return t.console
 }
 }
 
 
 func (t *TtyConsole) Resize(h, w int) error {
 func (t *TtyConsole) Resize(h, w int) error {
-	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
+	return term.SetWinsize(t.console.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
 }
 }
 
 
-func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
+func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error {
 	go func() {
 	go func() {
 		if wb, ok := pipes.Stdout.(interface {
 		if wb, ok := pipes.Stdout.(interface {
 			CloseWriters() error
 			CloseWriters() error
@@ -363,12 +384,12 @@ func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) err
 			defer wb.CloseWriters()
 			defer wb.CloseWriters()
 		}
 		}
 
 
-		io.Copy(pipes.Stdout, t.MasterPty)
+		io.Copy(pipes.Stdout, t.console)
 	}()
 	}()
 
 
 	if pipes.Stdin != nil {
 	if pipes.Stdin != nil {
 		go func() {
 		go func() {
-			io.Copy(t.MasterPty, pipes.Stdin)
+			io.Copy(t.console, pipes.Stdin)
 
 
 			pipes.Stdin.Close()
 			pipes.Stdin.Close()
 		}()
 		}()
@@ -378,5 +399,5 @@ func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) err
 }
 }
 
 
 func (t *TtyConsole) Close() error {
 func (t *TtyConsole) Close() error {
-	return t.MasterPty.Close()
+	return t.console.Close()
 }
 }

+ 50 - 40
daemon/execdriver/native/exec.go

@@ -4,67 +4,77 @@ package native
 
 
 import (
 import (
 	"fmt"
 	"fmt"
-	"log"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
-	"path/filepath"
-	"runtime"
+	"syscall"
 
 
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/namespaces"
+	_ "github.com/docker/libcontainer/nsenter"
+	"github.com/docker/libcontainer/utils"
 )
 )
 
 
-const execCommandName = "nsenter-exec"
-
-func init() {
-	reexec.Register(execCommandName, nsenterExec)
-}
-
-func nsenterExec() {
-	runtime.LockOSThread()
-
-	// User args are passed after '--' in the command line.
-	userArgs := findUserArgs()
-
-	config, err := loadConfigFromFd()
-	if err != nil {
-		log.Fatalf("docker-exec: unable to receive config from sync pipe: %s", err)
-	}
-
-	if err := namespaces.FinalizeSetns(config, userArgs); err != nil {
-		log.Fatalf("docker-exec: failed to exec: %s", err)
-	}
-}
-
 // TODO(vishh): Add support for running in priviledged mode and running as a different user.
 // TODO(vishh): Add support for running in priviledged mode and running as a different user.
 func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
 func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
 	active := d.activeContainers[c.ID]
 	active := d.activeContainers[c.ID]
 	if active == nil {
 	if active == nil {
 		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
 		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
 	}
 	}
-	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
-	if err != nil {
-		return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err)
-	}
 
 
 	var term execdriver.Terminal
 	var term execdriver.Terminal
+	var err error
+
+	p := &libcontainer.Process{
+		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
+		Env:  c.ProcessConfig.Env,
+		Cwd:  c.WorkingDir,
+		User: c.ProcessConfig.User,
+	}
 
 
 	if processConfig.Tty {
 	if processConfig.Tty {
-		term, err = NewTtyConsole(processConfig, pipes)
+		config := active.Config()
+		rootuid, err := config.HostUID()
+		if err != nil {
+			return -1, err
+		}
+		cons, err := p.NewConsole(rootuid)
+		if err != nil {
+			return -1, err
+		}
+		term, err = NewTtyConsole(cons, pipes, rootuid)
 	} else {
 	} else {
-		term, err = execdriver.NewStdConsole(processConfig, pipes)
+		p.Stdout = pipes.Stdout
+		p.Stderr = pipes.Stderr
+		p.Stdin = pipes.Stdin
+		term = &execdriver.StdConsole{}
+	}
+	if err != nil {
+		return -1, err
 	}
 	}
 
 
 	processConfig.Terminal = term
 	processConfig.Terminal = term
 
 
-	args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...)
+	if err := active.Start(p); err != nil {
+		return -1, err
+	}
+
+	if startCallback != nil {
+		pid, err := p.Pid()
+		if err != nil {
+			p.Signal(os.Kill)
+			p.Wait()
+			return -1, err
+		}
+		startCallback(&c.ProcessConfig, pid)
+	}
 
 
-	return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console,
-		func(cmd *exec.Cmd) {
-			if startCallback != nil {
-				startCallback(&c.ProcessConfig, cmd.Process.Pid)
-			}
-		})
+	ps, err := p.Wait()
+	if err != nil {
+		exitErr, ok := err.(*exec.ExitError)
+		if !ok {
+			return -1, err
+		}
+		ps = exitErr.ProcessState
+	}
+	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
 }
 }

+ 2 - 16
daemon/execdriver/native/info.go

@@ -2,13 +2,6 @@
 
 
 package native
 package native
 
 
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/docker/libcontainer"
-)
-
 type info struct {
 type info struct {
 	ID     string
 	ID     string
 	driver *driver
 	driver *driver
@@ -18,13 +11,6 @@ type info struct {
 // pid file for a container.  If the file exists then the
 // pid file for a container.  If the file exists then the
 // container is currently running
 // container is currently running
 func (i *info) IsRunning() bool {
 func (i *info) IsRunning() bool {
-	if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil {
-		return true
-	}
-	// TODO: Remove this part for version 1.2.0
-	// This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
-	if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil {
-		return true
-	}
-	return false
+	_, ok := i.driver.activeContainers[i.ID]
+	return ok
 }
 }

+ 15 - 30
daemon/execdriver/native/init.go

@@ -3,55 +3,40 @@
 package native
 package native
 
 
 import (
 import (
-	"encoding/json"
-	"flag"
 	"fmt"
 	"fmt"
 	"os"
 	"os"
-	"path/filepath"
 	"runtime"
 	"runtime"
 
 
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/namespaces"
 )
 )
 
 
 func init() {
 func init() {
 	reexec.Register(DriverName, initializer)
 	reexec.Register(DriverName, initializer)
 }
 }
 
 
-func initializer() {
-	runtime.LockOSThread()
-
-	var (
-		pipe    = flag.Int("pipe", 0, "sync pipe fd")
-		console = flag.String("console", "", "console (pty slave) path")
-		root    = flag.String("root", ".", "root path for configuration files")
-	)
-
-	flag.Parse()
-
-	var container *libcontainer.Config
-	f, err := os.Open(filepath.Join(*root, "container.json"))
-	if err != nil {
-		writeError(err)
+func fatal(err error) {
+	if lerr, ok := err.(libcontainer.Error); ok {
+		lerr.Detail(os.Stderr)
+		os.Exit(1)
 	}
 	}
 
 
-	if err := json.NewDecoder(f).Decode(&container); err != nil {
-		f.Close()
-		writeError(err)
-	}
-	f.Close()
+	fmt.Fprintln(os.Stderr, err)
+	os.Exit(1)
+}
 
 
-	rootfs, err := os.Getwd()
+func initializer() {
+	runtime.GOMAXPROCS(1)
+	runtime.LockOSThread()
+	factory, err := libcontainer.New("")
 	if err != nil {
 	if err != nil {
-		writeError(err)
+		fatal(err)
 	}
 	}
-
-	if err := namespaces.Init(container, rootfs, *console, os.NewFile(uintptr(*pipe), "child"), flag.Args()); err != nil {
-		writeError(err)
+	if err := factory.StartInitialization(3); err != nil {
+		fatal(err)
 	}
 	}
 
 
-	panic("Unreachable")
+	panic("unreachable")
 }
 }
 
 
 func writeError(err error) {
 func writeError(err error) {

+ 56 - 7
daemon/execdriver/native/template/default_template.go

@@ -1,14 +1,17 @@
 package template
 package template
 
 
 import (
 import (
-	"github.com/docker/libcontainer"
+	"syscall"
+
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/apparmor"
-	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/configs"
 )
 )
 
 
+const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
+
 // New returns the docker default configuration for libcontainer
 // New returns the docker default configuration for libcontainer
-func New() *libcontainer.Config {
-	container := &libcontainer.Config{
+func New() *configs.Config {
+	container := &configs.Config{
 		Capabilities: []string{
 		Capabilities: []string{
 			"CHOWN",
 			"CHOWN",
 			"DAC_OVERRIDE",
 			"DAC_OVERRIDE",
@@ -25,18 +28,64 @@ func New() *libcontainer.Config {
 			"KILL",
 			"KILL",
 			"AUDIT_WRITE",
 			"AUDIT_WRITE",
 		},
 		},
-		Namespaces: libcontainer.Namespaces([]libcontainer.Namespace{
+		Namespaces: configs.Namespaces([]configs.Namespace{
 			{Type: "NEWNS"},
 			{Type: "NEWNS"},
 			{Type: "NEWUTS"},
 			{Type: "NEWUTS"},
 			{Type: "NEWIPC"},
 			{Type: "NEWIPC"},
 			{Type: "NEWPID"},
 			{Type: "NEWPID"},
 			{Type: "NEWNET"},
 			{Type: "NEWNET"},
 		}),
 		}),
-		Cgroups: &cgroups.Cgroup{
+		Cgroups: &configs.Cgroup{
 			Parent:          "docker",
 			Parent:          "docker",
 			AllowAllDevices: false,
 			AllowAllDevices: false,
 		},
 		},
-		MountConfig: &libcontainer.MountConfig{},
+		Mounts: []*configs.Mount{
+			{
+				Source:      "proc",
+				Destination: "/proc",
+				Device:      "proc",
+				Flags:       defaultMountFlags,
+			},
+			{
+				Source:      "tmpfs",
+				Destination: "/dev",
+				Device:      "tmpfs",
+				Flags:       syscall.MS_NOSUID | syscall.MS_STRICTATIME,
+				Data:        "mode=755",
+			},
+			{
+				Source:      "devpts",
+				Destination: "/dev/pts",
+				Device:      "devpts",
+				Flags:       syscall.MS_NOSUID | syscall.MS_NOEXEC,
+				Data:        "newinstance,ptmxmode=0666,mode=0620,gid=5",
+			},
+			{
+				Device:      "tmpfs",
+				Source:      "shm",
+				Destination: "/dev/shm",
+				Data:        "mode=1777,size=65536k",
+				Flags:       defaultMountFlags,
+			},
+			{
+				Source:      "mqueue",
+				Destination: "/dev/mqueue",
+				Device:      "mqueue",
+				Flags:       defaultMountFlags,
+			},
+			{
+				Source:      "sysfs",
+				Destination: "/sys",
+				Device:      "sysfs",
+				Flags:       defaultMountFlags | syscall.MS_RDONLY,
+			},
+		},
+		MaskPaths: []string{
+			"/proc/kcore",
+		},
+		ReadonlyPaths: []string{
+			"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
+		},
 	}
 	}
 
 
 	if apparmor.IsEnabled() {
 	if apparmor.IsEnabled() {

+ 17 - 24
daemon/execdriver/native/utils.go

@@ -2,28 +2,21 @@
 
 
 package native
 package native
 
 
-import (
-	"encoding/json"
-	"os"
+//func findUserArgs() []string {
+//for i, a := range os.Args {
+//if a == "--" {
+//return os.Args[i+1:]
+//}
+//}
+//return []string{}
+//}
 
 
-	"github.com/docker/libcontainer"
-)
-
-func findUserArgs() []string {
-	for i, a := range os.Args {
-		if a == "--" {
-			return os.Args[i+1:]
-		}
-	}
-	return []string{}
-}
-
-// loadConfigFromFd loads a container's config from the sync pipe that is provided by
-// fd 3 when running a process
-func loadConfigFromFd() (*libcontainer.Config, error) {
-	var config *libcontainer.Config
-	if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil {
-		return nil, err
-	}
-	return config, nil
-}
+//// loadConfigFromFd loads a container's config from the sync pipe that is provided by
+//// fd 3 when running a process
+//func loadConfigFromFd() (*configs.Config, error) {
+//var config *libcontainer.Config
+//if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil {
+//return nil, err
+//}
+//return config, nil
+//}

+ 73 - 3
daemon/execdriver/utils.go

@@ -5,13 +5,83 @@ import (
 	"strings"
 	"strings"
 
 
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
-	"github.com/docker/libcontainer/security/capabilities"
+	"github.com/syndtr/gocapability/capability"
 )
 )
 
 
+var capabilityList = Capabilities{
+	{Key: "SETPCAP", Value: capability.CAP_SETPCAP},
+	{Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
+	{Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
+	{Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
+	{Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
+	{Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
+	{Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
+	{Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
+	{Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
+	{Key: "MKNOD", Value: capability.CAP_MKNOD},
+	{Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
+	{Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
+	{Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
+	{Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
+	{Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
+	{Key: "SYSLOG", Value: capability.CAP_SYSLOG},
+	{Key: "CHOWN", Value: capability.CAP_CHOWN},
+	{Key: "NET_RAW", Value: capability.CAP_NET_RAW},
+	{Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
+	{Key: "FOWNER", Value: capability.CAP_FOWNER},
+	{Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
+	{Key: "FSETID", Value: capability.CAP_FSETID},
+	{Key: "KILL", Value: capability.CAP_KILL},
+	{Key: "SETGID", Value: capability.CAP_SETGID},
+	{Key: "SETUID", Value: capability.CAP_SETUID},
+	{Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
+	{Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
+	{Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
+	{Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
+	{Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
+	{Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
+	{Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
+	{Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
+	{Key: "LEASE", Value: capability.CAP_LEASE},
+	{Key: "SETFCAP", Value: capability.CAP_SETFCAP},
+	{Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
+	{Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
+}
+
+type (
+	CapabilityMapping struct {
+		Key   string         `json:"key,omitempty"`
+		Value capability.Cap `json:"value,omitempty"`
+	}
+	Capabilities []*CapabilityMapping
+)
+
+func (c *CapabilityMapping) String() string {
+	return c.Key
+}
+
+func GetCapability(key string) *CapabilityMapping {
+	for _, capp := range capabilityList {
+		if capp.Key == key {
+			cpy := *capp
+			return &cpy
+		}
+	}
+	return nil
+}
+
+func GetAllCapabilities() []string {
+	output := make([]string, len(capabilityList))
+	for i, capability := range capabilityList {
+		output[i] = capability.String()
+	}
+	return output
+}
+
 func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
 func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
 	var (
 	var (
 		newCaps []string
 		newCaps []string
-		allCaps = capabilities.GetAllCapabilities()
+		allCaps = GetAllCapabilities()
 	)
 	)
 
 
 	// look for invalid cap in the drop list
 	// look for invalid cap in the drop list
@@ -26,7 +96,7 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
 
 
 	// handle --cap-add=all
 	// handle --cap-add=all
 	if utils.StringsContainsNoCase(adds, "all") {
 	if utils.StringsContainsNoCase(adds, "all") {
-		basics = capabilities.GetAllCapabilities()
+		basics = allCaps
 	}
 	}
 
 
 	if !utils.StringsContainsNoCase(drops, "all") {
 	if !utils.StringsContainsNoCase(drops, "all") {

+ 17 - 14
daemon/export.go

@@ -11,20 +11,23 @@ func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
 		return job.Errorf("Usage: %s container_id", job.Name)
 		return job.Errorf("Usage: %s container_id", job.Name)
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
-	if container := daemon.Get(name); container != nil {
-		data, err := container.Export()
-		if err != nil {
-			return job.Errorf("%s: %s", name, err)
-		}
-		defer data.Close()
 
 
-		// Stream the entire contents of the container (basically a volatile snapshot)
-		if _, err := io.Copy(job.Stdout, data); err != nil {
-			return job.Errorf("%s: %s", name, err)
-		}
-		// FIXME: factor job-specific LogEvent to engine.Job.Run()
-		container.LogEvent("export")
-		return engine.StatusOK
+	container, err := daemon.Get(name)
+	if err != nil {
+		return job.Error(err)
 	}
 	}
-	return job.Errorf("No such container: %s", name)
+
+	data, err := container.Export()
+	if err != nil {
+		return job.Errorf("%s: %s", name, err)
+	}
+	defer data.Close()
+
+	// Stream the entire contents of the container (basically a volatile snapshot)
+	if _, err := io.Copy(job.Stdout, data); err != nil {
+		return job.Errorf("%s: %s", name, err)
+	}
+	// FIXME: factor job-specific LogEvent to engine.Job.Run()
+	container.LogEvent("export")
+	return engine.StatusOK
 }
 }

+ 8 - 7
daemon/graphdriver/aufs/aufs.go

@@ -34,8 +34,9 @@ import (
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/directory"
 	mountpk "github.com/docker/docker/pkg/mount"
 	mountpk "github.com/docker/docker/pkg/mount"
-	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libcontainer/label"
 )
 )
 
 
@@ -215,7 +216,7 @@ func (a *Driver) Remove(id string) error {
 	defer a.Unlock()
 	defer a.Unlock()
 
 
 	if a.active[id] != 0 {
 	if a.active[id] != 0 {
-		log.Errorf("Warning: removing active id %s", id)
+		log.Errorf("Removing active id %s", id)
 	}
 	}
 
 
 	// Make sure the dir is umounted first
 	// Make sure the dir is umounted first
@@ -319,7 +320,7 @@ func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
 // relative to its base filesystem directory.
 // relative to its base filesystem directory.
 func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
 func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
 	// AUFS doesn't need the parent layer to calculate the diff size.
 	// AUFS doesn't need the parent layer to calculate the diff size.
-	return utils.TreeSize(path.Join(a.rootPath(), "diff", id))
+	return directory.Size(path.Join(a.rootPath(), "diff", id))
 }
 }
 
 
 // ApplyDiff extracts the changeset from the given diff into the
 // ApplyDiff extracts the changeset from the given diff into the
@@ -377,7 +378,7 @@ func (a *Driver) mount(id, mountLabel string) error {
 	}
 	}
 
 
 	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
 	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
-		return err
+		return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -404,7 +405,7 @@ func (a *Driver) Cleanup() error {
 
 
 	for _, id := range ids {
 	for _, id := range ids {
 		if err := a.unmount(id); err != nil {
 		if err := a.unmount(id); err != nil {
-			log.Errorf("Unmounting %s: %s", utils.TruncateID(id), err)
+			log.Errorf("Unmounting %s: %s", common.TruncateID(id), err)
 		}
 		}
 	}
 	}
 
 
@@ -421,7 +422,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 	// Mount options are clipped to page size(4096 bytes). If there are more
 	// Mount options are clipped to page size(4096 bytes). If there are more
 	// layers then these are remounted individually using append.
 	// layers then these are remounted individually using append.
 
 
-	b := make([]byte, syscall.Getpagesize()-len(mountLabel)-50) // room for xino & mountLabel
+	b := make([]byte, syscall.Getpagesize()-len(mountLabel)-54) // room for xino & mountLabel
 	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
 	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
 
 
 	firstMount := true
 	firstMount := true
@@ -445,7 +446,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 		}
 		}
 
 
 		if firstMount {
 		if firstMount {
-			data := label.FormatMountLabel(fmt.Sprintf("%s,xino=/dev/shm/aufs.xino", string(b[:bp])), mountLabel)
+			data := label.FormatMountLabel(fmt.Sprintf("%s,dio,xino=/dev/shm/aufs.xino", string(b[:bp])), mountLabel)
 			if err = mount("none", target, "aufs", 0, data); err != nil {
 			if err = mount("none", target, "aufs", 0, data); err != nil {
 				return
 				return
 			}
 			}

+ 1 - 1
daemon/graphdriver/aufs/mount.go

@@ -9,7 +9,7 @@ import (
 
 
 func Unmount(target string) error {
 func Unmount(target string) error {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-		log.Errorf("[warning]: couldn't run auplink before unmount: %s", err)
+		log.Errorf("Couldn't run auplink before unmount: %s", err)
 	}
 	}
 	if err := syscall.Unmount(target, 0); err != nil {
 	if err := syscall.Unmount(target, 0); err != nil {
 		return err
 		return err

+ 0 - 1
daemon/graphdriver/btrfs/MAINTAINERS

@@ -1 +0,0 @@
-Alexander Larsson <alexl@redhat.com> (@alexlarsson)

+ 0 - 2
daemon/graphdriver/devmapper/MAINTAINERS

@@ -1,2 +0,0 @@
-Alexander Larsson <alexl@redhat.com> (@alexlarsson)
-Vincent Batts <vbatts@redhat.com> (@vbatts)

+ 5 - 1
daemon/graphdriver/devmapper/README.md

@@ -42,8 +42,10 @@ will display something like:
 	 Metadata file: /dev/loop4
 	 Metadata file: /dev/loop4
 	 Data Space Used: 2.536 GB
 	 Data Space Used: 2.536 GB
 	 Data Space Total: 107.4 GB
 	 Data Space Total: 107.4 GB
+	 Data Space Available: 104.8 GB
 	 Metadata Space Used: 7.93 MB
 	 Metadata Space Used: 7.93 MB
 	 Metadata Space Total: 2.147 GB
 	 Metadata Space Total: 2.147 GB
+	 Metadata Space Available: 2.14 GB
 	 Udev Sync Supported: true
 	 Udev Sync Supported: true
 	 Data loop file: /home/docker/devicemapper/devicemapper/data
 	 Data loop file: /home/docker/devicemapper/devicemapper/data
 	 Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
 	 Metadata loop file: /home/docker/devicemapper/devicemapper/metadata
@@ -60,8 +62,10 @@ status information about the driver.
  *  `Metadata file` blockdevice file used for the devicemapper metadata
  *  `Metadata file` blockdevice file used for the devicemapper metadata
  *  `Data Space Used` tells how much of `Data file` is currently used
  *  `Data Space Used` tells how much of `Data file` is currently used
  *  `Data Space Total` tells max size the `Data file`
  *  `Data Space Total` tells max size the `Data file`
+ *  `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
  *  `Metadata Space Used` tells how much of `Metadata file` is currently used
  *  `Metadata Space Used` tells how much of `Metadata file` is currently used
  *  `Metadata Space Total` tells max size the `Metadata file`
  *  `Metadata Space Total` tells max size the `Metadata file`
+ *  `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem.
  *  `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`.
  *  `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`.
  *  `Data loop file` file attached to `Data file`, if loopback device is used
  *  `Data loop file` file attached to `Data file`, if loopback device is used
  *  `Metadata loop file` file attached to `Metadata file`, if loopback device is used
  *  `Metadata loop file` file attached to `Metadata file`, if loopback device is used
@@ -146,7 +150,7 @@ Here is the list of supported options:
     If using a block device for device mapper storage, ideally lvm2
     If using a block device for device mapper storage, ideally lvm2
     would be used to create/manage the thin-pool volume that is then
     would be used to create/manage the thin-pool volume that is then
     handed to docker to exclusively create/manage the thin and thin
     handed to docker to exclusively create/manage the thin and thin
-    snapshot volumes needed for it's containers.  Managing the thin-pool
+    snapshot volumes needed for its containers.  Managing the thin-pool
     outside of docker makes for the most feature-rich method of having
     outside of docker makes for the most feature-rich method of having
     docker utilize device mapper thin provisioning as the backing
     docker utilize device mapper thin provisioning as the backing
     storage for docker's containers.  lvm2-based thin-pool management
     storage for docker's containers.  lvm2-based thin-pool management

+ 65 - 21
daemon/graphdriver/devmapper/deviceset.go

@@ -100,8 +100,9 @@ type DeviceSet struct {
 }
 }
 
 
 type DiskUsage struct {
 type DiskUsage struct {
-	Used  uint64
-	Total uint64
+	Used      uint64
+	Total     uint64
+	Available uint64
 }
 }
 
 
 type Status struct {
 type Status struct {
@@ -323,6 +324,11 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo)
 		return nil
 		return nil
 	}
 	}
 
 
+	if strings.HasPrefix(finfo.Name(), ".") {
+		log.Debugf("Skipping file %s", path)
+		return nil
+	}
+
 	if finfo.Name() == deviceSetMetaFile {
 	if finfo.Name() == deviceSetMetaFile {
 		log.Debugf("Skipping file %s", path)
 		log.Debugf("Skipping file %s", path)
 		return nil
 		return nil
@@ -341,7 +347,7 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo)
 	}
 	}
 
 
 	if dinfo.DeviceId > MaxDeviceId {
 	if dinfo.DeviceId > MaxDeviceId {
-		log.Errorf("Warning: Ignoring Invalid DeviceId=%d", dinfo.DeviceId)
+		log.Errorf("Ignoring Invalid DeviceId=%d", dinfo.DeviceId)
 		return nil
 		return nil
 	}
 	}
 
 
@@ -386,7 +392,7 @@ func (devices *DeviceSet) unregisterDevice(id int, hash string) error {
 	devices.devicesLock.Unlock()
 	devices.devicesLock.Unlock()
 
 
 	if err := devices.removeMetadata(info); err != nil {
 	if err := devices.removeMetadata(info); err != nil {
-		log.Debugf("Error removing meta data: %s", err)
+		log.Debugf("Error removing metadata: %s", err)
 		return err
 		return err
 	}
 	}
 
 
@@ -463,7 +469,7 @@ func (devices *DeviceSet) createFilesystem(info *DevInfo) error {
 }
 }
 
 
 func (devices *DeviceSet) migrateOldMetaData() error {
 func (devices *DeviceSet) migrateOldMetaData() error {
-	// Migrate old metadatafile
+	// Migrate old metadata file
 	jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
 	jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
 	if err != nil && !os.IsNotExist(err) {
 	if err != nil && !os.IsNotExist(err) {
 		return err
 		return err
@@ -548,7 +554,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) {
 				// happen. Now we have a mechianism to find
 				// happen. Now we have a mechianism to find
 				// a free device Id. So something is not right.
 				// a free device Id. So something is not right.
 				// Give a warning and continue.
 				// Give a warning and continue.
-				log.Errorf("Warning: Device Id %d exists in pool but it is supposed to be unused", deviceId)
+				log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
 				deviceId, err = devices.getNextFreeDeviceId()
 				deviceId, err = devices.getNextFreeDeviceId()
 				if err != nil {
 				if err != nil {
 					return nil, err
 					return nil, err
@@ -600,7 +606,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInf
 				// happen. Now we have a mechianism to find
 				// happen. Now we have a mechianism to find
 				// a free device Id. So something is not right.
 				// a free device Id. So something is not right.
 				// Give a warning and continue.
 				// Give a warning and continue.
-				log.Errorf("Warning: Device Id %d exists in pool but it is supposed to be unused", deviceId)
+				log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
 				deviceId, err = devices.getNextFreeDeviceId()
 				deviceId, err = devices.getNextFreeDeviceId()
 				if err != nil {
 				if err != nil {
 					return err
 					return err
@@ -846,18 +852,18 @@ func (devices *DeviceSet) rollbackTransaction() error {
 	// closed. In that case this call will fail. Just leave a message
 	// closed. In that case this call will fail. Just leave a message
 	// in case of failure.
 	// in case of failure.
 	if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil {
 	if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil {
-		log.Errorf("Warning: Unable to delete device: %s", err)
+		log.Errorf("Unable to delete device: %s", err)
 	}
 	}
 
 
 	dinfo := &DevInfo{Hash: devices.DeviceIdHash}
 	dinfo := &DevInfo{Hash: devices.DeviceIdHash}
 	if err := devices.removeMetadata(dinfo); err != nil {
 	if err := devices.removeMetadata(dinfo); err != nil {
-		log.Errorf("Warning: Unable to remove meta data: %s", err)
+		log.Errorf("Unable to remove metadata: %s", err)
 	} else {
 	} else {
 		devices.markDeviceIdFree(devices.DeviceId)
 		devices.markDeviceIdFree(devices.DeviceId)
 	}
 	}
 
 
 	if err := devices.removeTransactionMetaData(); err != nil {
 	if err := devices.removeTransactionMetaData(); err != nil {
-		log.Errorf("Warning: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
+		log.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
 	}
 	}
 
 
 	return nil
 	return nil
@@ -877,7 +883,7 @@ func (devices *DeviceSet) processPendingTransaction() error {
 	// If open transaction Id is less than pool transaction Id, something
 	// If open transaction Id is less than pool transaction Id, something
 	// is wrong. Bail out.
 	// is wrong. Bail out.
 	if devices.OpenTransactionId < devices.TransactionId {
 	if devices.OpenTransactionId < devices.TransactionId {
-		log.Errorf("Warning: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId)
+		log.Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId)
 		return nil
 		return nil
 	}
 	}
 
 
@@ -919,7 +925,7 @@ func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error {
 	devices.DeviceIdHash = hash
 	devices.DeviceIdHash = hash
 	devices.DeviceId = DeviceId
 	devices.DeviceId = DeviceId
 	if err := devices.saveTransactionMetaData(); err != nil {
 	if err := devices.saveTransactionMetaData(); err != nil {
-		return fmt.Errorf("Error saving transaction meta data: %s", err)
+		return fmt.Errorf("Error saving transaction metadata: %s", err)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -927,7 +933,7 @@ func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error {
 func (devices *DeviceSet) refreshTransaction(DeviceId int) error {
 func (devices *DeviceSet) refreshTransaction(DeviceId int) error {
 	devices.DeviceId = DeviceId
 	devices.DeviceId = DeviceId
 	if err := devices.saveTransactionMetaData(); err != nil {
 	if err := devices.saveTransactionMetaData(); err != nil {
-		return fmt.Errorf("Error saving transaction meta data: %s", err)
+		return fmt.Errorf("Error saving transaction metadata: %s", err)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -957,7 +963,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 
 
 	// https://github.com/docker/docker/issues/4036
 	// https://github.com/docker/docker/issues/4036
 	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
 	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
-		log.Warnf("WARNING: Udev sync is not supported. This will lead to unexpected behavior, data loss and errors")
+		log.Warnf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors")
 	}
 	}
 	log.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported())
 	log.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported())
 
 
@@ -1087,7 +1093,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 		}
 		}
 	}
 	}
 
 
-	// Right now this loads only NextDeviceId. If there is more metatadata
+	// Right now this loads only NextDeviceId. If there is more metadata
 	// down the line, we might have to move it earlier.
 	// down the line, we might have to move it earlier.
 	if err = devices.loadDeviceSetMetaData(); err != nil {
 	if err = devices.loadDeviceSetMetaData(); err != nil {
 		return err
 		return err
@@ -1105,7 +1111,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 }
 }
 
 
 func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
 func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
-	log.Debugf("[deviceset] AddDevice() hash=%s basehash=%s", hash, baseHash)
+	log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash)
 	defer log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)
 	defer log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)
 
 
 	baseInfo, err := devices.lookupDevice(baseHash)
 	baseInfo, err := devices.lookupDevice(baseHash)
@@ -1215,7 +1221,7 @@ func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
 	// Wait for the unmount to be effective,
 	// Wait for the unmount to be effective,
 	// by watching the value of Info.OpenCount for the device
 	// by watching the value of Info.OpenCount for the device
 	if err := devices.waitClose(info); err != nil {
 	if err := devices.waitClose(info); err != nil {
-		log.Errorf("Warning: error waiting for device %s to close: %s", info.Hash, err)
+		log.Errorf("Error waiting for device %s to close: %s", info.Hash, err)
 	}
 	}
 
 
 	devinfo, err := devicemapper.GetInfo(info.Name())
 	devinfo, err := devicemapper.GetInfo(info.Name())
@@ -1319,9 +1325,9 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error {
 }
 }
 
 
 func (devices *DeviceSet) Shutdown() error {
 func (devices *DeviceSet) Shutdown() error {
-	log.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
+	log.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix)
 	log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
 	log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
-	defer log.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
+	defer log.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix)
 
 
 	var devs []*DevInfo
 	var devs []*DevInfo
 
 
@@ -1388,7 +1394,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
 
 
 	if info.mountCount > 0 {
 	if info.mountCount > 0 {
 		if path != info.mountPath {
 		if path != info.mountPath {
-			return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path)
+			return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path)
 		}
 		}
 
 
 		info.mountCount++
 		info.mountCount++
@@ -1562,7 +1568,7 @@ func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataU
 	return
 	return
 }
 }
 
 
-// MetadataDevicePath returns the path to the metadata storage for this deviceset,
+// DataDevicePath returns the path to the data storage for this deviceset,
 // regardless of loopback or block device
 // regardless of loopback or block device
 func (devices *DeviceSet) DataDevicePath() string {
 func (devices *DeviceSet) DataDevicePath() string {
 	return devices.dataDevice
 	return devices.dataDevice
@@ -1574,6 +1580,28 @@ func (devices *DeviceSet) MetadataDevicePath() string {
 	return devices.metadataDevice
 	return devices.metadataDevice
 }
 }
 
 
+func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
+	buf := new(syscall.Statfs_t)
+	err := syscall.Statfs(loopFile, buf)
+	if err != nil {
+		log.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err)
+		return 0, err
+	}
+	return buf.Bfree * uint64(buf.Bsize), nil
+}
+
+func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) {
+	if loopFile != "" {
+		fi, err := os.Stat(loopFile)
+		if err != nil {
+			log.Warnf("Couldn't stat loopfile %v: %v", loopFile, err)
+			return false, err
+		}
+		return fi.Mode().IsRegular(), nil
+	}
+	return false, nil
+}
+
 // Status returns the current status of this deviceset
 // Status returns the current status of this deviceset
 func (devices *DeviceSet) Status() *Status {
 func (devices *DeviceSet) Status() *Status {
 	devices.Lock()
 	devices.Lock()
@@ -1595,12 +1623,28 @@ func (devices *DeviceSet) Status() *Status {
 
 
 		status.Data.Used = dataUsed * blockSizeInSectors * 512
 		status.Data.Used = dataUsed * blockSizeInSectors * 512
 		status.Data.Total = dataTotal * blockSizeInSectors * 512
 		status.Data.Total = dataTotal * blockSizeInSectors * 512
+		status.Data.Available = status.Data.Total - status.Data.Used
 
 
 		// metadata blocks are always 4k
 		// metadata blocks are always 4k
 		status.Metadata.Used = metadataUsed * 4096
 		status.Metadata.Used = metadataUsed * 4096
 		status.Metadata.Total = metadataTotal * 4096
 		status.Metadata.Total = metadataTotal * 4096
+		status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
 
 
 		status.SectorSize = blockSizeInSectors * 512
 		status.SectorSize = blockSizeInSectors * 512
+
+		if check, _ := devices.isRealFile(devices.dataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
+			if err == nil && actualSpace < status.Data.Available {
+				status.Data.Available = actualSpace
+			}
+		}
+
+		if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
+			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
+			if err == nil && actualSpace < status.Metadata.Available {
+				status.Metadata.Available = actualSpace
+			}
+		}
 	}
 	}
 
 
 	return status
 	return status

+ 3 - 1
daemon/graphdriver/devmapper/driver.go

@@ -72,8 +72,10 @@ func (d *Driver) Status() [][2]string {
 		{"Metadata file", s.MetadataFile},
 		{"Metadata file", s.MetadataFile},
 		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
 		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
 		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
 		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
+		{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))},
 		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
 		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
 		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
 		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
+		{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
 		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
 		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
 	}
 	}
 	if len(s.DataLoopback) > 0 {
 	if len(s.DataLoopback) > 0 {
@@ -162,7 +164,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
 func (d *Driver) Put(id string) error {
 func (d *Driver) Put(id string) error {
 	err := d.DeviceSet.UnmountDevice(id)
 	err := d.DeviceSet.UnmountDevice(id)
 	if err != nil {
 	if err != nil {
-		log.Errorf("Warning: error unmounting device %s: %s", id, err)
+		log.Errorf("Error unmounting device %s: %s", id, err)
 	}
 	}
 	return err
 	return err
 }
 }

+ 2 - 1
daemon/graphdriver/devmapper/mount.go

@@ -55,13 +55,14 @@ func ProbeFsType(device string) (string, error) {
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
+	defer file.Close()
 
 
 	buffer := make([]byte, maxLen)
 	buffer := make([]byte, maxLen)
 	l, err := file.Read(buffer)
 	l, err := file.Read(buffer)
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
-	file.Close()
+
 	if uint64(l) != maxLen {
 	if uint64(l) != maxLen {
 		return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device)
 		return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device)
 	}
 	}

+ 5 - 1
daemon/graphdriver/driver.go

@@ -25,6 +25,8 @@ const (
 	FsMagicReiserFs    = FsMagic(0x52654973)
 	FsMagicReiserFs    = FsMagic(0x52654973)
 	FsMagicSmbFs       = FsMagic(0x0000517B)
 	FsMagicSmbFs       = FsMagic(0x0000517B)
 	FsMagicJffs2Fs     = FsMagic(0x000072b6)
 	FsMagicJffs2Fs     = FsMagic(0x000072b6)
+	FsMagicZfs         = FsMagic(0x2fc12fc1)
+	FsMagicXfs         = FsMagic(0x58465342)
 	FsMagicUnsupported = FsMagic(0x00000000)
 	FsMagicUnsupported = FsMagic(0x00000000)
 )
 )
 
 
@@ -58,6 +60,8 @@ var (
 		FsMagicReiserFs:    "reiserfs",
 		FsMagicReiserFs:    "reiserfs",
 		FsMagicSmbFs:       "smb",
 		FsMagicSmbFs:       "smb",
 		FsMagicJffs2Fs:     "jffs2",
 		FsMagicJffs2Fs:     "jffs2",
+		FsMagicZfs:         "zfs",
+		FsMagicXfs:         "xfs",
 		FsMagicUnsupported: "unsupported",
 		FsMagicUnsupported: "unsupported",
 	}
 	}
 )
 )
@@ -180,6 +184,6 @@ func checkPriorDriver(name, root string) {
 		}
 		}
 	}
 	}
 	if len(priorDrivers) > 0 {
 	if len(priorDrivers) > 0 {
-		log.Warnf("graphdriver %s selected. Warning: your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
+		log.Warnf("Graphdriver %s selected. Your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
 	}
 	}
 }
 }

+ 2 - 2
daemon/graphdriver/graphtest/graphtest.go

@@ -5,7 +5,6 @@ import (
 	"io/ioutil"
 	"io/ioutil"
 	"os"
 	"os"
 	"path"
 	"path"
-	"strings"
 	"syscall"
 	"syscall"
 	"testing"
 	"testing"
 
 
@@ -74,7 +73,8 @@ func newDriver(t *testing.T, name string) *Driver {
 
 
 	d, err := graphdriver.GetDriver(name, root, nil)
 	d, err := graphdriver.GetDriver(name, root, nil)
 	if err != nil {
 	if err != nil {
-		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || strings.Contains(err.Error(), "'overlay' is not supported over") {
+		t.Logf("graphdriver: %v\n", err)
+		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
 			t.Skipf("Driver %s not supported", name)
 			t.Skipf("Driver %s not supported", name)
 		}
 		}
 		t.Fatal(err)
 		t.Fatal(err)

+ 4 - 1
daemon/graphdriver/overlay/overlay.go

@@ -118,6 +118,9 @@ func Init(home string, options []string) (graphdriver.Driver, error) {
 	case graphdriver.FsMagicAufs:
 	case graphdriver.FsMagicAufs:
 		log.Error("'overlay' is not supported over aufs.")
 		log.Error("'overlay' is not supported over aufs.")
 		return nil, graphdriver.ErrIncompatibleFS
 		return nil, graphdriver.ErrIncompatibleFS
+	case graphdriver.FsMagicZfs:
+		log.Error("'overlay' is not supported over zfs.")
+		return nil, graphdriver.ErrIncompatibleFS
 	}
 	}
 
 
 	// Create the driver home dir
 	// Create the driver home dir
@@ -298,7 +301,7 @@ func (d *Driver) Get(id string, mountLabel string) (string, error) {
 
 
 	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
 	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
 	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
 	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
-		return "", err
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
 	}
 	}
 	mount.path = mergedDir
 	mount.path = mergedDir
 	mount.mounted = true
 	mount.mounted = true

Some files were not shown because too many files changed in this diff