
Merge pull request #5079 from unclejack/bump_v0.10.0

Bump version to v0.10.0
unclejack 11 years ago
parent
commit
e128a606e3
100 changed files with 3479 additions and 1360 deletions
  1. .travis.yml (+1 -1)
  2. AUTHORS (+1 -0)
  3. CHANGELOG.md (+177 -0)
  4. CONTRIBUTING.md (+38 -25)
  5. Dockerfile (+8 -5)
  6. LICENSE (+1 -12)
  7. MAINTAINERS (+1 -1)
  8. Makefile (+19 -8)
  9. VERSION (+1 -1)
  10. api/api_unit_test.go (+0 -46)
  11. api/client/cli.go (+102 -0)
  12. api/client/commands.go (+189 -504)
  13. api/client/utils.go (+390 -0)
  14. api/common.go (+7 -4)
  15. api/server/server.go (+70 -34)
  16. api/server/server_unit_test.go (+180 -0)
  17. archive/archive.go (+4 -1)
  18. archive/changes_test.go (+4 -4)
  19. auth/MAINTAINERS (+0 -3)
  20. builtins/builtins.go (+5 -7)
  21. commands_unit_test.go (+0 -160)
  22. contrib/check-config.sh (+146 -0)
  23. contrib/completion/bash/docker (+3 -6)
  24. contrib/completion/fish/docker.fish (+22 -18)
  25. contrib/completion/zsh/_docker (+1 -1)
  26. contrib/desktop-integration/data/Dockerfile (+3 -3)
  27. contrib/desktop-integration/iceweasel/Dockerfile (+4 -4)
  28. contrib/docker-device-tool/device_tool.go (+1 -1)
  29. contrib/host-integration/Dockerfile.dev (+1 -1)
  30. contrib/host-integration/Dockerfile.min (+1 -1)
  31. contrib/host-integration/manager.go (+1 -1)
  32. contrib/init/sysvinit-debian/docker (+28 -13)
  33. contrib/init/sysvinit-debian/docker.default (+1 -1)
  34. contrib/init/upstart/docker.conf (+26 -11)
  35. contrib/man/man1/docker-attach.1 (+56 -0)
  36. contrib/man/man1/docker-build.1 (+65 -0)
  37. contrib/man/man1/docker-images.1 (+84 -0)
  38. contrib/man/man1/docker-info.1 (+39 -0)
  39. contrib/man/man1/docker-inspect.1 (+237 -0)
  40. contrib/man/man1/docker-rm.1 (+45 -0)
  41. contrib/man/man1/docker-rmi.1 (+29 -0)
  42. contrib/man/man1/docker-run.1 (+277 -0)
  43. contrib/man/man1/docker-tag.1 (+49 -0)
  44. contrib/man/man1/docker.1 (+172 -0)
  45. contrib/mkimage-debootstrap.sh (+1 -0)
  46. contrib/mkseccomp.pl (+1 -1)
  47. daemonconfig/config.go (+12 -7)
  48. docker/docker.go (+73 -4)
  49. docs/MAINTAINERS (+0 -1)
  50. docs/sources/articles/runmetrics.rst (+1 -1)
  51. docs/sources/articles/security.rst (+4 -2)
  52. docs/sources/examples/apt-cacher-ng.Dockerfile (+15 -0)
  53. docs/sources/examples/apt-cacher-ng.rst (+102 -0)
  54. docs/sources/examples/example_header.inc (+1 -0)
  55. docs/sources/examples/hello_world.rst (+17 -17)
  56. docs/sources/examples/https.rst (+126 -0)
  57. docs/sources/examples/index.rst (+2 -0)
  58. docs/sources/examples/mongodb.rst (+3 -3)
  59. docs/sources/examples/nodejs_web_app.rst (+3 -3)
  60. docs/sources/examples/postgresql_service.Dockerfile (+1 -1)
  61. docs/sources/examples/postgresql_service.rst (+5 -5)
  62. docs/sources/examples/python_web_app.rst (+1 -1)
  63. docs/sources/examples/running_redis_service.rst (+4 -4)
  64. docs/sources/examples/running_riak_service.rst (+1 -1)
  65. docs/sources/examples/running_ssh_service.rst (+2 -2)
  66. docs/sources/installation/amazon.rst (+32 -0)
  67. docs/sources/installation/binaries.rst (+14 -1)
  68. docs/sources/installation/fedora.rst (+4 -4)
  69. docs/sources/installation/index.rst (+1 -0)
  70. docs/sources/installation/mac.rst (+6 -5)
  71. docs/sources/installation/rhel.rst (+4 -0)
  72. docs/sources/installation/softlayer.rst (+25 -0)
  73. docs/sources/installation/ubuntulinux.rst (+21 -10)
  74. docs/sources/reference/api/docker_io_accounts_api.rst (+4 -4)
  75. docs/sources/reference/api/docker_remote_api.rst (+4 -1)
  76. docs/sources/reference/api/docker_remote_api_v1.10.rst (+4 -2)
  77. docs/sources/reference/api/docker_remote_api_v1.2.rst (+2 -2)
  78. docs/sources/reference/api/docker_remote_api_v1.3.rst (+2 -2)
  79. docs/sources/reference/api/docker_remote_api_v1.4.rst (+2 -2)
  80. docs/sources/reference/api/docker_remote_api_v1.5.rst (+2 -2)
  81. docs/sources/reference/api/docker_remote_api_v1.6.rst (+2 -2)
  82. docs/sources/reference/api/docker_remote_api_v1.7.rst (+2 -2)
  83. docs/sources/reference/api/docker_remote_api_v1.8.rst (+2 -2)
  84. docs/sources/reference/api/docker_remote_api_v1.9.rst (+2 -2)
  85. docs/sources/reference/api/remote_api_client_libraries.rst (+6 -0)
  86. docs/sources/reference/builder.rst (+46 -43)
  87. docs/sources/reference/commandline/cli.rst (+203 -208)
  88. docs/sources/reference/run.rst (+49 -48)
  89. docs/sources/terms/images/docker-filesystems-busyboxrw.png (BIN)
  90. docs/sources/terms/images/docker-filesystems-debian.png (BIN)
  91. docs/sources/terms/images/docker-filesystems-debianrw.png (BIN)
  92. docs/sources/terms/images/docker-filesystems-generic.png (BIN)
  93. docs/sources/terms/images/docker-filesystems-multilayer.png (BIN)
  94. docs/sources/terms/images/docker-filesystems-multiroot.png (BIN)
  95. docs/sources/terms/images/docker-filesystems.svg (+63 -68)
  96. docs/sources/use/ambassador_pattern_linking.rst (+14 -14)
  97. docs/sources/use/basics.rst (+3 -1)
  98. docs/sources/use/chef.rst (+95 -0)
  99. docs/sources/use/host_integration.rst (+0 -5)
  100. docs/sources/use/index.rst (+1 -0)

+ 1 - 1
.travis.yml

@@ -25,6 +25,6 @@ before_script:
 script:
   - hack/travis/dco.py
   - hack/travis/gofmt.py
-  - make -sC docs SPHINXOPTS=-q docs man
+  - make -sC docs SPHINXOPTS=-qW docs man
 
 # vim:set sw=2 ts=2:

+ 1 - 0
AUTHORS

@@ -177,6 +177,7 @@ Keli Hu <dev@keli.hu>
 Ken Cochrane <kencochrane@gmail.com>
 Kevin Clark <kevin.clark@gmail.com>
 Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Wallace <kevin@pentabarf.net>
 Keyvan Fatehi <keyvanfatehi@gmail.com>
 kim0 <email.ahmedkamal@googlemail.com>
 Kim BKC Carlbacker <kim.carlbacker@gmail.com>

+ 177 - 0
CHANGELOG.md

@@ -1,5 +1,182 @@
 # Changelog
 
+## 0.10.0 (2014-04-08)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+- Follow symlinks inside container's root for ADD build instructions.
+- Fix EXPOSE caching.
+
+#### Documentation
+- Add the new options of `docker ps` to the documentation.
+- Add the options of `docker restart` to the documentation.
+- Update daemon docs and help messages for --iptables and --ip-forward.
+- Updated apt-cacher-ng docs example.
+- Remove duplicate description of --mtu from docs.
+- Add missing -t and -v for `docker images` to the docs.
+- Add fixes to the cli docs.
+- Update libcontainer docs.
+- Update images in docs to remove references to AUFS and LXC.
+- Update the nodejs_web_app in the docs to use the new epel RPM address.
+- Fix external link on security of containers.
+- Update remote API docs.
+- Add image size to history docs.
+- Be explicit about binding to all interfaces in redis example.
+- Document DisableNetwork flag in the 1.10 remote api.
+- Document that `--lxc-conf` is lxc only.
+- Add chef usage documentation.
+- Add example for an image with multiple tags for `docker load`.
+- Explain what `docker run -a` does in the docs.
+
+#### Contrib
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more.
+- Add check-config script to contrib.
+- Fix fish shell completion.
+
+#### Hack
+* Clean up "go test" output from "make test" to be much more readable/scannable.
+* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+- Include contributed completions in Ubuntu PPA.
++ Add cli integration tests.
+* Add tweaks to the hack scripts to make them simpler.
+
+#### Remote API
++ Add TLS auth support for API.
+* Move git clone from daemon to client.
+- Fix content-type detection in docker cp.
+* Split API into 2 go packages.
+
+#### Runtime
+* Support hairpin NAT without going through Docker server.
+- devicemapper: succeed immediately when removing non-existing devices.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping).
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- devicemapper: ensure we shut down thin pool cleanly.
+- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
+- devicemapper: avoid AB-BA deadlock.
+- devicemapper: make shutdown better/faster.
+- improve alpha sorting in mflag.
+- Remove manual http cookie management because the cookiejar is being used.
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Add FreeBSD support for the client.
+- Merge auth package into registry.
+- Add deprecation warning for -t on `docker pull`.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+- Fix attach exit on darwin.
+- Improve deprecation message.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Only unshare the mount namespace for execin.
+- Merge existing config when committing.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Mount cgroups automatically if they're not mounted already.
+- Use mock for search tests.
+- Update to double-dash everywhere.
+- Move .dockerenv parsing to lxc driver.
+- Move all bind-mounts in the container inside the namespace.
+- Don't use separate bind mount for container.
+- Always symlink /dev/ptmx for libcontainer.
+- Don't kill by pid for other drivers.
+- Add initial logging to libcontainer.
+* Sort by port in `docker ps`.
+- Move networking drivers into runtime top level package.
++ Add --no-prune to `docker rmi`.
++ Add time since exit in `docker ps`.
+- graphdriver: add build tags.
+- Prevent allocation of previously allocated ports & improve port allocation.
+* Add support for --since/--before in `docker ps`.
+- Clean up container stop.
++ Add support for configurable dns search domains.
+- Add support for relative WORKDIR instructions.
+- Add --output flag for docker save.
+- Remove duplication of DNS entries in config merging.
+- Add cpuset.cpus to cgroups and native driver options.
+- Remove docker-ci.
+- Promote btrfs. btrfs is no longer considered experimental.
+- Add --input flag to `docker load`.
+- Return error when existing bridge doesn't match IP address.
+- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
+- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
+- Add systemd implementation of cgroups and make containers show up as systemd units.
+- Fix commit and import when no repository is specified.
+- Remount /var/lib/docker as --private to fix scaling issue.
+- Use the environment's proxy when pinging the remote registry.
+- Reduce error level from harmless errors.
+* Allow --volumes-from to be individual files.
+- Fix expanding buffer in StdCopy.
+- Set error regardless of attach or stdin. This fixes #3364.
+- Add support for --env-file to load environment variables from files.
+- Symlink /etc/mtab and /proc/mounts.
+- Allow pushing a single tag.
+- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
+- Don't throw error when starting an already running container.
+- Fix dynamic port allocation limit.
+- remove setupDev from libcontainer.
+- Add API version to `docker version`.
+- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
+- Fix --volumes-from mount failure.
+- Allow non-privileged containers to create device nodes.
+- Skip login tests because of external dependency on a hosted service.
+- Deprecate `docker images --tree` and `docker images --viz`.
+- Deprecate `docker insert`.
+- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04.
+- Add specific error message when hitting 401 over HTTP on push.
+- Fix absolute volume check.
+- Remove volumes-from from the config.
+- Move DNS options to hostconfig.
+- Update the apparmor profile for libcontainer.
+- Add deprecation notice for `docker commit -run`.
+
+## 0.9.1 (2014-03-24)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+
+#### Documentation
+- Fix external link on security of containers.
+
+#### Contrib
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+
+#### Hack
+- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+
+#### Remote API
+- Fix content-type detection in `docker cp`.
+
+#### Runtime
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Only unshare the mount namespace for execin.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Merge existing config when committing.
+- Fix panic in monitor.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Improve deprecation message.
+- Fix attach exit on darwin.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping).
+- devicemapper: succeed immediately when removing non-existing devices.
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+
 ## 0.9.0 (2014-03-10)
 
 #### Builder

+ 38 - 25
CONTRIBUTING.md

@@ -126,33 +126,46 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
 The sign-off is a simple line at the end of the explanation for the
 patch, which certifies that you wrote it or otherwise have the right to
 pass it on as an open-source patch.  The rules are pretty simple: if you
-can certify the below:
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
 
 ```
-Docker Developer Certificate of Origin 1.1
-
-By making a contribution to the Docker Project ("Project"), I represent and
-warrant that:
-
-a. The contribution was created in whole or in part by me and I have the right
-to submit the contribution on my own behalf or on behalf of a third party who
-has authorized me to submit this contribution to the Project; or
-
-b. The contribution is based upon previous work that, to the best of my
-knowledge, is covered under an appropriate open source license and I have the
-right and authorization to submit that work with modifications, whether
-created in whole or in part by me, under the same open source license (unless
-I am permitted to submit under a different license) that I have identified in
-the contribution; or
-
-c. The contribution was provided directly to me by some other person who
-represented and warranted (a) or (b) and I have not modified it.
-
-d. I understand and agree that this Project and the contribution are publicly
-known and that a record of the contribution (including all personal
-information I submit with it, including my sign-off record) is maintained
-indefinitely and may be redistributed consistent with this Project or the open
-source license(s) involved.
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
 ```
 
 then you just add a line to every git commit message:

+ 8 - 5
Dockerfile

@@ -6,13 +6,13 @@
 # docker build -t docker .
 #
 # # Mount your source in an interactive container for quick testing:
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash
+# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash
 #
 # # Run the test suite:
-# docker run -privileged docker hack/make.sh test
+# docker run --privileged docker hack/make.sh test
 #
 # # Publish a release:
-# docker run -privileged \
+# docker run --privileged \
 #  -e AWS_S3_BUCKET=baz \
 #  -e AWS_ACCESS_KEY=foo \
 #  -e AWS_SECRET_KEY=bar \
@@ -68,7 +68,10 @@ ENV	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
 RUN	cd /usr/local/go/src && ./make.bash --no-clean 2>&1
 
 # Compile Go for cross compilation
-ENV	DOCKER_CROSSPLATFORMS	linux/386 linux/arm darwin/amd64 darwin/386
+ENV	DOCKER_CROSSPLATFORMS	\
+	linux/386 linux/arm \
+	darwin/amd64 darwin/386 \
+	freebsd/amd64 freebsd/386 freebsd/arm
 # (set an explicit GOARM of 5 for maximum compatibility)
 ENV	GOARM	5
 RUN	cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
@@ -87,7 +90,7 @@ RUN	git config --global user.email 'docker-dummy@example.com'
 
 VOLUME	/var/lib/docker
 WORKDIR	/go/src/github.com/dotcloud/docker
-ENV	DOCKER_BUILDTAGS	apparmor
+ENV	DOCKER_BUILDTAGS	apparmor selinux
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT	["hack/dind"]
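
For readers not fluent in bash parameter expansion, the cross-compilation loop above splits each entry of DOCKER_CROSSPLATFORMS into GOOS and GOARCH with `${platform%/*}` and `${platform##*/}`. A small, illustrative Go equivalent of that split (the platform list is a subset of the new value, shown only for demonstration):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative subset of the new DOCKER_CROSSPLATFORMS value.
	platforms := []string{"linux/386", "linux/arm", "darwin/amd64", "freebsd/amd64"}
	for _, p := range platforms {
		// Equivalent of GOOS=${platform%/*} GOARCH=${platform##*/}
		// for two-part GOOS/GOARCH pairs.
		parts := strings.SplitN(p, "/", 2)
		fmt.Printf("GOOS=%s GOARCH=%s\n", parts[0], parts[1])
	}
}
```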

+ 1 - 12
LICENSE

@@ -176,18 +176,7 @@
 
    END OF TERMS AND CONDITIONS
 
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2014 Docker, Inc.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

+ 1 - 1
MAINTAINERS

@@ -1,5 +1,5 @@
 Solomon Hykes <solomon@dotcloud.com> (@shykes)
-Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
 Victor Vieux <vieux@docker.com> (@vieux)
 Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
 .travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)

+ 19 - 8
Makefile

@@ -1,9 +1,17 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli
 
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
-DOCKER_IMAGE := docker:$(GIT_BRANCH)
-DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH)
-DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)"
+# to allow `make BINDDIR=. shell` or `make BINDDIR= test`
+BINDDIR := bundles
+# to allow `make DOCSPORT=9000 docs`
+DOCSPORT := 8000
+
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
+
+DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
+DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)"
 
 default: binary
 
@@ -17,17 +25,20 @@ cross: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
 
 docs: docs-build
-	docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)"
+	$(DOCKER_RUN_DOCS)
 
 docs-shell: docs-build
-	docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash
+	$(DOCKER_RUN_DOCS) bash
 
 test: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli
 
 test-integration: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
 
+test-integration-cli: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
+
 shell: build
 	$(DOCKER_RUN_DOCKER) bash
 

+ 1 - 1
VERSION

@@ -1 +1 @@
-0.9.0
+0.10.0

+ 0 - 46
api/api_unit_test.go

@@ -1,9 +1,6 @@
 package api
 
 import (
-	"fmt"
-	"net/http"
-	"net/http/httptest"
 	"testing"
 )
 
@@ -20,46 +17,3 @@ func TestJsonContentType(t *testing.T) {
 		t.Fail()
 	}
 }
-
-func TestGetBoolParam(t *testing.T) {
-	if ret, err := getBoolParam("true"); err != nil || !ret {
-		t.Fatalf("true -> true, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("True"); err != nil || !ret {
-		t.Fatalf("True -> true, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("1"); err != nil || !ret {
-		t.Fatalf("1 -> true, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam(""); err != nil || ret {
-		t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("false"); err != nil || ret {
-		t.Fatalf("false -> false, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("0"); err != nil || ret {
-		t.Fatalf("0 -> false, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("faux"); err == nil || ret {
-		t.Fatalf("faux -> false, err | got %t %s", ret, err)
-	}
-}
-
-func TesthttpError(t *testing.T) {
-	r := httptest.NewRecorder()
-
-	httpError(r, fmt.Errorf("No such method"))
-	if r.Code != http.StatusNotFound {
-		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
-	}
-
-	httpError(r, fmt.Errorf("This accound hasn't been activated"))
-	if r.Code != http.StatusForbidden {
-		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
-	}
-
-	httpError(r, fmt.Errorf("Some error"))
-	if r.Code != http.StatusInternalServerError {
-		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
-	}
-}

+ 102 - 0
api/client/cli.go

@@ -0,0 +1,102 @@
+package client
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strings"
+	"text/template"
+
+	flag "github.com/dotcloud/docker/pkg/mflag"
+	"github.com/dotcloud/docker/pkg/term"
+	"github.com/dotcloud/docker/registry"
+)
+
+var funcMap = template.FuncMap{
+	"json": func(v interface{}) string {
+		a, _ := json.Marshal(v)
+		return string(a)
+	},
+}
+
+func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
+	methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
+	method := reflect.ValueOf(cli).MethodByName(methodName)
+	if !method.IsValid() {
+		return nil, false
+	}
+	return method.Interface().(func(...string) error), true
+}
+
+func (cli *DockerCli) ParseCommands(args ...string) error {
+	if len(args) > 0 {
+		method, exists := cli.getMethod(args[0])
+		if !exists {
+			fmt.Println("Error: Command not found:", args[0])
+			return cli.CmdHelp(args[1:]...)
+		}
+		return method(args[1:]...)
+	}
+	return cli.CmdHelp(args...)
+}
+
+func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
+	flags := flag.NewFlagSet(name, flag.ContinueOnError)
+	flags.Usage = func() {
+		fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
+		flags.PrintDefaults()
+		os.Exit(2)
+	}
+	return flags
+}
+
+func (cli *DockerCli) LoadConfigFile() (err error) {
+	cli.configFile, err = registry.LoadConfig(os.Getenv("HOME"))
+	if err != nil {
+		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
+	}
+	return err
+}
+
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli {
+	var (
+		isTerminal = false
+		terminalFd uintptr
+	)
+
+	if in != nil {
+		if file, ok := in.(*os.File); ok {
+			terminalFd = file.Fd()
+			isTerminal = term.IsTerminal(terminalFd)
+		}
+	}
+
+	if err == nil {
+		err = out
+	}
+	return &DockerCli{
+		proto:      proto,
+		addr:       addr,
+		in:         in,
+		out:        out,
+		err:        err,
+		isTerminal: isTerminal,
+		terminalFd: terminalFd,
+		tlsConfig:  tlsConfig,
+	}
+}
+
+type DockerCli struct {
+	proto      string
+	addr       string
+	configFile *registry.ConfigFile
+	in         io.ReadCloser
+	out        io.Writer
+	err        io.Writer
+	isTerminal bool
+	terminalFd uintptr
+	tlsConfig  *tls.Config
+}

+ 189 - 504
api/client.go → api/client/commands.go

@@ -1,76 +1,38 @@
-package api
+package client
 
 import (
 	"bufio"
 	"bytes"
 	"encoding/base64"
 	"encoding/json"
-	"errors"
 	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/auth"
-	"github.com/dotcloud/docker/dockerversion"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/nat"
-	flag "github.com/dotcloud/docker/pkg/mflag"
-	"github.com/dotcloud/docker/pkg/term"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
-	"net"
 	"net/http"
-	"net/http/httputil"
 	"net/url"
 	"os"
-	"os/signal"
+	"os/exec"
 	"path"
-	"reflect"
-	"regexp"
-	"runtime"
+	goruntime "runtime"
 	"strconv"
 	"strings"
 	"syscall"
 	"text/tabwriter"
 	"text/template"
 	"time"
-)
 
-var funcMap = template.FuncMap{
-	"json": func(v interface{}) string {
-		a, _ := json.Marshal(v)
-		return string(a)
-	},
-}
-
-var (
-	ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+	"github.com/dotcloud/docker/api"
+	"github.com/dotcloud/docker/archive"
+	"github.com/dotcloud/docker/dockerversion"
+	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/nat"
+	"github.com/dotcloud/docker/pkg/signal"
+	"github.com/dotcloud/docker/pkg/term"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/runconfig"
+	"github.com/dotcloud/docker/utils"
 )
 
-func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
-	methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
-	method := reflect.ValueOf(cli).MethodByName(methodName)
-	if !method.IsValid() {
-		return nil, false
-	}
-	return method.Interface().(func(...string) error), true
-}
-
-func ParseCommands(proto, addr string, args ...string) error {
-	cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
-
-	if len(args) > 0 {
-		method, exists := cli.getMethod(args[0])
-		if !exists {
-			fmt.Println("Error: Command not found:", args[0])
-			return cli.CmdHelp(args[1:]...)
-		}
-		return method(args[1:]...)
-	}
-	return cli.CmdHelp(args...)
-}
-
 func (cli *DockerCli) CmdHelp(args ...string) error {
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
@@ -81,7 +43,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 			return nil
 		}
 	}
-	help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
+	help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET)
 	for _, command := range [][]string{
 		{"attach", "Attach to a running container"},
 		{"build", "Build a container from a Dockerfile"},
@@ -94,7 +56,6 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 		{"images", "List images"},
 		{"import", "Create a new filesystem image from the contents of a tarball"},
 		{"info", "Display system-wide information"},
-		{"insert", "Insert a file in an image"},
 		{"inspect", "Return low-level information on a container"},
 		{"kill", "Kill a running container"},
 		{"load", "Load an image from a tar archive"},
@@ -123,7 +84,9 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 	return nil
 }
 
+// FIXME: 'insert' is deprecated.
 func (cli *DockerCli) CmdInsert(args ...string) error {
+	fmt.Fprintf(os.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'docker build' and 'ADD' instead.\n")
 	cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -160,6 +123,8 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		err      error
 	)
 
+	_, err = exec.LookPath("git")
+	hasGit := err == nil
 	if cmd.Arg(0) == "-" {
 		// As a special case, 'docker build -' will build from an empty context with the
 		// contents of stdin as a Dockerfile
@@ -168,17 +133,34 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 			return err
 		}
 		context, err = archive.Generate("Dockerfile", string(dockerfile))
-	} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
+	} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
 		isRemote = true
 	} else {
-		if _, err := os.Stat(cmd.Arg(0)); err != nil {
+		root := cmd.Arg(0)
+		if utils.IsGIT(root) {
+			remoteURL := cmd.Arg(0)
+			if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) {
+				remoteURL = "https://" + remoteURL
+			}
+
+			root, err = ioutil.TempDir("", "docker-build-git")
+			if err != nil {
+				return err
+			}
+			defer os.RemoveAll(root)
+
+			if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
+				return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+			}
+		}
+		if _, err := os.Stat(root); err != nil {
 			return err
 		}
-		filename := path.Join(cmd.Arg(0), "Dockerfile")
+		filename := path.Join(root, "Dockerfile")
 		if _, err = os.Stat(filename); os.IsNotExist(err) {
 			return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
 		}
-		context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
+		context, err = archive.Tar(root, archive.Uncompressed)
 	}
 	var body io.Reader
 	// Setup an upload progress bar
@@ -189,6 +171,15 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	}
 	// Upload the build context
 	v := &url.Values{}
+
+	//Check if the given image name can be resolved
+	if *tag != "" {
+		repository, _ := utils.ParseRepositoryTag(*tag)
+		if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+			return err
+		}
+	}
+
 	v.Set("t", *tag)
 
 	if *suppressOutput {
@@ -229,7 +220,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 
 // 'docker login': login / register a user to registry service.
 func (cli *DockerCli) CmdLogin(args ...string) error {
-	cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
+	cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
 
 	var username, password, email string
 
@@ -240,7 +231,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	if err != nil {
 		return nil
 	}
-	serverAddress := auth.IndexServerAddress()
+	serverAddress := registry.IndexServerAddress()
 	if len(cmd.Args()) > 0 {
 		serverAddress = cmd.Arg(0)
 	}
@@ -266,7 +257,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	cli.LoadConfigFile()
 	authconfig, ok := cli.configFile.Configs[serverAddress]
 	if !ok {
-		authconfig = auth.AuthConfig{}
+		authconfig = registry.AuthConfig{}
 	}
 
 	if username == "" {
@@ -311,7 +302,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false)
 	if statusCode == 401 {
 		delete(cli.configFile.Configs, serverAddress)
-		auth.SaveConfig(cli.configFile)
+		registry.SaveConfig(cli.configFile)
 		return err
 	}
 	if err != nil {
@@ -320,10 +311,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 	var out2 engine.Env
 	err = out2.Decode(stream)
 	if err != nil {
-		cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
+		cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME"))
 		return err
 	}
-	auth.SaveConfig(cli.configFile)
+	registry.SaveConfig(cli.configFile)
 	if out2.Get("Status") != "" {
 		fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
 	}
@@ -367,7 +358,8 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	if dockerversion.VERSION != "" {
 		fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
 	}
-	fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
+	fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
+	fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version())
 	if dockerversion.GITCOMMIT != "" {
 		fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
 	}
@@ -389,6 +381,9 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 	}
 	out.Close()
 	fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
+	if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
+		fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
+	}
 	fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
 	fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
 	release := utils.GetReleaseVersion()
@@ -432,7 +427,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 
 	fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
 	fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
-	fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
+	fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
 	var driverStatus [][2]string
 	if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
 		return err
@@ -440,14 +435,15 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 	for _, pair := range driverStatus {
 		fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
 	}
+	fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
+	fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
+
 	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
 		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
 		fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
 		fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
 		fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
-		fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
 		fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
-		fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
 
 		if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
 			fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
@@ -533,13 +529,23 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
 
 func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 	sigc := make(chan os.Signal, 1)
-	utils.CatchAll(sigc)
+	signal.CatchAll(sigc)
 	go func() {
 		for s := range sigc {
 			if s == syscall.SIGCHLD {
 				continue
 			}
-			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil {
+			var sig string
+			for sigStr, sigN := range signal.SignalMap {
+				if sigN == s {
+					sig = sigStr
+					break
+				}
+			}
+			if sig == "" {
+				utils.Errorf("Unsupported signal: %d. Discarding.", s)
+			}
+			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
 				utils.Debugf("Error sending signal: %s", err)
 			}
 		}
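
The hunk above switches the kill request from a numeric signal to a signal name obtained by reverse-mapping signal.SignalMap. A small, hypothetical sketch of that reverse lookup (the map below is an illustrative stand-in for pkg/signal.SignalMap, not its full contents):

```go
package main

import (
	"fmt"
	"syscall"
)

// signalMap stands in for pkg/signal.SignalMap (name -> signal value);
// only a few entries are shown for illustration.
var signalMap = map[string]syscall.Signal{
	"INT":  syscall.SIGINT,
	"TERM": syscall.SIGTERM,
	"KILL": syscall.SIGKILL,
}

// nameOf performs the same reverse lookup as the loop above: find the
// name whose value matches the received signal.
func nameOf(s syscall.Signal) string {
	for name, n := range signalMap {
		if n == s {
			return name
		}
	}
	return ""
}

func main() {
	// The CLI then builds "/containers/<id>/kill?signal=TERM" from the name.
	fmt.Println(nameOf(syscall.SIGTERM))
}
```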
@@ -548,9 +554,11 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
 }
 
 func (cli *DockerCli) CmdStart(args ...string) error {
-	cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
-	attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
-	openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
+	var (
+		cmd       = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
+		attach    = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
+		openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
+	)
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -559,8 +567,10 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		return nil
 	}
 
-	var cErr chan error
-	var tty bool
+	var (
+		cErr chan error
+		tty  bool
+	)
 	if *attach || *openStdin {
 		if cmd.NArg() > 1 {
 			return fmt.Errorf("You cannot start and attach multiple containers at once.")
@@ -571,7 +581,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 			return err
 		}
 
-		container := &Container{}
+		container := &api.Container{}
 		err = json.Unmarshal(body, container)
 		if err != nil {
 			return err
@@ -581,7 +591,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 
 		if !container.Config.Tty {
 			sigc := cli.forwardAllSignals(cmd.Arg(0))
-			defer utils.StopCatch(sigc)
+			defer signal.StopCatch(sigc)
 		}
 
 		var in io.ReadCloser
@@ -606,8 +616,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		if err != nil {
 			if !*attach || !*openStdin {
 				fmt.Fprintf(cli.err, "%s\n", err)
-				encounteredError = fmt.Errorf("Error: failed to start one or more containers")
 			}
+			encounteredError = fmt.Errorf("Error: failed to start one or more containers")
 		} else {
 			if !*attach || !*openStdin {
 				fmt.Fprintf(cli.out, "%s\n", name)
@@ -758,9 +768,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 		return nil
 	}
 
-	port := cmd.Arg(1)
-	proto := "tcp"
-	parts := strings.SplitN(port, "/", 2)
+	var (
+		port      = cmd.Arg(1)
+		proto     = "tcp"
+		parts     = strings.SplitN(port, "/", 2)
+		container api.Container
+	)
+
 	if len(parts) == 2 && len(parts[1]) != 0 {
 		port = parts[0]
 		proto = parts[1]
@@ -769,13 +783,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 	if err != nil {
 		return err
 	}
-	var out Container
-	err = json.Unmarshal(body, &out)
+
+	err = json.Unmarshal(body, &container)
 	if err != nil {
 		return err
 	}
 
-	if frontends, exists := out.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
+	if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
 		for _, frontend := range frontends {
 			fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
 		}
@@ -788,8 +802,9 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 // 'docker rmi IMAGE' removes all images with the name IMAGE
 func (cli *DockerCli) CmdRmi(args ...string) error {
 	var (
-		cmd   = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
-		force = cmd.Bool([]string{"f", "-force"}, false, "Force")
+		cmd     = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
+		force   = cmd.Bool([]string{"f", "-force"}, false, "Force")
+		noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
 	)
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -803,6 +818,9 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 	if *force {
 		v.Set("force", "1")
 	}
+	if *noprune {
+		v.Set("noprune", "1")
+	}
 
 	var encounteredError error
 	for _, name := range cmd.Args() {
@@ -969,6 +987,14 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
 	}
 	v := url.Values{}
+
+	if repository != "" {
+		//Check if the given image name can be resolved
+		if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+			return err
+		}
+	}
+
 	v.Set("repo", repository)
 	v.Set("tag", tag)
 	v.Set("fromSrc", src)
@@ -983,7 +1009,7 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 }
 
 func (cli *DockerCli) CmdPush(args ...string) error {
-	cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry")
+	cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -996,8 +1022,10 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 
 	cli.LoadConfigFile()
 
+	remote, tag := utils.ParseRepositoryTag(name)
+
 	// Resolve the Repository name from fqn to hostname + name
-	hostname, _, err := registry.ResolveRepositoryName(name)
+	hostname, _, err := registry.ResolveRepositoryName(remote)
 	if err != nil {
 		return err
 	}
@@ -1008,7 +1036,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 	// Custom repositories can have different rules, and we must also
 	// allow pushing by image ID.
 	if len(strings.SplitN(name, "/", 2)) == 1 {
-		username := cli.configFile.Configs[auth.IndexServerAddress()].Username
+		username := cli.configFile.Configs[registry.IndexServerAddress()].Username
 		if username == "" {
 			username = "<user>"
 		}
@@ -1016,7 +1044,8 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 	}
 
 	v := url.Values{}
-	push := func(authConfig auth.AuthConfig) error {
+	v.Set("tag", tag)
+	push := func(authConfig registry.AuthConfig) error {
 		buf, err := json.Marshal(authConfig)
 		if err != nil {
 			return err
@@ -1025,7 +1054,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 			base64.URLEncoding.EncodeToString(buf),
 		}
 
-		return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
+		return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
 			"X-Registry-Auth": registryAuthHeader,
 		})
 	}
@@ -1045,8 +1074,8 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 }
 
 func (cli *DockerCli) CmdPull(args ...string) error {
-	cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
-	tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository")
+	cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry")
+	tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1075,7 +1104,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 	v.Set("fromImage", remote)
 	v.Set("tag", *tag)
 
-	pull := func(authConfig auth.AuthConfig) error {
+	pull := func(authConfig registry.AuthConfig) error {
 		buf, err := json.Marshal(authConfig)
 		if err != nil {
 			return err
@@ -1107,10 +1136,11 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 func (cli *DockerCli) CmdImages(args ...string) error {
 	cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images")
 	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
-	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)")
+	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)")
 	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-	flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format")
-	flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format")
+	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
+	flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
+	flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
 
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -1122,6 +1152,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 
 	filter := cmd.Arg(0)
 
+	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 	if *flViz || *flTree {
 		body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false))
 		if err != nil {
@@ -1232,6 +1263,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 	return nil
 }
 
+// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
 	length := images.Len()
 	if length > 1 {
@@ -1258,6 +1290,7 @@ func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[
 	}
 }
 
+// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
 	var (
 		imageID  string
@@ -1281,6 +1314,7 @@ func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix strin
 	}
 }
 
+// FIXME: --viz and --tree are deprecated. Remove them in a future version.
 func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
 	var imageID string
 	if noTrunc {
@@ -1304,8 +1338,8 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 	all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
 	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
 	nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
-	since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.")
-	before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.")
+	since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.")
+	before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.")
 	last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
 
 	if err := cmd.Parse(args); err != nil {
@@ -1374,7 +1408,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 				outCommand = utils.Trunc(outCommand, 20)
 			}
 			ports.ReadListFrom([]byte(out.Get("Ports")))
-			fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ","))
+			fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))
 			if *size {
 				if out.GetInt("SizeRootFs") > 0 {
 					fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs")))
@@ -1399,7 +1433,8 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 	cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
 	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
 	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
-	flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
+	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
+	flConfig := cmd.String([]string{"#run", "#-run"}, "", "this option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1419,6 +1454,13 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
 		return nil
 	}
 
+	//Check if the given image name can be resolved
+	if repository != "" {
+		if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+			return err
+		}
+	}
+
 	v := url.Values{}
 	v.Set("container", name)
 	v.Set("repo", repository)
@@ -1548,7 +1590,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		return err
 	}
 
-	container := &Container{}
+	container := &api.Container{}
 	err = json.Unmarshal(body, container)
 	if err != nil {
 		return err
@@ -1585,7 +1627,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 		return err
 	}
 
-	container := &Container{}
+	container := &api.Container{}
 	err = json.Unmarshal(body, container)
 	if err != nil {
 		return err
@@ -1614,7 +1656,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 
 	if *proxy && !container.Config.Tty {
 		sigc := cli.forwardAllSignals(cmd.Arg(0))
-		defer utils.StopCatch(sigc)
+		defer signal.StopCatch(sigc)
 	}
 
 	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
@@ -1707,6 +1749,11 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 	}
 
 	v := url.Values{}
+
+	//Check if the given image name can be resolved
+	if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+		return err
+	}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
 
@@ -1753,7 +1800,21 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil {
 			return fmt.Errorf("Failed to create the container ID file: %s", err)
 		}
-		defer containerIDFile.Close()
+		defer func() {
+			containerIDFile.Close()
+			var (
+				cidFileInfo os.FileInfo
+				err         error
+			)
+			if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil {
+				return
+			}
+			if cidFileInfo.Size() == 0 {
+				if err := os.Remove(hostConfig.ContainerIDFile); err != nil {
+					fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err)
+				}
+			}
+		}()
 	}
 
 	containerValues := url.Values{}
@@ -1818,7 +1879,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 
 	if sigProxy {
 		sigc := cli.forwardAllSignals(runResult.Get("Id"))
-		defer utils.StopCatch(sigc)
+		defer signal.StopCatch(sigc)
 	}
 
 	var (
@@ -1996,7 +2057,9 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 }
 
 func (cli *DockerCli) CmdSave(args ...string) error {
-	cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
+	cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)")
+	outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT")
+
 	if err := cmd.Parse(args); err != nil {
 		return err
 	}
@@ -2006,8 +2069,18 @@ func (cli *DockerCli) CmdSave(args ...string) error {
 		return nil
 	}
 
+	var (
+		output io.Writer = cli.out
+		err    error
+	)
+	if *outfile != "" {
+		output, err = os.Create(*outfile)
+		if err != nil {
+			return err
+		}
+	}
 	image := cmd.Arg(0)
-	if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
+	if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
 		return err
 	}
 	return nil
@@ -2015,6 +2088,8 @@ func (cli *DockerCli) CmdSave(args ...string) error {
 
 func (cli *DockerCli) CmdLoad(args ...string) error {
 	cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
+	infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
+
 	if err := cmd.Parse(args); err != nil {
 		return err
 	}
@@ -2024,408 +2099,18 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
 		return nil
 	}
 
-	if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
-	params := bytes.NewBuffer(nil)
-	if data != nil {
-		if env, ok := data.(engine.Env); ok {
-			if err := env.Encode(params); err != nil {
-				return nil, -1, err
-			}
-		} else {
-			buf, err := json.Marshal(data)
-			if err != nil {
-				return nil, -1, err
-			}
-			if _, err := params.Write(buf); err != nil {
-				return nil, -1, err
-			}
-		}
-	}
-	// fixme: refactor client to support redirect
-	re := regexp.MustCompile("/+")
-	path = re.ReplaceAllString(path, "/")
-
-	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), params)
-	if err != nil {
-		return nil, -1, err
-	}
-	if passAuthInfo {
-		cli.LoadConfigFile()
-		// Resolve the Auth config relevant for this server
-		authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress())
-		getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) {
-			buf, err := json.Marshal(authConfig)
-			if err != nil {
-				return nil, err
-			}
-			registryAuthHeader := []string{
-				base64.URLEncoding.EncodeToString(buf),
-			}
-			return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
-		}
-		if headers, err := getHeaders(authConfig); err == nil && headers != nil {
-			for k, v := range headers {
-				req.Header[k] = v
-			}
-		}
-	}
-	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Host = cli.addr
-	if data != nil {
-		req.Header.Set("Content-Type", "application/json")
-	} else if method == "POST" {
-		req.Header.Set("Content-Type", "plain/text")
-	}
-	dial, err := net.Dial(cli.proto, cli.addr)
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return nil, -1, ErrConnectionRefused
-		}
-		return nil, -1, err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	resp, err := clientconn.Do(req)
-	if err != nil {
-		clientconn.Close()
-		if strings.Contains(err.Error(), "connection refused") {
-			return nil, -1, ErrConnectionRefused
-		}
-		return nil, -1, err
-	}
-
-	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return nil, -1, err
-		}
-		if len(body) == 0 {
-			return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
-		}
-		return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
-	}
-
-	wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
-		if resp != nil && resp.Body != nil {
-			resp.Body.Close()
-		}
-		return clientconn.Close()
-	})
-	return wrapper, resp.StatusCode, nil
-}
-
-func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
-	if (method == "POST" || method == "PUT") && in == nil {
-		in = bytes.NewReader([]byte{})
-	}
-
-	// fixme: refactor client to support redirect
-	re := regexp.MustCompile("/+")
-	path = re.ReplaceAllString(path, "/")
-
-	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), in)
-	if err != nil {
-		return err
-	}
-	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Host = cli.addr
-	if method == "POST" {
-		req.Header.Set("Content-Type", "plain/text")
-	}
-
-	if headers != nil {
-		for k, v := range headers {
-			req.Header[k] = v
-		}
-	}
-
-	dial, err := net.Dial(cli.proto, cli.addr)
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	resp, err := clientconn.Do(req)
-	defer clientconn.Close()
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-		body, err := ioutil.ReadAll(resp.Body)
+	var (
+		input io.Reader = cli.in
+		err   error
+	)
+	if *infile != "" {
+		input, err = os.Open(*infile)
 		if err != nil {
 			return err
 		}
-		if len(body) == 0 {
-			return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
-		}
-		return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
-	}
-
-	if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
-		return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
 	}
-	if _, err := io.Copy(out, resp.Body); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
-	defer func() {
-		if started != nil {
-			close(started)
-		}
-	}()
-	// fixme: refactor client to support redirect
-	re := regexp.MustCompile("/+")
-	path = re.ReplaceAllString(path, "/")
-
-	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), nil)
-	if err != nil {
+	if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
 		return err
 	}
-	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
-	req.Header.Set("Content-Type", "plain/text")
-	req.Host = cli.addr
-
-	dial, err := net.Dial(cli.proto, cli.addr)
-	if err != nil {
-		if strings.Contains(err.Error(), "connection refused") {
-			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
-		}
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	defer clientconn.Close()
-
-	// Server hijacks the connection, error 'connection closed' expected
-	clientconn.Do(req)
-
-	rwc, br := clientconn.Hijack()
-	defer rwc.Close()
-
-	if started != nil {
-		started <- rwc
-	}
-
-	var receiveStdout chan error
-
-	var oldState *term.State
-
-	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
-		oldState, err = term.SetRawTerminal(cli.terminalFd)
-		if err != nil {
-			return err
-		}
-		defer term.RestoreTerminal(cli.terminalFd, oldState)
-	}
-
-	if stdout != nil || stderr != nil {
-		receiveStdout = utils.Go(func() (err error) {
-			defer func() {
-				if in != nil {
-					if setRawTerminal && cli.isTerminal {
-						term.RestoreTerminal(cli.terminalFd, oldState)
-					}
-					in.Close()
-				}
-			}()
-
-			// When TTY is ON, use regular copy
-			if setRawTerminal {
-				_, err = io.Copy(stdout, br)
-			} else {
-				_, err = utils.StdCopy(stdout, stderr, br)
-			}
-			utils.Debugf("[hijack] End of stdout")
-			return err
-		})
-	}
-
-	sendStdin := utils.Go(func() error {
-		if in != nil {
-			io.Copy(rwc, in)
-			utils.Debugf("[hijack] End of stdin")
-		}
-		if tcpc, ok := rwc.(*net.TCPConn); ok {
-			if err := tcpc.CloseWrite(); err != nil {
-				utils.Errorf("Couldn't send EOF: %s\n", err)
-			}
-		} else if unixc, ok := rwc.(*net.UnixConn); ok {
-			if err := unixc.CloseWrite(); err != nil {
-				utils.Errorf("Couldn't send EOF: %s\n", err)
-			}
-		}
-		// Discard errors due to pipe interruption
-		return nil
-	})
-
-	if stdout != nil || stderr != nil {
-		if err := <-receiveStdout; err != nil {
-			utils.Errorf("Error receiveStdout: %s", err)
-			return err
-		}
-	}
-
-	if !cli.isTerminal {
-		if err := <-sendStdin; err != nil {
-			utils.Errorf("Error sendStdin: %s", err)
-			return err
-		}
-	}
 	return nil
-
-}
-
-func (cli *DockerCli) getTtySize() (int, int) {
-	if !cli.isTerminal {
-		return 0, 0
-	}
-	ws, err := term.GetWinsize(cli.terminalFd)
-	if err != nil {
-		utils.Errorf("Error getting size: %s", err)
-		if ws == nil {
-			return 0, 0
-		}
-	}
-	return int(ws.Height), int(ws.Width)
-}
-
-func (cli *DockerCli) resizeTty(id string) {
-	height, width := cli.getTtySize()
-	if height == 0 && width == 0 {
-		return
-	}
-	v := url.Values{}
-	v.Set("h", strconv.Itoa(height))
-	v.Set("w", strconv.Itoa(width))
-	if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
-		utils.Errorf("Error resize: %s", err)
-	}
-}
-
-func (cli *DockerCli) monitorTtySize(id string) error {
-	cli.resizeTty(id)
-
-	sigchan := make(chan os.Signal, 1)
-	signal.Notify(sigchan, syscall.SIGWINCH)
-	go func() {
-		for _ = range sigchan {
-			cli.resizeTty(id)
-		}
-	}()
-	return nil
-}
-
-func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
-	flags := flag.NewFlagSet(name, flag.ContinueOnError)
-	flags.Usage = func() {
-		fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
-		flags.PrintDefaults()
-		os.Exit(2)
-	}
-	return flags
-}
-
-func (cli *DockerCli) LoadConfigFile() (err error) {
-	cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
-	if err != nil {
-		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
-	}
-	return err
-}
-
-func waitForExit(cli *DockerCli, containerId string) (int, error) {
-	stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
-	if err != nil {
-		return -1, err
-	}
-
-	var out engine.Env
-	if err := out.Decode(stream); err != nil {
-		return -1, err
-	}
-	return out.GetInt("StatusCode"), nil
-}
-
-// getExitCode perform an inspect on the container. It returns
-// the running state and the exit code.
-func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
-	body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
-	if err != nil {
-		// If we can't connect, then the daemon probably died.
-		if err != ErrConnectionRefused {
-			return false, -1, err
-		}
-		return false, -1, nil
-	}
-	c := &Container{}
-	if err := json.Unmarshal(body, c); err != nil {
-		return false, -1, err
-	}
-	return c.State.Running, c.State.ExitCode, nil
-}
-
-func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
-	if stream != nil {
-		defer stream.Close()
-	}
-	if err != nil {
-		return nil, statusCode, err
-	}
-	body, err := ioutil.ReadAll(stream)
-	if err != nil {
-		return nil, -1, err
-	}
-	return body, statusCode, nil
-}
-
-func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
-	var (
-		isTerminal = false
-		terminalFd uintptr
-	)
-
-	if in != nil {
-		if file, ok := in.(*os.File); ok {
-			terminalFd = file.Fd()
-			isTerminal = term.IsTerminal(terminalFd)
-		}
-	}
-
-	if err == nil {
-		err = out
-	}
-	return &DockerCli{
-		proto:      proto,
-		addr:       addr,
-		in:         in,
-		out:        out,
-		err:        err,
-		isTerminal: isTerminal,
-		terminalFd: terminalFd,
-	}
-}
-
-type DockerCli struct {
-	proto      string
-	addr       string
-	configFile *auth.ConfigFile
-	in         io.ReadCloser
-	out        io.Writer
-	err        io.Writer
-	isTerminal bool
-	terminalFd uintptr
 }
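A minimal usage sketch for the two flags introduced above (the image name and tar path are illustrative, not taken from this change):

    $ docker save -o /tmp/busybox.tar busybox
    $ docker load -i /tmp/busybox.tar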

+ 390 - 0
api/client/utils.go

@@ -0,0 +1,390 @@
+package client
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	gosignal "os/signal"
+	"regexp"
+	goruntime "runtime"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/dotcloud/docker/api"
+	"github.com/dotcloud/docker/dockerversion"
+	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/pkg/term"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/utils"
+)
+
+var (
+	ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+)
+
+func (cli *DockerCli) dial() (net.Conn, error) {
+	if cli.tlsConfig != nil && cli.proto != "unix" {
+		return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
+	}
+	return net.Dial(cli.proto, cli.addr)
+}
+
+func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
+	params := bytes.NewBuffer(nil)
+	if data != nil {
+		if env, ok := data.(engine.Env); ok {
+			if err := env.Encode(params); err != nil {
+				return nil, -1, err
+			}
+		} else {
+			buf, err := json.Marshal(data)
+			if err != nil {
+				return nil, -1, err
+			}
+			if _, err := params.Write(buf); err != nil {
+				return nil, -1, err
+			}
+		}
+	}
+	// fixme: refactor client to support redirect
+	re := regexp.MustCompile("/+")
+	path = re.ReplaceAllString(path, "/")
+
+	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
+	if err != nil {
+		return nil, -1, err
+	}
+	if passAuthInfo {
+		cli.LoadConfigFile()
+		// Resolve the Auth config relevant for this server
+		authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
+		getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
+			buf, err := json.Marshal(authConfig)
+			if err != nil {
+				return nil, err
+			}
+			registryAuthHeader := []string{
+				base64.URLEncoding.EncodeToString(buf),
+			}
+			return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
+		}
+		if headers, err := getHeaders(authConfig); err == nil && headers != nil {
+			for k, v := range headers {
+				req.Header[k] = v
+			}
+		}
+	}
+	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+	req.Host = cli.addr
+	if data != nil {
+		req.Header.Set("Content-Type", "application/json")
+	} else if method == "POST" {
+		req.Header.Set("Content-Type", "plain/text")
+	}
+	dial, err := cli.dial()
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return nil, -1, ErrConnectionRefused
+		}
+		return nil, -1, err
+	}
+	clientconn := httputil.NewClientConn(dial, nil)
+	resp, err := clientconn.Do(req)
+	if err != nil {
+		clientconn.Close()
+		if strings.Contains(err.Error(), "connection refused") {
+			return nil, -1, ErrConnectionRefused
+		}
+		return nil, -1, err
+	}
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return nil, -1, err
+		}
+		if len(body) == 0 {
+			return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
+		}
+		return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
+	}
+
+	wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
+		if resp != nil && resp.Body != nil {
+			resp.Body.Close()
+		}
+		return clientconn.Close()
+	})
+	return wrapper, resp.StatusCode, nil
+}
+
+func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
+	if (method == "POST" || method == "PUT") && in == nil {
+		in = bytes.NewReader([]byte{})
+	}
+
+	// fixme: refactor client to support redirect
+	re := regexp.MustCompile("/+")
+	path = re.ReplaceAllString(path, "/")
+
+	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+	req.Host = cli.addr
+	if method == "POST" {
+		req.Header.Set("Content-Type", "plain/text")
+	}
+
+	if headers != nil {
+		for k, v := range headers {
+			req.Header[k] = v
+		}
+	}
+
+	dial, err := cli.dial()
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+		}
+		return err
+	}
+	clientconn := httputil.NewClientConn(dial, nil)
+	resp, err := clientconn.Do(req)
+	defer clientconn.Close()
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+		}
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+		if len(body) == 0 {
+			return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
+		}
+		return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
+	}
+
+	if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
+		return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
+	}
+	if _, err := io.Copy(out, resp.Body); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
+	defer func() {
+		if started != nil {
+			close(started)
+		}
+	}()
+	// fixme: refactor client to support redirect
+	re := regexp.MustCompile("/+")
+	path = re.ReplaceAllString(path, "/")
+
+	req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+	req.Header.Set("Content-Type", "plain/text")
+	req.Host = cli.addr
+
+	dial, err := cli.dial()
+	if err != nil {
+		if strings.Contains(err.Error(), "connection refused") {
+			return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+		}
+		return err
+	}
+	clientconn := httputil.NewClientConn(dial, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	clientconn.Do(req)
+
+	rwc, br := clientconn.Hijack()
+	defer rwc.Close()
+
+	if started != nil {
+		started <- rwc
+	}
+
+	var receiveStdout chan error
+
+	var oldState *term.State
+
+	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
+		oldState, err = term.SetRawTerminal(cli.terminalFd)
+		if err != nil {
+			return err
+		}
+		defer term.RestoreTerminal(cli.terminalFd, oldState)
+	}
+
+	if stdout != nil || stderr != nil {
+		receiveStdout = utils.Go(func() (err error) {
+			defer func() {
+				if in != nil {
+					if setRawTerminal && cli.isTerminal {
+						term.RestoreTerminal(cli.terminalFd, oldState)
+					}
+					// For some reason this Close call blocks on darwin...
+					// As the client exits right after, simply discard the close
+					// until we find a better solution.
+					if goruntime.GOOS != "darwin" {
+						in.Close()
+					}
+				}
+			}()
+
+			// When TTY is ON, use regular copy
+			if setRawTerminal {
+				_, err = io.Copy(stdout, br)
+			} else {
+				_, err = utils.StdCopy(stdout, stderr, br)
+			}
+			utils.Debugf("[hijack] End of stdout")
+			return err
+		})
+	}
+
+	sendStdin := utils.Go(func() error {
+		if in != nil {
+			io.Copy(rwc, in)
+			utils.Debugf("[hijack] End of stdin")
+		}
+		if tcpc, ok := rwc.(*net.TCPConn); ok {
+			if err := tcpc.CloseWrite(); err != nil {
+				utils.Debugf("Couldn't send EOF: %s\n", err)
+			}
+		} else if unixc, ok := rwc.(*net.UnixConn); ok {
+			if err := unixc.CloseWrite(); err != nil {
+				utils.Debugf("Couldn't send EOF: %s\n", err)
+			}
+		}
+		// Discard errors due to pipe interruption
+		return nil
+	})
+
+	if stdout != nil || stderr != nil {
+		if err := <-receiveStdout; err != nil {
+			utils.Debugf("Error receiveStdout: %s", err)
+			return err
+		}
+	}
+
+	if !cli.isTerminal {
+		if err := <-sendStdin; err != nil {
+			utils.Debugf("Error sendStdin: %s", err)
+			return err
+		}
+	}
+	return nil
+
+}
+
+func (cli *DockerCli) resizeTty(id string) {
+	height, width := cli.getTtySize()
+	if height == 0 && width == 0 {
+		return
+	}
+	v := url.Values{}
+	v.Set("h", strconv.Itoa(height))
+	v.Set("w", strconv.Itoa(width))
+	if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
+		utils.Debugf("Error resize: %s", err)
+	}
+}
+
+func waitForExit(cli *DockerCli, containerId string) (int, error) {
+	stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
+	if err != nil {
+		return -1, err
+	}
+
+	var out engine.Env
+	if err := out.Decode(stream); err != nil {
+		return -1, err
+	}
+	return out.GetInt("StatusCode"), nil
+}
+
+// getExitCode performs an inspect on the container. It returns
+// the running state and the exit code.
+func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
+	body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
+	if err != nil {
+		// If we can't connect, then the daemon probably died.
+		if err != ErrConnectionRefused {
+			return false, -1, err
+		}
+		return false, -1, nil
+	}
+	c := &api.Container{}
+	if err := json.Unmarshal(body, c); err != nil {
+		return false, -1, err
+	}
+	return c.State.Running, c.State.ExitCode, nil
+}
+
+func (cli *DockerCli) monitorTtySize(id string) error {
+	cli.resizeTty(id)
+
+	sigchan := make(chan os.Signal, 1)
+	gosignal.Notify(sigchan, syscall.SIGWINCH)
+	go func() {
+		for _ = range sigchan {
+			cli.resizeTty(id)
+		}
+	}()
+	return nil
+}
+
+func (cli *DockerCli) getTtySize() (int, int) {
+	if !cli.isTerminal {
+		return 0, 0
+	}
+	ws, err := term.GetWinsize(cli.terminalFd)
+	if err != nil {
+		utils.Debugf("Error getting size: %s", err)
+		if ws == nil {
+			return 0, 0
+		}
+	}
+	return int(ws.Height), int(ws.Width)
+}
+
+func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
+	if stream != nil {
+		defer stream.Close()
+	}
+	if err != nil {
+		return nil, statusCode, err
+	}
+	body, err := ioutil.ReadAll(stream)
+	if err != nil {
+		return nil, -1, err
+	}
+	return body, statusCode, nil
+}
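With the new dial() above, the client negotiates TLS whenever a tlsConfig is set and the transport is not a unix socket. A hedged sketch of pointing the client at a TLS-enabled daemon, assuming matching --tls* client flags are available in this release; host, port, and certificate paths are illustrative:

    $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
          -H tcp://example.com:4243 version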

+ 7 - 4
api/common.go

@@ -3,15 +3,16 @@ package api
 import (
 	"fmt"
 	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/pkg/version"
 	"github.com/dotcloud/docker/utils"
 	"mime"
 	"strings"
 )
 
 const (
-	APIVERSION        = "1.10"
-	DEFAULTHTTPHOST   = "127.0.0.1"
-	DEFAULTUNIXSOCKET = "/var/run/docker.sock"
+	APIVERSION        version.Version = "1.10"
+	DEFAULTHTTPHOST                   = "127.0.0.1"
+	DEFAULTUNIXSOCKET                 = "/var/run/docker.sock"
 )
 
 func ValidateHost(val string) (string, error) {
@@ -23,8 +24,10 @@ func ValidateHost(val string) (string, error) {
 }
 
 //TODO remove, used on < 1.5 in getContainersJSON
-func displayablePorts(ports *engine.Table) string {
+func DisplayablePorts(ports *engine.Table) string {
 	result := []string{}
+	ports.SetKey("PublicPort")
+	ports.Sort()
 	for _, port := range ports.Data {
 		if port.Get("IP") == "" {
 			result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))

+ 70 - 34
api/server.go → api/server/server.go

@@ -1,21 +1,15 @@
-package api
+package server
 
 import (
 	"bufio"
 	"bytes"
 	"code.google.com/p/go.net/websocket"
+	"crypto/tls"
+	"crypto/x509"
 	"encoding/base64"
 	"encoding/json"
 	"expvar"
 	"fmt"
-	"github.com/dotcloud/docker/auth"
-	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/pkg/listenbuffer"
-	"github.com/dotcloud/docker/pkg/systemd"
-	"github.com/dotcloud/docker/pkg/user"
-	"github.com/dotcloud/docker/pkg/version"
-	"github.com/dotcloud/docker/utils"
-	"github.com/gorilla/mux"
 	"io"
 	"io/ioutil"
 	"log"
@@ -26,7 +20,16 @@ import (
 	"strconv"
 	"strings"
 	"syscall"
-	"time"
+
+	"github.com/dotcloud/docker/api"
+	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/pkg/listenbuffer"
+	"github.com/dotcloud/docker/pkg/systemd"
+	"github.com/dotcloud/docker/pkg/user"
+	"github.com/dotcloud/docker/pkg/version"
+	"github.com/dotcloud/docker/registry"
+	"github.com/dotcloud/docker/utils"
+	"github.com/gorilla/mux"
 )
 
 var (
@@ -314,7 +317,7 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo
 		for _, out := range outs.Data {
 			ports := engine.NewTable("", 0)
 			ports.ReadListFrom([]byte(out.Get("Ports")))
-			out.Set("Ports", displayablePorts(ports))
+			out.Set("Ports", api.DisplayablePorts(ports))
 		}
 		w.Header().Set("Content-Type", "application/json")
 		if _, err = outs.WriteListTo(w); err != nil {
@@ -381,13 +384,13 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon
 		job   *engine.Job
 	)
 	authEncoded := r.Header.Get("X-Registry-Auth")
-	authConfig := &auth.AuthConfig{}
+	authConfig := &registry.AuthConfig{}
 	if authEncoded != "" {
 		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
 		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
 			// for a pull it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			authConfig = &auth.AuthConfig{}
+			authConfig = &registry.AuthConfig{}
 		}
 	}
 	if image != "" { //pull
@@ -429,7 +432,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
 	}
 	var (
 		authEncoded = r.Header.Get("X-Registry-Auth")
-		authConfig  = &auth.AuthConfig{}
+		authConfig  = &registry.AuthConfig{}
 		metaHeaders = map[string][]string{}
 	)
 
@@ -438,7 +441,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
 		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
 			// for a search it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			authConfig = &auth.AuthConfig{}
+			authConfig = &registry.AuthConfig{}
 		}
 	}
 	for k, v := range r.Header {
@@ -455,6 +458,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
 	return job.Run()
 }
 
+// FIXME: 'insert' is deprecated as of 0.10, and should be removed in a future version.
 func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
@@ -494,7 +498,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	authConfig := &auth.AuthConfig{}
+	authConfig := &registry.AuthConfig{}
 
 	authEncoded := r.Header.Get("X-Registry-Auth")
 	if authEncoded != "" {
@@ -502,7 +506,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response
 		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
 		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
 			// to increase compatibility to existing api it is defaulting to be empty
-			authConfig = &auth.AuthConfig{}
+			authConfig = &registry.AuthConfig{}
 		}
 	} else {
 		// the old format is supported for compatibility if there was no authConfig header
@@ -514,6 +518,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response
 	job := eng.Job("push", vars["name"])
 	job.SetenvJson("metaHeaders", metaHeaders)
 	job.SetenvJson("authConfig", authConfig)
+	job.Setenv("tag", r.Form.Get("tag"))
 	if version.GreaterThan("1.0") {
 		job.SetenvBool("json", true)
 		streamJSON(job, w, true)
@@ -624,6 +629,7 @@ func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWr
 	var job = eng.Job("image_delete", vars["name"])
 	streamJSON(job, w, false)
 	job.Setenv("force", r.Form.Get("force"))
+	job.Setenv("noprune", r.Form.Get("noprune"))
 
 	return job.Run()
 }
@@ -636,7 +642,7 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
 	job := eng.Job("start", name)
 	// allow a nil body for backwards compatibility
 	if r.Body != nil {
-		if MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
+		if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
 			if err := job.DecodeEnv(r.Body); err != nil {
 				return err
 			}
@@ -823,9 +829,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 	}
 	var (
 		authEncoded       = r.Header.Get("X-Registry-Auth")
-		authConfig        = &auth.AuthConfig{}
+		authConfig        = &registry.AuthConfig{}
 		configFileEncoded = r.Header.Get("X-Registry-Config")
-		configFile        = &auth.ConfigFile{}
+		configFile        = &registry.ConfigFile{}
 		job               = eng.Job("build")
 	)
 
@@ -838,7 +844,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
 			// for a pull it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			authConfig = &auth.AuthConfig{}
+			authConfig = &registry.AuthConfig{}
 		}
 	}
 
@@ -847,7 +853,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
 		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
 			// for a pull it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			configFile = &auth.ConfigFile{}
+			configFile = &registry.ConfigFile{}
 		}
 	}
 
@@ -883,7 +889,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
 
 	var copyData engine.Env
 
-	if contentType := r.Header.Get("Content-Type"); contentType == "application/json" {
+	if contentType := r.Header.Get("Content-Type"); api.MatchesContentType(contentType, "application/json") {
 		if err := copyData.Decode(r.Body); err != nil {
 			return err
 		}
@@ -894,6 +900,9 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
 	if copyData.Get("Resource") == "" {
 		return fmt.Errorf("Path cannot be empty")
 	}
+
+	origResource := copyData.Get("Resource")
+
 	if copyData.Get("Resource")[0] == '/' {
 		copyData.Set("Resource", copyData.Get("Resource")[1:])
 	}
@@ -904,6 +913,8 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
 		utils.Errorf("%s", err.Error())
 		if strings.Contains(err.Error(), "No such container") {
 			w.WriteHeader(http.StatusNotFound)
+		} else if strings.Contains(err.Error(), "no such file or directory") {
+			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
 		}
 	}
 	return nil
@@ -930,20 +941,20 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
 
 		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
 			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
-			if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) {
+			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
 				utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
 			}
 		}
 		version := version.Version(mux.Vars(r)["version"])
 		if version == "" {
-			version = APIVERSION
+			version = api.APIVERSION
 		}
 		if enableCors {
 			writeCorsHeaders(w, r)
 		}
 
-		if version.GreaterThan(APIVERSION) {
-			http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, APIVERSION).Error(), http.StatusNotFound)
+		if version.GreaterThan(api.APIVERSION) {
+			http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
 			return
 		}
 
@@ -1130,9 +1141,8 @@ func changeGroup(addr string, nameOrGid string) error {
 
 // ListenAndServe sets up the required http.Server and gets it listening for
 // each addr passed in and does protocol specific checking.
-func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion string, socketGroup string) error {
-	r, err := createRouter(eng, logging, enableCors, dockerVersion)
-
+func ListenAndServe(proto, addr string, job *engine.Job) error {
+	r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
 	if err != nil {
 		return err
 	}
@@ -1147,22 +1157,48 @@ func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors
 		}
 	}
 
-	l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock, 15*time.Minute)
+	l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock)
 	if err != nil {
 		return err
 	}
 
+	if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) {
+		tlsCert := job.Getenv("TlsCert")
+		tlsKey := job.Getenv("TlsKey")
+		cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
+		if err != nil {
+			return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?",
+				tlsCert, tlsKey, err)
+		}
+		tlsConfig := &tls.Config{
+			NextProtos:   []string{"http/1.1"},
+			Certificates: []tls.Certificate{cert},
+		}
+		if job.GetenvBool("TlsVerify") {
+			certPool := x509.NewCertPool()
+			file, err := ioutil.ReadFile(job.Getenv("TlsCa"))
+			if err != nil {
+				return fmt.Errorf("Couldn't read CA certificate: %s", err)
+			}
+			certPool.AppendCertsFromPEM(file)
+
+			tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+			tlsConfig.ClientCAs = certPool
+		}
+		l = tls.NewListener(l, tlsConfig)
+	}
+
 	// Basic error and sanity checking
 	switch proto {
 	case "tcp":
-		if !strings.HasPrefix(addr, "127.0.0.1") {
+		if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
 			log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 		}
 	case "unix":
 		if err := os.Chmod(addr, 0660); err != nil {
 			return err
 		}
-
+		socketGroup := job.Getenv("SocketGroup")
 		if socketGroup != "" {
 			if err := changeGroup(addr, socketGroup); err != nil {
 				if socketGroup == "docker" {
@@ -1198,7 +1234,7 @@ func ServeApi(job *engine.Job) engine.Status {
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		go func() {
 			log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
-			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"), job.Getenv("SocketGroup"))
+			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
 		}()
 	}
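A sketch of starting the daemon so that the Tls/TlsVerify branch above is exercised, assuming the daemon exposes matching --tls* flags for the TlsCert/TlsKey/TlsCa job environment keys; the port and file paths are illustrative:

    $ docker -d -H tcp://0.0.0.0:4243 --tlsverify \
          --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem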
 

+ 180 - 0
api/server/server_unit_test.go

@@ -0,0 +1,180 @@
+package server
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/api"
+	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/utils"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+)
+
+func TestGetBoolParam(t *testing.T) {
+	if ret, err := getBoolParam("true"); err != nil || !ret {
+		t.Fatalf("true -> true, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("True"); err != nil || !ret {
+		t.Fatalf("True -> true, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("1"); err != nil || !ret {
+		t.Fatalf("1 -> true, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam(""); err != nil || ret {
+		t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("false"); err != nil || ret {
+		t.Fatalf("false -> false, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("0"); err != nil || ret {
+		t.Fatalf("0 -> false, nil | got %t %s", ret, err)
+	}
+	if ret, err := getBoolParam("faux"); err == nil || ret {
+		t.Fatalf("faux -> false, err | got %t %s", ret, err)
+
+	}
+}
+
+func TesthttpError(t *testing.T) {
+	r := httptest.NewRecorder()
+
+	httpError(r, fmt.Errorf("No such method"))
+	if r.Code != http.StatusNotFound {
+		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
+	}
+
+	httpError(r, fmt.Errorf("This account hasn't been activated"))
+	if r.Code != http.StatusForbidden {
+		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
+	}
+
+	httpError(r, fmt.Errorf("Some error"))
+	if r.Code != http.StatusInternalServerError {
+		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
+	}
+}
+
+func TestGetVersion(t *testing.T) {
+	tmp, err := utils.TestDirectory("")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	eng, err := engine.New(tmp)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var called bool
+	eng.Register("version", func(job *engine.Job) engine.Status {
+		called = true
+		v := &engine.Env{}
+		v.SetJson("Version", "42.1")
+		v.Set("ApiVersion", "1.1.1.1.1")
+		v.Set("GoVersion", "2.42")
+		v.Set("Os", "Linux")
+		v.Set("Arch", "x86_64")
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+
+	r := httptest.NewRecorder()
+	req, err := http.NewRequest("GET", "/version", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// FIXME getting the version should require an actual running Server
+	if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+		t.Fatal(err)
+	}
+	if !called {
+		t.Fatalf("handler was not called")
+	}
+	out := engine.NewOutput()
+	v, err := out.AddEnv()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := io.Copy(out, r.Body); err != nil {
+		t.Fatal(err)
+	}
+	out.Close()
+	expected := "42.1"
+	if result := v.Get("Version"); result != expected {
+		t.Errorf("Expected version %s, %s found", expected, result)
+	}
+	expected = "application/json"
+	if result := r.HeaderMap.Get("Content-Type"); result != expected {
+		t.Errorf("Expected Content-Type %s, %s found", expected, result)
+	}
+}
+
+func TestGetInfo(t *testing.T) {
+	tmp, err := utils.TestDirectory("")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	eng, err := engine.New(tmp)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var called bool
+	eng.Register("info", func(job *engine.Job) engine.Status {
+		called = true
+		v := &engine.Env{}
+		v.SetInt("Containers", 1)
+		v.SetInt("Images", 42000)
+		if _, err := v.WriteTo(job.Stdout); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	})
+
+	r := httptest.NewRecorder()
+	req, err := http.NewRequest("GET", "/info", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// FIXME getting the info should require an actual running Server
+	if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+		t.Fatal(err)
+	}
+	if !called {
+		t.Fatalf("handler was not called")
+	}
+
+	out := engine.NewOutput()
+	i, err := out.AddEnv()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := io.Copy(out, r.Body); err != nil {
+		t.Fatal(err)
+	}
+	out.Close()
+	{
+		expected := 42000
+		result := i.GetInt("Images")
+		if expected != result {
+			t.Fatalf("%#v\n", result)
+		}
+	}
+	{
+		expected := 1
+		result := i.GetInt("Containers")
+		if expected != result {
+			t.Fatalf("%#v\n", result)
+		}
+	}
+	{
+		expected := "application/json"
+		if result := r.HeaderMap.Get("Content-Type"); result != expected {
+			t.Fatalf("%#v\n", result)
+		}
+	}
+}
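These are plain Go unit tests, so, assuming a working Go toolchain and the repository checked out on the GOPATH, they can be run in isolation with something like:

    $ go test -v -run 'TestGetVersion|TestGetInfo' ./api/server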

+ 4 - 1
archive/archive.go

@@ -404,7 +404,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
 			parent := filepath.Dir(hdr.Name)
 			parentPath := filepath.Join(dest, parent)
 			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = os.MkdirAll(parentPath, 600)
+				err = os.MkdirAll(parentPath, 0777)
 				if err != nil {
 					return err
 				}
@@ -617,6 +617,9 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
 	if _, err := io.Copy(f, src); err != nil {
 		return nil, err
 	}
+	if err = f.Sync(); err != nil {
+		return nil, err
+	}
 	if _, err := f.Seek(0, 0); err != nil {
 		return nil, err
 	}

+ 4 - 4
archive/changes_test.go

@@ -138,7 +138,7 @@ func mutateSampleDir(t *testing.T, root string) {
 	}
 
 	// Rewrite a file
-	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileN\n"), 0777); err != nil {
+	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
 		t.Fatal(err)
 	}
 
@@ -146,12 +146,12 @@ func mutateSampleDir(t *testing.T, root string) {
 	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
 		t.Fatal(err)
 	}
-	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileM\n"), 0404); err != nil {
+	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
 		t.Fatal(err)
 	}
 
 	// Touch file
-	if err := os.Chtimes(path.Join(root, "file4"), time.Now(), time.Now()); err != nil {
+	if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
 		t.Fatal(err)
 	}
 
@@ -195,7 +195,7 @@ func mutateSampleDir(t *testing.T, root string) {
 	}
 
 	// Touch dir
-	if err := os.Chtimes(path.Join(root, "dir3"), time.Now(), time.Now()); err != nil {
+	if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
 		t.Fatal(err)
 	}
 }

+ 0 - 3
auth/MAINTAINERS

@@ -1,3 +0,0 @@
-Sam Alba <sam@dotcloud.com> (@samalba)
-Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)
-Ken Cochrane <ken@dotcloud.com> (@kencochrane)

+ 5 - 7
builtins/builtins.go

@@ -1,11 +1,10 @@
 package builtins
 
 import (
+	api "github.com/dotcloud/docker/api/server"
 	"github.com/dotcloud/docker/engine"
-
-	"github.com/dotcloud/docker"
-	"github.com/dotcloud/docker/api"
-	"github.com/dotcloud/docker/networkdriver/lxc"
+	"github.com/dotcloud/docker/runtime/networkdriver/bridge"
+	"github.com/dotcloud/docker/server"
 )
 
 func Register(eng *engine.Engine) {
@@ -34,7 +33,6 @@ func remote(eng *engine.Engine) {
 // These components should be broken off into plugins of their own.
 //
 func daemon(eng *engine.Engine) {
-	eng.Register("initserver", docker.InitServer)
-	eng.Register("init_networkdriver", lxc.InitDriver)
-	eng.Register("version", docker.GetVersion)
+	eng.Register("initserver", server.InitServer)
+	eng.Register("init_networkdriver", bridge.InitDriver)
 }

+ 0 - 160
commands_unit_test.go

@@ -1,160 +0,0 @@
-package docker
-
-import (
-	"github.com/dotcloud/docker/runconfig"
-	"strings"
-	"testing"
-)
-
-func parse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig, error) {
-	config, hostConfig, _, err := runconfig.Parse(strings.Split(args+" ubuntu bash", " "), nil)
-	return config, hostConfig, err
-}
-
-func mustParse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig) {
-	config, hostConfig, err := parse(t, args)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return config, hostConfig
-}
-
-func TestParseRunLinks(t *testing.T) {
-	if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
-		t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
-	}
-	if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
-		t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
-	}
-	if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
-		t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
-	}
-
-	if _, _, err := parse(t, "-link a"); err == nil {
-		t.Fatalf("Error parsing links. `-link a` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-link"); err == nil {
-		t.Fatalf("Error parsing links. `-link` should be an error but is not")
-	}
-}
-
-func TestParseRunAttach(t *testing.T) {
-	if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-	if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-	if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-	if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
-		t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
-	}
-
-	if _, _, err := parse(t, "-a"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a invalid"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stdin -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stdout -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-a stderr -d"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
-	}
-	if _, _, err := parse(t, "-d -rm"); err == nil {
-		t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
-	}
-}
-
-func TestParseRunVolumes(t *testing.T) {
-	if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/tmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/tmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes)
-	} else if _, exists := config.Volumes["/var"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/containerVar"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/containerVar"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
-		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
-	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
-	} else if _, exists := config.Volumes["/containerVar"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
-	}
-
-	if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
-		t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
-	} else if len(config.Volumes) != 0 {
-		t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
-	}
-
-	if _, _, err := parse(t, "-v /"); err == nil {
-		t.Fatalf("Expected error, but got none")
-	}
-
-	if _, _, err := parse(t, "-v /:/"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp:"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp::"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v :"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v ::"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
-	}
-	if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
-		t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
-	}
-}

+ 146 - 0
contrib/check-config.sh

@@ -0,0 +1,146 @@
+#!/usr/bin/env bash
+set -e
+
+# bits of this were adapted from lxc-checkconfig
+# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
+
+: ${CONFIG:=/proc/config.gz}
+
+if ! command -v zgrep &> /dev/null; then
+	zgrep() {
+		zcat "$2" | grep "$1"
+	}
+fi
+
+is_set() {
+	zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
+}
+
+# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
+declare -A colors=(
+	[black]=30
+	[red]=31
+	[green]=32
+	[yellow]=33
+	[blue]=34
+	[magenta]=35
+	[cyan]=36
+	[white]=37
+)
+color() {
+	color=()
+	if [ "$1" = 'bold' ]; then
+		color+=( '1' )
+		shift
+	fi
+	if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then
+		color+=( "${colors[$1]}" )
+	fi
+	local IFS=';'
+	echo -en '\033['"${color[*]}"m
+}
+wrap_color() {
+	text="$1"
+	shift
+	color "$@"
+	echo -n "$text"
+	color reset
+	echo
+}
+
+wrap_good() {
+	echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
+}
+wrap_bad() {
+	echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
+}
+wrap_warning() {
+	wrap_color >&2 "$*" red
+}
+
+check_flag() {
+	if is_set "$1"; then
+		wrap_good "CONFIG_$1" 'enabled'
+	else
+		wrap_bad "CONFIG_$1" 'missing'
+	fi
+}
+
+check_flags() {
+	for flag in "$@"; do
+		echo "- $(check_flag "$flag")"
+	done
+} 
+
+if [ ! -e "$CONFIG" ]; then
+	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
+	for tryConfig in \
+		'/proc/config.gz' \
+		"/boot/config-$(uname -r)" \
+		'/usr/src/linux/.config' \
+	; do
+		if [ -e "$tryConfig" ]; then
+			CONFIG="$tryConfig"
+			break
+		fi
+	done
+	if [ ! -e "$CONFIG" ]; then
+		wrap_warning "error: cannot find kernel config"
+		wrap_warning "  try running this script again, specifying the kernel config:"
+		wrap_warning "    CONFIG=/path/to/kernel/.config $0"
+		exit 1
+	fi
+fi
+
+wrap_color "info: reading kernel config from $CONFIG ..." white
+echo
+
+echo 'Generally Necessary:'
+
+echo -n '- '
+cgroupCpuDir="$(awk '/[, ]cpu([, ]|$)/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)"
+cgroupDir="$(dirname "$cgroupCpuDir")"
+if [ -d "$cgroupDir/cpu" ]; then
+	echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
+else
+	echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupCpuDir]"
+	echo "    $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
+fi
+
+flags=(
+	NAMESPACES {NET,PID,IPC,UTS}_NS
+	DEVPTS_MULTIPLE_INSTANCES
+	CGROUPS CGROUP_DEVICE
+	MACVLAN VETH BRIDGE
+	IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
+	NF_NAT NF_NAT_NEEDED
+)
+check_flags "${flags[@]}"
+echo
+
+echo 'Optional Features:'
+flags=(
+	MEMCG_SWAP
+	RESOURCE_COUNTERS
+)
+check_flags "${flags[@]}"
+
+echo '- Storage Drivers:'
+{
+	echo '- "'$(wrap_color 'aufs' blue)'":'
+	check_flags AUFS_FS | sed 's/^/  /'
+	if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
+		echo "    $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
+	fi
+
+	echo '- "'$(wrap_color 'btrfs' blue)'":'
+	check_flags BTRFS_FS | sed 's/^/  /'
+
+	echo '- "'$(wrap_color 'devicemapper' blue)'":'
+	check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS | sed 's/^/  /'
+} | sed 's/^/  /'
+echo
+
+#echo 'Potential Future Features:'
+#check_flags USER_NS
+#echo
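The script defaults to reading /proc/config.gz and falls back to the other paths listed above; a specific kernel config can also be supplied explicitly, for example:

    $ ./contrib/check-config.sh
    $ CONFIG=/boot/config-$(uname -r) ./contrib/check-config.sh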

+ 3 - 6
contrib/completion/bash/docker

@@ -392,11 +392,8 @@ _docker_port()
 _docker_ps()
 {
 	case "$prev" in
-		--since-id|--before-id)
-			COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) )
-			# TODO replace this with __docker_containers_all
-			# see https://github.com/dotcloud/docker/issues/3565
-			return
+		--since|--before)
+ 			__docker_containers_all
 			;;
 		-n)
 			return
@@ -407,7 +404,7 @@ _docker_ps()
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
 			;;
 		*)
 			;;
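For reference, the renamed options that this completion now targets accept container names or IDs (the names below are illustrative):

    $ docker ps --since old_container
    $ docker ps --before old_container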

+ 22 - 18
contrib/completion/fish/docker.fish

@@ -26,36 +26,38 @@ end
 function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
     switch $select
         case running
-            docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS="  +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
         case stopped
-            docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS="  +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
         case all
-            docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS="  +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+            docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
     end
 end
 
 function __fish_print_docker_images --description 'Print a list of docker images'
-    docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1":"$2}'
+    docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
 end
 
 function __fish_print_docker_repositories --description 'Print a list of docker repositories'
-    docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1}' | sort | uniq
+    docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
 end
 
 # common options
 complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group"
 complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking"
 complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
 complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules"
-complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver'
@@ -71,7 +73,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_pri
 # build
 complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
-complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress verbose build output'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
 complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'
 
@@ -100,16 +102,16 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_pri
 # history
 complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
 complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
-complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'only show numeric IDs'
+complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
 complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
 
 # images
 complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'show all images (by default filter out the intermediate images used to build)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'only show numeric IDs'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'output graph in tree format'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'output graph in graphviz format'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format'
 complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
 
 # import
@@ -126,7 +128,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_pri
 complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image"
-complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers running)' -d "Container"
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container"
 
 # kill
 complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container'
@@ -138,9 +140,9 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image
 
 # login
 complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server'
-complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'email'
-complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'password'
-complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'username'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username'
 
 # logs
 complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container'
@@ -154,13 +156,13 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print
 # ps
 complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.'
-complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before-id -d 'Show only container created before Id, include non-running ones.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs'
 complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes'
-complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since-id -d 'Show only containers created since Id, include non-running ones.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
 
 # pull
 complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server'
@@ -180,12 +182,14 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_pr
 
 # rm
 complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container"
 
 # rmi
 complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force'
 complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image"
 
 # run

+ 1 - 1
contrib/completion/zsh/_docker

@@ -174,7 +174,7 @@ __docker_subcommand () {
         (ps)
             _arguments '-a[Show all containers. Only running containers are shown by default]' \
                 '-h[Show help]' \
-                '-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
+                '--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
             '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
             ;;
         (tag)

+ 3 - 3
contrib/desktop-integration/data/Dockerfile

@@ -9,13 +9,13 @@
 #   wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
 #
 #   # Build data image
-#   docker build -t data -rm .
+#   docker build -t data .
 #
 #   # Create a data container. (eg: iceweasel-data)
-#   docker run -name iceweasel-data data true
+#   docker run --name iceweasel-data data true
 #
 #   # List data from it
-#   docker run -volumes-from iceweasel-data busybox ls -al /data
+#   docker run --volumes-from iceweasel-data busybox ls -al /data
 
 docker-version 0.6.5
 

+ 4 - 4
contrib/desktop-integration/iceweasel/Dockerfile

@@ -10,16 +10,16 @@
 #   wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
 #
 #   # Build iceweasel image
-#   docker build -t iceweasel -rm .
+#   docker build -t iceweasel .
 #
 #   # Run stateful data-on-host iceweasel. For ephemeral, remove -v /data/iceweasel:/data
 #   docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
-#     -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
+#     -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
 #     -e DISPLAY=unix$DISPLAY iceweasel
 #
 #   # To run stateful dockerized data containers
-#   docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
-#     -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
+#   docker run --volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#     -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
 #     -e DISPLAY=unix$DISPLAY iceweasel
 
 docker-version 0.6.5

+ 1 - 1
contrib/docker-device-tool/device_tool.go

@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/dotcloud/docker/graphdriver/devmapper"
+	"github.com/dotcloud/docker/runtime/graphdriver/devmapper"
 	"os"
 	"path"
 	"sort"

+ 1 - 1
contrib/host-integration/Dockerfile.dev

@@ -6,7 +6,7 @@
 #
 
 FROM		ubuntu:12.10
-MAINTAINER	Guillaume J. Charmes <guillaume@dotcloud.com>
+MAINTAINER	Guillaume J. Charmes <guillaume@docker.com>
 
 RUN		apt-get update && apt-get install -y wget git mercurial
 

+ 1 - 1
contrib/host-integration/Dockerfile.min

@@ -1,4 +1,4 @@
 FROM		busybox
-MAINTAINER	Guillaume J. Charmes <guillaume@dotcloud.com>
+MAINTAINER	Guillaume J. Charmes <guillaume@docker.com>
 ADD		manager	  /usr/bin/
 ENTRYPOINT	["/usr/bin/manager"]

+ 1 - 1
contrib/host-integration/manager.go

@@ -70,7 +70,7 @@ func main() {
 	bufErr := bytes.NewBuffer(nil)
 
 	// Instanciate the Docker CLI
-	cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock")
+	cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil)
 	// Retrieve the container info
 	if err := cli.CmdInspect(flag.Arg(0)); err != nil {
 		// As of docker v0.6.3, CmdInspect always returns nil

+ 28 - 13
contrib/init/sysvinit-debian/docker

@@ -21,6 +21,7 @@ BASE=$(basename $0)
 # modify these in /etc/default/$BASE (/etc/default/docker)
 DOCKER=/usr/bin/$BASE
 DOCKER_PIDFILE=/var/run/$BASE.pid
+DOCKER_LOGFILE=/var/log/$BASE.log
 DOCKER_OPTS=
 DOCKER_DESC="Docker"
 
@@ -50,23 +51,37 @@ fail_unless_root() {
 	fi
 }
 
+cgroupfs_mount() {
+	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+	if grep -v '^#' /etc/fstab | grep -q cgroup \
+		|| [ ! -e /proc/cgroups ] \
+		|| [ ! -d /sys/fs/cgroup ]; then
+		return
+	fi
+	if ! mountpoint -q /sys/fs/cgroup; then
+		mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+	fi
+	(
+		cd /sys/fs/cgroup
+		for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+			mkdir -p $sys
+			if ! mountpoint -q $sys; then
+				if ! mount -n -t cgroup -o $sys cgroup $sys; then
+					rmdir $sys || true
+				fi
+			fi
+		done
+	)
+}
+
 case "$1" in
 	start)
 		fail_unless_root
 
-		if ! grep -q cgroup /proc/mounts; then
-			# rough approximation of cgroupfs-mount
-			mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
-			for sys in $(cut -d'	' -f1 /proc/cgroups); do
-				mkdir -p /sys/fs/cgroup/$sys
-				if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then
-					rmdir /sys/fs/cgroup/$sys 2>/dev/null || true
-				fi
-			done
-		fi
+		cgroupfs_mount
 
-		touch /var/log/docker.log
-		chgrp docker /var/log/docker.log
+		touch "$DOCKER_LOGFILE"
+		chgrp docker "$DOCKER_LOGFILE"
 
 		log_begin_msg "Starting $DOCKER_DESC: $BASE"
 		start-stop-daemon --start --background \
@@ -76,7 +91,7 @@ case "$1" in
 			-- \
 				-d -p "$DOCKER_PIDFILE" \
 				$DOCKER_OPTS \
-					> /var/log/docker.log 2>&1
+					>> "$DOCKER_LOGFILE" 2>&1
 		log_end_msg $?
 		;;
 

+ 1 - 1
contrib/init/sysvinit-debian/docker.default

@@ -4,7 +4,7 @@
 #DOCKER="/usr/local/bin/docker"
 
 # Use DOCKER_OPTS to modify the daemon startup options.
-#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4"
+#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"
 
 # If you need Docker to use an HTTP proxy, it can also be specified here.
 #export http_proxy="http://127.0.0.1:3128/"

+ 26 - 11
contrib/init/upstart/docker.conf

@@ -2,9 +2,34 @@ description "Docker daemon"
 
 start on filesystem
 stop on runlevel [!2345]
+limit nofile 524288 1048576
+limit nproc 524288 1048576
 
 respawn
 
+pre-start script
+	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+	if grep -v '^#' /etc/fstab | grep -q cgroup \
+		|| [ ! -e /proc/cgroups ] \
+		|| [ ! -d /sys/fs/cgroup ]; then
+		exit 0
+	fi
+	if ! mountpoint -q /sys/fs/cgroup; then
+		mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+	fi
+	(
+		cd /sys/fs/cgroup
+		for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+			mkdir -p $sys
+			if ! mountpoint -q $sys; then
+				if ! mount -n -t cgroup -o $sys cgroup $sys; then
+					rmdir $sys || true
+				fi
+			fi
+		done
+	)
+end script
+
 script
 	# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
 	DOCKER=/usr/bin/$UPSTART_JOB
@@ -12,15 +37,5 @@ script
 	if [ -f /etc/default/$UPSTART_JOB ]; then
 		. /etc/default/$UPSTART_JOB
 	fi
-	if ! grep -q cgroup /proc/mounts; then
-		# rough approximation of cgroupfs-mount
-		mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
-		for sys in $(cut -d'	' -f1 /proc/cgroups); do
-			mkdir -p /sys/fs/cgroup/$sys
-			if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then
-				rmdir /sys/fs/cgroup/$sys 2>/dev/null || true
-			fi
-		done
-	fi
-	"$DOCKER" -d $DOCKER_OPTS
+	exec "$DOCKER" -d $DOCKER_OPTS
 end script

+ 56 - 0
contrib/man/man1/docker-attach.1

@@ -0,0 +1,56 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-attach.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-attach \- Attach to a running container
+.SH SYNOPSIS
+.B docker attach
+\fB--no-stdin\fR[=\fIfalse\fR] 
+\fB--sig-proxy\fR[=\fItrue\fR] 
+container
+.SH DESCRIPTION
+If you \fBdocker run\fR a container in detached mode (\fB-d\fR), you can reattach to the detached container with \fBdocker attach\fR using the container's ID or name.
+.sp
+You can detach from the container again (and leave it running) with CTRL-c (for a quiet exit) or CTRL-\ to get a stacktrace of the Docker client when it quits. When you detach from the container, the exit code will be returned to the client.
+.SH "OPTIONS"
+.TP
+.B --no-stdin=\fItrue\fR|\fIfalse\fR: 
+When set to true, do not attach to stdin. The default is \fIfalse\fR.
+.TP
+.B --sig-proxy=\fItrue\fR|\fIfalse\fR: 
+When set to true, proxy all received signals to the process (even in non-tty mode). The default is \fItrue\fR.
+.sp
+.SH EXAMPLES
+.sp
+.PP
+.B Attaching to a container
+.TP
+In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the \fBdocker attach\fR command:
+.sp
+.nf
+.RS
+# ID=$(sudo docker run -d fedora /usr/bin/top -b)
+# sudo docker attach $ID
+top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
+Swap:   786428k total,        0k used,   786428k free,   221740k cached
+
+PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top
+
+top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+Cpu(s):  0.0%us,  0.2%sy,  0.0%ni, 99.8%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
+Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
+.RE
+.fi
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 65 - 0
contrib/man/man1/docker-build.1

@@ -0,0 +1,65 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-build.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-build \- Build a container image from a Dockerfile source at PATH
+.SH SYNOPSIS
+.B docker build 
+[\fB--no-cache\fR[=\fIfalse\fR] 
+[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR] 
+[\fB--rm\fR[=\fItrue\fR]]
+[\fB-t\fR|\fB--tag\fR=\fItag\fR] 
+PATH | URL | -
+.SH DESCRIPTION
+This will read the Dockerfile from the directory specified in \fBPATH\fR. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory are used by the ADD commands found within the Dockerfile. 
+Warning, this will send a lot of data to the Docker daemon if the current directory contains a lot of data.
+If the absolute path is provided instead of ‘.’, only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the Docker daemon.
+.sp
+When a single Dockerfile is given as the URL, no context is set. When a Git repository is given as the URL, the repository is used as the context.
+.SH "OPTIONS"
+.TP
+.B -q, --quiet=\fItrue\fR|\fIfalse\fR: 
+When set to true, suppress verbose build output. Default is \fIfalse\fR.
+.TP
+.B --rm=\fItrue\fR|\fIfalse\fR:
+When true, remove intermediate containers that are created during the build process. The default is true.
+.TP
+.B -t, --tag=\fItag\fR: 
+Tag to be applied to the resulting image on successful completion of the build.
+.TP
+.B --no-cache=\fItrue\fR|\fIfalse\fR
+When set to true, do not use a cache when building the image. The default is \fIfalse\fR.
+.sp
+.SH EXAMPLES
+.sp
+.sp
+.B Building an image from current directory
+.TP
+Using a Dockerfile, Docker images are built using the build command:
+.sp
+.RS
+docker build .
+.RE
+.sp
+If, for some reason, you do not want to remove the intermediate containers created during the build, you must set --rm=false.
+.sp
+.RS
+docker build --rm=false .
+.sp
+.RE
+.sp
+A good practice is to make a subdirectory with a related name and create the Dockerfile in that directory. E.g. a directory called mongo may contain a Dockerfile for a MongoDB image, or a directory called httpd may contain a Dockerfile for an Apache web server. 
+.sp
+It is also good practice to add the files required for the image to the subdirectory. These files will then be specified with the `ADD` instruction in the Dockerfile. Note: if you include a tar file, which is good practice, then Docker will automatically extract the contents of the tar file specified in the `ADD` instruction into the specified target.  
+.sp
+.B Building a container image using a URL
+.TP
+This will clone the GitHub repository and use it as the context. The Dockerfile at the root of the repository is used as the Dockerfile. This only works if the GitHub repository is a dedicated repository. Note that you can specify an arbitrary Git repository by using the ‘git://’ scheme. 
+.sp
+.RS
+docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
+.RE
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 84 - 0
contrib/man/man1/docker-images.1

@@ -0,0 +1,84 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-images.1
+.\"
+.TH "DOCKER" "1" "April 2014" "0.1" "Docker"
+.SH NAME
+docker-images \- List the images in the local repository 
+.SH SYNOPSIS
+.B docker images
+[\fB-a\fR|\fB--all\fR=\fIfalse\fR] 
+[\fB--no-trunc\fR[=\fIfalse\fR] 
+[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR] 
+[\fB-t\fR|\fB--tree\fR=\fIfalse\fR] 
+[\fB-v\fR|\fB--viz\fR=\fIfalse\fR] 
+[NAME]
+.SH DESCRIPTION
+This command lists the images stored in the local Docker repository. 
+.sp
+By default, intermediate images used during builds are not listed. Some of the output, e.g. the image ID, is truncated for space reasons. However, the truncated image ID, often just the first few characters, is enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size. 
+.sp
+The title REPOSITORY for the first column may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name. 
+.SH "OPTIONS"
+.TP
+.B -a, --all=\fItrue\fR|\fIfalse\fR: 
+When set to true, also include all intermediate images in the list. The default is false.
+.TP
+.B --no-trunc=\fItrue\fR|\fIfalse\fR: 
+When set to true, list the full image ID and not the truncated ID. The default is false.
+.TP
+.B -q, --quiet=\fItrue\fR|\fIfalse\fR: 
+When set to true, only show the numeric image IDs in the output. The default is false.
+.TP
+.B -t, --tree=\fItrue\fR|\fIfalse\fR: 
+When set to true, list the images in a dependency tree (hierarchy) format. The default is false.
+.TP
+.B -v, --viz=\fItrue\fR|\fIfalse\fR
+When set to true, list the graph in graphviz format. The default is \fIfalse\fR.
+.sp
+.SH EXAMPLES
+.sp
+.B Listing the images
+.TP
+To list the images in a local repository (not the registry) run:
+.sp
+.RS
+docker images
+.RE
+.sp
+The list will contain the image repository name, a tag for the image, an image ID, when it was created, and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE.
+.sp
+To get a verbose list of images which contains all the intermediate images used in builds use \fB-a\fR:
+.sp
+.RS
+docker images -a
+.RE
+.sp
+.B List images dependency tree hierarchy
+.TP
+To list the images in the local repository (not the registry) in a dependency tree format, use the \fB-t\fR|\fB--tree=true\fR option. 
+.sp
+.RS
+docker images -t 
+.RE
+.sp
+This displays a staggered hierarchy tree where the least indented image is the oldest, with dependent image layers branching inward (to the right) on subsequent lines. The newest or top level image layer is listed last in any tree branch. 
+.sp
+.B List images in GraphViz format
+.TP
+To display the list in a format consumable by GraphViz tools, run with \fB-v\fR|\fB--viz=true\fR. For example, to produce a .png graph file of the hierarchy use: 
+.sp
+.RS
+docker images --viz | dot -Tpng -o docker.png
+.sp
+.RE
+.sp
+.B Listing only the shortened image IDs
+.TP
+Listing just the shortened image IDs. This can be useful for some automated tools.
+.sp
+.RS
+docker images -q
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 39 - 0
contrib/man/man1/docker-info.1

@@ -0,0 +1,39 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-info.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-info \- Display system-wide information
+.SH SYNOPSIS
+.B docker info
+.SH DESCRIPTION
+This command displays system-wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version.
+.sp
+The data file is where the images are stored and the metadata file is where the metadata regarding those images is stored. When run for the first time, Docker allocates a certain amount of data space and metadata space from the space available on the volume where /var/lib/docker is mounted.
+.SH "OPTIONS"
+There are no available options.
+.sp
+.SH EXAMPLES
+.sp
+.B Display Docker system information
+.TP
+Here is a sample output:
+.sp
+.RS
+ # docker info
+ Containers: 18
+ Images: 95
+ Storage Driver: devicemapper
+  Pool Name: docker-8:1-170408448-pool
+  Data file: /var/lib/docker/devicemapper/devicemapper/data
+  Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata
+  Data Space Used: 9946.3 Mb
+  Data Space Total: 102400.0 Mb
+  Metadata Space Used: 9.9 Mb
+  Metadata Space Total: 2048.0 Mb
+ Execution Driver: native-0.1
+ Kernel Version: 3.10.0-116.el7.x86_64
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 237 - 0
contrib/man/man1/docker-inspect.1

@@ -0,0 +1,237 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-inspect.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-inspect \- Return low-level information on a container/image
+.SH SYNOPSIS
+.B docker inspect 
+[\fB-f\fR|\fB--format\fR="" 
+CONTAINER|IMAGE [CONTAINER|IMAGE...]
+.SH DESCRIPTION
+This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. 
+.SH "OPTIONS"
+.TP
+.B -f, --format="": 
+The text/template package of Go describes all the details of the format. See the examples section below.
+.SH EXAMPLES
+.sp
+.PP
+.B Getting information on a container
+.TP
+To get information on a container use its ID or instance name.
+.sp
+.fi
+.RS
+# docker inspect 1eb5fabf5a03
+
+[{
+    "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
+    "Created": "2014-04-04T21:33:52.02361335Z",
+    "Path": "/usr/sbin/nginx",
+    "Args": [],
+    "Config": {
+        "Hostname": "1eb5fabf5a03",
+        "Domainname": "",
+        "User": "",
+        "Memory": 0,
+        "MemorySwap": 0,
+        "CpuShares": 0,
+        "AttachStdin": false,
+        "AttachStdout": false,
+        "AttachStderr": false,
+        "PortSpecs": null,
+        "ExposedPorts": {
+            "80/tcp": {}
+        },
+        "Tty": true,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "HOME=/",
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+        ],
+        "Cmd": [
+            "/usr/sbin/nginx"
+        ],
+        "Dns": null,
+        "DnsSearch": null,
+        "Image": "summit/nginx",
+        "Volumes": null,
+        "VolumesFrom": "",
+        "WorkingDir": "",
+        "Entrypoint": null,
+        "NetworkDisabled": false,
+        "OnBuild": null,
+        "Context": {
+            "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650",
+            "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650"
+        }
+    },
+    "State": {
+        "Running": true,
+        "Pid": 858,
+        "ExitCode": 0,
+        "StartedAt": "2014-04-04T21:33:54.16259207Z",
+        "FinishedAt": "0001-01-01T00:00:00Z",
+        "Ghost": false
+    },
+    "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6",
+    "NetworkSettings": {
+        "IPAddress": "172.17.0.2",
+        "IPPrefixLen": 16,
+        "Gateway": "172.17.42.1",
+        "Bridge": "docker0",
+        "PortMapping": null,
+        "Ports": {
+            "80/tcp": [
+                {
+                    "HostIp": "0.0.0.0",
+                    "HostPort": "80"
+                }
+            ]
+        }
+    },
+    "ResolvConfPath": "/etc/resolv.conf",
+    "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
+    "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
+    "Name": "/ecstatic_ptolemy",
+    "Driver": "devicemapper",
+    "ExecDriver": "native-0.1",
+    "Volumes": {},
+    "VolumesRW": {},
+    "HostConfig": {
+        "Binds": null,
+        "ContainerIDFile": "",
+        "LxcConf": [],
+        "Privileged": false,
+        "PortBindings": {
+            "80/tcp": [
+                {
+                    "HostIp": "0.0.0.0",
+                    "HostPort": "80"
+                }
+            ]
+        },
+        "Links": null,
+        "PublishAllPorts": false,
+        "DriverOptions": {
+            "lxc": null
+        },
+        "CliAddress": ""
+    }
+.RE
+.nf
+.sp
+.B Getting the IP address of a container instance
+.TP
+To get the IP address of a container use:
+.sp
+.fi
+.RS
+# docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
+
+172.17.0.2
+.RE
+.nf
+.sp
+.B Listing all port bindings
+.TP
+One can loop over arrays and maps in the results to produce simple text output:
+.sp
+.fi
+.RS
+# docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
+
+80/tcp -> 80 
+.RE
+.nf
+.sp
+.B Getting information on an image
+.TP
+Use an image's ID or name (e.g. repository/name[:tag]) to get information on it.
+.sp
+.fi
+.RS
+docker inspect 58394af37342
+[{
+    "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9",
+    "parent": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
+    "created": "2014-02-03T16:10:40.500814677Z",
+    "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5",
+    "container_config": {
+        "Hostname": "88807319f25e",
+        "Domainname": "",
+        "User": "",
+        "Memory": 0,
+        "MemorySwap": 0,
+        "CpuShares": 0,
+        "AttachStdin": false,
+        "AttachStdout": false,
+        "AttachStderr": false,
+        "PortSpecs": null,
+        "ExposedPorts": null,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "HOME=/",
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+        ],
+        "Cmd": [
+            "/bin/sh",
+            "-c",
+            "#(nop) ADD fedora-20-medium.tar.xz in /"
+        ],
+        "Dns": null,
+        "DnsSearch": null,
+        "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
+        "Volumes": null,
+        "VolumesFrom": "",
+        "WorkingDir": "",
+        "Entrypoint": null,
+        "NetworkDisabled": false,
+        "OnBuild": null,
+        "Context": null
+    },
+    "docker_version": "0.6.3",
+    "author": "Lokesh Mandvekar \u003clsm5@redhat.com\u003e - ./buildcontainers.sh",
+    "config": {
+        "Hostname": "88807319f25e",
+        "Domainname": "",
+        "User": "",
+        "Memory": 0,
+        "MemorySwap": 0,
+        "CpuShares": 0,
+        "AttachStdin": false,
+        "AttachStdout": false,
+        "AttachStderr": false,
+        "PortSpecs": null,
+        "ExposedPorts": null,
+        "Tty": false,
+        "OpenStdin": false,
+        "StdinOnce": false,
+        "Env": [
+            "HOME=/",
+            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+        ],
+        "Cmd": null,
+        "Dns": null,
+        "DnsSearch": null,
+        "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
+        "Volumes": null,
+        "VolumesFrom": "",
+        "WorkingDir": "",
+        "Entrypoint": null,
+        "NetworkDisabled": false,
+        "OnBuild": null,
+        "Context": null
+    },
+    "architecture": "x86_64",
+    "Size": 385520098
+}]
+.RE
+.nf
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
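
Editorial aside: the --format examples above use Go's text/template syntax. Below is a minimal, self-contained Go sketch of the same mechanism; the struct and field names are illustrative stand-ins modeled on the inspect output shown above, not Docker's actual types, and only the text/template usage is standard-library fact.

    package main

    import (
    	"os"
    	"text/template"
    )

    // Illustrative stand-in for one "docker inspect" result; only the
    // fields referenced by the template below are modeled.
    type networkSettings struct {
    	IPAddress string
    }

    type container struct {
    	ID              string
    	NetworkSettings networkSettings
    }

    func main() {
    	c := container{
    		ID:              "1eb5fabf5a03",
    		NetworkSettings: networkSettings{IPAddress: "172.17.0.2"},
    	}

    	// Same template syntax as `docker inspect --format`.
    	tmpl := template.Must(template.New("ip").Parse("{{.NetworkSettings.IPAddress}}\n"))
    	if err := tmpl.Execute(os.Stdout, c); err != nil {
    		panic(err)
    	}
    }

Run against the container above, this prints 172.17.0.2, mirroring the --format example.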

+ 45 - 0
contrib/man/man1/docker-rm.1

@@ -0,0 +1,45 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-rm.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-rm \- Remove one or more containers.
+.SH SYNOPSIS
+.B docker rm 
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR] 
+[\fB-l\fR|\fB--link\fR[=\fIfalse\fR] 
+[\fB-v\fR|\fB--volumes\fR[=\fIfalse\fR] 
+CONTAINER [CONTAINER...]
+.SH DESCRIPTION
+This will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the \fBdocker ps -a\fR command.
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR: 
+When set to true, force the removal of the container. The default is \fIfalse\fR.
+.TP
+.B -l, --link=\fItrue\fR|\fIfalse\fR: 
+When set to true, remove the specified link and not the underlying container. The default is \fIfalse\fR.
+.TP
+.B -v, --volumes=\fItrue\fR|\fIfalse\fR: 
+When set to true, remove the volumes associated to the container. The default is \fIfalse\fR.
+.SH EXAMPLES
+.sp
+.PP
+.B Removing a container using its ID
+.TP
+To remove a container using its ID, find the ID either from the \fBdocker ps -a\fR command, from the output of the \fBdocker run\fR command, or from a file used to store it via \fBdocker run --cidfile\fR:
+.sp
+.RS
+docker rm abebf7571666
+.RE
+.sp
+.B Removing a container using the container name:
+.TP
+The name of the container can be found using the \fBdocker ps -a\fR command. Then use that name as follows:
+.sp
+.RS
+docker rm hopeful_morse
+.RE
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 29 - 0
contrib/man/man1/docker-rmi.1

@@ -0,0 +1,29 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-run.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-rmi \- Remove one or more images.
+.SH SYNOPSIS
+.B docker rmi
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR] 
+IMAGE [IMAGE...]
+.SH DESCRIPTION
+This will remove one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the \fB-f\fR option. To see all images on a host use the \fBdocker images\fR command.
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR: 
+When set to true, force the removal of the image. The default is \fIfalse\fR.
+.SH EXAMPLES
+.sp
+.PP
+.B Removing an image
+.TP
+Here is an example of removing an image:
+.sp
+.RS
+docker rmi fedora/httpd
+.RE
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 277 - 0
contrib/man/man1/docker-run.1

@@ -0,0 +1,277 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-run.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-run \- Run a process in an isolated container
+.SH SYNOPSIS
+.B docker run 
+[\fB-a\fR|\fB--attach\fR[=]] [\fB-c\fR|\fB--cpu-shares\fR[=0] [\fB-m\fR|\fB--memory\fR=\fImemory-limit\fR]
+[\fB--cidfile\fR=\fIfile\fR] [\fB-d\fR|\fB--detach\fR[=\fIfalse\fR]] [\fB--dns\fR=\fIIP-address\fR]
+[\fB--name\fR=\fIname\fR] [\fB-u\fR|\fB--user\fR=\fIusername\fR|\fIuid\fR]
+[\fB--link\fR=\fIname\fR:\fIalias\fR] 
+[\fB-e\fR|\fB--env\fR=\fIenvironment\fR] [\fB--entrypoint\fR=\fIcommand\fR] 
+[\fB--expose\fR=\fIport\fR] [\fB-P\fR|\fB--publish-all\fR[=\fIfalse\fR]]
+[\fB-p\fR|\fB--publish\fR=\fIport-mappping\fR] [\fB-h\fR|\fB--hostname\fR=\fIhostname\fR]
+[\fB--rm\fR[=\fIfalse\fR]] [\fB--privileged\fR[=\fIfalse\fR]]
+[\fB-i\fR|\fB--interactive\fR[=\fIfalse\fR] 
+[\fB-t\fR|\fB--tty\fR[=\fIfalse\fR]] [\fB--lxc-conf\fR=\fIoptions\fR]
+[\fB-n\fR|\fB--networking\fR[=\fItrue\fR]]
+[\fB-v\fR|\fB--volume\fR=\fIvolume\fR] [\fB--volumes-from\fR=\fIcontainer-id\fR]
+[\fB-w\fR|\fB--workdir\fR=\fIdirectory\fR] [\fB--sig-proxy\fR[=\fItrue\fR]]
+IMAGE [COMMAND] [ARG...]
+.SH DESCRIPTION
+.PP
+Run a process in a new container. \fBdocker run\fR starts a process with its own file system, its own networking, and its own isolated process tree. The \fIIMAGE\fR which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but \fBdocker run\fR gives final control to the operator or administrator who starts the container from the image. For that reason \fBdocker run\fR has more options than any other docker command.
+
+If the \fIIMAGE\fR is not already loaded then \fBdocker run\fR will pull the \fIIMAGE\fR, and all image dependencies, from the repository in the same way running \fBdocker pull\fR \fIIMAGE\fR, before it starts the container from that image.
+
+
+.SH "OPTIONS"
+
+.TP
+.B  -a, --attach=\fIstdin\fR|\fIstdout\fR|\fIstderr\fR: 
+Attach to stdin, stdout or stderr. In foreground mode (the default when -d is not specified), \fBdocker run\fR can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The \fB-a\fR option can be set for each of stdin, stdout, and stderr.  
+
+.TP
+.B  -c, --cpu-shares=0: 
+CPU shares in relative weight. You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via \fBdocker run\fR.
+
+.TP
+.B -m, --memory=\fImemory-limit\fR: 
+Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
+
+.TP
+.B --cidfile=\fIfile\fR: 
+Write the container ID to the file specified.
+
+.TP
+.B  -d, --detach=\fItrue\fR|\fIfalse\fR: 
+Detached mode. This runs the container in the background. It outputs the new container's ID and error messages. At any time you can run \fBdocker ps\fR in the other shell to view a list of the running containers. You can reattach to a detached container with \fBdocker attach\fR. If you choose to run a container in the detached mode, then you cannot use the --rm option.
+
+.TP
+.B --dns=\fIIP-address\fR: 
+Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g. 127.0.0.1). When this is the case the \fB--dns\fR flag is necessary for every run.
+
+.TP
+.B  -e, --env=\fIenvironment\fR: 
+Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. 
+
+.TP
+.B --entrypoint=\fIcommand\fR: 
+This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a \fB--entrypoint\fR and a string to specify the new ENTRYPOINT. 
+
+.TP
+.B --expose=\fIport\fR: 
+Expose a port from the container without publishing it to your host. A container's port can be exposed to other containers in three ways: 1) The developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) the operator can use the \fB--expose\fR option with \fBdocker run\fR, or 3) the container can be started with the \fB--link\fR option.
+
+.TP
+.B  -P, --publish-all=\fItrue\fR|\fIfalse\fR: 
+When set to true, publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the mapping between the host ports and the exposed ports, use \fBdocker port\fR. 
+
+.TP
+.B -p, --publish=[]: 
+Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)
+
+.TP
+.B -h , --hostname=\fIhostname\fR: 
+Sets the container host name that is available inside the container.
+  
+.TP
+.B -i , --interactive=\fItrue\fR|\fIfalse\fR: 
+When set to true, keep stdin open even if not attached. The default is false.
+
+.TP
+.B --link=\fIname\fR:\fIalias\fR: 
+Add link to another container. The format is name:alias. If the operator uses \fB--link\fR when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. 
+
+.TP
+.B -n, --networking=\fItrue\fR|\fIfalse\fR: 
+By default, all containers have networking enabled (true) and can make outgoing connections. The operator can disable networking by setting \fB--networking\fR to false. This disables all incoming and outgoing networking. In cases like this, I/O can only be performed through files or by using STDIN/STDOUT.
+
+Also, by default, the container will use the same DNS servers as the host, but the operator may override this with \fB--dns\fR. 
+
+.TP
+.B  --name=\fIname\fR: 
+Assign a name to the container. The operator can identify a container in three ways:
+.sp
+.nf
+UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
+UUID short identifier (“f78375b1c487”)
+Name (“jonah”)
+.fi
+.sp
+The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with \fB--name\fR then the daemon will also generate a random string name. The name is useful when defining links (see \fB--link\fR) (or any other place you need to identify a container). This works for both background and foreground Docker containers.
+
+.TP
+.B --privileged=\fItrue\fR|\fIfalse\fR: 
+Give extended privileges to this container. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices.
+
+When the operator executes \fBdocker run --privileged\fR, Docker will enable access to all devices on the host as well as set some configuration in AppArmor (\fB???\fR) to allow the container nearly all the same access to the host as processes running outside of a container on the host.
+
+.TP
+.B --rm=\fItrue\fR|\fIfalse\fR: 
+If set to \fItrue\fR the container is automatically removed when it exits. The default is \fIfalse\fR. This option is incompatible with \fB-d\fR.
+
+.TP
+.B --sig-proxy=\fItrue\fR|\fIfalse\fR: 
+When set to true, proxy all received signals to the process (even in non-tty mode). The default is true.
+  
+.TP
+.B -t, --tty=\fItrue\fR|\fIfalse\fR: 
+When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default value is false.
+
+.TP
+.B -u, --user=\fIusername\fR|\fIuid\fR: 
+Set a username or UID for the container.
+
+.TP
+.B -v, --volume=\fIvolume\fR: 
+Bind mount a volume to the container. The \fB-v\fR option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the \fB--volumes-from\fR option. See examples.
+
+.TP
+.B --volumes-from=\fIcontainer-id\fR: 
+Will mount volumes from the specified container identified by container-id. Once a volume is mounted in one container it can be shared with other containers using the \fB--volumes-from\fR option when running those other containers. The volumes can be shared even if the original container with the mount is not running. 
+
+.TP
+.B -w, --workdir=\fIdirectory\fR: 
+Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the \fB-w\fR option. 
+
+.TP
+.B IMAGE: 
+The image name or ID.
+
+.TP
+.B COMMAND: 
+The command or program to run inside the image.
+
+.TP
+.B ARG: 
+The arguments for the command to be run in the container.
+
+.SH EXAMPLES
+.sp
+.sp
+.B Exposing log messages from the container to the host's log
+.TP
+If you want messages that are logged in your container to show up in the host's syslog/journal then you should bind mount the /dev/log device as follows.
+.sp
+.RS
+docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
+.RE
+.sp
+From inside the container you can test this by sending a message to the log.
+.sp
+.RS
+logger "Hello from my container"
+.sp
+.RE
+Then exit and check the journal.
+.RS
+.sp
+exit
+.sp
+journalctl -b | grep Hello
+.RE
+.sp
+This should list the message sent to logger.
+.sp
+.B Attaching to one or more from STDIN, STDOUT, STDERR
+.TP
+If you do not specify -a then Docker will attach everything (stdin,stdout,stderr). You can specify to which of the three standard streams (stdin, stdout, stderr) you’d like to connect instead, as in:
+.sp
+.RS
+docker run -a stdin -a stdout -i -t fedora /bin/bash
+.RE
+.sp
+.B Linking Containers
+.TP
+The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows: 
+.sp
+.RS
+docker run --name=link-test -d -i -t fedora/httpd
+.RE
+.sp
+.TP
+A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the \fB--link=<name>:<alias>\fR option.
+.sp
+.RS
+docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
+.RE
+.sp
+.TP
+Now the container linker is linked to container link-test with the alias lt. Running the \fBenv\fR command in the linker container shows environment variables with the LT (alias) context (\fBLT_\fR)
+.sp
+.nf
+.RS
+# env
+HOSTNAME=668231cb0978
+TERM=xterm
+LT_PORT_80_TCP=tcp://172.17.0.3:80
+LT_PORT_80_TCP_PORT=80
+LT_PORT_80_TCP_PROTO=tcp
+LT_PORT=tcp://172.17.0.3:80
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+PWD=/
+LT_NAME=/linker/lt
+SHLVL=1
+HOME=/
+LT_PORT_80_TCP_ADDR=172.17.0.3
+_=/usr/bin/env
+.RE
+.fi
+.sp
+.TP 
+When linking two containers, Docker will use the exposed ports of the container to create a secure tunnel for the parent to access. 
+.TP
+.sp
+.B Mapping Ports for External Usage
+.TP
+The exposed port of an application can be mapped to a host port using the \fB-p\fR flag. For example, an httpd port 80 can be mapped to the host port 8080 using the following:
+.sp
+.RS
+docker run -p 8080:80 -d -i -t fedora/httpd
+.RE
+.sp
+.TP
+.B Creating and Mounting a Data Volume Container
+.TP
+Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories so a couple of RUN mkdir instructions might be required for your fedora-data image: 
+.sp
+.RS
+docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
+.sp
+docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
+.RE
+.sp
+.TP
+Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. It is also possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, abstracting the actual data source from users of that data:
+.sp
+.RS
+docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
+.RE
+.TP
+.sp
+.B Mounting External Volumes
+.TP
+To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon:  
+.sp
+.RS
+docker run -v /var/db:/data1 -i -t fedora bash
+.RE
+.sp
+.TP
+When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the /var/db directory is not writable to the container. A "Permission Denied" message will occur, and an avc: message will appear in the host's syslog. 
+.sp
+.TP
+To work around this, at the time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory:  
+.sp
+.RS
+chcon -Rt svirt_sandbox_file_t /var/db
+.RE
+.sp
+.TP
+Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db.
+.sp 
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
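
Editorial aside: the LT_* entries in the env output of the linking example above are ordinary environment variables inside the linked container. A minimal Go sketch of a program consuming them follows; the variable names are taken from that example output, while the TCP dial and the rest of the program are purely illustrative and not part of Docker.

    package main

    import (
    	"fmt"
    	"net"
    	"os"
    )

    func main() {
    	// Names derive from the --link alias "lt" used in the example above.
    	addr := os.Getenv("LT_PORT_80_TCP_ADDR") // e.g. 172.17.0.3
    	port := os.Getenv("LT_PORT_80_TCP_PORT") // e.g. 80
    	if addr == "" || port == "" {
    		fmt.Fprintln(os.Stderr, "link environment variables not set; was the container started with --link?")
    		os.Exit(1)
    	}

    	// Dial the linked container over the private bridge network.
    	conn, err := net.Dial("tcp", net.JoinHostPort(addr, port))
    	if err != nil {
    		fmt.Fprintln(os.Stderr, "connect failed:", err)
    		os.Exit(1)
    	}
    	defer conn.Close()
    	fmt.Println("connected to linked container at", conn.RemoteAddr())
    }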

+ 49 - 0
contrib/man/man1/docker-tag.1

@@ -0,0 +1,49 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-tag.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-tag \- Tag an image in the repository
+.SH SYNOPSIS
+.B docker tag 
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR] 
+\fBIMAGE\fR [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+.SH DESCRIPTION
+This will tag an image in the repository. 
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR: 
+When set to true, force the tag name. The default is \fIfalse\fR.
+.TP
+.B REGISTRYHOST:
+The hostname of the registry if required. This may also include the port separated by a ':'
+.TP
+.B USERNAME:
+The username or other qualifying identifier for the image.
+.TP
+.B NAME:
+The image name. 
+.TP
+.B TAG:
+The tag you are assigning to the image.
+.SH EXAMPLES
+.sp
+.PP
+.B Tagging an image
+.TP
+Here is an example where an image is tagged with the tag 'Version-1.0':
+.sp
+.RS
+docker tag 0e5574283393 fedora/httpd:Version-1.0
+.RE
+.sp
+.B Tagging an image for an internal repository
+.TP
+To push an image to an internal registry, and not the default docker.io based registry, you must tag it with the registry hostname and port (if needed).
+.sp
+.RS
+docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 172 - 0
contrib/man/man1/docker.1

@@ -0,0 +1,172 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker \- Docker image and container command line interface
+.SH SYNOPSIS
+.B docker [OPTIONS] [COMMAND] [arg...]
+.SH DESCRIPTION
+\fBdocker\fR has two distinct functions. It is used for starting the Docker daemon and for running the CLI (i.e., to command the daemon to manage images, containers, etc.). So \fBdocker\fR is both a server, as a daemon, and a client to the daemon through the CLI.
+.sp
+To run the Docker daemon you do not specify any of the commands listed below but must specify the \fB-d\fR option. The other options listed below are for the daemon only.
+.sp
+The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explains usage and arguments. 
+.sp
+To see the man page for a command run \fBman docker <command>\fR.
+.SH "OPTIONS"
+.B \-D=false: 
+Enable debug mode
+.TP
+.B \-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use. 
+When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
+.TP
+.B \-\-api-enable-cors=false
+Enable CORS headers in the remote API
+.TP
+.B \-b=""
+Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
+.TP
+.B \-\-bip=""
+Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
+.TP
+.B \-d=false
+Enable daemon mode
+.TP
+.B \-\-dns=""
+Force Docker to use specific DNS servers
+.TP
+.B \-g="/var/lib/docker"
+Path to use as the root of the Docker runtime
+.TP
+.B \-\-icc=true
+Enable inter\-container communication
+.TP
+.B \-\-ip="0.0.0.0"
+Default IP address to use when binding container ports
+.TP
+.B \-\-iptables=true
+Enable Docker's addition of iptables rules
+.TP
+.B \-\-mtu=1500
+Set the containers network mtu
+.TP
+.B \-p="/var/run/docker.pid"
+Path to use for daemon PID file
+.TP
+.B \-r=true
+Restart previously running containers
+.TP
+.B \-s=""
+Force the Docker runtime to use a specific storage driver
+.TP
+.B \-v=false
+Print version information and quit
+.SH "COMMANDS"
+.TP
+.B attach 
+Attach to a running container
+.TP
+.B build 
+Build a container from a Dockerfile
+.TP
+.B commit 
+Create a new image from a container's changes
+.TP
+.B cp 
+Copy files/folders from the containers filesystem to the host at path
+.TP
+.B diff 
+Inspect changes on a container's filesystem
+    
+.TP
+.B events
+Get real time events from the server
+.TP
+.B export 
+Stream the contents of a container as a tar archive
+.TP
+.B history
+Show the history of an image
+.TP
+.B images
+List images
+.TP
+.B import 
+Create a new filesystem image from the contents of a tarball
+.TP
+.B info 
+Display system-wide information
+.TP
+.B insert 
+Insert a file in an image
+.TP
+.B inspect  
+Return low-level information on a container
+.TP
+.B kill 
+Kill a running container (which includes the wrapper process and everything inside it) 
+.TP
+.B load 
+Load an image from a tar archive
+.TP
+.B login 
+Register or Login to a Docker registry server
+.TP
+.B logs 
+Fetch the logs of a container
+.TP
+.B port 
+Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
+.TP
+.B ps 
+List containers
+.TP
+.B pull 
+Pull an image or a repository from a Docker registry server
+.TP
+.B push 
+Push an image or a repository to a Docker registry server
+.TP
+.B restart 
+Restart a running container
+.TP
+.B rm 
+Remove one or more containers
+.TP
+.B rmi 
+Remove one or more images
+.TP
+.B run 
+Run a command in a new container
+.TP
+.B save 
+Save an image to a tar archive
+.TP
+.B search 
+Search for an image in the Docker index
+.TP
+.B start 
+Start a stopped container
+.TP
+.B stop 
+Stop a running container
+.TP
+.B tag 
+Tag an image into a repository
+.TP
+.B top 
+Lookup the running processes of a container
+.TP
+.B version
+Show the Docker version information
+.TP
+.B wait 
+Block until a container stops, then print its exit code
+.SH EXAMPLES
+.sp
+For specific examples please see the man page for the specific Docker command.
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.

+ 1 - 0
contrib/mkimage-debootstrap.sh

@@ -219,6 +219,7 @@ if [ -z "$strictDebootstrap" ]; then
 	
 	# make sure our packages lists are as up to date as we can get them
 	sudo chroot . apt-get update
+	sudo chroot . apt-get dist-upgrade -y
 fi
 
 if [ "$justTar" ]; then

+ 1 - 1
contrib/mkseccomp.pl

@@ -10,7 +10,7 @@
 # can configure the list of syscalls.  When run, this script produces output
 # which, when stored in a file, can be passed to docker as follows:
 #
-# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
+# docker run --lxc-conf="lxc.seccomp=$file" <rest of arguments>
 #
 # The included sample file shows how to cut about a quarter of all syscalls,
 # which affecting most applications.

+ 12 - 7
config.go → daemonconfig/config.go

@@ -1,10 +1,9 @@
-package docker
+package daemonconfig
 
 import (
-	"net"
-
 	"github.com/dotcloud/docker/engine"
-	"github.com/dotcloud/docker/networkdriver"
+	"github.com/dotcloud/docker/runtime/networkdriver"
+	"net"
 )
 
 const (
@@ -13,11 +12,12 @@ const (
 )
 
 // FIXME: separate runtime configuration from http api configuration
-type DaemonConfig struct {
+type Config struct {
 	Pidfile                     string
 	Root                        string
 	AutoRestart                 bool
 	Dns                         []string
+	DnsSearch                   []string
 	EnableIptables              bool
 	EnableIpForward             bool
 	DefaultIp                   net.IP
@@ -28,12 +28,13 @@ type DaemonConfig struct {
 	ExecDriver                  string
 	Mtu                         int
 	DisableNetwork              bool
+	EnableSelinuxSupport        bool
 }
 
 // ConfigFromJob creates and returns a new DaemonConfig object
 // by parsing the contents of a job's environment.
-func DaemonConfigFromJob(job *engine.Job) *DaemonConfig {
-	config := &DaemonConfig{
+func ConfigFromJob(job *engine.Job) *Config {
+	config := &Config{
 		Pidfile:                     job.Getenv("Pidfile"),
 		Root:                        job.Getenv("Root"),
 		AutoRestart:                 job.GetenvBool("AutoRestart"),
@@ -45,10 +46,14 @@ func DaemonConfigFromJob(job *engine.Job) *DaemonConfig {
 		InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
 		GraphDriver:                 job.Getenv("GraphDriver"),
 		ExecDriver:                  job.Getenv("ExecDriver"),
+		EnableSelinuxSupport:        false, // FIXME: hardcoded default to disable selinux for .10 release
 	}
 	if dns := job.GetenvList("Dns"); dns != nil {
 		config.Dns = dns
 	}
+	if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil {
+		config.DnsSearch = dnsSearch
+	}
 	if mtu := job.GetenvInt("Mtu"); mtu != 0 {
 		config.Mtu = mtu
 	} else {

+ 73 - 4
docker/docker.go

@@ -1,21 +1,35 @@
 package main
 
 import (
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
+	"io/ioutil"
 	"log"
 	"os"
 	"strings"
 
 	"github.com/dotcloud/docker/api"
+	"github.com/dotcloud/docker/api/client"
 	"github.com/dotcloud/docker/builtins"
 	"github.com/dotcloud/docker/dockerversion"
 	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/opts"
 	flag "github.com/dotcloud/docker/pkg/mflag"
-	"github.com/dotcloud/docker/pkg/opts"
 	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/utils"
 )
 
+const (
+	defaultCaFile   = "ca.pem"
+	defaultKeyFile  = "key.pem"
+	defaultCertFile = "cert.pem"
+)
+
+var (
+	dockerConfDir = os.Getenv("HOME") + "/.docker/"
+)
+
 func main() {
 	if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
 		// Running in init mode
@@ -35,16 +49,23 @@ func main() {
 		flSocketGroup        = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group")
 		flEnableCors         = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
 		flDns                = opts.NewListOpts(opts.ValidateIp4Address)
-		flEnableIptables     = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules")
-		flEnableIpForward    = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward")
+		flDnsSearch          = opts.NewListOpts(opts.ValidateDomain)
+		flEnableIptables     = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
+		flEnableIpForward    = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
 		flDefaultIp          = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
 		flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
 		flGraphDriver        = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver")
 		flExecDriver         = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver")
 		flHosts              = opts.NewListOpts(api.ValidateHost)
 		flMtu                = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available")
+		flTls                = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
+		flTlsVerify          = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
+		flCa                 = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here")
+		flCert               = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file")
+		flKey                = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file")
 	)
 	flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
+	flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
 	flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified")
 
 	flag.Parse()
@@ -73,6 +94,7 @@ func main() {
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
 	}
+
 	if *flDaemon {
 		if flag.NArg() != 0 {
 			flag.Usage()
@@ -115,6 +137,7 @@ func main() {
 			job.Setenv("Root", realRoot)
 			job.SetenvBool("AutoRestart", *flAutoRestart)
 			job.SetenvList("Dns", flDns.GetAll())
+			job.SetenvList("DnsSearch", flDnsSearch.GetAll())
 			job.SetenvBool("EnableIptables", *flEnableIptables)
 			job.SetenvBool("EnableIpForward", *flEnableIpForward)
 			job.Setenv("BridgeIface", *bridgeName)
@@ -140,6 +163,12 @@ func main() {
 		job.SetenvBool("EnableCors", *flEnableCors)
 		job.Setenv("Version", dockerversion.VERSION)
 		job.Setenv("SocketGroup", *flSocketGroup)
+
+		job.SetenvBool("Tls", *flTls)
+		job.SetenvBool("TlsVerify", *flTlsVerify)
+		job.Setenv("TlsCa", *flCa)
+		job.Setenv("TlsCert", *flCert)
+		job.Setenv("TlsKey", *flKey)
 		if err := job.Run(); err != nil {
 			log.Fatal(err)
 		}
@@ -148,7 +177,47 @@ func main() {
 			log.Fatal("Please specify only one -H")
 		}
 		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
-		if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
+
+		var (
+			cli       *client.DockerCli
+			tlsConfig tls.Config
+		)
+		tlsConfig.InsecureSkipVerify = true
+
+		// If we should verify the server, we need to load a trusted ca
+		if *flTlsVerify {
+			*flTls = true
+			certPool := x509.NewCertPool()
+			file, err := ioutil.ReadFile(*flCa)
+			if err != nil {
+				log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
+			}
+			certPool.AppendCertsFromPEM(file)
+			tlsConfig.RootCAs = certPool
+			tlsConfig.InsecureSkipVerify = false
+		}
+
+		// If tls is enabled, try to load and send client certificates
+		if *flTls || *flTlsVerify {
+			_, errCert := os.Stat(*flCert)
+			_, errKey := os.Stat(*flKey)
+			if errCert == nil && errKey == nil {
+				*flTls = true
+				cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
+				if err != nil {
+					log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
+				}
+				tlsConfig.Certificates = []tls.Certificate{cert}
+			}
+		}
+
+		if *flTls || *flTlsVerify {
+			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
+		} else {
+			cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
+		}
+
+		if err := cli.ParseCommands(flag.Args()...); err != nil {
 			if sterr, ok := err.(*utils.StatusError); ok {
 				if sterr.Status != "" {
 					log.Println(sterr.Status)

+ 0 - 1
docs/MAINTAINERS

@@ -1,3 +1,2 @@
-Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
 James Turnbull <james@lovedthanlost.net> (@jamtur01)
 Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)

+ 1 - 1
docs/sources/articles/runmetrics.rst

@@ -63,7 +63,7 @@ For Docker containers using cgroups, the container name will be the
 full ID or long ID of the container. If a container shows up as
 ae836c95b4c3 in ``docker ps``, its long ID might be something like
 ``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You
-can look it up with ``docker inspect`` or ``docker ps -notrunc``.
+can look it up with ``docker inspect`` or ``docker ps --no-trunc``.
 
 Putting everything together to look at the memory metrics for a Docker
 container, take a look at ``/sys/fs/cgroup/memory/lxc/<longid>/``.

+ 4 - 2
docs/sources/articles/security.rst

@@ -7,7 +7,7 @@
 Docker Security
 ===============
 
-  *Adapted from* `Containers & Docker: How Secure are They? <blogsecurity>`_
+  *Adapted from* `Containers & Docker: How Secure are They? <blogsecurity_>`_
 
 There are three major areas to consider when reviewing Docker security:
 
@@ -82,6 +82,8 @@ when some applications start to misbehave.
 Control Groups have been around for a while as well: the code was
 started in 2006, and initially merged in kernel 2.6.24.
 
+.. _dockersecurity_daemon:
+
 Docker Daemon Attack Surface
 ----------------------------
 
@@ -261,7 +263,7 @@ with Docker, since everything is provided by the kernel anyway.
 
 For more context and especially for comparisons with VMs and other
 container systems, please also see the `original blog post
-<blogsecurity>`_.
+<blogsecurity_>`_.
 
 .. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/
 

+ 15 - 0
docs/sources/examples/apt-cacher-ng.Dockerfile

@@ -0,0 +1,15 @@
+#
+# Build: docker build -t apt-cacher .
+# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
+#
+# and then you can run containers with:
+#	docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
+#
+FROM		ubuntu
+MAINTAINER	SvenDowideit@docker.com
+
+VOLUME		["/var/cache/apt-cacher-ng"]
+RUN		apt-get update ; apt-get install -yq apt-cacher-ng
+
+EXPOSE		3142
+CMD		chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*

+ 102 - 0
docs/sources/examples/apt-cacher-ng.rst

@@ -0,0 +1,102 @@
+:title: Running an apt-cacher-ng service
+:description: Installing and running an apt-cacher-ng service
+:keywords: docker, example, package installation, networking, debian, ubuntu
+
+.. _running_apt-cacher-ng_service:
+
+Apt-Cacher-ng Service
+=====================
+
+.. include:: example_header.inc
+
+
+When you have multiple Docker servers, or build unrelated Docker containers
+which can't make use of the Docker build cache, it can be useful to have a 
+caching proxy for your packages. This container makes the second download of
+any package almost instant.
+
+Use the following Dockerfile:
+
+.. literalinclude:: apt-cacher-ng.Dockerfile
+
+Build the image using:
+
+.. code-block:: bash
+
+    $ sudo docker build -t eg_apt_cacher_ng .
+
+Then run it, mapping the exposed port to one on the host:
+
+.. code-block:: bash
+
+    $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
+
+To see the logfiles that are 'tailed' in the default command, you can use: 
+
+.. code-block:: bash
+
+    $ sudo docker logs -f test_apt_cacher_ng
+
+To get your Debian-based containers to use the proxy, you can do one of three things:
+
+1. Add an apt proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy``
+2. Set an environment variable: ``http_proxy=http://dockerhost:3142/``
+3. Change your ``sources.list`` entries to start with ``http://dockerhost:3142/``
+
+**Option 1** injects the settings safely into your apt configuration in a local
+version of a common base:
+
+.. code-block:: bash
+
+    FROM ubuntu
+    RUN  echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
+    RUN apt-get update ; apt-get install vim git
+
+    # docker build -t my_ubuntu .
+
+**Option 2** is good for testing, but will 
+break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` and others:
+
+.. code-block:: bash
+
+    $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
+
+**Option 3** is the least portable, but there will be times when you might need to
+do it, and you can do it from your ``Dockerfile`` too; a sketch is shown below.
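+
+A minimal sketch of **Option 3** in a ``Dockerfile`` (the ``sed`` rewrite and the
+base image are only an illustration; adjust them to your own base image and
+``sources.list`` entries):
+
+.. code-block:: bash
+
+    FROM ubuntu
+    # Point the standard archive entries at the caching proxy
+    RUN sed -i 's|http://archive.ubuntu.com|http://dockerhost:3142|g' /etc/apt/sources.list
+    RUN apt-get update ; apt-get install -y vim git
+
+    # docker build -t my_proxied_ubuntu .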
+
+Apt-cacher-ng has some tools that allow you to manage the repository, and they 
+can be used by leveraging the ``VOLUME`` instruction, and the image we built to run the 
+service:
+
+.. code-block:: bash
+
+    $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
+
+    $$ /usr/lib/apt-cacher-ng/distkill.pl
+    Scanning /var/cache/apt-cacher-ng, please wait...
+    Found distributions:
+    bla, taggedcount: 0
+         1. precise-security (36 index files)
+         2. wheezy (25 index files)
+         3. precise-updates (36 index files)
+         4. precise (36 index files)
+         5. wheezy-updates (18 index files)
+
+    Found architectures:
+         6. amd64 (36 index files)
+         7. i386 (24 index files)
+
+    WARNING: The removal action may wipe out whole directories containing
+             index files. Select d to see detailed list.
+
+    (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q
+
+
+Finally, clean up after your test by stopping and removing the container, and
+then removing the image.
+
+.. code-block:: bash
+
+    $ sudo docker stop test_apt_cacher_ng
+    $ sudo docker rm test_apt_cacher_ng
+    $ sudo docker rmi eg_apt_cacher_ng

+ 1 - 0
docs/sources/examples/example_header.inc

@@ -4,4 +4,5 @@
     * This example assumes you have Docker running in daemon mode. For
       more information please see :ref:`running_examples`.
     * **If you don't like sudo** then see :ref:`dockergroup`
+    * **If you're using OS X or Docker via TCP** then you shouldn't use ``sudo``.
 

+ 17 - 17
docs/sources/examples/hello_world.rst

@@ -52,8 +52,8 @@ This command will run a simple ``echo`` command, that will echo ``hello world``
 
 **Explanation:**
 
-- **"sudo"** execute the following commands as user *root* 
-- **"docker run"** run a command in a new container 
+- **"sudo"** execute the following commands as user *root*
+- **"docker run"** run a command in a new container
 - **"busybox"** is the image we are running the command in.
 - **"/bin/echo"** is the command we want to run in the container
 - **"hello world"** is the input for the echo command
@@ -67,9 +67,9 @@ See the example in action
 .. raw:: html
 
    <iframe width="560" height="400" frameborder="0"
-           sandbox="allow-same-origin allow-scripts" 
-   srcdoc="<body><script type=&quot;text/javascript&quot; 
-           src=&quot;https://asciinema.org/a/7658.js&quot; 
+           sandbox="allow-same-origin allow-scripts"
+   srcdoc="<body><script type=&quot;text/javascript&quot;
+           src=&quot;https://asciinema.org/a/7658.js&quot;
            id=&quot;asciicast-7658&quot; async></script></body>">
    </iframe>
 
@@ -92,7 +92,7 @@ we stop it.
 
 .. code-block:: bash
 
-    CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
+    container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
 
 We are going to run a simple hello world daemon in a new container
 made from the ``ubuntu`` image.
@@ -104,30 +104,30 @@ made from the ``ubuntu`` image.
 - **"while true; do echo hello world; sleep 1; done"** is the mini
   script we want to run, that will just print hello world once a
   second until we stop it.
-- **$CONTAINER_ID** the output of the run command will return a
+- **$container_id** the output of the run command will return a
   container id, we can use in future commands to see what is going on
   with this process.
 
 .. code-block:: bash
 
-    sudo docker logs $CONTAINER_ID
+    sudo docker logs $container_id
 
 Check the logs to make sure it is working correctly.
 
 - **"docker logs**" This will return the logs for a container
-- **$CONTAINER_ID** The Id of the container we want the logs for.
+- **$container_id** The Id of the container we want the logs for.
 
 .. code-block:: bash
 
-    sudo docker attach -sig-proxy=false $CONTAINER_ID
+    sudo docker attach --sig-proxy=false $container_id
 
 Attach to the container to see the results in real-time.
 
 - **"docker attach**" This will allow us to attach to a background
   process to see what is going on.
-- **"-sig-proxy=false"** Do not forward signals to the container; allows
+- **"--sig-proxy=false"** Do not forward signals to the container; allows
   us to exit the attachment using Control-C without stopping the container.
-- **$CONTAINER_ID** The Id of the container we want to attach too.
+- **$container_id** The Id of the container we want to attach to.
 
 Exit from the container attachment by pressing Control-C.
 
@@ -141,12 +141,12 @@ Check the process list to make sure it is running.
 
 .. code-block:: bash
 
-    sudo docker stop $CONTAINER_ID
+    sudo docker stop $container_id
 
 Stop the container, since we don't need it anymore.
 
 - **"docker stop"** This stops a container
-- **$CONTAINER_ID** The Id of the container we want to stop.
+- **$container_id** The Id of the container we want to stop.
 
 .. code-block:: bash
 
@@ -162,9 +162,9 @@ See the example in action
 .. raw:: html
 
    <iframe width="560" height="400" frameborder="0"
-           sandbox="allow-same-origin allow-scripts" 
-   srcdoc="<body><script type=&quot;text/javascript&quot; 
-           src=&quot;https://asciinema.org/a/2562.js&quot; 
+           sandbox="allow-same-origin allow-scripts"
+   srcdoc="<body><script type=&quot;text/javascript&quot;
+           src=&quot;https://asciinema.org/a/2562.js&quot;
            id=&quot;asciicast-2562&quot; async></script></body>">
    </iframe>
 

+ 126 - 0
docs/sources/examples/https.rst

@@ -0,0 +1,126 @@
+:title: Docker HTTPS Setup
+:description: How to setup docker with https
+:keywords: docker, example, https, daemon
+
+.. _running_docker_https:
+
+Running Docker with https
+=========================
+
+By default, Docker runs via a non-networked Unix socket. It can also optionally
+communicate using an HTTP socket.
+
+If you need Docker reachable via the network in a safe manner, you can enable
+TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a
+trusted CA certificate.
+
+In daemon mode, it will only allow connections from clients authenticated by a
+certificate signed by that CA. In client mode, it will only connect to servers
+with a certificate signed by that CA.
+
+.. warning::
+
+  Using TLS and managing a CA is an advanced topic. Please familiarize yourself
+  with OpenSSL, x509, and TLS before using them in production.
+
+Create a CA, server and client keys with OpenSSL
+------------------------------------------------
+
+First, initialize the CA serial file and generate CA private and public keys:
+
+.. code-block:: bash
+
+    $ echo 01 > ca.srl
+    $ openssl genrsa -des3 -out ca-key.pem
+    $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
+
+Now that we have a CA, you can create a server key and certificate signing request.
+Make sure that `"Common Name (e.g. server FQDN or YOUR name)"` matches the hostname you will use
+to connect to Docker or just use '*' for a certificate valid for any hostname:
+
+.. code-block:: bash
+
+    $ openssl genrsa -des3 -out server-key.pem
+    $ openssl req -new -key server-key.pem -out server.csr
+
+Next we're going to sign the key with our CA:
+
+.. code-block:: bash
+
+    $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
+      -out server-cert.pem
+
+For client authentication, create a client key and certificate signing request:
+
+.. code-block:: bash
+
+    $ openssl genrsa -des3 -out client-key.pem
+    $ openssl req -new -key client-key.pem -out client.csr
+
+
+To make the key suitable for client authentication, create an extensions config file:
+
+.. code-block:: bash
+
+    $ echo extendedKeyUsage = clientAuth > extfile.cnf
+
+Now sign the key:
+
+.. code-block:: bash
+
+    $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
+      -out client-cert.pem -extfile extfile.cnf
+
+Finally, remove the passphrase from the client and server keys:
+
+.. code-block:: bash
+
+    $ openssl rsa -in server-key.pem -out server-key.pem
+    $ openssl rsa -in client-key.pem -out client-key.pem
+  
+Now you can make the Docker daemon only accept connections from clients providing
+a certificate trusted by our CA:
+
+.. code-block:: bash
+
+    $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
+      -H=0.0.0.0:4243
+
+To be able to connect to Docker and validate its certificate, you now need to provide your client keys,
+certificates and trusted CA:
+
+.. code-block:: bash
+
+   $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
+     -H=dns-name-of-docker-host:4243
+
+.. warning::
+
+  As shown in the example above, you don't have to run the ``docker``
+  client  with ``sudo`` or the ``docker`` group when you use
+  certificate authentication. That means anyone with the keys can
+  give any instructions to your Docker daemon, giving them root
+  access to the machine hosting the daemon. Guard these keys as you
+  would a root password!
+
+Other modes
+-----------
+
+If you don't want to have complete two-way authentication, you can run Docker in
+various other modes by mixing the flags.
+
+Daemon modes
+~~~~~~~~~~~~
+- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
+- tls, tlscert, tlskey: Do not authenticate clients
+
+Client modes
+~~~~~~~~~~~~
+- tls: Authenticate server based on public/default CA pool
+- tlsverify, tlscacert: Authenticate server based on given CA
+- tls, tlscert, tlskey: Authenticate with client certificate, do not authenticate
+  server based on given CA
+- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client certificate,
+  authenticate server based on given CA
+
+The client will send its client certificate if found, so you just need to drop
+your keys into `~/.docker/<ca, cert or key>.pem`.
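+
+As a sketch of one such mixed mode (hostnames, ports, and file names here are
+placeholders), a daemon that only encrypts traffic without authenticating
+clients, and a client that still verifies the daemon, could look like:
+
+.. code-block:: bash
+
+    # Daemon: TLS encryption only, no client verification
+    $ sudo docker -d --tls --tlscert=server-cert.pem --tlskey=server-key.pem \
+      -H=0.0.0.0:4243
+
+    # Client: verify the daemon's certificate against our CA
+    $ docker --tlsverify --tlscacert=ca.pem -H=dns-name-of-docker-host:4243 info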

+ 2 - 0
docs/sources/examples/index.rst

@@ -26,3 +26,5 @@ to more substantial services like those which you might find in production.
    using_supervisord
    cfengine_process_management
    python_web_app
+   apt-cacher-ng
+   https

+ 3 - 3
docs/sources/examples/mongodb.rst

@@ -47,7 +47,7 @@ divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working.
 
     # Hack for initctl not being available in Ubuntu
     RUN dpkg-divert --local --rename --add /sbin/initctl
-    RUN ln -s /bin/true /sbin/initctl
+    RUN ln -sf /bin/true /sbin/initctl
 
 Afterwards we'll be able to update our apt repositories and install MongoDB
 
@@ -86,10 +86,10 @@ the local port!
 .. code-block:: bash
 
     # Regular style
-    MONGO_ID=$(sudo docker run -d <yourname>/mongodb)
+    MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb)
 
     # Lean and mean
-    MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles)
+    MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb --noprealloc --smallfiles)
 
     # Check the logs out
     sudo docker logs $MONGO_ID

+ 3 - 3
docs/sources/examples/nodejs_web_app.rst

@@ -18,7 +18,7 @@ https://github.com/gasi/docker-node-hello.
 Create Node.js app
 ++++++++++++++++++
 
-First, create a ``package.json`` file that describes your app and its
+First, create a directory ``src`` where all the files will live. Then create a ``package.json`` file that describes your app and its
 dependencies:
 
 .. code-block:: json
@@ -50,7 +50,7 @@ Then, create an ``index.js`` file that defines a web app using the
       res.send('Hello World\n');
     });
 
-    app.listen(PORT)
+    app.listen(PORT);
     console.log('Running on http://localhost:' + PORT);
 
 
@@ -91,7 +91,7 @@ To install the right package for CentOS, we’ll use the instructions from the
 .. code-block:: bash
 
     # Enable EPEL for Node.js
-    RUN     rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
+    RUN     rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
     # Install Node.js and npm
     RUN     yum install -y npm
 

+ 1 - 1
docs/sources/examples/postgresql_service.Dockerfile

@@ -7,7 +7,7 @@ MAINTAINER SvenDowideit@docker.com
 
 # Add the PostgreSQL PGP key to verify their Debian packages.
 # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc 
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
 
 # Add PostgreSQL's repository. It contains the most recent stable release
 #     of PostgreSQL, ``9.3``.

+ 5 - 5
docs/sources/examples/postgresql_service.rst

@@ -37,24 +37,24 @@ And run the PostgreSQL server container (in the foreground):
 
 .. code-block:: bash
 
-    $ sudo docker run -rm -P -name pg_test eg_postgresql
+    $ sudo docker run --rm -P --name pg_test eg_postgresql
 
 There are  2 ways to connect to the PostgreSQL server. We can use 
 :ref:`working_with_links_names`, or we can access it from our host (or the network).
 
-.. note:: The ``-rm`` removes the container and its image when the container 
+.. note:: The ``--rm`` removes the container and its image when the container 
           exits successfully.
 
 Using container linking
 ^^^^^^^^^^^^^^^^^^^^^^^
 
 Containers can be linked to another container's ports directly using 
-``-link remote_name:local_alias`` in the client's ``docker run``. This will
+``--link remote_name:local_alias`` in the client's ``docker run``. This will
 set a number of environment variables that can then be used to connect:
 
 .. code-block:: bash
 
-    $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash
+    $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash
 
     postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
 
@@ -104,7 +104,7 @@ configuration and data:
 
 .. code-block:: bash
 
-    docker run -rm --volumes-from pg_test -t -i busybox sh
+    docker run --rm --volumes-from pg_test -t -i busybox sh
 
     / # ls
     bin      etc      lib      linuxrc  mnt      proc     run      sys      usr

+ 1 - 1
docs/sources/examples/python_web_app.rst

@@ -51,7 +51,7 @@ try things out, and then exit when you're done.
 
 .. code-block:: bash
 
-    $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash
+    $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash
 
     $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz
     $$ /usr/local/bin/buildapp $URL

+ 4 - 4
docs/sources/examples/running_redis_service.rst

@@ -18,11 +18,11 @@ Firstly, we create a ``Dockerfile`` for our new Redis image.
 
 .. code-block:: bash
 
-    FROM        ubuntu:12.10
-    RUN         apt-get update
-    RUN         apt-get -y install redis-server
+    FROM        debian:jessie
+    RUN         apt-get update && apt-get install -y redis-server
     EXPOSE      6379
     ENTRYPOINT  ["/usr/bin/redis-server"]
+    CMD ["--bind", "0.0.0.0"]
 
 Next we build an image from our ``Dockerfile``. Replace ``<your username>`` 
 with your own user name.
@@ -49,7 +49,7 @@ use a container link to provide access to our Redis database.
 Create your web application container
 -------------------------------------
 
-Next we can create a container for our application. We're going to use the ``-link`` 
+Next we can create a container for our application. We're going to use the ``--link`` 
 flag to create a link to the ``redis`` container we've just created with an alias of 
 ``db``. This will create a secure tunnel to the ``redis`` container and expose the 
 Redis instance running inside that container to only this container.

+ 1 - 1
docs/sources/examples/running_riak_service.rst

@@ -88,7 +88,7 @@ Almost there. Next, we add a hack to get us by the lack of ``initctl``:
     # Hack for initctl
     # See: https://github.com/dotcloud/docker/issues/1024
     RUN dpkg-divert --local --rename --add /sbin/initctl
-    RUN ln -s /bin/true /sbin/initctl
+    RUN ln -sf /bin/true /sbin/initctl
 
 Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH:
 

+ 2 - 2
docs/sources/examples/running_ssh_service.rst

@@ -19,14 +19,14 @@ Build the image using:
 
 .. code-block:: bash
 
-    $ sudo docker build -rm -t eg_sshd .
+    $ sudo docker build -t eg_sshd .
 
 Then run it. You can then use ``docker port`` to find out what host port the container's
 port 22 is mapped to:
 
 .. code-block:: bash
 
-    $ sudo docker run -d -P -name test_sshd eg_sshd
+    $ sudo docker run -d -P --name test_sshd eg_sshd
     $ sudo docker port test_sshd 22
     0.0.0.0:49154
 

+ 32 - 0
docs/sources/installation/amazon.rst

@@ -9,6 +9,7 @@ Amazon EC2
 
 There are several ways to install Docker on AWS EC2:
 
+* :ref:`amazonquickstart_new` or
 * :ref:`amazonquickstart` or
 * :ref:`amazonstandard`
 
@@ -61,6 +62,37 @@ for every Docker command.
 Once you've got Docker installed, you're ready to try it out -- head
 on over to the :doc:`../use/basics` or :doc:`../examples/index` section.
 
+.. _amazonquickstart_new:
+
+Amazon QuickStart (Release Candidate - March 2014)
+--------------------------------------------------
+
+Amazon just published new Docker-ready AMIs (2014.03 Release Candidate).  Docker packages 
+can now be installed from Amazon's provided Software Repository.
+
+1. **Choose an image:**
+
+   * Launch the `Create Instance Wizard
+     <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
+     on your AWS Console.
+
+   * Click the ``Community AMI`` menu option on the left side
+   
+   * Search for '2014.03' and select one of the Amazon-provided AMIs, for example ``amzn-ami-pv-2014.03.rc-0.x86_64-ebs``
+
+   * For testing you can use the default (possibly free)
+     ``t1.micro`` instance (more info on `pricing
+     <http://aws.amazon.com/en/ec2/pricing/>`_).
+
+   * Click the ``Next: Configure Instance Details`` button at the bottom right.
+   
+2. After a few more standard choices, where the defaults are probably fine, your Amazon
+   Linux instance should be running!
+   
+3. SSH to your instance to install Docker: ``ssh -i <path to your private key> ec2-user@<your public IP address>``
+
+4. Once connected to the instance, type ``sudo yum install -y docker ; sudo service docker start`` to install and start Docker.
+
 .. _amazonstandard:
 
 Standard Ubuntu Installation

+ 14 - 1
docs/sources/installation/binaries.rst

@@ -29,6 +29,12 @@ To run properly, docker needs the following software to be installed at runtime:
 - iptables version 1.4 or later
 - Git version 1.7 or later
 - XZ Utils 4.9 or later
+- a `properly mounted
+  <https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount>`_
+  cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is
+  <https://github.com/dotcloud/docker/issues/2683>`_ `not
+  <https://github.com/dotcloud/docker/issues/3485>`_ `sufficient
+  <https://github.com/dotcloud/docker/issues/4568>`_)
 
 
 Check kernel dependencies
@@ -37,6 +43,9 @@ Check kernel dependencies
 Docker in daemon mode has specific kernel requirements. For details,
 check your distribution in :ref:`installation_list`.
 
+In general, a 3.8 Linux kernel (or higher) is preferred, as some of the 
+prior versions have known issues that are triggered by Docker.
+
 Note that Docker also has a client mode, which can run on virtually
 any Linux kernel (it even builds on OSX!).
 
@@ -49,6 +58,9 @@ Get the docker binary:
     wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker
     chmod +x docker
 
+.. note::
+    If you have trouble downloading the binary, you can also get the smaller
+    compressed release file: https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
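+
+A minimal sketch of using that compressed file instead (assuming the archive
+unpacks the binary to ``usr/local/bin/docker``; adjust the path if the layout
+differs):
+
+.. code-block:: bash
+
+    wget https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
+    tar -xvzf docker-latest.tgz
+    cp usr/local/bin/docker ./docker
+    chmod +x docker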
 
 Run the docker daemon
 ---------------------
@@ -77,7 +89,8 @@ always run as the root user, but if you run the ``docker`` client as a
 user in the *docker* group then you don't need to add ``sudo`` to all
 the client commands.
 
-.. warning:: The *docker* group is root-equivalent.
+.. warning:: The *docker* group (or the group specified with ``-G``) is
+   root-equivalent; see :ref:`dockersecurity_daemon` for details.
 
 
 Upgrades

+ 4 - 4
docs/sources/installation/fedora.rst

@@ -23,15 +23,15 @@ The ``docker-io`` package provides Docker on Fedora.
 
 If you have the (unrelated) ``docker`` package installed already, it will
 conflict with ``docker-io``. There's a `bug report`_ filed for it.
-To proceed with ``docker-io`` installation on Fedora 19, please remove
-``docker`` first.
+To proceed with ``docker-io`` installation on Fedora 19 or Fedora 20, please
+remove ``docker`` first.
 
 .. code-block:: bash
 
    sudo yum -y remove docker
 
-For Fedora 20 and later, the ``wmdocker`` package will provide the same
-functionality as ``docker`` and will also not conflict with ``docker-io``.
+For Fedora 21 and later, the ``wmdocker`` package will provide the same
+functionality as the old ``docker`` and will also not conflict with ``docker-io``.
 
 .. code-block:: bash
 

+ 1 - 0
docs/sources/installation/index.rst

@@ -30,4 +30,5 @@ Contents:
    amazon
    rackspace
    google
+   softlayer
    binaries

+ 6 - 5
docs/sources/installation/mac.rst

@@ -65,11 +65,12 @@ Run the following commands to get it downloaded and set up:
 
 .. code-block:: bash
 
-    # Get the file
-    curl -o docker https://get.docker.io/builds/Darwin/x86_64/docker-latest
-
-    # Mark it executable
-    chmod +x docker
+    # Get the docker client file
+    DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \
+    curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \
+    gunzip $DIR/ld.tgz && \
+    tar xvf $DIR/ld.tar -C $DIR/ && \
+    cp $DIR/usr/local/bin/docker ./docker
 
     # Set the environment variable for the docker daemon
     export DOCKER_HOST=tcp://127.0.0.1:4243

+ 4 - 0
docs/sources/installation/rhel.rst

@@ -22,6 +22,9 @@ for the RHEL distribution.
 Also note that due to the current Docker limitations, Docker is able to run
 only on the **64 bit** architecture.
 
+You will need `RHEL 6.5`_ or higher, with a RHEL 6 kernel version 2.6.32-431 or higher,
+as this kernel includes specific fixes that allow Docker to work.
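+
+To check whether the running kernel meets that requirement (the exact suffix
+will vary with architecture and errata level):
+
+.. code-block:: bash
+
+    # RHEL 6.5 ships kernel 2.6.32-431; anything at or above that is fine
+    uname -r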
+
 Installation
 ------------
 
@@ -78,4 +81,5 @@ If you have any issues - please report them directly in the `Red Hat Bugzilla fo
 .. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
 .. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io
 .. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676
+.. _RHEL 6.5: https://access.redhat.com/site/articles/3078#RHEL6
 

+ 25 - 0
docs/sources/installation/softlayer.rst

@@ -0,0 +1,25 @@
+:title: Installation on IBM SoftLayer 
+:description: Please note this project is currently under heavy development. It should not be used in production. 
+:keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, installation
+
+IBM SoftLayer
+=============
+
+.. include:: install_header.inc
+
+IBM SoftLayer QuickStart
+-------------------------
+
+1. Create an `IBM SoftLayer account <https://www.softlayer.com/cloudlayer/>`_.
+2. Log in to the `SoftLayer Console <https://control.softlayer.com/devices/>`_.
+3. Go to `Order Hourly Computing Instance Wizard <https://manage.softlayer.com/Sales/orderHourlyComputingInstance>`_ on your SoftLayer Console.
+4. Create a new *CloudLayer Computing Instance* (CCI) using the default values for all the fields and choose:
+
+- *First Available* as ``Datacenter`` and 
+- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* as ``Operating System``.
+
+5. Click the *Continue Your Order* button at the bottom right and select *Go to checkout*.
+6. Insert the required *User Metadata* and place the order.
+7. Then continue with the :ref:`ubuntu_linux` instructions.
+
+Continue with the :ref:`hello_world` example.

+ 21 - 10
docs/sources/installation/ubuntulinux.rst

@@ -64,15 +64,26 @@ Installation
    an earlier version, you will need to follow them again.
 
 Docker is available as a Debian package, which makes installation
-easy. **See the :ref:`installmirrors` section below if you are not in
+easy. **See the** :ref:`installmirrors` **section below if you are not in
 the United States.** Other sources of the Debian packages may be
 faster for you to install.
 
-First add the Docker repository key to your local keychain.
+First, check that your APT system can deal with ``https`` URLs:
+the file ``/usr/lib/apt/methods/https`` should exist. If it doesn't,
+you need to install the package ``apt-transport-https``.
+
+.. code-block:: bash
+
+   [ -e /usr/lib/apt/methods/https ] || {
+     apt-get update
+     apt-get install apt-transport-https
+   }
+
+Then, add the Docker repository key to your local keychain.
 
 .. code-block:: bash
 
-   sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+   sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
 
 Add the Docker repository to your apt sources list, update and install the
 ``lxc-docker`` package.
@@ -82,7 +93,7 @@ continue installation.*
 
 .. code-block:: bash
 
-   sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
+   sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\
    > /etc/apt/sources.list.d/docker.list"
    sudo apt-get update
    sudo apt-get install lxc-docker
@@ -144,7 +155,7 @@ First add the Docker repository key to your local keychain.
 
 .. code-block:: bash
 
-   sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+   sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
 
 Add the Docker repository to your apt sources list, update and install the
 ``lxc-docker`` package.
@@ -186,7 +197,7 @@ client commands. As of 0.9.0, you can specify that a group other than ``docker``
 should own the Unix socket with the ``-G`` option.
 
 .. warning:: The *docker* group (or the group specified with ``-G``) is
-   root-equivalent.
+   root-equivalent; see :ref:`dockersecurity_daemon` for details.
 
 
 **Example:**
@@ -282,8 +293,6 @@ incoming connections on the Docker port (default 4243):
 
    sudo ufw allow 4243/tcp
 
-.. _installmirrors:
-
 Docker and local DNS server warnings
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -309,9 +318,9 @@ daemon for the containers:
    sudo nano /etc/default/docker
    ---
    # Add:
-   DOCKER_OPTS="-dns 8.8.8.8"
+   DOCKER_OPTS="--dns 8.8.8.8"
    # 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
-   # multiple DNS servers can be specified: -dns 8.8.8.8 -dns 192.168.1.1
+   # multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1
 
 The Docker daemon has to be restarted:
 
@@ -342,6 +351,8 @@ NetworkManager and Docker need to be restarted afterwards:
 
 .. warning:: This might make DNS resolution slower on some networks.
 
+.. _installmirrors:
+
 Mirrors
 ^^^^^^^
 

+ 4 - 4
docs/sources/reference/api/docker_io_accounts_api.rst

@@ -49,14 +49,14 @@ docker.io Accounts API
         {
             "id": 2,
             "username": "janedoe",
-            "url": "",
+            "url": "https://www.docker.io/api/v1.1/users/janedoe/",
             "date_joined": "2014-02-12T17:58:01.431312Z",
             "type": "User",
             "full_name": "Jane Doe",
             "location": "San Francisco, CA",
             "company": "Success, Inc.",
             "profile_url": "https://docker.io/",
-            "gravatar_email": "jane.doe+gravatar@example.com",
+            "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm"
             "email": "jane.doe@example.com",
             "is_active": true
         }
@@ -111,14 +111,14 @@ docker.io Accounts API
         {
             "id": 2,
             "username": "janedoe",
-            "url": "",
+            "url": "https://www.docker.io/api/v1.1/users/janedoe/",
             "date_joined": "2014-02-12T17:58:01.431312Z",
             "type": "User",
             "full_name": "Jane Doe",
             "location": "Private Island",
             "company": "Retired",
             "profile_url": "http://janedoe.com/",
-            "gravatar_email": "jane.doe+gravatar@example.com",
+            "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm"
             "email": "jane.doe@example.com",
             "is_active": true
         }

+ 4 - 1
docs/sources/reference/api/docker_remote_api.rst

@@ -22,6 +22,8 @@ Docker Remote API
 - Since API version 1.2, the auth configuration is now handled client
   side, so the client has to send the authConfig as POST in
   /images/(name)/push
+- authConfig, set as the ``X-Registry-Auth`` header, is currently a Base64-encoded (JSON) string with credentials:
+  ``{'username': string, 'password': string, 'email': string, 'serveraddress': string}`` (see the example below)
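+
+  For example, the header value can be generated like this (the credentials shown
+  are placeholders only):
+
+  .. code-block:: bash
+
+      $ echo -n '{"username":"janedoe","password":"secret","email":"jane.doe@example.com","serveraddress":"https://index.docker.io/v1/"}' | base64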
 
 2. Versions
 ===========
@@ -50,6 +52,7 @@ What's new
 
    **New!** You can now use the force parameter to force delete of an image, even if it's
    tagged in multiple repositories.
+   **New!** You can now use the noprune parameter to prevent the deletion of parent images.
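+
+   A hedged example of the corresponding request (host, port, and image name are
+   placeholders; the daemon must be listening on a TCP socket):
+
+   .. code-block:: bash
+
+      curl -X DELETE "http://localhost:4243/images/test-image?force=1&noprune=1"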
 
 .. http:delete:: /containers/(id)
 
@@ -203,7 +206,7 @@ What's new
 
 .. http:get:: /images/viz
 
-   This URI no longer exists.  The ``images -viz`` output is now generated in
+   This URI no longer exists.  The ``images --viz`` output is now generated in
    the client, using the ``/images/json`` data.
 
 v1.6

+ 4 - 2
docs/sources/reference/api/docker_remote_api_v1.10.rst

@@ -136,6 +136,7 @@ Create a container
                 },
                 "VolumesFrom":"",
                 "WorkingDir":"",
+                "DisableNetwork": false,
                 "ExposedPorts":{
                         "22/tcp": {}
                 }
@@ -931,6 +932,7 @@ Remove an image
            ]
 
         :query force: 1/True/true or 0/False/false, default false
+        :query noprune: 1/True/true or 0/False/false, default false
         :statuscode 200: no error
         :statuscode 404: no such image
         :statuscode 409: conflict
@@ -1276,8 +1278,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
-   docker -d -H="192.168.1.9:4243" -api-enable-cors
+   docker -d -H="192.168.1.9:4243" --api-enable-cors

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.2.rst

@@ -1045,7 +1045,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
     
-    docker -d -H="tcp://192.168.1.9:4243" -api-enable-cors
+    docker -d -H="tcp://192.168.1.9:4243" --api-enable-cors
 

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.3.rst

@@ -1124,7 +1124,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
     
-    docker -d -H="192.168.1.9:4243" -api-enable-cors
+    docker -d -H="192.168.1.9:4243" --api-enable-cors
 

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.4.rst

@@ -1168,9 +1168,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
-   docker -d -H="192.168.1.9:4243" -api-enable-cors
+   docker -d -H="192.168.1.9:4243" --api-enable-cors
 

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.5.rst

@@ -1137,8 +1137,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.6.rst

@@ -1274,9 +1274,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
-   docker -d -H="192.168.1.9:4243" -api-enable-cors
+   docker -d -H="192.168.1.9:4243" --api-enable-cors
 

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.7.rst

@@ -1254,9 +1254,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
-   docker -d -H="192.168.1.9:4243" -api-enable-cors
+   docker -d -H="192.168.1.9:4243" --api-enable-cors
 

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.8.rst

@@ -1287,8 +1287,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
-   docker -d -H="192.168.1.9:4243" -api-enable-cors
+   docker -d -H="192.168.1.9:4243" --api-enable-cors

+ 2 - 2
docs/sources/reference/api/docker_remote_api_v1.9.rst

@@ -1288,8 +1288,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
 3.3 CORS Requests
 -----------------
 
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
 
 .. code-block:: bash
 
-   docker -d -H="192.168.1.9:4243" -api-enable-cors
+   docker -d -H="192.168.1.9:4243" --api-enable-cors

+ 6 - 0
docs/sources/reference/api/remote_api_client_libraries.rst

@@ -41,7 +41,13 @@ and we will add the libraries here.
 +----------------------+----------------+--------------------------------------------+----------+
 | Go                   | go-dockerclient| https://github.com/fsouza/go-dockerclient  | Active   |
 +----------------------+----------------+--------------------------------------------+----------+
+| Go                   | dockerclient   | https://github.com/samalba/dockerclient    | Active   |
++----------------------+----------------+--------------------------------------------+----------+
 | PHP                  | Alvine         | http://pear.alvine.io/ (alpha)             | Active   |
 +----------------------+----------------+--------------------------------------------+----------+
 | PHP                  | Docker-PHP     | http://stage1.github.io/docker-php/        | Active   |
 +----------------------+----------------+--------------------------------------------+----------+
+| Perl                 | Net::Docker    | https://metacpan.org/pod/Net::Docker       | Active   |
++----------------------+----------------+--------------------------------------------+----------+
+| Perl                 | Eixo::Docker   | https://github.com/alambike/eixo-docker    | Active   |
++----------------------+----------------+--------------------------------------------+----------+

+ 46 - 43
docs/sources/reference/builder.rst

@@ -13,12 +13,10 @@ Dockerfile Reference
 to create an image. Executing ``docker build`` will run your steps and
 commit them along the way, giving you a final image.
 
-.. contents:: Table of Contents
-
 .. _dockerfile_usage:
 
-1. Usage
-========
+Usage
+=====
 
 To :ref:`build <cli_build>` an image from a source repository, create
 a description file called ``Dockerfile`` at the root of your
@@ -49,7 +47,7 @@ to be created - so ``RUN cd /tmp`` will not have any effect on the next
 instructions.
 
 Whenever possible, Docker will re-use the intermediate images, 
-accelerating ``docker build`` significantly (indicated by ``Using cache``:
+accelerating ``docker build`` significantly (indicated by ``Using cache``):
 
 .. code-block:: bash
 
@@ -71,8 +69,8 @@ When you're done with your build, you're ready to look into
 
 .. _dockerfile_format:
 
-2. Format
-=========
+Format
+======
 
 Here is the format of the Dockerfile:
 
@@ -99,16 +97,14 @@ allows statements like:
 
 .. _dockerfile_instructions:
 
-3. Instructions
-===============
 
 Here is the set of instructions you can use in a ``Dockerfile`` for
 building images.
 
 .. _dockerfile_from:
 
-3.1 FROM
---------
+``FROM``
+========
 
     ``FROM <image>``
 
@@ -134,8 +130,8 @@ assumed. If the used tag does not exist, an error will be returned.
 
 .. _dockerfile_maintainer:
 
-3.2 MAINTAINER
---------------
+``MAINTAINER``
+==============
 
     ``MAINTAINER <name>``
 
@@ -144,8 +140,8 @@ the generated images.
 
 .. _dockerfile_run:
 
-3.3 RUN
--------
+``RUN``
+=======
 
 RUN has 2 forms:
 
@@ -174,8 +170,8 @@ Known Issues (RUN)
 
 .. _dockerfile_cmd:
 
-3.4 CMD
--------
+``CMD``
+=======
 
 CMD has three forms:
 
@@ -192,9 +188,7 @@ omit the executable, in which case you must specify an ENTRYPOINT as
 well.
 
 When used in the shell or exec formats, the ``CMD`` instruction sets
-the command to be executed when running the image.  This is
-functionally equivalent to running ``docker commit -run '{"Cmd":
-<command>}'`` outside the builder.
+the command to be executed when running the image.
 
 If you use the *shell* form of the CMD, then the ``<command>`` will
 execute in ``/bin/sh -c``:
@@ -229,20 +223,20 @@ override the default specified in CMD.
 
 .. _dockerfile_expose:
 
-3.5 EXPOSE
-----------
+``EXPOSE``
+==========
 
     ``EXPOSE <port> [<port>...]``
 
-The ``EXPOSE`` instruction exposes ports for use within links. This is
-functionally equivalent to running ``docker commit -run '{"PortSpecs":
-["<port>", "<port2>"]}'`` outside the builder. Refer to
-:ref:`port_redirection` for detailed information.
+The ``EXPOSE`` instruction informs Docker that the container will listen
+on the specified network ports at runtime. Docker uses this information
+to interconnect containers using links (see :ref:`links <working_with_links_names>`),
+and to set up port redirection on the host system (see :ref:`port_redirection`).
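+
+As an illustrative sketch (the port number and image name here are hypothetical),
+an image whose server listens on port 8080 would declare that port, and a host
+mapping can then be chosen at run time:
+
+.. code-block:: bash
+
+    # In the Dockerfile
+    EXPOSE 8080
+
+    # When running the built image, publish the port on the host
+    sudo docker run -d -p 8080:8080 my_image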
 
 .. _dockerfile_env:
 
-3.6 ENV
--------
+``ENV``
+=======
 
     ``ENV <key> <value>``
 
@@ -262,8 +256,8 @@ from the resulting image. You can view the values using ``docker inspect``, and
 
 .. _dockerfile_add:
 
-3.7 ADD
--------
+``ADD``
+=======
 
     ``ADD <src> <dest>``
 
@@ -329,8 +323,8 @@ The copy obeys the following rules:
 
 .. _dockerfile_entrypoint:
 
-3.8 ENTRYPOINT
---------------
+``ENTRYPOINT``
+==============
 
 ENTRYPOINT has two forms:
 
@@ -378,8 +372,8 @@ this optional but default, you could use a CMD:
 
 .. _dockerfile_volume:
 
-3.9 VOLUME
-----------
+``VOLUME``
+==========
 
     ``VOLUME ["/data"]``
 
@@ -389,8 +383,8 @@ and mounting instructions via docker client, refer to :ref:`volume_def` document
 
 .. _dockerfile_user:
 
-3.10 USER
----------
+``USER``
+========
 
     ``USER daemon``
 
@@ -399,18 +393,27 @@ the image.
 
 .. _dockerfile_workdir:
 
-3.11 WORKDIR
-------------
+``WORKDIR``
+===========
 
     ``WORKDIR /path/to/workdir``
 
 The ``WORKDIR`` instruction sets the working directory for the ``RUN``, ``CMD`` and
 ``ENTRYPOINT``  Dockerfile commands that follow it.
 
-It can be used multiple times in the one Dockerfile.
+It can be used multiple times in the same Dockerfile.  If a relative path is
+provided, it will be relative to the path of the previous ``WORKDIR``
+instruction.  For example::
+
+    WORKDIR /a
+    WORKDIR b
+    WORKDIR c
+    RUN pwd
+
+The output of the final ``pwd`` command in this Dockerfile would be ``/a/b/c``.
 
-3.11 ONBUILD
-------------
+``ONBUILD``
+===========
 
     ``ONBUILD [INSTRUCTION]``
 
@@ -471,7 +474,7 @@ For example you might add something like this:
 
 .. _dockerfile_examples:
 
-4. Dockerfile Examples
+Dockerfile Examples
 ======================
 
 .. code-block:: bash
@@ -481,7 +484,7 @@ For example you might add something like this:
     # VERSION               0.0.1
 
     FROM      ubuntu
-    MAINTAINER Guillaume J. Charmes <guillaume@dotcloud.com>
+    MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
 
     # make sure the package repository is up to date
     RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list

+ 203 - 208
docs/sources/reference/commandline/cli.rst

@@ -52,7 +52,7 @@ Sometimes this can use a more complex value string, as for ``-v``::
 Strings and Integers
 ~~~~~~~~~~~~~~~~~~~~
 
-Options like ``-name=""`` expect a string, and they can only be
+Options like ``--name=""`` expect a string, and they can only be
 specified once. Options like ``-c=0`` expect an integer, and they can
 only be specified once.
 
@@ -74,36 +74,45 @@ Commands
       -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group
       --api-enable-cors=false: Enable CORS headers in the remote API
       -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
-      --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b
+      -bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b
       -d, --daemon=false: Enable daemon mode
       --dns=[]: Force docker to use specific DNS servers
+      --dns-search=[]: Force Docker to use specific DNS search domains
       -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime
       --icc=true: Enable inter-container communication
       --ip="0.0.0.0": Default IP address to use when binding container ports
-      --iptables=true: Disable docker's addition of iptables rules
+      --ip-forward=true: Enable net.ipv4.ip_forward
+      --iptables=true: Enable Docker's addition of iptables rules
       -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file
       -r, --restart=true: Restart previously running containers
       -s, --storage-driver="": Force the docker runtime to use a specific storage driver
       -e, --exec-driver="native": Force the docker runtime to use a specific exec driver
       -v, --version=false: Print version information and quit
+      --tls=false: Use TLS; implied by tls-verify flags
+      --tlscacert="~/.docker/ca.pem": Trust only remotes providing a certificate signed by the CA given here
+      --tlscert="~/.docker/cert.pem": Path to TLS certificate file
+      --tlskey="~/.docker/key.pem": Path to TLS key file
+      --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon)
       --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available
 
-The Docker daemon is the persistent process that manages containers.  Docker uses the same binary for both the 
+The Docker daemon is the persistent process that manages containers.  Docker uses the same binary for both the
 daemon and client.  To run the daemon you provide the ``-d`` flag.
 
 To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``.
 
-To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``.
+To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``.
+
+To set the DNS search domain for all Docker containers, use ``docker -d --dns-search example.com``.
 
 To run the daemon with debug output, use ``docker -d -D``.
 
 To use lxc as the execution driver, use ``docker -d -e lxc``.
 
 The docker client will also honor the ``DOCKER_HOST`` environment variable to set
-the ``-H`` flag for the client.  
+the ``-H`` flag for the client.
 
 ::
- 
+
         docker -H tcp://0.0.0.0:4243 ps
         # or
         export DOCKER_HOST="tcp://0.0.0.0:4243"
@@ -141,7 +150,7 @@ TMPDIR and the data directory can be set like this:
 
 You can detach from the container again (and leave it running) with
 ``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
-the Docker client when it quits.  When you detach from the container's 
+the Docker client when it quits.  When you detach from the container's
 process the exit code will be returned to the client.
 
 To stop a container, use ``docker stop``.
@@ -202,12 +211,16 @@ Examples:
       --no-cache: Do not use the cache when building the image.
       --rm=true: Remove intermediate containers after a successful build
 
-The files at ``PATH`` or ``URL`` are called the "context" of the build. The
-build process may refer to any of the files in the context, for example when
-using an :ref:`ADD <dockerfile_add>` instruction.  When a single ``Dockerfile``
-is given as ``URL``, then no context is set.  When a Git repository is set as
-``URL``, then the repository is used as the context. Git repositories are
-cloned with their submodules (`git clone --recursive`).
+The files at ``PATH`` or ``URL`` are called the "context" of the build.
+The build process may refer to any of the files in the context, for example when
+using an :ref:`ADD <dockerfile_add>` instruction.
+When a single ``Dockerfile`` is given as ``URL``, then no context is set.
+
+When a Git repository is set as ``URL``, then the repository is used as the context. 
+The Git repository is cloned with its submodules (`git clone --recursive`).
+A fresh git clone occurs in a temporary directory on your local host, and then this 
+is sent to the Docker daemon as the context. 
+This way, your local user credentials, VPN connections, and so on can be used to access private repositories.
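For instance, a build can point straight at a remote Git repository so that the clone becomes the context (a minimal sketch; the repository URL and tag below are made up for illustration):

.. code-block:: bash

    # clone the repository (and its submodules) locally, then send it to the
    # daemon as the build context
    $ sudo docker build -t example/app git://github.com/example/app.git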
 
 .. _cli_build_examples:
 
@@ -303,8 +316,6 @@ by using the ``git://`` schema.
 
       -m, --message="": Commit message
       -a, --author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-      --run="": Configuration to be applied when the image is launched with `docker run`.
-               (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
 
 .. _cli_commit_examples:
 
@@ -315,74 +326,14 @@ Commit an existing container
 
 	$ sudo docker ps
 	ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
-	c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
-	197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
+	c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
+	197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
 	$ docker commit c3f279d17e0a  SvenDowideit/testimage:version3
 	f5283438590d
 	$ docker images | head
 	REPOSITORY                        TAG                 ID                  CREATED             VIRTUAL SIZE
 	SvenDowideit/testimage            version3            f5283438590d        16 seconds ago      335.7 MB
-	
-Change the command that a container runs
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sometimes you have an application container running just a service and you need
-to make a quick change and then change it back.
-
-In this example, we run a container with ``ls`` and then change the image to
-run ``ls /etc``.
-
-.. code-block:: bash
-
-        $ docker run -t -name test ubuntu ls
-        bin  boot  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  selinux  srv  sys  tmp  usr  var
-        $ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
-        933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
-        $ docker run -t test2
-        adduser.conf            gshadow          login.defs           rc0.d
-        alternatives            gshadow-         logrotate.d          rc1.d
-        apt                     host.conf        lsb-base             rc2.d
-        ...
-
-Full -run example
-.................
-
-The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
-or ``config`` when running ``docker inspect IMAGEID``.
 
-(Multiline is okay within a single quote ``'``)
-
-.. code-block:: bash
-
-  $ sudo docker commit -run='
-  {
-      "Entrypoint" : null,
-      "Privileged" : false,
-      "User" : "",
-      "VolumesFrom" : "",
-      "Cmd" : ["cat", "-e", "/etc/resolv.conf"],
-      "Dns" : ["8.8.8.8", "8.8.4.4"],
-      "MemorySwap" : 0,
-      "AttachStdin" : false,
-      "AttachStderr" : false,
-      "CpuShares" : 0,
-      "OpenStdin" : false,
-      "Volumes" : null,
-      "Hostname" : "122612f45831",
-      "PortSpecs" : ["22", "80", "443"],
-      "Image" : "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
-      "Tty" : false,
-      "Env" : [
-         "HOME=/",
-         "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-      ],
-      "StdinOnce" : false,
-      "Domainname" : "",
-      "WorkingDir" : "/",
-      "NetworkDisabled" : false,
-      "Memory" : 0,
-      "AttachStdout" : false
-  }' $CONTAINER_ID
 
 .. _cli_cp:
 
@@ -486,16 +437,16 @@ Show events in the past from a specified time
 
 .. code-block:: bash
 
-    $ sudo docker events -since 1378216169
+    $ sudo docker events --since 1378216169
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
 
-    $ sudo docker events -since '2013-09-03'
+    $ sudo docker events --since '2013-09-03'
     [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
 
-    $ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
+    $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST'
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
     [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
 
@@ -535,35 +486,14 @@ To see how the ``docker:latest`` image was built:
 .. code-block:: bash
 
 	$ docker history docker
-	ID                  CREATED             CREATED BY
-	docker:latest       19 hours ago        /bin/sh -c #(nop) ADD . in /go/src/github.com/dotcloud/docker
-	cf5f2467662d        2 weeks ago         /bin/sh -c #(nop) ENTRYPOINT ["hack/dind"]
-	3538fbe372bf        2 weeks ago         /bin/sh -c #(nop) WORKDIR /go/src/github.com/dotcloud/docker
-	7450f65072e5        2 weeks ago         /bin/sh -c #(nop) VOLUME /var/lib/docker
-	b79d62b97328        2 weeks ago         /bin/sh -c apt-get install -y -q lxc
-	36714852a550        2 weeks ago         /bin/sh -c apt-get install -y -q iptables
-	8c4c706df1d6        2 weeks ago         /bin/sh -c /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEYn' > /.s3cfg
-	b89989433c48        2 weeks ago         /bin/sh -c pip install python-magic
-	a23e640d85b5        2 weeks ago         /bin/sh -c pip install s3cmd
-	41f54fec7e79        2 weeks ago         /bin/sh -c apt-get install -y -q python-pip
-	d9bc04add907        2 weeks ago         /bin/sh -c apt-get install -y -q reprepro dpkg-sig
-	e74f4760fa70        2 weeks ago         /bin/sh -c gem install --no-rdoc --no-ri fpm
-	1e43224726eb        2 weeks ago         /bin/sh -c apt-get install -y -q ruby1.9.3 rubygems libffi-dev
-	460953ae9d7f        2 weeks ago         /bin/sh -c #(nop) ENV GOPATH=/go:/go/src/github.com/dotcloud/docker/vendor
-	8b63eb1d666b        2 weeks ago         /bin/sh -c #(nop) ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/goroot/bin
-	3087f3bcedf2        2 weeks ago         /bin/sh -c #(nop) ENV GOROOT=/goroot
-	635840d198e5        2 weeks ago         /bin/sh -c cd /goroot/src && ./make.bash
-	439f4a0592ba        2 weeks ago         /bin/sh -c curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot
-	13967ed36e93        2 weeks ago         /bin/sh -c #(nop) ENV CGO_ENABLED=0
-	bf7424458437        2 weeks ago         /bin/sh -c apt-get install -y -q build-essential
-	a89ec997c3bf        2 weeks ago         /bin/sh -c apt-get install -y -q mercurial
-	b9f165c6e749        2 weeks ago         /bin/sh -c apt-get install -y -q git
-	17a64374afa7        2 weeks ago         /bin/sh -c apt-get install -y -q curl
-	d5e85dc5b1d8        2 weeks ago         /bin/sh -c apt-get update
-	13e642467c11        2 weeks ago         /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-	ae6dde92a94e        2 weeks ago         /bin/sh -c #(nop) MAINTAINER Solomon Hykes <solomon@dotcloud.com>
-	ubuntu:12.04        6 months ago
-
+        IMAGE                                                              CREATED             CREATED BY                                                                                                                                                 SIZE
+        3e23a5875458790b7a806f95f7ec0d0b2a5c1659bfc899c89f939f6d5b8f7094   8 days ago          /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8                                                                                                                       0 B
+        8578938dd17054dce7993d21de79e96a037400e8d28e15e7290fea4f65128a36   8 days ago          /bin/sh -c dpkg-reconfigure locales &&    locale-gen C.UTF-8 &&    /usr/sbin/update-locale LANG=C.UTF-8                                                    1.245 MB
+        be51b77efb42f67a5e96437b3e102f81e0a1399038f77bf28cea0ed23a65cf60   8 days ago          /bin/sh -c apt-get update && apt-get install -y    git    libxml2-dev    python    build-essential    make    gcc    python-dev    locales    python-pip   338.3 MB
+        4b137612be55ca69776c7f30c2d2dd0aa2e7d72059820abf3e25b629f887a084   6 weeks ago         /bin/sh -c #(nop) ADD jessie.tar.xz in /                                                                                                                   121 MB
+        750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada   6 weeks ago         /bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian             0 B
+        511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158   9 months ago                                                                                                                                                                   0 B
+	
 .. _cli_images:
 
 ``images``
@@ -575,11 +505,16 @@ To see how the ``docker:latest`` image was built:
 
     List images
 
-      -a, --all=false: Show all images (by default filter out the intermediate images used to build)
+      -a, --all=false: Show all images (by default filter out the intermediate image layers)
       --no-trunc=false: Don't truncate output
       -q, --quiet=false: Only show numeric IDs
-      --tree=false: Output graph in tree format
-      --viz=false: Output graph in graphviz format
+
+The default ``docker images`` will show all top level images, their repository
+and tags, and their virtual size.
+
+Docker images have intermediate layers that increase reusability, decrease
+disk usage, and speed up ``docker build`` by allowing each step to be cached.
+These intermediate layers are not shown by default.
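To include those intermediate layers in the listing, the ``-a``/``--all`` flag described above can be used (a quick sketch):

.. code-block:: bash

    # show every image layer, including untagged intermediate ones
    $ sudo docker images -a
    # keep the full-length image IDs as well
    $ sudo docker images -a --no-trunc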
 
 Listing the most recently created images
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -615,46 +550,6 @@ Listing the full length image IDs
 	tryout                        latest              2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074   23 hours ago        131.5 MB
 	<none>                        <none>              5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df   24 hours ago        1.089 GB
 
-Displaying images visually
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: bash
-
-    $ sudo docker images --viz | dot -Tpng -o docker.png
-
-.. image:: docker_images.gif
-   :alt: Example inheritance graph of Docker images.
-
-
-Displaying image hierarchy
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: bash
-
-    $ sudo docker images --tree
-
-    ├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
-    └─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
-      └─b750fe79269d Size: 24.65 kB (virtual 180.1 MB) Tags: ubuntu:12.10,ubuntu:quantal
-        ├─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
-        │ └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
-        │   └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
-        │     └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
-        │       └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
-        │         └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
-        └─17e74ac162d8 Size: 53.93 kB (virtual 180.2 MB)
-          └─339a3f56b760 Size: 24.65 kB (virtual 180.2 MB)
-            └─904fcc40e34d Size: 96.7 MB (virtual 276.9 MB)
-              └─b1b0235328dd Size: 363.3 MB (virtual 640.2 MB)
-                └─7cb05d1acb3b Size: 20.48 kB (virtual 640.2 MB)
-                  └─47bf6f34832d Size: 20.48 kB (virtual 640.2 MB)
-                    └─f165104e82ed Size: 12.29 kB (virtual 640.2 MB)
-                      └─d9cf85a47b7e Size: 1.911 MB (virtual 642.2 MB)
-                        └─3ee562df86ca Size: 17.07 kB (virtual 642.2 MB)
-                          └─b05fc2d00e4a Size: 24.96 kB (virtual 642.2 MB)
-                            └─c96a99614930 Size: 12.29 kB (virtual 642.2 MB)
-                              └─a6a357a48c49 Size: 12.29 kB (virtual 642.2 MB) Tags: ndj/mongodb:latest
-
 .. _cli_import:
 
 ``import``
@@ -664,7 +559,7 @@ Displaying image hierarchy
 
     Usage: docker import URL|- [REPOSITORY[:TAG]]
 
-    Create an empty filesystem image and import the contents of the tarball 
+    Create an empty filesystem image and import the contents of the tarball
     (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
 
 At this time, the URL must start with ``http`` and point to a single
@@ -731,34 +626,6 @@ preserved.
 	WARNING: No swap limit support
 
 
-.. _cli_insert:
-
-``insert``
-----------
-
-::
-
-    Usage: docker insert IMAGE URL PATH
-
-    Insert a file from URL in the IMAGE at PATH
-
-Use the specified ``IMAGE`` as the parent for a new image which adds a
-:ref:`layer <layer_def>` containing the new file. The ``insert`` command does
-not modify the original image, and the new image has the contents of the parent
-image, plus the new file.
-
-
-Examples
-~~~~~~~~
-
-Insert file from GitHub
-.......................
-
-.. code-block:: bash
-
-    $ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
-    06fd35556d7b
-
 .. _cli_inspect:
 
 ``inspect``
@@ -799,7 +666,7 @@ text output:
 
 .. code-block:: bash
 
-    $ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+    $ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
 
 Find a Specific Port Mapping
 ............................
@@ -814,7 +681,7 @@ we ask for the ``HostPort`` field to get the public address.
 
 .. code-block:: bash
 
-    $ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+    $ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
 
 Get config
 ..........
@@ -826,7 +693,7 @@ to convert config object into JSON
 
 .. code-block:: bash
 
-    $ sudo docker inspect -format='{{json .config}}' $INSTANCE_ID
+    $ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID
 
 
 .. _cli_kill:
@@ -859,10 +726,32 @@ Known Issues (kill)
 
 ::
 
-    Usage: docker load < repository.tar
+    Usage: docker load 
+
+    Load an image from a tar archive on STDIN
+
+      -i, --input="": Read from a tar archive file, instead of STDIN
+
+Loads a tarred repository from a file or the standard input stream.
+Restores both images and tags.
+
+.. code-block:: bash
+
+   $ sudo docker images
+   REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+   $ sudo docker load < busybox.tar
+   $ sudo docker images
+   REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+   busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+   $ sudo docker load --input fedora.tar
+   $ sudo docker images
+   REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+   busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+   fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
+   fedora              20                  58394af37342        7 weeks ago         385.5 MB
+   fedora              heisenbug           58394af37342        7 weeks ago         385.5 MB
+   fedora              latest              58394af37342        7 weeks ago         385.5 MB
 
-    Loads a tarred repository from the standard input stream.
-    Restores both images and tags.
 
 .. _cli_login:
 
@@ -933,8 +822,14 @@ new output from the container's stdout and stderr.
     List containers
 
       -a, --all=false: Show all containers. Only running containers are shown by default.
+      --before="": Show only container created before Id or Name, include non-running ones.
+      -l, --latest=false: Show only the latest created container, include non-running ones.
+      -n=-1: Show n last created containers, include non-running ones.
       --no-trunc=false: Don't truncate output
       -q, --quiet=false: Only display numeric IDs
+      -s, --size=false: Display sizes, not to be used with -q
+      --since="": Show only containers created since Id or Name, include non-running ones.
+
 
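The new listing flags above can be combined; for example, to look at recently created containers whether or not they are still running (a sketch, reusing the ``webapp`` container name from the listing below):

.. code-block:: bash

    # the 3 most recently created containers, running or not
    $ sudo docker ps -n 3
    # only the latest created container
    $ sudo docker ps -l
    # everything created since the container named "webapp"
    $ sudo docker ps -a --since=webapp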
 Running ``docker ps`` showing 2 linked containers.
 
@@ -942,7 +837,7 @@ Running ``docker ps`` showing 2 linked containers.
 
     $ docker ps
     CONTAINER ID        IMAGE                        COMMAND                CREATED              STATUS              PORTS               NAMES
-    4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds                           webapp              
+    4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds                           webapp
     d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago       Up 33 minutes       6379/tcp            redis,webapp/db
     fd2645e2e2b5        busybox:latest               top                    10 days ago          Ghost                                   insane_ptolemy
 
@@ -957,7 +852,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
 
 ::
 
-    Usage: docker pull NAME
+    Usage: docker pull NAME[:TAG]
 
     Pull an image or a repository from the registry
 
@@ -969,7 +864,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
 
 ::
 
-    Usage: docker push NAME
+    Usage: docker push NAME[:TAG]
 
     Push an image or a repository to the registry
 
@@ -985,6 +880,8 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
 
     Restart a running container
 
+       -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10
+
 .. _cli_rm:
 
 ``rm``
@@ -997,6 +894,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
     Remove one or more containers
         -l, --link="": Remove the link instead of the actual container
         -f, --force=false: Force removal of running container
+        -v, --volumes=false: Remove the volumes associated with the container
 
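With the new ``-v`` flag, a container's volumes can be cleaned up at the same time (a minimal sketch; ``/redis`` stands in for whatever container you are removing):

.. code-block:: bash

    # remove the container and the volumes that were created for it
    $ sudo docker rm -v /redis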
 Known Issues (rm)
 ~~~~~~~~~~~~~~~~~
@@ -1047,7 +945,8 @@ containers will not be deleted.
     Remove one or more images
 
       -f, --force=false: Force
-    
+      --no-prune=false: Do not delete untagged parents
+
 Removing tagged images
 ~~~~~~~~~~~~~~~~~~~~~~
 
@@ -1096,7 +995,8 @@ image is removed.
       --cidfile="": Write the container ID to the file
       -d, --detach=false: Detached mode: Run container in the background, print new container id
       -e, --env=[]: Set environment variables
-      -h, --host="": Container host name
+      --env-file="": Read in a line delimited file of ENV variables
+      -h, --hostname="": Container host name
       -i, --interactive=false: Keep stdin open even if not attached
       --privileged=false: Give extended privileges to this container
       -m, --memory="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
@@ -1106,11 +1006,12 @@ image is removed.
       -t, --tty=false: Allocate a pseudo-tty
       -u, --user="": Username or UID
       --dns=[]: Set custom dns servers for the container
+      --dns-search=[]: Set custom DNS search domains for the container
       -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume.
       --volumes-from="": Mount all volumes from the given container(s)
       --entrypoint="": Overwrite the default entrypoint set by the image
       -w, --workdir="": Working directory inside the container
-      --lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+      --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
       --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
       --expose=[]: Expose a port from the container without publishing it to your host
       --link="": Add link to another container (name:alias)
@@ -1126,12 +1027,12 @@ Once the container is stopped it still exists and can be started back up.  See `
 The ``docker run`` command can be used in combination with ``docker commit`` to
 :ref:`change the command that a container runs <cli_commit_examples>`.
 
-See :ref:`port_redirection` for more detailed information about the ``--expose``, 
-``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for 
+See :ref:`port_redirection` for more detailed information about the ``--expose``,
+``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for
 specific examples using ``--link``.
 
-Known Issues (run -volumes-from)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Known Issues (run --volumes-from)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 * :issue:`2702`: "lxc-start: Permission denied - failed to mount"
   could indicate a permissions problem with AppArmor. Please see the
@@ -1158,7 +1059,7 @@ error. Docker will close this file when ``docker run`` exits.
 
 This will *not* work, because by default, most potentially dangerous
 kernel capabilities are dropped; including ``cap_sys_admin`` (which is
-required to mount filesystems). However, the ``-privileged`` flag will
+required to mount filesystems). However, the ``--privileged`` flag will
 allow it to run:
 
 .. code-block:: bash
@@ -1170,7 +1071,7 @@ allow it to run:
    none            1.9G     0  1.9G   0% /mnt
 
 
-The ``-privileged`` flag gives *all* capabilities to the container,
+The ``--privileged`` flag gives *all* capabilities to the container,
 and it also lifts all the limitations enforced by the ``device``
 cgroup controller. In other words, the container can then do almost
 everything that the host can do. This flag exists to allow special
@@ -1207,8 +1108,8 @@ starting your container.
 
    $ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh
 
-By bind-mounting the docker unix socket and statically linked docker binary 
-(such as that provided by https://get.docker.io), you give the container 
+By bind-mounting the docker unix socket and statically linked docker binary
+(such as that provided by https://get.docker.io), you give the container
 the full access to create and manipulate the host's docker daemon.
 
 .. code-block:: bash
@@ -1227,6 +1128,54 @@ This exposes port ``80`` of the container for use within a link without
 publishing the port to the host system's interfaces. :ref:`port_redirection`
 explains in detail how to manipulate ports in Docker.
 
+.. code-block:: bash
+
+    $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+
+This sets environment variables in the container. For illustration, all three
+flags are shown here. ``-e`` and ``--env`` take an environment variable and
+value, or, if no "=" is provided, then that variable's current value is passed
+through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the container). All
+three flags, ``-e``, ``--env`` and ``--env-file``, can be repeated.
+
+Regardless of the order of these three flags, the ``--env-file`` flags are processed
+first, and then the ``-e``/``--env`` flags. This way, ``-e`` or ``--env`` can
+override variables as needed.
+
+.. code-block:: bash
+
+    $ cat ./env.list
+    TEST_FOO=BAR
+    $ sudo docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO
+    TEST_FOO=This is a test
+
+The ``--env-file`` flag takes a filename as an argument and expects each line
+to be in the VAR=VAL format, mimicking the argument passed to ``--env``.
+Comment lines need only be prefixed with ``#``.
+
+An example of a file passed with ``--env-file``:
+
+.. code-block:: bash
+
+    $ cat ./env.list
+    TEST_FOO=BAR
+
+    # this is a comment
+    TEST_APP_DEST_HOST=10.10.0.127
+    TEST_APP_DEST_PORT=8888
+
+    # pass through this variable from the caller
+    TEST_PASSTHROUGH
+    $ sudo TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env
+    HOME=/
+    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+    HOSTNAME=5198e0745561
+    TEST_FOO=BAR
+    TEST_APP_DEST_HOST=10.10.0.127
+    TEST_APP_DEST_PORT=8888
+    TEST_PASSTHROUGH=howdy
+
+
 .. code-block:: bash
 
     $ sudo docker run --name console -t -i ubuntu bash
@@ -1255,6 +1204,35 @@ ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
 read-only or read-write mode, respectively. By default, the volumes are mounted
 in the same mode (read write or read only) as the reference container.
 
+The ``-a`` flag tells ``docker run`` to bind to the container's stdin, stdout
+or stderr. This makes it possible to manipulate the output and input as needed.
+
+.. code-block:: bash
+
+   $ echo "test" | sudo docker run -i -a stdin ubuntu cat -
+
+This pipes data into a container and prints the container's ID by attaching
+only to the container's stdin.
+
+.. code-block:: bash
+
+   $ sudo docker run -a stderr ubuntu echo test
+
+This isn't going to print anything unless there's an error because we've only
+attached to the stderr of the container. The container's logs still store
+what's been written to stderr and stdout.
+
+.. code-block:: bash
+
+   $ cat somefile | sudo docker run -i -a stdin mybuilder dobuild
+
+This is how piping a file into a container could be done for a build.
+The container's ID will be printed after the build is done and the build logs
+could be retrieved using ``docker logs``. This is useful if you need to pipe
+a file or something else into a container and retrieve the container's ID once
+the container has finished running.
+
+
 A complete example
 ..................
 
@@ -1263,7 +1241,7 @@ A complete example
    $ sudo docker run -d --name static static-web-files sh
    $ sudo docker run -d --expose=8098 --name riak riakserver
    $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver
-   $ sudo docker run -d -p 1443:443 --dns=dns.dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
+   $ sudo docker run -d -p 1443:443 --dns=dns.dev.org --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
    $ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log
 
 This example shows 5 containers that might be set up to test a web application change:
@@ -1271,8 +1249,8 @@ This example shows 5 containers that might be set up to test a web application c
 1. Start a pre-prepared volume image ``static-web-files`` (in the background) that has CSS, image and static HTML in it, (with a ``VOLUME`` instruction in the ``Dockerfile`` to allow the web server to use those files);
 2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it;
 3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``;
-4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate;
-5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``-rm`` option means that when the container exits, the container's layer is removed.
+4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org`` and DNS search domain to ``dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``www.sven.dev.org`` so it's consistent with the pre-generated SSL certificate;
+5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``--rm`` option means that when the container exits, the container's layer is removed.
 
 
 .. _cli_save:
@@ -1282,10 +1260,27 @@ This example shows 5 containers that might be set up to test a web application c
 
 ::
 
-    Usage: docker save image > repository.tar
+    Usage: docker save IMAGE
+
+    Save an image to a tar archive (streamed to stdout by default)
+
+      -o, --output="": Write to a file, instead of STDOUT
+
+
+Produces a tarred repository to the standard output stream.
+Contains all parent layers, and all tags + versions, or specified repo:tag.
+
+.. code-block:: bash
+
+   $ sudo docker save busybox > busybox.tar
+   $ ls -sh busybox.tar
+   2.7M busybox.tar
+   $ sudo docker save --output busybox.tar busybox
+   $ ls -sh busybox.tar
+   2.7M busybox.tar
+   $ sudo docker save -o fedora-all.tar fedora
+   $ sudo docker save -o fedora-latest.tar fedora:latest
 
-    Streams a tarred repository to the standard output stream.
-    Contains all parent layers, and all tags + versions.
 
 .. _cli_search:
 

+ 49 - 48
docs/sources/reference/run.rst

@@ -80,7 +80,7 @@ through network connections or shared volumes because the container is
 no longer listening to the commandline where you executed ``docker
 run``. You can reattach to a detached container with ``docker``
 :ref:`cli_attach`. If you choose to run a container in the detached
-mode, then you cannot use the ``-rm`` option.
+mode, then you cannot use the ``--rm`` option.
 
 Foreground
 ..........
@@ -92,10 +92,10 @@ error. It can even pretend to be a TTY (this is what most commandline
 executables expect) and pass along signals. All of that is
 configurable::
 
-   -a=[]          : Attach to ``stdin``, ``stdout`` and/or ``stderr``
-   -t=false       : Allocate a pseudo-tty
-   -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
-   -i=false       : Keep STDIN open even if not attached
+   -a=[]           : Attach to ``stdin``, ``stdout`` and/or ``stderr``
+   -t=false        : Allocate a pseudo-tty
+   --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
+   -i=false        : Keep STDIN open even if not attached
 
 If you do not specify ``-a`` then Docker will `attach everything
 (stdin,stdout,stderr)
@@ -112,8 +112,8 @@ as well as persistent standard input (``stdin``), so you'll use ``-i
 Container Identification
 ------------------------
 
-Name (-name)
-............
+Name (--name)
+.............
 
 The operator can identify a container in three ways:
 
@@ -122,7 +122,7 @@ The operator can identify a container in three ways:
 * Name ("evil_ptolemy")
 
 The UUID identifiers come from the Docker daemon, and if you do not
-assign a name to the container with ``-name`` then the daemon will
+assign a name to the container with ``--name`` then the daemon will
 also generate a random string name too. The name can become a handy
 way to add meaning to a container since you can use this name when
 defining :ref:`links <working_with_links_names>` (or any other place
@@ -137,7 +137,7 @@ container ID out to a file of your choosing. This is similar to how
 some programs might write out their process ID to a file (you've seen
 them as PID files)::
 
-      -cidfile="": Write the container ID to the file
+      --cidfile="": Write the container ID to the file
 
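As a sketch, the recorded ID can then be read back by other tooling (the file path below is arbitrary):

.. code-block:: bash

    $ sudo docker run --cidfile /tmp/web.cid -d ubuntu sleep 60
    # the full container ID that Docker wrote out
    $ cat /tmp/web.cid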
 Network Settings
 ----------------
@@ -145,7 +145,7 @@ Network Settings
 ::
 
    -n=true   : Enable networking for this container
-   -dns=[]   : Set custom dns servers for the container
+   --dns=[]  : Set custom dns servers for the container
 
 By default, all containers have networking enabled and they can make
 any outgoing connections. The operator can completely disable
@@ -154,10 +154,10 @@ networking. In cases like this, you would perform I/O through files or
 STDIN/STDOUT only.
 
 Your container will use the same DNS servers as the host by default,
-but you can override this with ``-dns``.
+but you can override this with ``--dns``.
 
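For example (a sketch using the same public DNS address that appears in the daemon examples):

.. code-block:: bash

    # resolv.conf inside the container now points at the requested server
    $ sudo docker run --dns=8.8.8.8 ubuntu cat /etc/resolv.conf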
-Clean Up (-rm)
---------------
+Clean Up (--rm)
+---------------
 
 By default a container's file system persists even after the container
 exits. This makes debugging a lot easier (since you can inspect the
@@ -165,9 +165,9 @@ final state) and you retain all your data by default. But if you are
 running short-term **foreground** processes, these container file
 systems can really pile up. If instead you'd like Docker to
 **automatically clean up the container and remove the file system when
-the container exits**, you can add the ``-rm`` flag::
+the container exits**, you can add the ``--rm`` flag::
 
-   -rm=false: Automatically remove the container when it exits (incompatible with -d)
+   --rm=false: Automatically remove the container when it exits (incompatible with -d)
 
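A typical interactive use looks like this (a sketch; the shell session is throwaway, so nothing is left behind on exit):

.. code-block:: bash

    $ sudo docker run --rm -i -t ubuntu /bin/bash
    # ... work inside the container ...
    # exiting the shell removes the container and its file system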
 
 Runtime Constraints on CPU and Memory
@@ -193,8 +193,8 @@ Runtime Privilege and LXC Configuration
 
 ::
 
-   -privileged=false: Give extended privileges to this container
-   -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+   --privileged=false: Give extended privileges to this container
+   --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
 
 By default, Docker containers are "unprivileged" and cannot, for
 example, run a Docker daemon inside a Docker container. This is
@@ -203,20 +203,21 @@ but a "privileged" container is given access to all devices (see
 lxc-template.go_ and documentation on `cgroups devices
 <https://www.kernel.org/doc/Documentation/cgroups/devices.txt>`_).
 
-When the operator executes ``docker run -privileged``, Docker will
+When the operator executes ``docker run --privileged``, Docker will
 enable access to all devices on the host as well as set some
 configuration in AppArmor to allow the container nearly all the same
 access to the host as processes running outside containers on the
-host. Additional information about running with ``-privileged`` is
+host. Additional information about running with ``--privileged`` is
 available on the `Docker Blog
 <http://blog.docker.io/2013/09/docker-can-now-run-within-docker/>`_.
 
-An operator can also specify LXC options using one or more
-``-lxc-conf`` parameters. These can be new parameters or override
-existing parameters from the lxc-template.go_. Note that in the
-future, a given host's Docker daemon may not use LXC, so this is an
-implementation-specific configuration meant for operators already
-familiar with using LXC directly.
+If the Docker daemon was started using the ``lxc`` exec-driver
+(``docker -d --exec-driver=lxc``) then the operator can also specify
+LXC options using one or more ``--lxc-conf`` parameters. These can be
+new parameters or override existing parameters from the lxc-template.go_.
+Note that in the future, a given host's Docker daemon may not use LXC,
+so this is an implementation-specific configuration meant for operators
+already familiar with using LXC directly.
 
 .. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go
 
@@ -260,7 +261,7 @@ ENTRYPOINT (Default Command to Execute at Runtime
 
 ::
 
-   -entrypoint="": Overwrite the default entrypoint set by the image
+   --entrypoint="": Overwrite the default entrypoint set by the image
 
 The ENTRYPOINT of an image is similar to a ``COMMAND`` because it
 specifies what executable to run when the container starts, but it is
@@ -274,12 +275,12 @@ runtime by using a string to specify the new ``ENTRYPOINT``. Here is an
 example of how to run a shell in a container that has been set up to
 automatically run something else (like ``/usr/bin/redis-server``)::
 
-  docker run -i -t -entrypoint /bin/bash example/redis
+  docker run -i -t --entrypoint /bin/bash example/redis
 
 or two examples of how to pass more parameters to that ENTRYPOINT::
 
-  docker run -i -t -entrypoint /bin/bash example/redis -c ls -l
-  docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help
+  docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
+  docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
 
 
 EXPOSE (Incoming Ports)
@@ -290,16 +291,16 @@ providing the ``EXPOSE`` instruction to give a hint to the operator
 about what incoming ports might provide services. The following
 options work with or override the ``Dockerfile``'s exposed defaults::
 
-   -expose=[]: Expose a port from the container 
+   --expose=[]: Expose a port from the container 
                without publishing it to your host
-   -P=false  : Publish all exposed ports to the host interfaces
-   -p=[]     : Publish a container's port to the host (format: 
-               ip:hostPort:containerPort | ip::containerPort | 
-               hostPort:containerPort) 
-               (use 'docker port' to see the actual mapping)
-   -link=""  : Add link to another container (name:alias)
-
-As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port
+   -P=false   : Publish all exposed ports to the host interfaces
+   -p=[]      : Publish a container's port to the host (format: 
+                ip:hostPort:containerPort | ip::containerPort | 
+                hostPort:containerPort) 
+                (use 'docker port' to see the actual mapping)
+   --link=""  : Add link to another container (name:alias)
+
+As mentioned previously, ``EXPOSE`` (and ``--expose``) make a port
 available **in** a container for incoming connections. The port number
 on the inside of the container (where the service listens) does not
 need to be the same number as the port exposed on the outside of the
@@ -308,16 +309,16 @@ have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in
 the ``Dockerfile``), but outside the container the port might be 42800.
 
 To help a new client container reach the server container's internal
-port operator ``-expose``'d by the operator or ``EXPOSE``'d by the
+port ``--expose``'d by the operator or ``EXPOSE``'d by the
 developer, the operator has three choices: start the server container
-with ``-P`` or ``-p,`` or start the client container with ``-link``.
+with ``-P`` or ``-p``, or start the client container with ``--link``.
 
 If the operator uses ``-P`` or ``-p`` then Docker will make the
 exposed port accessible on the host and the ports will be available to
 any client that can reach the host. To find the map between the host
 ports and the exposed ports, use ``docker port``)
 
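A short sketch of the ``-p`` plus ``docker port`` round trip (``example/web`` is a hypothetical image whose Dockerfile has ``EXPOSE 80``; the output shown is illustrative):

.. code-block:: bash

    # publish container port 80 on 127.0.0.1:8080 of the host
    $ sudo docker run -d -p 127.0.0.1:8080:80 --name web example/web
    # ask Docker what the mapping actually is
    $ sudo docker port web 80
    127.0.0.1:8080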
-If the operator uses ``-link`` when starting the new client container,
+If the operator uses ``--link`` when starting the new client container,
 then the client container can access the exposed port via a private
 networking interface. Docker will set some environment variables in
 the client container to help indicate which interface and port to use.
@@ -329,7 +330,7 @@ The operator can **set any environment variable** in the container by
 using one or more ``-e`` flags, even overriding those already defined by the
 developer with a Dockerfile ``ENV``::
 
-   $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export
+   $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
    declare -x HOME="/"
    declare -x HOSTNAME="85bc26a0e200"
    declare -x OLDPWD
@@ -341,13 +342,13 @@ developer with a Dockefile ``ENV``::
 
 Similarly the operator can set the **hostname** with ``-h``.
 
-``-link name:alias`` also sets environment variables, using the
+``--link name:alias`` also sets environment variables, using the
 *alias* string to define environment variables within the container
 that give the IP and PORT information for connecting to the service
 container. Let's imagine we have a container running Redis::
 
    # Start the service container, named redis-name
-   $ docker run -d -name redis-name dockerfiles/redis
+   $ docker run -d --name redis-name dockerfiles/redis
    4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3
 
    # The redis-name container exposed port 6379
@@ -361,12 +362,12 @@ container. Let's imagine we have a container running Redis::
 
 
 Yet we can get information about the Redis container's exposed ports
-with ``-link``. Choose an alias that will form a valid environment
+with ``--link``. Choose an alias that will form a valid environment
 variable!
 
 ::
 
-   $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export
+   $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export
    declare -x HOME="/"
    declare -x HOSTNAME="acda7f7b1cdc"
    declare -x OLDPWD
@@ -383,7 +384,7 @@ variable!
 
 And we can use that information to connect from another container as a client::
 
-   $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
+   $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
    172.17.0.32:6379>
 
 VOLUME (Shared Filesystems)
@@ -393,7 +394,7 @@ VOLUME (Shared Filesystems)
 
    -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. 
           If "container-dir" is missing, then docker creates a new volume.
-   -volumes-from="": Mount all volumes from the given container(s)
+   --volumes-from="": Mount all volumes from the given container(s)
 
 The volumes commands are complex enough to have their own
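A minimal sketch of a read-only bind mount (the host directory ``/srv/data`` is hypothetical):

.. code-block:: bash

    # the host's /srv/data appears inside the container at /data, read-only
    $ sudo docker run -i -t -v /srv/data:/data:ro ubuntu ls /data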
 documentation in section :ref:`volume_def`. A developer can define one

BIN
docs/sources/terms/images/docker-filesystems-busyboxrw.png


BIN
docs/sources/terms/images/docker-filesystems-debian.png


BIN
docs/sources/terms/images/docker-filesystems-debianrw.png


BIN
docs/sources/terms/images/docker-filesystems-generic.png


BIN
docs/sources/terms/images/docker-filesystems-multilayer.png


BIN
docs/sources/terms/images/docker-filesystems-multiroot.png


+ 63 - 68
docs/sources/terms/images/docker-filesystems.svg

@@ -11,7 +11,7 @@
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
    inkscape:export-ydpi="90"
    inkscape:export-xdpi="90"
-   inkscape:export-filename="/Users/arothfusz/src/metalivedev/docker/docs/sources/terms/images/docker-filesystems-multiroot.png"
+   inkscape:export-filename="/Users/arothfusz/src/metalivedev/dockerclone/docs/sources/terms/images/docker-filesystems-multilayer.png"
    sodipodi:docname="docker-filesystems.svg"
    width="800"
    height="600"
@@ -26,10 +26,10 @@
      inkscape:pageopacity="0.0"
      inkscape:pageshadow="2"
      inkscape:zoom="0.82666667"
-     inkscape:cx="236.08871"
+     inkscape:cx="495.95588"
      inkscape:cy="300"
      inkscape:document-units="px"
-     inkscape:current-layer="layer2"
+     inkscape:current-layer="layer13"
      showgrid="false"
      width="800px"
      inkscape:window-width="1327"
@@ -98,6 +98,32 @@
   </sodipodi:namedview>
   <defs
      id="defs4">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path4054"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Lend"
+       style="overflow:visible;">
+      <path
+         id="path4048"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+         transform="scale(0.8) rotate(180) translate(12.5,0)" />
+    </marker>
     <inkscape:perspective
        sodipodi:type="inkscape:persp3d"
        inkscape:vp_x="-406.34117 : 522.93291 : 1"
@@ -149,7 +175,7 @@
         <dc:format>image/svg+xml</dc:format>
         <dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-        <dc:title />
+        <dc:title></dc:title>
       </cc:Work>
     </rdf:RDF>
   </metadata>
@@ -294,70 +320,9 @@
          d="m 514.91047,422.62215 c 0,0 -1.06434,42.27288 -1.06434,42.27288 0,0 4.45362,-2.8241 4.45362,-2.8241 0.2761,-0.17507 0.46813,-0.15759 0.57629,0.0523 0.10868,0.18619 0.15712,0.50328 0.14534,0.95133 -0.0112,0.42443 -0.0782,0.81493 -0.20113,1.17164 -0.12299,0.35687 -0.32235,0.62363 -0.59831,0.80035 0,0 -10.15763,6.50487 -10.15763,6.50487 -0.27917,0.17878 -0.476,0.16246 -0.5903,-0.0494 -0.11437,-0.21191 -0.16642,-0.53506 -0.15609,-0.96944 0.0109,-0.45857 0.0801,-0.85922 0.20776,-1.20182 0.12814,-0.36656 0.33197,-0.63844 0.61129,-0.81556 0,0 4.56188,-2.89274 4.56188,-2.89274 0,0 0.97884,-39.26779 0.97884,-39.26779 0,0 -3.35907,1.85407 -3.35907,1.85407 -0.27977,0.15447 -0.48159,0.1208 -0.60529,-0.10124 -0.11445,-0.22726 -0.16609,-0.57399 -0.15489,-1.04015 0.0106,-0.44163 0.0802,-0.843 0.20889,-1.204 0.12859,-0.36073 0.33761,-0.62003 0.62686,-0.77784 0,0 4.51628,-2.46343 4.51628,-2.46343"
          inkscape:connector-curvature="0" />
     </g>
-    <g
-       id="text3655"
-       style="font-size:40px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial;-inkscape-font-specification:Arial"
-       transform="matrix(0.67123869,0,0,0.67123869,53.68199,126.56876)">
-      <path
-         id="path3662"
-         d="m 132.8684,367.78607 c 0,0 0.71572,-54.35962 0.71572,-54.35962 0,0 2.66242,1.51122 2.66242,1.51122 0,0 -0.71153,54.62187 -0.71153,54.62187 0,0 -2.66661,-1.77347 -2.66661,-1.77347"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3664"
-         d="m 137.92667,371.15014 c 0,0 6.14809,-16.99741 6.14809,-16.99741 0,0 -5.19986,-22.51479 -5.19986,-22.51479 0,0 3.39897,2.02031 3.39897,2.02031 0,0 2.36954,10.8944 2.36954,10.8944 0.44814,2.07993 0.80843,3.81608 1.08051,5.20679 0.47284,-1.39022 0.90795,-2.61465 1.30519,-3.67276 0,0 2.89882,-7.87895 2.89882,-7.87895 0,0 3.37501,2.00607 3.37501,2.00607 0,0 -5.97372,15.60005 -5.97372,15.60005 0,0 5.92178,25.52797 5.92178,25.52797 0,0 -3.4783,-2.3133 -3.4783,-2.3133 0,0 -3.23409,-14.8189 -3.23409,-14.8189 0,0 -0.8528,-3.95585 -0.8528,-3.95585 0,0 -4.46772,13.08538 -4.46772,13.08538 0,0 -3.29142,-2.18901 -3.29142,-2.18901"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3666"
-         d="m 166.82131,374.91047 c 0,0 2.93572,2.79373 2.93572,2.79373 -0.37761,4.62343 -1.24922,7.86985 -2.61073,9.73548 -1.34456,1.83887 -2.96947,2.11901 -4.86973,0.85217 -2.3637,-1.5758 -4.23108,-4.67579 -5.61088,-9.29124 -1.36166,-4.61024 -1.99867,-10.32878 -1.91636,-17.16995 0.0532,-4.42099 0.40174,-8.10179 1.04648,-11.0477 0.64585,-2.95094 1.59765,-4.88106 2.85839,-5.78928 1.27692,-0.93132 2.65738,-0.95975 4.14303,-0.0791 1.88674,1.11849 3.42575,3.18947 4.61182,6.21733 1.19146,3.01472 1.93755,6.74983 2.23475,11.20086 0,0 -2.92082,-0.72724 -2.92082,-0.72724 -0.24353,-2.97398 -0.70922,-5.3811 -1.39599,-7.22057 -0.67412,-1.8282 -1.50208,-3.03683 -2.48268,-3.62779 -1.47568,-0.88924 -2.68418,-0.33926 -3.629,1.6424 -0.94184,1.95024 -1.44412,5.64763 -1.50886,11.09862 -0.0657,5.53171 0.32577,9.83698 1.17652,12.92095 0.85352,3.09406 1.99526,5.11378 3.42833,6.05501 1.15583,0.75914 2.13411,0.54393 2.93293,-0.65009 0.80075,-1.19694 1.32691,-3.50191 1.57708,-6.91359"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3668"
-         d="m 172.97661,394.46064 c 0,0 0.0905,-8.17492 0.0905,-8.17492 0,0 3.48861,2.27245 3.48861,2.27245 0,0 -0.0895,8.22327 -0.0895,8.22327 -0.0329,3.02363 -0.28765,5.30542 -0.76375,6.84314 -0.47577,1.56243 -1.21303,2.51325 -2.20987,2.85324 0,0 -0.81311,-3.65386 -0.81311,-3.65386 0.65091,-0.22881 1.13685,-0.89297 1.45702,-1.99285 0.32015,-1.07418 0.51068,-2.8142 0.57137,-5.21909 0,0 -1.73124,-1.15138 -1.73124,-1.15138"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3670"
-         d="m 204.77784,410.06983 c -1.27022,1.55778 -2.48568,2.44071 -3.64678,2.65261 -1.1447,0.21934 -2.36657,-0.10529 -3.66459,-0.97064 -2.13127,-1.42084 -3.74779,-3.67649 -4.85717,-6.76514 -1.10483,-3.1041 -1.63719,-6.47275 -1.60031,-10.11391 0.0216,-2.13477 0.25062,-3.94364 0.6874,-5.42825 0.44957,-1.50612 1.02226,-2.57799 1.71876,-3.21526 0.71002,-0.63098 1.50367,-0.94896 2.38159,-0.95288 0.64759,0.017 1.6255,0.25355 2.93681,0.71095 2.68835,0.95136 4.68535,1.32634 5.97773,1.11825 0.0222,-1.02578 0.0346,-1.67832 0.0372,-1.95765 0.0289,-3.07178 -0.26872,-5.42898 -0.8919,-7.06976 -0.84101,-2.21749 -2.10184,-3.83086 -3.77761,-4.84085 -1.55688,-0.93829 -2.71034,-1.00947 -3.46489,-0.21839 -0.74047,0.76925 -1.30109,2.5996 -1.68287,5.49061 0,0 -3.16708,-2.94172 -3.16708,-2.94172 0.31864,-2.91383 0.81734,-5.11515 1.49696,-6.60484 0.6812,-1.51989 1.65517,-2.41342 2.92464,-2.67921 1.27473,-0.29431 2.75127,0.0544 4.43259,1.05105 1.67794,0.99472 3.04366,2.25211 4.09313,3.7721 1.05306,1.52531 1.82526,3.12483 2.31452,4.79681 0.49033,1.64692 0.82696,3.5698 1.00937,5.76792 0.10151,1.36012 0.13673,3.72492 0.1056,7.09479 0,0 -0.0935,10.11679 -0.0935,10.11679 -0.0653,7.05995 -0.0372,11.58025 0.0844,13.55797 0.13448,1.95911 0.40887,3.94126 0.8236,5.94773 0,0 -3.55349,-2.3633 -3.55349,-2.3633 -0.33594,-1.80359 -0.5439,-3.78856 -0.62416,-5.9558 m -0.12224,-17.05427 c -1.23154,0.34731 -3.06331,0.14247 -5.48491,-0.60924 -1.36335,-0.41924 -2.32581,-0.53009 -2.89103,-0.33412 -0.56424,0.19568 -1.00286,0.73389 -1.31639,1.61435 -0.31298,0.85222 -0.4758,1.92485 -0.48867,3.21853 -0.0197,1.98221 0.29058,3.84732 0.93197,5.59804 0.65498,1.76261 1.62279,3.0659 2.90625,3.90947 1.27641,0.83893 2.42209,0.96176 3.43544,0.36456 1.01669,-0.62694 1.7731,-1.89094 2.26739,-3.79238 0.3778,-1.47261 0.58252,-3.87376 0.61388,-7.20158 0,0 0.0261,-2.76763 0.0261,-2.76763"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3672"
-         d="m 226.91498,430.33317 c 0,0 0.056,-6.79135 0.056,-6.79135 -1.69979,4.12585 -3.95958,5.23997 -6.76691,3.36841 -1.23125,-0.82083 -2.37518,-2.1017 -3.4326,-3.84047 -1.04088,-1.72429 -1.81148,-3.52427 -2.31374,-5.40182 -0.48827,-1.89422 -0.82487,-4.02954 -1.01034,-6.40682 -0.12775,-1.59592 -0.17698,-4.02489 -0.14772,-7.28678 0,0 0.25063,-27.95019 0.25063,-27.95019 0,0 3.47921,2.068 3.47921,2.068 0,0 -0.22098,25.15376 -0.22098,25.15376 -0.0353,4.02044 0.0122,6.77614 0.14272,8.26649 0.20297,2.17003 0.65699,4.07445 1.36316,5.71471 0.70804,1.61546 1.59303,2.77268 2.65633,3.47053 1.06676,0.70016 2.07587,0.76801 3.02668,0.20066 0.95364,-0.59783 1.63329,-1.79901 2.03728,-3.60358 0.41794,-1.82668 0.64337,-4.71043 0.67595,-8.64861 0,0 0.20406,-24.67831 0.20406,-24.67831 0,0 3.62583,2.15515 3.62583,2.15515 0,0 -0.37466,46.37229 -0.37466,46.37229 0,0 -3.25092,-2.16207 -3.25092,-2.16207"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3674"
-         d="m 236.84818,436.9394 c 0,0 0.31458,-40.68866 0.31458,-40.68866 0,0 -3.27066,-1.97443 -3.27066,-1.97443 0,0 0.0485,-6.13244 0.0485,-6.13244 0,0 3.26986,1.94357 3.26986,1.94357 0,0 0.0384,-4.9718 0.0384,-4.9718 0.0242,-3.13718 0.17313,-5.39171 0.44675,-6.76504 0.37445,-1.8466 1.0157,-3.14492 1.92523,-3.8952 0.92597,-0.77365 2.21207,-0.69593 3.86256,0.23811 1.06731,0.60412 2.24898,1.54093 3.54628,2.81271 0,0 -0.62418,6.66996 -0.62418,6.66996 -0.78934,-0.75385 -1.53564,-1.33338 -2.23919,-1.73932 -1.15067,-0.66373 -1.96603,-0.6152 -2.44858,0.14318 -0.48194,0.75751 -0.73333,2.55103 -0.75467,5.38196 0,0 -0.0327,4.33654 -0.0327,4.33654 0,0 4.35398,2.58795 4.35398,2.58795 0,0 -0.0456,6.23957 -0.0456,6.23957 0,0 -4.35509,-2.62908 -4.35509,-2.62908 0,0 -0.30843,40.92114 -0.30843,40.92114 0,0 -3.72704,-2.47872 -3.72704,-2.47872"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3676"
-         d="m 246.46465,429.05307 c 0,0 3.81968,1.1922 3.81968,1.1922 0.19276,3.35392 0.7721,6.20708 1.74012,8.56243 0.98544,2.37207 2.3721,4.14723 4.16469,5.32459 1.81668,1.19318 3.17579,1.3205 4.07171,0.37548 0.89826,-0.97786 1.35491,-2.50699 1.36833,-4.58524 0.012,-1.86394 -0.37148,-3.58214 -1.14903,-5.15206 -0.54183,-1.08052 -1.89103,-2.87259 -4.03793,-5.36553 -2.87017,-3.33767 -4.84719,-5.88768 -5.94667,-7.66691 -1.08128,-1.7942 -1.8993,-3.82568 -2.45597,-6.09572 -0.54119,-2.28674 -0.80303,-4.59245 -0.78627,-6.91984 0.0153,-2.11796 0.25669,-3.93345 0.72469,-5.44816 0.48302,-1.53765 1.12853,-2.66509 1.93745,-3.38209 0.60808,-0.56866 1.4316,-0.86027 2.47213,-0.87408 1.05827,-0.0353 2.19002,0.30354 3.396,1.01839 1.82428,1.08147 3.42677,2.57943 4.80442,4.49544 1.39816,1.9329 2.42778,4.04798 3.08549,6.34283 0.65928,2.26923 1.10658,5.05898 1.34104,8.36831 0,0 -3.93498,-1.30965 -3.93498,-1.30965 -0.1613,-2.60573 -0.66572,-4.86818 -1.51169,-6.78511 -0.82908,-1.90296 -2.01211,-3.31622 -3.54556,-4.24034 -1.80214,-1.08596 -3.08681,-1.24118 -3.85989,-0.47117 -0.77146,0.76845 -1.16235,1.97686 -1.17391,3.62665 -0.007,1.05006 0.14407,2.09235 0.45452,3.12753 0.31055,1.06635 0.80269,2.09487 1.47721,3.08626 0.38829,0.54294 1.53561,1.95069 3.44979,4.23261 2.78949,3.29205 4.7444,5.79841 5.85003,7.50277 1.12436,1.68881 2.00304,3.68747 2.63416,5.99522 0.63237,2.3125 0.94024,4.88426 0.92265,7.71231 -0.0173,2.76736 -0.43134,5.12235 -1.24099,7.06139 -0.79291,1.91427 -1.93089,3.05649 -3.41056,3.42835 -1.47342,0.33983 -3.12755,-0.1039 -4.95957,-1.32524 -3.01245,-2.00831 -5.28496,-4.82452 -6.83171,-8.44857 -1.52498,-3.59708 -2.47979,-8.05614 -2.86938,-13.38305"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3678"
-         d="m 267.46509,458.46409 c 0,0 10.16276,-64.44628 10.16276,-64.44628 0,0 3.35985,1.90154 3.35985,1.90154 0,0 -10.22211,64.7453 -10.22211,64.7453 0,0 -3.3005,-2.20056 -3.3005,-2.20056"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3680"
-         d="m 287.73074,470.77961 c 0,0 -3.98413,-2.64971 -3.98413,-2.64971 0,0 0.36657,-69.26132 0.36657,-69.26132 0,0 4.28286,2.431 4.28286,2.431 0,0 -0.12574,24.80354 -0.12574,24.80354 1.84841,-3.43804 4.20286,-4.3171 7.07399,-2.61515 1.5995,0.94822 3.11282,2.48894 4.53901,4.62548 1.44866,2.12297 2.63509,4.62828 3.55675,7.51533 0.94101,2.87289 1.67339,6.11301 2.19582,9.71903 0.52331,3.61258 0.77764,7.29172 0.76223,11.03361 -0.0367,8.8888 -1.19889,15.02735 -3.47692,18.39523 -2.26525,3.34891 -4.9514,3.97742 -8.04813,1.91293 -3.05429,-2.0362 -5.42013,-6.12345 -7.11007,-12.2502 0,0 -0.0322,6.34023 -0.0322,6.34023 m 0.0826,-25.6991 c -0.0308,6.05748 0.36263,10.70405 1.18198,13.94323 1.3439,5.31484 3.18967,8.7503 5.54452,10.29694 1.92772,1.26611 3.60983,0.72174 5.04245,-1.64447 1.43781,-2.407 2.17299,-6.89882 2.20167,-13.46572 0.0293,-6.72399 -0.63702,-12.10528 -1.99483,-16.13506 -1.33586,-4.00333 -2.96003,-6.57643 -4.86901,-7.72687 -1.91517,-1.15407 -3.57055,-0.50907 -4.97003,1.92406 -1.39445,2.39298 -2.10547,6.6592 -2.13675,12.80789"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3682"
-         d="m 322.12463,485.58433 c 0,0 0.65936,8.40758 0.65936,8.40758 -1.33673,-0.35442 -2.52804,-0.88064 -3.57528,-1.5781 -1.70425,-1.13503 -3.01872,-2.52454 -3.94739,-4.16917 -0.92628,-1.6404 -1.57435,-3.40805 -1.9457,-5.30454 -0.37079,-1.92713 -0.54592,-5.5546 -0.52573,-10.88197 0,0 0.114,-30.08386 0.114,-30.08386 0,0 -3.36894,-2.03377 -3.36894,-2.03377 0,0 0.0272,-6.84805 0.0272,-6.84805 0,0 3.36786,2.00182 3.36786,2.00182 0,0 0.0489,-12.91135 0.0489,-12.91135 0,0 4.63253,-2.66881 4.63253,-2.66881 0,0 -0.065,18.3241 -0.065,18.3241 0,0 4.72675,2.80952 4.72675,2.80952 0,0 -0.023,6.96866 -0.023,6.96866 0,0 -4.72829,-2.85438 -4.72829,-2.85438 0,0 -0.10923,30.77205 -0.10923,30.77205 -0.009,2.54809 0.0632,4.23726 0.21665,5.06728 0.17091,0.8418 0.43796,1.59732 0.80137,2.26677 0.38115,0.6815 0.92028,1.25067 1.61806,1.70755 0.52419,0.34326 1.21588,0.67931 2.07599,1.00867"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3684"
-         d="m 326.68371,496.68588 c 0,0 0.16352,-53.31935 0.16352,-53.31935 0,0 4.33405,2.57612 4.33405,2.57612 0,0 -0.0231,8.11168 -0.0231,8.11168 1.12479,-3.12783 2.15869,-5.02087 3.10122,-5.67423 0.96285,-0.64401 2.01732,-0.62746 3.16426,0.0524 1.66273,0.98571 3.35799,2.97819 5.08643,5.98483 0,0 -1.73463,7.50163 -1.73463,7.50163 -1.20956,-2.06252 -2.41678,-3.45673 -3.62177,-4.18598 -1.07402,-0.64988 -2.03784,-0.62407 -2.89238,0.075 -0.85268,0.66393 -1.46157,1.94671 -1.82782,3.84834 -0.54904,2.90043 -0.82874,6.26858 -0.83955,10.10792 0,0 -0.0793,28.13461 -0.0793,28.13461 0,0 -4.83103,-3.21295 -4.83103,-3.21295"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3686"
-         d="m 346.63844,509.95707 c 0,0 0.0968,-47.55946 0.0968,-47.55946 0,0 -4.43131,-2.6751 -4.43131,-2.6751 0,0 0.0162,-7.15908 0.0162,-7.15908 0,0 4.42975,2.633 4.42975,2.633 0,0 0.0118,-5.80848 0.0118,-5.80848 0.007,-3.66486 0.19039,-6.28429 0.54899,-7.86025 0.49107,-2.11858 1.34725,-3.56796 2.57091,-4.34826 1.24623,-0.8062 2.9874,-0.57829 5.2303,0.69102 1.45137,0.82149 3.06136,2.04536 4.83196,3.67489 0,0 -0.79224,7.74699 -0.79224,7.74699 -1.07705,-0.96968 -2.09389,-1.73012 -3.05099,-2.28234 -1.56464,-0.90254 -2.66858,-0.93449 -3.31577,-0.0995 -0.64623,0.83385 -0.9719,2.90502 -0.97777,6.21534 0,0 -0.009,5.07119 -0.009,5.07119 0,0 5.92043,3.51903 5.92043,3.51903 0,0 -0.0107,7.30549 -0.0107,7.30549 0,0 -5.92257,-3.57534 -5.92257,-3.57534 0,0 -0.0849,47.87735 -0.0849,47.87735 0,0 -5.0619,-3.36649 -5.0619,-3.36649"
-         inkscape:connector-curvature="0" />
-      <path
-         id="path3688"
-         d="m 359.60073,501.90418 c 0,0 5.20059,1.86777 5.20059,1.86777 0.29001,3.96114 1.10193,7.38322 2.43911,10.27061 1.36176,2.91073 3.2661,5.17238 5.72054,6.78444 2.48967,1.63519 4.34728,1.95881 5.56379,0.96109 1.21993,-1.0365 1.83154,-2.77869 1.83229,-5.22389 6.2e-4,-2.19296 -0.5384,-4.26389 -1.61481,-6.20909 -0.7497,-1.33918 -2.60804,-3.61528 -5.55946,-6.8122 -3.94075,-4.27425 -6.65395,-7.50944 -8.16465,-9.73106 -1.48522,-2.23573 -2.61386,-4.7171 -3.38893,-7.44614 -0.75395,-2.74593 -1.12852,-5.48045 -1.12491,-8.2074 0.003,-2.48146 0.31617,-4.58205 0.93929,-6.30404 0.64345,-1.7475 1.51123,-2.99566 2.60481,-3.74404 0.82208,-0.59757 1.93976,-0.84564 3.35554,-0.74295 1.44048,0.0796 2.98492,0.60687 4.63457,1.58472 2.49729,1.48044 4.69744,3.42626 6.59564,5.83924 1.92772,2.43694 3.35406,5.04673 4.27363,7.82559 0.92183,2.74989 1.55812,6.08842 1.90744,10.01415 0,0 -5.39591,-2.01583 -5.39591,-2.01583 -0.24253,-3.08522 -0.95109,-5.80694 -2.12313,-8.16184 -1.14834,-2.33544 -2.7751,-4.13563 -4.87465,-5.40091 -2.46541,-1.48565 -4.2164,-1.81727 -5.26239,-1.00324 -1.04343,0.8121 -1.56519,2.18465 -1.56724,4.11944 -10e-4,1.23148 0.21335,2.47259 0.64434,3.72428 0.43146,1.28852 1.10985,2.55443 2.03645,3.7988 0.53331,0.68393 2.10812,2.47474 4.73703,5.38635 3.83534,4.20888 6.52812,7.39657 8.05468,9.53851 1.55295,2.12718 2.77297,4.59004 3.65706,7.38727 0.88613,2.80397 1.33003,5.87348 1.33006,9.20426 -3e-5,3.25947 -0.54743,5.98195 -1.64026,8.16269 -1.06972,2.15296 -2.61798,3.35081 -4.63932,3.59644 -2.01164,0.20856 -4.27524,-0.52848 -6.78627,-2.2025 -4.12399,-2.74933 -7.24172,-6.34882 -9.37583,-10.80056 -2.10254,-4.4137 -3.43626,-9.76409 -4.0091,-16.05996"
-         inkscape:connector-curvature="0" />
-    </g>
   </g>
   <g
-     style="display:inline"
+     style="display:none"
      inkscape:groupmode="layer"
      id="layer9"
      inkscape:label="GenericRootfs">
@@ -553,7 +518,7 @@
     </g>
   </g>
   <g
-     style="display:none"
+     style="display:inline"
      inkscape:label="Debian"
      id="layer5"
      inkscape:groupmode="layer">
@@ -1178,7 +1143,7 @@
        inkscape:groupmode="layer"
        id="layer10"
        inkscape:label="Base Image"
-       style="display:none">
+       style="display:inline">
       <g
          transform="matrix(0.71864924,0,0,0.71864924,102.10269,88.99025)"
          style="font-size:40px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier New;-inkscape-font-specification:Courier New Bold"
@@ -1251,6 +1216,36 @@
            inkscape:connector-curvature="0" />
       </g>
     </g>
+    <g
+       inkscape:groupmode="layer"
+       id="layer13"
+       inkscape:label="Parent Pointer">
+      <path
+         style="fill:none;stroke:#000000;stroke-width:4.80000019;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+         d="m 546.77419,134.2742 c 110.08065,13.30645 1.20968,53.2258 1.20968,53.2258"
+         id="path3272"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="cc" />
+      <text
+         xml:space="preserve"
+         style="font-size:32px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:103.99999619%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Helvetica"
+         x="601.20972"
+         y="125.80645"
+         id="text4672"
+         sodipodi:linespacing="104%"><tspan
+           sodipodi:role="line"
+           id="tspan4674"
+           x="601.20972"
+           y="125.80645">references</tspan><tspan
+           sodipodi:role="line"
+           x="601.20972"
+           y="159.08644"
+           id="tspan4676">parent</tspan><tspan
+           sodipodi:role="line"
+           x="601.20972"
+           y="192.36644"
+           id="tspan4678">image</tspan></text>
+    </g>
   </g>
   <g
      style="display:none"

+ 14 - 14
docs/sources/use/ambassador_pattern_linking.rst

@@ -43,26 +43,26 @@ Start actual redis server on one Docker host
 
 .. code-block:: bash
 
-	big-server $ docker run -d -name redis crosbymichael/redis
+	big-server $ docker run -d --name redis crosbymichael/redis
 
 Then add an ambassador linked to the redis server, mapping a port to the outside world
 
 .. code-block:: bash
 
-	big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
+	big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador
 
 On the other host, you can set up another ambassador, setting environment variables for each remote port you want to proxy to the ``big-server``
 
 .. code-block:: bash
 
-	client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
+	client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
 
 Then on the ``client-server`` host, you can use a redis client container to talk 
 to the remote redis server, just by linking to the local redis ambassador.
 
 .. code-block:: bash
 
-	client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+	client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
 	redis 172.17.0.160:6379> ping
 	PONG
 
@@ -79,19 +79,19 @@ On the docker host (192.168.1.52) that redis will run on:
 .. code-block:: bash
 
 	# start actual redis server
-	$ docker run -d -name redis crosbymichael/redis
+	$ docker run -d --name redis crosbymichael/redis
 
 	# get a redis-cli container for connection testing	
 	$ docker pull relateiq/redis-cli
 
 	# test the redis server by talking to it directly
-	$ docker run -t -i -rm -link redis:redis relateiq/redis-cli
+	$ docker run -t -i --rm --link redis:redis relateiq/redis-cli
 	redis 172.17.0.136:6379> ping
 	PONG
 	^D
 	
 	# add redis ambassador
-	$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
+	$ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
 	
 in the redis_ambassador container, you can see the linked redis container's env
 
@@ -119,7 +119,7 @@ This environment is used by the ambassador socat script to expose redis to the w
 
 	$ docker rm redis_ambassador
 	$ sudo ./contrib/mkimage-unittest.sh
-	$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
+	$ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
 	
 	$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
 	
@@ -127,7 +127,7 @@ then ping the redis server via the ambassador
 
 	.. code-block:: bash
 
-	$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+	$ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
 	redis 172.17.0.160:6379> ping
 	PONG
 
@@ -136,7 +136,7 @@ Now goto a different server
 .. code-block:: bash
 
 	$ sudo ./contrib/mkimage-unittest.sh
-	$ docker run -t -i  -expose 6379 -name redis_ambassador docker-ut sh
+	$ docker run -t -i  --expose 6379 --name redis_ambassador docker-ut sh
 	
 	$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
 
@@ -145,7 +145,7 @@ and get the redis-cli image so we can talk over the ambassador bridge
 .. code-block:: bash
 
 	$ docker pull relateiq/redis-cli
-	$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+	$ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
 	redis 172.17.0.160:6379> ping
 	PONG
 
@@ -157,7 +157,7 @@ When you start the container, it uses a small ``sed`` script to parse out the (p
 link environment variables to set up the port forwarding. On the remote host, you need to set the 
 variable using the ``-e`` command line option.
 
-``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the 
+``--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the 
 local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
 
 
@@ -171,9 +171,9 @@ local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379
 	#   docker build -t SvenDowideit/ambassador .
 	#   docker tag SvenDowideit/ambassador ambassador
 	# then to run it (on the host that has the real backend on it)
-	#   docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
+	#   docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador
 	# on the remote host, you can set up another ambassador
-	#    docker run -t -i -name redis_ambassador -expose 6379 sh
+	#    docker run -t -i --name redis_ambassador --expose 6379 sh
 
 	FROM	docker-ut
 	MAINTAINER	SvenDowideit@home.org.au
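
For readers following the hunks above: the ambassador image simply turns each linked ``*_PORT_*_TCP`` environment variable into a ``socat`` forwarder, which is exactly what the manual ``socat`` commands in this document do. A minimal sketch of that expansion, reusing the ``REDIS_PORT_6379_TCP`` value from the example above (the actual ``sed`` one-liner inside the image is not part of this diff):

.. code-block:: bash

    # given REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 (set via --link or -e),
    # the ambassador ends up running a forwarder equivalent to:
    socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379

Each additional linked port gets its own listener of the same shape.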

+ 3 - 1
docs/sources/use/basics.rst

@@ -39,7 +39,9 @@ Repository to a local image cache.
    12 character hash ``539c0211cd76: Download complete`` which is the
    short form of the image ID. These short image IDs are the first 12
    characters of the full image ID - which can be found using ``docker
-   inspect`` or ``docker images -notrunc=true``
+   inspect`` or ``docker images --no-trunc=true``
+   
+   **If you're using OS X** then you shouldn't use ``sudo``
 
 Running an interactive shell
 ----------------------------
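
Returning to the short image ID note in the hunk above: either command reveals the full, untruncated image ID, and the 12-character short ID reported during a pull is simply its first 12 characters. The image name below is only an example:

.. code-block:: bash

    # show full image IDs (the short form is just the first 12 characters)
    docker images --no-trunc=true
    docker inspect ubuntu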

+ 95 - 0
docs/sources/use/chef.rst

@@ -0,0 +1,95 @@
+:title: Chef Usage
+:description: Installation and using Docker via Chef
+:keywords: chef, installation, usage, docker, documentation
+
+.. _install_using_chef:
+
+Using Chef
+=============
+
+.. note::
+
+   Please note this is a community contributed installation path. The
+   only 'official' installation is using the :ref:`ubuntu_linux`
+   installation path. This version may sometimes be out of date.
+
+Requirements
+------------
+
+To use this guide you'll need a working installation of 
+`Chef <http://www.getchef.com/>`_. This cookbook supports a variety of 
+operating systems.
+
+Installation
+------------
+
+The cookbook is available on the `Chef Community Site
+<community.opscode.com/cookbooks/docker>`_ and can be installed
+using your favorite cookbook dependency manager.
+
+The source can be found on `GitHub
+<https://github.com/bflad/chef-docker>`_.
+
+Usage
+-----
+
+The cookbook provides recipes for installing Docker, configuring init
+for Docker, and resources for managing images and containers.
+It supports almost all Docker functionality.
+
+Installation
+~~~~~~~~~~~~
+
+.. code-block:: ruby
+
+  include_recipe 'docker'
+
+Images
+~~~~~~
+
+The next step is to pull a Docker image. For this, we have a resource:
+
+.. code-block:: ruby
+
+  docker_image 'samalba/docker-registry'
+
+This is equivalent to running:
+
+.. code-block:: bash
+
+  docker pull samalba/docker-registry
+
+There are attributes available to control how long the cookbook
+will allow for downloading (5 minute default).
+
+To remove images you no longer need:
+
+.. code-block:: ruby
+
+  docker_image 'samalba/docker-registry' do
+    action :remove
+  end
+
+Containers
+~~~~~~~~~~
+
+Now you have an image where you can run commands within a container
+managed by Docker.
+
+.. code-block:: ruby
+
+  docker_container 'samalba/docker-registry' do
+    detach true
+    port '5000:5000'
+    env 'SETTINGS_FLAVOR=local'
+    volume '/mnt/docker:/docker-storage'
+  end
+
+This is equivalent to running the following command, but under upstart:
+
+.. code-block:: bash
+
+  docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
+
+The resources will accept a single string or an array of values
+for any docker flags that allow multiple values.
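
To make that last statement concrete on the CLI side, flags such as ``--publish`` and ``--env`` can be repeated, which is presumably what passing an array to the resource maps onto; the second port and the ``STORAGE_PATH`` value below are invented purely for illustration:

.. code-block:: bash

    docker run --detach=true --publish='5000:5000' --publish='5001:5001' \
        --env='SETTINGS_FLAVOR=local' --env='STORAGE_PATH=/docker-storage' \
        --volume='/mnt/docker:/docker-storage' samalba/docker-registry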

+ 0 - 5
docs/sources/use/host_integration.rst

@@ -43,11 +43,6 @@ into it:
    stop on runlevel [!2345]
    respawn
    script
-     # Wait for docker to finish starting up first.
-     FILE=/var/run/docker.sock
-     while [ ! -e $FILE ] ; do
-       inotifywait -t 2 -e create $(dirname $FILE)
-     done
      /usr/bin/docker start -a redis_server
    end script
 

+ 1 - 0
docs/sources/use/index.rst

@@ -20,4 +20,5 @@ Contents:
    working_with_volumes
    working_with_links_names
    ambassador_pattern_linking
+   chef
    puppet

Some files were not shown because too many files changed in this diff