
Merge pull request #2577 from dotcloud/bump_v0.6.6

Bump v0.6.6
Victor Vieux 11 years ago
parent
commit
cb1c90975b
83 changed files with 2659 additions and 1177 deletions
  1. 3 0
      AUTHORS
  2. 693 424
      CHANGELOG.md
  3. 7 7
      Dockerfile
  4. 1 1
      VERSION
  5. 3 1
      api.go
  6. 1 5
      api_params.go
  7. 7 14
      api_test.go
  8. 1 0
      archive/MAINTAINERS
  9. 1 1
      archive/archive.go
  10. 1 1
      archive/archive_test.go
  11. 9 6
      buildfile.go
  12. 38 1
      buildfile_test.go
  13. 92 41
      commands.go
  14. 54 0
      commands_test.go
  15. 26 1
      config.go
  16. 138 84
      container.go
  17. 75 47
      container_test.go
  18. 1 1
      contrib/completion/bash/docker
  19. 11 0
      contrib/desktop-integration/README.txt
  20. 38 0
      contrib/desktop-integration/data/Dockerfile
  21. 49 0
      contrib/desktop-integration/firefox/Dockerfile
  22. 1 1
      contrib/init/systemd/docker.service
  23. 29 20
      contrib/init/sysvinit/docker
  24. 17 105
      docker/docker.go
  25. 19 19
      docs/sources/api/docker_remote_api_v1.4.rst
  26. 18 18
      docs/sources/api/docker_remote_api_v1.6.rst
  27. 68 47
      docs/sources/commandline/cli.rst
  28. 4 9
      docs/sources/contributing/devenvironment.rst
  29. 5 5
      docs/sources/examples/couchdb_data_volumes.rst
  30. 10 9
      docs/sources/examples/hello_world.rst
  31. 1 1
      docs/sources/examples/index.rst
  32. 27 22
      docs/sources/examples/linking_into_redis.rst
  33. 9 9
      docs/sources/examples/mongodb.rst
  34. 16 16
      docs/sources/examples/nodejs_web_app.rst
  35. 4 5
      docs/sources/examples/postgresql_service.rst
  36. 20 20
      docs/sources/examples/python_web_app.rst
  37. 6 6
      docs/sources/examples/running_redis_service.rst
  38. 1 1
      docs/sources/examples/running_riak_service.rst
  39. 7 6
      docs/sources/examples/running_ssh_service.rst
  40. 2 2
      docs/sources/installation/ubuntulinux.rst
  41. 1 1
      docs/sources/installation/vagrant.rst
  42. 4 0
      docs/sources/installation/windows.rst
  43. 5 9
      docs/sources/use/basics.rst
  44. 13 10
      docs/sources/use/builder.rst
  45. 1 0
      docs/sources/use/index.rst
  46. 123 16
      docs/sources/use/port_redirection.rst
  47. 73 0
      docs/sources/use/working_with_volumes.rst
  48. 1 0
      engine/MAINTAINERS
  49. 82 0
      engine/engine.go
  50. 29 0
      engine/env_test.go
  51. 42 0
      engine/init_test.go
  52. 113 0
      engine/job.go
  53. 15 2
      gograph/gograph.go
  54. 37 0
      gograph/gograph_test.go
  55. 7 5
      graph.go
  56. 2 1
      graph_test.go
  57. 23 17
      hack/RELEASE-CHECKLIST.md
  58. 8 0
      hack/dind
  59. 18 21
      hack/infrastructure/docker-ci/buildbot/master.cfg
  60. 5 4
      hack/infrastructure/docker-ci/deployment.py
  61. 30 0
      hack/infrastructure/docker-ci/docker-test/Dockerfile
  62. 11 8
      hack/infrastructure/docker-ci/docker-test/test_docker.sh
  63. 1 4
      hack/infrastructure/docker-ci/nightlyrelease/Dockerfile
  64. 11 6
      hack/infrastructure/docker-ci/nightlyrelease/dockerbuild
  65. 5 4
      image.go
  66. 24 14
      lxc_template.go
  67. 2 2
      namesgenerator/names-generator.go
  68. 29 0
      netlink/netlink_darwin.go
  69. 0 0
      netlink/netlink_linux.go
  70. 29 2
      network.go
  71. 16 0
      network_test.go
  72. 11 3
      registry/registry.go
  73. 84 25
      runtime.go
  74. 33 22
      runtime_test.go
  75. 110 23
      server.go
  76. 3 3
      server_test.go
  77. 14 6
      sysinit/sysinit.go
  78. 8 6
      utils.go
  79. 36 0
      utils/daemon.go
  80. 16 0
      utils/random.go
  81. 25 0
      utils/utils.go
  82. 38 0
      utils/utils_test.go
  83. 8 7
      utils_test.go

+ 3 - 0
AUTHORS

@@ -69,12 +69,14 @@ Gabriel Monroy <gabriel@opdemand.com>
 Gareth Rushgrove <gareth@morethanseven.net>
 Greg Thornton <xdissent@me.com>
 Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
+Gurjeet Singh <gurjeet@singh.im>
 Guruprasad <lgp171188@gmail.com>
 Harley Laue <losinggeneration@gmail.com>
 Hector Castro <hectcastro@gmail.com>
 Hunter Blanks <hunter@twilio.com>
 Isao Jonas <isao.jonas@gmail.com>
 James Carr <james.r.carr@gmail.com>
+James Turnbull <james@lovedthanlost.net>
 Jason McVetta <jason.mcvetta@gmail.com>
 Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
 Jeff Lindsay <progrium@gmail.com>
@@ -140,6 +142,7 @@ odk- <github@odkurzacz.org>
 Pascal Borreli <pascal@borreli.com>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
+Paul Nasrat <pnasrat@gmail.com>
 Phil Spitler <pspitler@gmail.com>
 Piotr Bogdan <ppbogdan@gmail.com>
 pysqz <randomq@126.com>

+ 693 - 424
CHANGELOG.md

@@ -1,514 +1,783 @@
 # Changelog
 
+## 0.6.6 (2013-11-06)
+
+#### Runtime
+
+* Ensure container name on register
+* Fix regression in /etc/hosts
++ Add lock around write operations in graph
+* Check if port is valid
+* Fix restart runtime error with ghost container networking
++ Added some more colors and animals to increase the pool of generated names
+* Fix issues in docker inspect
++ Escape apparmor confinement
++ Set environment variables using a file.
+* Prevent docker insert to erase something
++ Prevent DNS server conflicts in CreateBridgeIface
++ Validate bind mounts on the server side
++ Use parent image config in docker build
+
+#### Client
+
++ Add -P flag to publish all exposed ports
++ Add -notrunc and -q flags to docker history
+* Fix docker commit, tag and import usage
++ Add stars, trusted builds and library flags in docker search
+* Fix docker logs with tty
+
+#### RemoteAPI
+
+* Make /events API send headers immediately
+* Do not split last column docker top
++ Add size to history
+
+#### Other
+
++ Contrib: Desktop integration. Firefox usecase.
++ Dockerfile: bump to go1.2rc3
+
 ## 0.6.5 (2013-10-29)
-+ Runtime: Containers can now be named
-+ Runtime: Containers can now be linked together for service discovery
-+ Runtime: 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
-+ Runtime: Automatically start crashed containers after a reboot
-+ Runtime: Expose IP, port, and proto as separate environment vars for container links
-* Runtime: Allow ports to be published to specific ips
-* Runtime: Prohibit inter-container communication by default
+
+#### Runtime
+
++ Containers can now be named
++ Containers can now be linked together for service discovery
++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
++ Automatically start crashed containers after a reboot
++ Expose IP, port, and proto as separate environment vars for container links
+* Allow ports to be published to specific ips
+* Prohibit inter-container communication by default
+- Ignore ErrClosedPipe for stdin in Container.Attach
+- Remove unused field kernelVersion
+* Fix issue when mounting subdirectories of /mnt in container
+- Fix untag during removal of images
+* Check return value of syscall.Chdir when changing working directory inside dockerinit
+
+#### Client
+
+- Only pass stdin to hijack when needed to avoid closed pipe errors
+* Use less reflection in command-line method invocation
+- Monitor the tty size after starting the container, not prior
+- Remove useless os.Exit() calls after log.Fatal
+
+#### Hack
+
++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
+* Add -p option to invoke debootstrap with http_proxy
+- Update install.sh with $sh_c to get sudo/su for modprobe
+* Update all the mkimage scripts to use --numeric-owner as a tar argument
+* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
+
+#### Other
+
 * Documentation: Fix the flags for nc in example
-- Client: Only pass stdin to hijack when needed to avoid closed pipe errors
 * Testing: Remove warnings and prevent mount issues
-* Client: Use less reflection in command-line method invocation
-- Runtime: Ignore ErrClosedPipe for stdin in Container.Attach
 - Testing: Change logic for tty resize to avoid warning in tests
-- Client: Monitor the tty size after starting the container, not prior
-- Hack: Update install.sh with $sh_c to get sudo/su for modprobe
-- Client: Remove useless os.Exit() calls after log.Fatal
-* Hack: Update all the mkimage scripts to use --numeric-owner as a tar argument
-* Hack: Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
-+ Hack: Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
-- Runtime: Fix untag during removal of images 
-- Runtime: Remove unused field kernelVersion 
-* Hack: Add -p option to invoke debootstrap with http_proxy
 - Builder: Fix race condition in docker build with verbose output
 - Registry: Fix content-type for PushImageJSONIndex method
-* Runtime: Fix issue when mounting subdirectories of /mnt in container
-* Runtime: Check return value of syscall.Chdir when changing working directory inside dockerinit
 * Contrib: Improve helper tools to generate debian and Arch linux server images
 
 ## 0.6.4 (2013-10-16)
-- Runtime: Add cleanup of container when Start() fails
-- Testing: Catch errClosing error when TCP and UDP proxies are terminated
-- Testing: Add aggregated docker-ci email report
-- Testing: Remove a few errors in tests
-* Contrib: Reorganize contributed completion scripts to add zsh completion
-* Contrib: Add vim syntax highlighting for Dockerfiles from @honza
-* Runtime: Add better comments to utils/stdcopy.go
-- Testing: add cleanup to remove leftover containers
-* Documentation: Document how to edit and release docs
-* Documentation: Add initial draft of the Docker infrastructure doc
-* Contrib: Add mkimage-arch.sh
+
+#### Runtime
+
+- Add cleanup of container when Start() fails
+* Add better comments to utils/stdcopy.go
+* Add utils.Errorf for error logging
++ Add -rm to docker run for removing a container on exit
+- Remove error messages which are not actually errors
+- Fix `docker rm` with volumes
+- Fix some error cases where a HTTP body might not be closed
+- Fix panic with wrong dockercfg file
+- Fix the attach behavior with -i
+* Record termination time in state.
+- Use empty string so TempDir uses the OS's temp dir automatically
+- Make sure to close the network allocators
++ Autorestart containers by default
+* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)`
+* lxc: Allow set_file_cap capability in container
+- Move run -rm to the cli only
+* Split stdout stderr
+* Always create a new session for the container
+
+#### Testing
+
+- Add aggregated docker-ci email report
+- Add cleanup to remove leftover containers
+* Add nightly release to docker-ci
+* Add more tests around auth.ResolveAuthConfig
+- Remove a few errors in tests
+- Catch errClosing error when TCP and UDP proxies are terminated
+* Only run certain tests with TESTFLAGS='-run TestName' make.sh
+* Prevent docker-ci to test closing PRs
+* Replace panic by log.Fatal in tests
+- Increase TestRunDetach timeout
+
+#### Documentation
+
+* Add initial draft of the Docker infrastructure doc
+* Add devenvironment link to CONTRIBUTING.md
+* Add `apt-get install curl` to Ubuntu docs
+* Add explanation for export restrictions
+* Add .dockercfg doc
+* Remove Gentoo install notes about #1422 workaround
+* Fix help text for -v option
+* Fix Ping endpoint documentation
+- Fix parameter names in docs for ADD command
+- Fix ironic typo in changelog
+* Various command fixes in postgres example
+* Document how to edit and release docs
+- Minor updates to `postgresql_service.rst`
+* Clarify LGTM process to contributors
+- Corrected error in the package name
+* Document what `vagrant up` is actually doing
++ improve doc search results
+* Cleanup whitespace in API 1.5 docs
+* use angle brackets in MAINTAINER example email
+* Update archlinux.rst
++ Changes to a new style for the docs. Includes version switcher.
+* Formatting, add information about multiline json
+* Improve registry and index REST API documentation
+- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
+* Update Gentoo installation documentation now that we're in the portage tree proper
+* Cleanup and reorganize docs and tooling for contributors and maintainers
+- Minor spelling correction of protocoll -> protocol
+
+#### Contrib
+
+* Add vim syntax highlighting for Dockerfiles from @honza
+* Add mkimage-arch.sh
+* Reorganize contributed completion scripts to add zsh completion
+
+#### Hack
+
+* Add vagrant user to the docker group
+* Add proper bash completion for "docker push"
+* Add xz utils as a runtime dep
+* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
+* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
++ Add @tianon to hack/MAINTAINERS
+* Improve network performance for VirtualBox
+* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
+- Fix contrib/mkimage-debian.sh apt caching prevention
++ Added Dockerfile.tmLanguage to contrib
+* Configured FPM to make /etc/init/docker.conf a config file
+* Enable SSH Agent forwarding in Vagrant VM
+* Several small tweaks/fixes for contrib/mkimage-debian.sh
+
+#### Other
+
 - Builder: Abort build if mergeConfig returns an error and fix duplicate error message
-- Runtime: Remove error messages which are not actually errors
-* Testing: Only run certain tests with TESTFLAGS='-run TestName' make.sh
-* Testing: Prevent docker-ci to test closing PRs
-- Documentation: Minor updates to postgresql_service.rst
-* Testing: Add nightly release to docker-ci
-* Hack: Improve network performance for VirtualBox
-* Hack: Add vagrant user to the docker group
-* Runtime: Add utils.Errorf for error logging
 - Packaging: Remove deprecated packaging directory
-* Hack: Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
-- Hack: Fix contrib/mkimage-debian.sh apt caching prevention
-* Documentation: Clarify LGTM process to contributors
-- Documentation: Small fixes to parameter names in docs for ADD command
-* Runtime: Record termination time in state.
 - Registry: Use correct auth config when logging in.
-- Documentation: Corrected error in the package name
-* Documentation: Document what `vagrant up` is actually doing
-- Runtime: Fix `docker rm` with volumes
-- Runtime: Use empty string so TempDir uses the OS's temp dir automatically
-- Runtime: Make sure to close the network allocators
-* Testing: Replace panic by log.Fatal in tests
-+ Documentation: improve doc search results
-- Runtime: Fix some error cases where a HTTP body might not be closed
-* Hack: Add proper bash completion for "docker push"
-* Documentation: Add devenvironment link to CONTRIBUTING.md
-* Documentation: Cleanup whitespace in API 1.5 docs
-* Documentation: use angle brackets in MAINTAINER example email
-- Testing: Increase TestRunDetach timeout
-* Documentation: Fix help text for -v option
-+ Hack: Added Dockerfile.tmLanguage to contrib
-+ Runtime: Autorestart containers by default
-* Testing: Adding more tests around auth.ResolveAuthConfig
-* Hack: Configured FPM to make /etc/init/docker.conf a config file
-* Hack: Add xz utils as a runtime dep
-* Documentation: Add `apt-get install curl` to Ubuntu docs
-* Documentation: Remove Gentoo install notes about #1422 workaround
-* Documentation: Fix Ping endpoint documentation
-* Runtime: Bump vendor kr/pty to commit 3b1f6487b (syscall.O_NOCTTY)
-* Runtime: lxc: Allow set_file_cap capability in container
-* Documentation: Update archlinux.rst
-- Documentation: Fix ironic typo in changelog
-* Documentation: Add explanation for export restrictions
-* Hack: Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
-+ Documentation: Changes to a new style for the docs. Includes version switcher.
-* Documentation: Formatting, add information about multiline json
-+ Hack: Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
-- Runtime: Fix panic with wrong dockercfg file
-- Runtime: Fix the attach behavior with -i
-* Documentation: Add .dockercfg doc
-- Runtime: Move run -rm to the cli only
-* Hack: Enable SSH Agent forwarding in Vagrant VM
-+ Runtime: Add -rm to docker run for removing a container on exit
-* Documentation: Improve registry and index REST API documentation
-* Runtime: Split stdout stderr
-- Documentation: Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
-* Documentation: Update Gentoo installation documentation now that we're in the portage tree proper
 - Registry: Fix the error message so it is the same as the regex
-* Runtime: Always create a new session for the container
-* Hack: Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
-* Documentation: Various command fixes in postgres example
-* Documentation: Cleanup and reorganize docs and tooling for contributors and maintainers
-- Documentation: Minor spelling correction of protocoll -> protocol
-* Hack: Several small tweaks/fixes for contrib/mkimage-debian.sh
-+ Hack: Add @tianon to hack/MAINTAINERS
 
 ## 0.6.3 (2013-09-23)
-* Packaging: Update tar vendor dependency
+
+#### Packaging
+
+* Add 'docker' group on install for ubuntu package
+* Update tar vendor dependency
+* Download apt key over HTTPS
+
+#### Runtime
+
+- Only copy and change permissions on non-bindmount volumes
+* Allow multiple volumes-from
+- Fix HTTP imports from STDIN
+
+#### Documentation
+
+* Update section on extracting the docker binary after build
+* Update development environment docs for new build process
+* Remove 'base' image from documentation
+
+#### Other
+
 - Client: Fix detach issue
-- Runtime: Only copy and change permissions on non-bindmount volumes
 - Registry: Update regular expression to match index
-* Runtime: Allow multiple volumes-from
-* Packaging: Download apt key over HTTPS
-* Documentation: Update section on extracting the docker binary after build
-* Documentation: Update development environment docs for new build process
-* Documentation: Remove 'base' image from documentation
-* Packaging: Add 'docker' group on install for ubuntu package
-- Runtime: Fix HTTP imports from STDIN
 
 ## 0.6.2 (2013-09-17)
+
+#### Runtime
+
++ Add domainname support
++ Implement image filtering with path.Match
+* Remove unnecessary warnings
+* Remove os/user dependency
+* Only mount the hostname file when the config exists
+* Handle signals within the `docker login` command
+- UID and GID are now also applied to volumes
+- `docker start` set error code upon error
+- `docker run` set the same error code as the process started
+
+#### Builder
+
++ Add -rm option in order to remove intermediate containers
+* Allow multiline for the RUN instruction
+
+#### Registry
+
+* Implement login with private registry
+- Fix push issues
+
+#### Other
+
 + Hack: Vendor all dependencies
-+ Builder: Add -rm option in order to remove intermediate containers
-+ Runtime: Add domainname support
-+ Runtime: Implement image filtering with path.Match
-* Builder: Allow multiline for the RUN instruction
-* Runtime: Remove unnecesasry warnings
-* Runtime: Only mount the hostname file when the config exists
-* Runtime: Handle signals within the `docker login` command
-* Runtime: Remove os/user dependency
-* Registry: Implement login with private registry
 * Remote API: Bump to v1.5
 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
-* Documentation: General improvements
-- Runtime: UID and GID are now also applied to volumes
-- Runtime: `docker start` set error code upon error
-- Runtime: `docker run` set the same error code as the process started
-- Registry: Fix push issues
+* Documentation: General improvements
 
 ## 0.6.1 (2013-08-23)
-* Registry: Pass "meta" headers in API calls to the registry
-- Packaging: Use correct upstart script with new build tool
-- Packaging: Use libffi-dev, don't build it from sources
-- Packaging: Removed duplicate mercurial install command
+
+#### Registry
+
+* Pass "meta" headers in API calls to the registry
+
+#### Packaging
+
+- Use correct upstart script with new build tool
+- Use libffi-dev, don't build it from sources
+- Remove duplicate mercurial install command
 
 ## 0.6.0 (2013-08-22)
-- Runtime: Load authConfig only when needed and fix useless WARNING
-+ Runtime: Add lxc-conf flag to allow custom lxc options
-- Runtime: Fix race conditions in parallel pull
-- Runtime: Improve CMD, ENTRYPOINT, and attach docs.
-* Documentation: Small fix to docs regarding adding docker groups
-* Documentation: Add MongoDB image example
-+ Builder: Add USER instruction do Dockerfile
-* Documentation: updated default -H docs
-* Remote API: Sort Images by most recent creation date.
-+ Builder: Add workdir support for the Buildfile
-+ Runtime: Add an option to set the working directory
-- Runtime: Show tag used when image is missing
-* Documentation: Update readme with dependencies for building
-* Documentation: Add instructions for creating and using the docker group
-* Remote API: Reworking opaque requests in registry module
-- Runtime: Fix Graph ByParent() to generate list of child images per parent image.
-* Runtime: Add Image name to LogEvent tests
-* Documentation: Add sudo to examples and installation to documentation
-+ Hack: Bash Completion: Limit commands to containers of a relevant state
-* Remote API: Add image name in /events
-* Runtime: Apply volumes-from before creating volumes
-- Runtime: Make docker run handle SIGINT/SIGTERM
-- Runtime: Prevent crash when .dockercfg not readable
-* Hack: Add docker dependencies coverage testing into docker-ci
-+ Runtime: Add -privileged flag and relevant tests, docs, and examples
-+ Packaging: Docker-brew 0.5.2 support and memory footprint reduction
-- Runtime: Install script should be fetched over https, not http.
-* Packaging: Add new docker dependencies into docker-ci
-* Runtime: Use Go 1.1.2 for dockerbuilder
-* Registry: Improve auth push
-* Runtime: API, issue 1471: Use groups for socket permissions
-* Documentation: PostgreSQL service example in documentation
+
+#### Runtime
+
++ Add lxc-conf flag to allow custom lxc options
++ Add an option to set the working directory
+* Add Image name to LogEvent tests
++ Add -privileged flag and relevant tests, docs, and examples
+* Add websocket support to /container/<name>/attach/ws
+* Add warning when net.ipv4.ip_forwarding = 0
+* Add hostname to environment
+* Add last stable version in `docker version`
+- Fix race conditions in parallel pull
+- Fix Graph ByParent() to generate list of child images per parent image.
+- Fix typo: fmt.Sprint -> fmt.Sprintf
+- Fix small \n error in docker build
+* Fix to "Inject dockerinit at /.dockerinit"
+* Fix #910. print user name to docker info output
+* Use Go 1.1.2 for dockerbuilder
+* Use ranged for loop on channels
+- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+- Improve CMD, ENTRYPOINT, and attach docs.
+- Improve connect message with socket error
+- Load authConfig only when needed and fix useless WARNING
+- Show tag used when image is missing
+* Apply volumes-from before creating volumes
+- Make docker run handle SIGINT/SIGTERM
+- Prevent crash when .dockercfg not readable
+- Install script should be fetched over https, not http.
+* API, issue 1471: Use groups for socket permissions
+- Correctly detect IPv4 forwarding
+* Mount /dev/shm as a tmpfs
+- Switch from http to https for get.docker.io
+* Let userland proxy handle container-bound traffic
+* Updated the Docker CLI to specify a value for the "Host" header.
+- Change network range to avoid conflict with EC2 DNS
+- Reduce connect and read timeout when pinging the registry
+* Parallel pull
+- Handle ip route showing mask-less IP addresses
+* Allow ENTRYPOINT without CMD
+- Always consider localhost as a domain name when parsing the FQN repos name
+* Refactor checksum
+
+#### Documentation
+
+* Add MongoDB image example
+* Add instructions for creating and using the docker group
+* Add sudo to examples and installation to documentation
+* Add ufw doc
+* Add a reference to ps -a
+* Add information about Docker's high level tools over LXC.
+* Fix typo in docs for docker run -dns
+* Fix a typo in the ubuntu installation guide
+* Fix to docs regarding adding docker groups
+* Update default -H docs
+* Update readme with dependencies for building
+* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
+* PostgreSQL service example in documentation
+* Suggest installing linux-headers by default.
+* Change the twitter handle
+* Clarify Amazon EC2 installation
+* 'Base' image is deprecated and should no longer be referenced in the docs.
+* Move note about officially supported kernel
+- Solved the logo being squished in Safari
+
+#### Builder
+
++ Add USER instruction to Dockerfile
++ Add workdir support for the Buildfile
+* Add no cache for docker build
+- Fix docker build and docker events output
+- Only count known instructions as build steps
+- Make sure ENV instruction within build perform a commit each time
+- Forbid certain paths within docker build ADD
+- Repository name (and optionally a tag) in build usage
+- Make sure ADD will create everything in 0755
+
+#### Remote API
+
+* Sort Images by most recent creation date.
+* Reworking opaque requests in registry module
+* Add image name in /events
+* Use mime pkg to parse Content-Type
+* 650 http utils and user agent field
+
+#### Hack
+
++ Bash Completion: Limit commands to containers of a relevant state
+* Add docker dependencies coverage testing into docker-ci
+
+#### Packaging
+
++ Docker-brew 0.5.2 support and memory footprint reduction
+* Add new docker dependencies into docker-ci
+- Revert "docker.upstart: avoid spawning a `sh` process"
++ Docker-brew and Docker standard library
++ Release docker with docker
+* Fix the upstart script generated by get.docker.io
+* Enabled the docs to generate manpages.
+* Revert Bind daemon to 0.0.0.0 in Vagrant.
+
+#### Registry
+
+* Improve auth push
+* Registry unit tests + mock registry
+
+#### Tests
+
+* Improve TestKillDifferentUser to prevent timeout on buildbot
+- Fix typo in TestBindMounts (runContainer called without image)
+* Improve TestGetContainersTop so it does not rely on sleep
+* Relax the lo interface test to allow iface index != 1
+* Add registry functional test to docker-ci
+* Add some tests in server and utils
+
+#### Other
+
 * Contrib: bash completion script
-* Tests: Improve TestKillDifferentUser to prevent timeout on buildbot
-* Documentation: Fix typo in docs for docker run -dns
-* Documentation: Adding a reference to ps -a
-- Runtime: Correctly detect IPv4 forwarding
-- Packaging: Revert "docker.upstart: avoid spawning a `sh` process"
-* Runtime: Use ranged for loop on channels
-- Runtime: Fix typo: fmt.Sprint -> fmt.Sprintf
-- Tests: Fix typo in TestBindMounts (runContainer called without image)
-* Runtime: add websocket support to /container/<name>/attach/ws
-* Runtime: Mount /dev/shm as a tmpfs
-- Builder: Only count known instructions as build steps
-- Builder: Fix docker build and docker events output
-- Runtime: switch from http to https for get.docker.io
-* Tests: Improve TestGetContainersTop so it does not rely on sleep
-+ Packaging: Docker-brew and Docker standard library
-* Testing: Add some tests in server and utils
-+ Packaging: Release docker with docker
-- Builder: Make sure ENV instruction within build perform a commit each time
-* Packaging: Fix the upstart script generated by get.docker.io
-- Runtime: fix small \n error un docker build
-* Runtime: Let userland proxy handle container-bound traffic
-* Runtime: Updated the Docker CLI to specify a value for the "Host" header.
-* Runtime: Add warning when net.ipv4.ip_forwarding = 0
-* Registry: Registry unit tests + mock registry
-* Runtime: fixed #910. print user name to docker info output
-- Builder: Forbid certain paths within docker build ADD
-- Runtime: change network range to avoid conflict with EC2 DNS
-* Tests: Relax the lo interface test to allow iface index != 1
-* Documentation: Suggest installing linux-headers by default.
-* Documentation: Change the twitter handle
 * Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
-* Remote API: Use mime pkg to parse Content-Type
-- Runtime: Reduce connect and read timeout when pinging the registry
-* Documentation: Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
-* Packaging: Enabled the docs to generate manpages.
-* Runtime: Parallel pull
-- Runtime: Handle ip route showing mask-less IP addresses
-* Documentation: Clarify Amazon EC2 installation
-* Documentation: 'Base' image is deprecated and should no longer be referenced in the docs.
-* Runtime: Fix to "Inject dockerinit at /.dockerinit"
-* Runtime: Allow ENTRYPOINT without CMD
-- Runtime: Always consider localhost as a domain name when parsing the FQN repos name
-* Remote API: 650 http utils and user agent field
-* Documentation: fix a typo in the ubuntu installation guide
-- Builder: Repository name (and optionally a tag) in build usage
-* Documentation: Move note about officially supported kernel
-* Packaging: Revert "Bind daemon to 0.0.0.0 in Vagrant.
-* Builder: Add no cache for docker build
-* Runtime: Add hostname to environment
-* Runtime: Add last stable version in `docker version`
-- Builder: Make sure ADD will create everything in 0755
-* Documentation: Add ufw doc
-* Tests: Add registry functional test to docker-ci
-- Documentation: Solved the logo being squished in Safari
-- Runtime: Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
-* Runtime: Refactor checksum
-- Runtime: Improve connect message with socket error
-* Documentation: Added information about Docker's high level tools over LXC.
-* Don't read from stdout when only attached to stdin
+* Don't read from stdout when only attached to stdin
 
 ## 0.5.3 (2013-08-13)
-* Runtime: Use docker group for socket permissions
-- Runtime: Spawn shell within upstart script
-- Builder: Make sure ENV instruction within build perform a commit each time
-- Runtime: Handle ip route showing mask-less IP addresses
-- Runtime: Add hostname to environment
+
+#### Runtime
+
+* Use docker group for socket permissions
+- Spawn shell within upstart script
+- Handle ip route showing mask-less IP addresses
+- Add hostname to environment
+
+#### Builder
+
+- Make sure ENV instruction within build perform a commit each time
 
 ## 0.5.2 (2013-08-08)
- * Builder: Forbid certain paths within docker build ADD
- - Runtime: Change network range to avoid conflict with EC2 DNS
- * API: Change daemon to listen on unix socket by default
+
+* Builder: Forbid certain paths within docker build ADD
+- Runtime: Change network range to avoid conflict with EC2 DNS
+* API: Change daemon to listen on unix socket by default
 
 ## 0.5.1 (2013-07-30)
- + API: Docker client now sets useragent (RFC 2616)
- + Runtime: Add `ps` args to `docker top`
- + Runtime: Add support for container ID files (pidfile like)
- + Runtime: Add container=lxc in default env
- + Runtime: Support networkless containers with `docker run -n` and `docker -d -b=none`
- + API: Add /events endpoint
- + Builder: ADD command now understands URLs
- + Builder: CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
- * Hack: Simplify unit tests with helpers
- * Hack: Improve docker.upstart event
- * Hack: Add coverage testing into docker-ci
- * Runtime: Stdout/stderr logs are now stored in the same file as JSON
- * Runtime: Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
- * Runtime: Change .dockercfg format to json and support multiple auth remote
- - Runtime: Do not override volumes from config
- - Runtime: Fix issue with EXPOSE override
- - Builder: Create directories with 755 instead of 700 within ADD instruction
+
+#### Runtime
+
++ Add `ps` args to `docker top`
++ Add support for container ID files (pidfile like)
++ Add container=lxc in default env
++ Support networkless containers with `docker run -n` and `docker -d -b=none`
+* Stdout/stderr logs are now stored in the same file as JSON
+* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+* Change .dockercfg format to json and support multiple auth remote
+- Do not override volumes from config
+- Fix issue with EXPOSE override
+
+#### API
+
++ Docker client now sets useragent (RFC 2616)
++ Add /events endpoint
+
+#### Builder
+
++ ADD command now understands URLs
++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+- Create directories with 755 instead of 700 within ADD instruction
+
+#### Hack
+
+* Simplify unit tests with helpers
+* Improve docker.upstart event
+* Add coverage testing into docker-ci
 
 ## 0.5.0 (2013-07-17)
- + Runtime: List all processes running inside a container with 'docker top'
- + Runtime: Host directories can be mounted as volumes with 'docker run -v'
- + Runtime: Containers can expose public UDP ports (eg, '-p 123/udp')
- + Runtime: Optionally specify an exact public port (eg. '-p 80:4500')
- + Registry: New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
- + Builder: ENTRYPOINT instruction sets a default binary entry point to a container
- + Builder: VOLUME instruction marks a part of the container as persistent data
- * Builder: 'docker build' displays the full output of a build by default
- * Runtime: 'docker login' supports additional options
- - Runtime: Dont save a container's hostname when committing an image.
- - Registry: Fix issues when uploading images to a private registry
+
+#### Runtime
+
++ List all processes running inside a container with 'docker top'
++ Host directories can be mounted as volumes with 'docker run -v'
++ Containers can expose public UDP ports (eg, '-p 123/udp')
++ Optionally specify an exact public port (eg. '-p 80:4500')
+* 'docker login' supports additional options
+- Don't save a container's hostname when committing an image.
+
+#### Registry
+
++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+- Fix issues when uploading images to a private registry
+
+#### Builder
+
++ ENTRYPOINT instruction sets a default binary entry point to a container
++ VOLUME instruction marks a part of the container as persistent data
+* 'docker build' displays the full output of a build by default
 
 ## 0.4.8 (2013-07-01)
- + Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
- - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
- - Tests: Fix issues in the test suite
+
++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+- Tests: Fix issues in the test suite
 
 ## 0.4.7 (2013-06-28)
- * Registry: easier push/pull to a custom registry
- * Remote API: the progress bar updates faster when downloading and uploading large files
- - Remote API: fix a bug in the optional unix socket transport
- * Runtime: improve detection of kernel version
- + Runtime: host directories can be mounted as volumes with 'docker run -b'
- - Runtime: fix an issue when only attaching to stdin
- * Runtime: use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
- * Hack: improve test suite and dev environment
- * Hack: remove dependency on unit tests on 'os/user'
- + Documentation: add terminology section
+
+#### Remote API
+
+* The progress bar updates faster when downloading and uploading large files
+- Fix a bug in the optional unix socket transport
+
+#### Runtime
+
+* Improve detection of kernel version
++ Host directories can be mounted as volumes with 'docker run -b'
+- fix an issue when only attaching to stdin
+* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
+
+#### Hack
+
+* Improve test suite and dev environment
+* Remove dependency on unit tests on 'os/user'
+
+#### Other
+
+* Registry: easier push/pull to a custom registry
++ Documentation: add terminology section
 
 ## 0.4.6 (2013-06-22)
- - Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
+
+- Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
 
 ## 0.4.5 (2013-06-21)
- + Builder: 'docker build git://URL' fetches and builds a remote git repository
- * Runtime: 'docker ps -s' optionally prints container size
- * Tests: Improved and simplified
- - Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
- - Builder: fix a regression when using ADD with single regular file.
+
++ Builder: 'docker build git://URL' fetches and builds a remote git repository
+* Runtime: 'docker ps -s' optionally prints container size
+* Tests: Improved and simplified
+- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
+- Builder: fix a regression when using ADD with single regular file.
 
 ## 0.4.4 (2013-06-19)
- - Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
+
+- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
 
 ## 0.4.3 (2013-06-19)
- + Builder: ADD of a local file will detect tar archives and unpack them
- * Runtime: Remove bsdtar dependency
- * Runtime: Add unix socket and multiple -H support
- * Runtime: Prevent rm of running containers
- * Runtime: Use go1.1 cookiejar
- * Builder: ADD improvements: use tar for copy + automatically unpack local archives
- * Builder: ADD uses tar/untar for copies instead of calling 'cp -ar'
- * Builder: nicer output for 'docker build'
- * Builder: fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
- * Client: HumanReadable ProgressBar sizes in pull
- * Client: Fix docker version's git commit output
- * API: Send all tags on History API call
- * API: Add tag lookup to history command. Fixes #882
- - Runtime: Fix issue detaching from running TTY container
- - Runtime: Forbid parralel push/pull for a single image/repo. Fixes #311
- - Runtime: Fix race condition within Run command when attaching.
- - Builder: fix a bug which caused builds to fail if ADD was the first command
- - Documentation: fix missing command in irc bouncer example
+
+#### Builder
+
++ ADD of a local file will detect tar archives and unpack them
+* ADD improvements: use tar for copy + automatically unpack local archives
+* ADD uses tar/untar for copies instead of calling 'cp -ar'
+* Fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
+- Fix a bug which caused builds to fail if ADD was the first command
+* Nicer output for 'docker build'
+
+#### Runtime
+
+* Remove bsdtar dependency
+* Add unix socket and multiple -H support
+* Prevent rm of running containers
+* Use go1.1 cookiejar
+- Fix issue detaching from running TTY container
+- Forbid parallel push/pull for a single image/repo. Fixes #311
+- Fix race condition within Run command when attaching.
+
+#### Client
+
+* HumanReadable ProgressBar sizes in pull
+* Fix docker version's git commit output
+
+#### API
+
+* Send all tags on History API call
+* Add tag lookup to history command. Fixes #882
+
+#### Documentation
+
+- Fix missing command in irc bouncer example
 
 ## 0.4.2 (2013-06-17)
- - Packaging: Bumped version to work around an Ubuntu bug
+
+- Packaging: Bumped version to work around an Ubuntu bug
 
 ## 0.4.1 (2013-06-17)
- + Remote Api: Add flag to enable cross domain requests
- + Remote Api/Client: Add images and containers sizes in docker ps and docker images
- + Runtime: Configure dns configuration host-wide with 'docker -d -dns'
- + Runtime: Detect faulty DNS configuration and replace it with a public default
- + Runtime: allow docker run <name>:<id>
- + Runtime: you can now specify public port (ex: -p 80:4500)
- * Client: allow multiple params in inspect
- * Client: Print the container id before the hijack in `docker run`
- * Registry: add regexp check on repo's name
- * Registry: Move auth to the client
- * Runtime: improved image removal to garbage-collect unreferenced parents
- * Vagrantfile: Add the rest api port to vagrantfile's port_forward
- * Upgrade to Go 1.1
- - Builder: don't ignore last line in Dockerfile when it doesn't end with \n
- - Registry: Remove login check on pull
+
+#### Remote Api
+
++ Add flag to enable cross domain requests
++ Add images and containers sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure dns configuration host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow docker run <name>:<id>
++ You can now specify public port (ex: -p 80:4500)
+* Improved image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container id before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo's name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the rest api port to vagrantfile's port_forward
+* Upgrade to Go 1.1
+- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
 
 ## 0.4.0 (2013-06-03)
- + Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
- + Introducing Remote API: control Docker programmatically using a simple HTTP/json API
- * Runtime: various reliability and usability improvements
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ control Docker programmatically using a simple HTTP/json API
+
+#### Runtime
+
+* Various reliability and usability improvements
 
 ## 0.3.4 (2013-05-30)
- + Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
- + Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
- + Runtime: interactive TTYs correctly handle window resize
- * Runtime: fix how configuration is merged between layers
- + Remote API: split stdout and stderr on 'docker run'
- + Remote API: optionally listen on a different IP and port (use at your own risk)
- * Documentation: improved install instructions.
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container.
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improved install instructions.
 
 ## 0.3.3 (2013-05-23)
- - Registry: Fix push regression
- - Various bugfixes
+
+- Registry: Fix push regression
+- Various bugfixes
 
 ## 0.3.2 (2013-05-09)
- * Runtime: Store the actual archive on commit
- * Registry: Improve the checksum process
- * Registry: Use the size to have a good progress bar while pushing
- * Registry: Use the actual archive if it exists in order to speed up the push
- - Registry: Fix error 400 on push
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
 
 ## 0.3.1 (2013-05-08)
- + Builder: Implement the autorun capability within docker builder
- + Builder: Add caching to docker builder
- + Builder: Add support for docker builder with native API as top level command
- + Runtime: Add go version to debug infos
- + Builder: Implement ENV within docker builder
- + Registry: Add docker search top level command in order to search a repository
- + Images: output graph of images to dot (graphviz)
- + Documentation: new introduction and high-level overview
- + Documentation: Add the documentation for docker builder
- + Website: new high-level overview
- - Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
- - Images: fix ByParent function
- - Builder: Check the command existance prior create and add Unit tests for the case
- - Registry: Fix pull for official images with specific tag
- - Registry: Fix issue when login in with a different user and trying to push
- - Documentation: CSS fix for docker documentation to make REST API docs look better.
- - Documentation: Fixed CouchDB example page header mistake
- - Documentation: fixed README formatting
- * Registry: Improve checksum - async calculation
- * Runtime: kernel version - don't show the dash if flavor is empty
- * Documentation: updated www.docker.io website.
- * Builder: use any whitespaces instead of tabs
- * Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top level command
++ Implement ENV within docker builder
+- Check the command existence prior to create and add unit tests for the case
+* use any whitespaces instead of tabs
+
+#### Runtime
+
++ Add go version to debug infos
+* Kernel version - don't show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top level command in order to search a repository
+- Fix pull for official images with specific tag
+- Fix issue when login in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better.
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
+
+#### Other
+
++ Website: new high-level overview
+- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+* Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker
 
 ## 0.3.0 (2013-05-06)
- + Registry: Implement the new registry
- + Documentation: new example: sharing data between 2 couchdb databases
- - Runtime: Fix the command existance check
- - Runtime: strings.Split may return an empty string on no match
- - Runtime: Fix an index out of range crash if cgroup memory is not
- * Documentation: Various improvments
- * Vagrant: Use only one deb line in /etc/apt
+
+#### Runtime
+
+- Fix the command existence check
+- strings.Split may return an empty string on no match
+- Fix an index out of range crash if cgroup memory is not
+
+#### Documentation
+
+* Various improvements
++ New example: sharing data between 2 couchdb databases
+
+#### Other
+
+* Vagrant: Use only one deb line in /etc/apt
++ Registry: Implement the new registry
 
 ## 0.2.2 (2013-05-03)
- + Support for data volumes ('docker run -v=PATH')
- + Share data volumes between containers ('docker run -volumes-from')
- + Improved documentation
- * Upgrade to Go 1.0.3
- * Various upgrades to the dev environment for contributors
+
++ Support for data volumes ('docker run -v=PATH')
++ Share data volumes between containers ('docker run -volumes-from')
++ Improved documentation
+* Upgrade to Go 1.0.3
+* Various upgrades to the dev environment for contributors
 
 ## 0.2.1 (2013-05-01)
- + 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
- * Improve install process on Vagrant
- + New Dockerfile operation: "maintainer"
- + New Dockerfile operation: "expose"
- + New Dockerfile operation: "cmd"
- + Contrib script to build a Debian base layer
- + 'docker -d -r': restart crashed containers at daemon startup
- * Runtime: improve test coverage
+
++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+* Improve install process on Vagrant
++ New Dockerfile operation: "maintainer"
++ New Dockerfile operation: "expose"
++ New Dockerfile operation: "cmd"
++ Contrib script to build a Debian base layer
++ 'docker -d -r': restart crashed containers at daemon startup
+* Runtime: improve test coverage
 
 ## 0.2.0 (2013-04-23)
- - Runtime: ghost containers can be killed and waited for
- * Documentation: update install intructions
- - Packaging: fix Vagrantfile
- - Development: automate releasing binaries and ubuntu packages
- + Add a changelog
- - Various bugfixes
+
+- Runtime: ghost containers can be killed and waited for
+* Documentation: update install instructions
+- Packaging: fix Vagrantfile
+- Development: automate releasing binaries and ubuntu packages
++ Add a changelog
+- Various bugfixes
 
 ## 0.1.8 (2013-04-22)
- - Dynamically detect cgroup capabilities
- - Issue stability warning on kernels <3.8
- - 'docker push' buffers on disk instead of memory
- - Fix 'docker diff' for removed files
- - Fix 'docker stop' for ghost containers
- - Fix handling of pidfile
- - Various bugfixes and stability improvements
+
+- Dynamically detect cgroup capabilities
+- Issue stability warning on kernels <3.8
+- 'docker push' buffers on disk instead of memory
+- Fix 'docker diff' for removed files
+- Fix 'docker stop' for ghost containers
+- Fix handling of pidfile
+- Various bugfixes and stability improvements
 
 ## 0.1.7 (2013-04-18)
- - Container ports are available on localhost
- - 'docker ps' shows allocated TCP ports
- - Contributors can run 'make hack' to start a continuous integration VM
- - Streamline ubuntu packaging & uploading
- - Various bugfixes and stability improvements
+
+- Container ports are available on localhost
+- 'docker ps' shows allocated TCP ports
+- Contributors can run 'make hack' to start a continuous integration VM
+- Streamline ubuntu packaging & uploading
+- Various bugfixes and stability improvements
 
 ## 0.1.6 (2013-04-17)
- - Record the author an image with 'docker commit -author'
+
+- Record the author of an image with 'docker commit -author'
 
 ## 0.1.5 (2013-04-17)
- - Disable standalone mode
- - Use a custom DNS resolver with 'docker -d -dns'
- - Detect ghost containers
- - Improve diagnosis of missing system capabilities
- - Allow disabling memory limits at compile time
- - Add debian packaging
- - Documentation: installing on Arch Linux
- - Documentation: running Redis on docker
- - Fixed lxc 0.9 compatibility
- - Automatically load aufs module
- - Various bugfixes and stability improvements
+
+- Disable standalone mode
+- Use a custom DNS resolver with 'docker -d -dns'
+- Detect ghost containers
+- Improve diagnosis of missing system capabilities
+- Allow disabling memory limits at compile time
+- Add debian packaging
+- Documentation: installing on Arch Linux
+- Documentation: running Redis on docker
+- Fixed lxc 0.9 compatibility
+- Automatically load aufs module
+- Various bugfixes and stability improvements
 
 ## 0.1.4 (2013-04-09)
- - Full support for TTY emulation
- - Detach from a TTY session with the escape sequence `C-p C-q`
- - Various bugfixes and stability improvements
- - Minor UI improvements
- - Automatically create our own bridge interface 'docker0'
+
+- Full support for TTY emulation
+- Detach from a TTY session with the escape sequence `C-p C-q`
+- Various bugfixes and stability improvements
+- Minor UI improvements
+- Automatically create our own bridge interface 'docker0'
 
 ## 0.1.3 (2013-04-04)
- - Choose TCP frontend port with '-p :PORT'
- - Layer format is versioned
- - Major reliability improvements to the process manager
- - Various bugfixes and stability improvements
+
+- Choose TCP frontend port with '-p :PORT'
+- Layer format is versioned
+- Major reliability improvements to the process manager
+- Various bugfixes and stability improvements
 
 ## 0.1.2 (2013-04-03)
- - Set container hostname with 'docker run -h'
- - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- - Various bugfixes and stability improvements
- - UI polish
- - Progress bar on push/pull
- - Use XZ compression by default
- - Make IP allocator lazy
+
+- Set container hostname with 'docker run -h'
+- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
+- Various bugfixes and stability improvements
+- UI polish
+- Progress bar on push/pull
+- Use XZ compression by default
+- Make IP allocator lazy
 
 ## 0.1.1 (2013-03-31)
- - Display shorthand IDs for convenience
- - Stabilize process management
- - Layers can include a commit message
- - Simplified 'docker attach'
- - Fixed support for re-attaching
- - Various bugfixes and stability improvements
- - Auto-download at run
- - Auto-login on push
- - Beefed up documentation
+
+- Display shorthand IDs for convenience
+- Stabilize process management
+- Layers can include a commit message
+- Simplified 'docker attach'
+- Fixed support for re-attaching
+- Various bugfixes and stability improvements
+- Auto-download at run
+- Auto-login on push
+- Beefed up documentation
 
 ## 0.1.0 (2013-03-23)
- - First release
- - Implement registry in order to push/pull images
- - TCP port allocation
- - Fix termcaps on Linux
- - Add documentation
- - Add Vagrant support with Vagrantfile
- - Add unit tests
- - Add repository/tags to ease image management
- - Improve the layer implementation
+
+Initial public release
+
+- Implement registry in order to push/pull images
+- TCP port allocation
+- Fix termcaps on Linux
+- Add documentation
+- Add Vagrant support with Vagrantfile
+- Add unit tests
+- Add repository/tags to ease image management
+- Improve the layer implementation

+ 7 - 7
Dockerfile

@@ -4,24 +4,24 @@
 #
 # # Assemble the full dev environment. This is slow the first time.
 # docker build -t docker .
-# # Apparmor messes with privileged mode: disable it
-# /etc/init.d/apparmor stop ; /etc/init.d/apparmor teardown
 #
 # # Mount your source in an interactive container for quick testing:
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -lxc-conf=lxc.aa_profile=unconfined -i -t docker bash
-#
+# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash
 #
 # # Run the test suite:
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test
+# docker run -privileged docker hack/make.sh test
 #
 # # Publish a release:
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined \
+# docker run -privileged \
 #  -e AWS_S3_BUCKET=baz \
 #  -e AWS_ACCESS_KEY=foo \
 #  -e AWS_SECRET_KEY=bar \
 #  -e GPG_PASSPHRASE=gloubiboulga \
 #  docker hack/release.sh
 #
+# Note: Apparmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
 
 docker-version 0.6.1
 from	ubuntu:12.04
@@ -36,7 +36,7 @@ run	apt-get install -y -q mercurial
 run apt-get install -y -q build-essential libsqlite3-dev
 
 # Install Go
-run	curl -s https://go.googlecode.com/files/go1.2rc2.src.tar.gz | tar -v -C /usr/local -xz
+run	curl -s https://go.googlecode.com/files/go1.2rc3.src.tar.gz | tar -v -C /usr/local -xz
 env	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
 env	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
 run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std

+ 1 - 1
VERSION

@@ -1 +1 @@
-0.6.5
+0.6.6-dev

+ 3 - 1
api.go

@@ -5,6 +5,7 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/utils"
 	"github.com/gorilla/mux"
@@ -235,6 +236,7 @@ func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 	}
 	w.Header().Set("Content-Type", "application/json")
 	wf := utils.NewWriteFlusher(w)
+	wf.Flush()
 	if since != 0 {
 		// If since, send previous events that happened after the timestamp
 		for _, event := range srv.events {
@@ -905,7 +907,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 			return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
 		}
 
-		c, err := Tar(root, Bzip2)
+		c, err := archive.Tar(root, archive.Bzip2)
 		if err != nil {
 			return err
 		}

+ 1 - 5
api_params.go

@@ -5,6 +5,7 @@ type APIHistory struct {
 	Tags      []string `json:",omitempty"`
 	Created   int64
 	CreatedBy string `json:",omitempty"`
+	Size      int64
 }
 
 type APIImages struct {
@@ -77,11 +78,6 @@ type APIContainersOld struct {
 	SizeRootFs int64
 }
 
-type APISearch struct {
-	Name        string
-	Description string
-}
-
 type APIID struct {
 	ID string `json:"Id"`
 }

+ 7 - 14
api_test.go

@@ -499,8 +499,7 @@ func TestGetContainersTop(t *testing.T) {
 		container.WaitTimeout(2 * time.Second)
 	}()
 
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -704,8 +703,7 @@ func TestPostContainersKill(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -747,8 +745,7 @@ func TestPostContainersRestart(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -855,8 +852,7 @@ func TestPostContainersStop(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -903,8 +899,7 @@ func TestPostContainersWait(t *testing.T) {
 	}
 	defer runtime.Destroy(container)
 
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -947,8 +942,7 @@ func TestPostContainersAttach(t *testing.T) {
 	defer runtime.Destroy(container)
 
 	// Start the process
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -1037,8 +1031,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
 	defer runtime.Destroy(container)
 
 	// Start the process
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 

+ 1 - 0
archive/MAINTAINERS

@@ -0,0 +1 @@
+Michael Crosby <michael@crosbymichael.com> (@crosbymichael)

+ 1 - 1
archive.go → archive/archive.go

@@ -1,4 +1,4 @@
-package docker
+package archive
 
 import (
 	"archive/tar"

+ 1 - 1
archive_test.go → archive/archive_test.go

@@ -1,4 +1,4 @@
-package docker
+package archive
 
 import (
 	"bytes"

+ 9 - 6
buildfile.go

@@ -3,6 +3,7 @@ package docker
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -64,6 +65,9 @@ func (b *buildFile) CmdFrom(name string) error {
 	}
 	b.image = image.ID
 	b.config = &Config{}
+	if image.Config != nil {
+		b.config = image.Config
+	}
 	if b.config.Env == nil || len(b.config.Env) == 0 {
 		b.config.Env = append(b.config.Env, "HOME=/", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
 	}
@@ -291,17 +295,17 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
 		return fmt.Errorf("%s: no such file or directory", orig)
 	}
 	if fi.IsDir() {
-		if err := CopyWithTar(origPath, destPath); err != nil {
+		if err := archive.CopyWithTar(origPath, destPath); err != nil {
 			return err
 		}
 		// First try to unpack the source as an archive
-	} else if err := UntarPath(origPath, destPath); err != nil {
+	} else if err := archive.UntarPath(origPath, destPath); err != nil {
 		utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err)
 		// If that fails, just copy it as a regular file
 		if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
 			return err
 		}
-		if err := CopyWithTar(origPath, destPath); err != nil {
+		if err := archive.CopyWithTar(origPath, destPath); err != nil {
 			return err
 		}
 	}
@@ -387,8 +391,7 @@ func (b *buildFile) run() (string, error) {
 	}
 
 	//start the container
-	hostConfig := &HostConfig{}
-	if err := c.Start(hostConfig); err != nil {
+	if err := c.Start(); err != nil {
 		return "", err
 	}
 
@@ -473,7 +476,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	if err := Untar(context, name); err != nil {
+	if err := archive.Untar(context, name); err != nil {
 		return "", err
 	}
 	defer os.RemoveAll(name)

+ 38 - 1
buildfile_test.go

@@ -2,6 +2,7 @@ package docker
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"io/ioutil"
 	"net"
 	"net/http"
@@ -12,7 +13,7 @@ import (
 
 // mkTestContext generates a build context from the contents of the provided dockerfile.
 // This context is suitable for use as an argument to BuildFile.Build()
-func mkTestContext(dockerfile string, files [][2]string, t *testing.T) Archive {
+func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive {
 	context, err := mkBuildContext(dockerfile, files)
 	if err != nil {
 		t.Fatal(err)
@@ -541,3 +542,39 @@ func TestBuildADDFileNotFound(t *testing.T) {
 		t.Fail()
 	}
 }
+
+func TestBuildInheritance(t *testing.T) {
+	runtime, err := newTestRuntime("")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer nuke(runtime)
+
+	srv := &Server{
+		runtime:     runtime,
+		pullingPool: make(map[string]struct{}),
+		pushingPool: make(map[string]struct{}),
+	}
+
+	img := buildImage(testContextTemplate{`
+            from {IMAGE}
+            expose 4243
+            `,
+		nil, nil}, t, srv, true)
+
+	img2 := buildImage(testContextTemplate{fmt.Sprintf(`
+            from %s
+            entrypoint ["/bin/echo"]
+            `, img.ID),
+		nil, nil}, t, srv, true)
+
+	// from child
+	if img2.Config.Entrypoint[0] != "/bin/echo" {
+		t.Fail()
+	}
+
+	// from parent
+	if img.Config.PortSpecs[0] != "4243" {
+		t.Fail()
+	}
+}

+ 92 - 41
commands.go

@@ -9,6 +9,7 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/term"
@@ -137,7 +138,7 @@ func (cli *DockerCli) CmdInsert(args ...string) error {
 
 // mkBuildContext returns an archive of an empty context with the contents
 // of `dockerfile` at the path ./Dockerfile
-func mkBuildContext(dockerfile string, files [][2]string) (Archive, error) {
+func mkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
 	buf := new(bytes.Buffer)
 	tw := tar.NewWriter(buf)
 	files = append(files, [2]string{"Dockerfile", dockerfile})
@@ -175,7 +176,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	}
 
 	var (
-		context  Archive
+		context  archive.Archive
 		isRemote bool
 		err      error
 	)
@@ -194,7 +195,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		if _, err := os.Stat(cmd.Arg(0)); err != nil {
 			return err
 		}
-		context, err = Tar(cmd.Arg(0), Uncompressed)
+		context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
 	}
 	var body io.Reader
 	// Setup an upload progress bar
@@ -491,7 +492,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 }
 
 func (cli *DockerCli) CmdStop(args ...string) error {
-	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
+	cmd := Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
 	nSeconds := cmd.Int("t", 10, "Number of seconds to wait for the container to stop before killing it.")
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -654,7 +655,11 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 		if err != nil {
 			obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
 			if err != nil {
-				fmt.Fprintf(cli.err, "No such image or container: %s\n", name)
+				if strings.Contains(err.Error(), "No such") {
+					fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
+				} else {
+					fmt.Fprintf(cli.err, "%s", err)
+				}
 				status = 1
 				continue
 			}
@@ -667,9 +672,11 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 		}
 		indented.WriteString(",")
 	}
-	// Remove trailling ','
-	indented.Truncate(indented.Len() - 1)
 
+	if indented.Len() > 0 {
+		// Remove trailing ','
+		indented.Truncate(indented.Len() - 1)
+	}
 	fmt.Fprintf(cli.out, "[")
 	if _, err := io.Copy(cli.out, indented); err != nil {
 		return err
@@ -682,7 +689,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 }
 
 func (cli *DockerCli) CmdTop(args ...string) error {
-	cmd := Subcmd("top", "CONTAINER", "Lookup the running processes of a container")
+	cmd := Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -788,7 +795,10 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
 }
 
 func (cli *DockerCli) CmdHistory(args ...string) error {
-	cmd := Subcmd("history", "IMAGE", "Show the history of an image")
+	cmd := Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image")
+	quiet := cmd.Bool("q", false, "only show numeric IDs")
+	noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
+
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -807,14 +817,35 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
 	if err != nil {
 		return err
 	}
+
 	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-	fmt.Fprintln(w, "ID\tCREATED\tCREATED BY")
+	if !*quiet {
+		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
+	}
 
 	for _, out := range outs {
-		if out.Tags != nil {
-			out.ID = out.Tags[0]
+		if !*quiet {
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\t", out.ID)
+			} else {
+				fmt.Fprintf(w, "%s\t", utils.TruncateID(out.ID))
+			}
+
+			fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))))
+
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\t", out.CreatedBy)
+			} else {
+				fmt.Fprintf(w, "%s\t", utils.Trunc(out.CreatedBy, 45))
+			}
+			fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
+		} else {
+			if *noTrunc {
+				fmt.Fprintln(w, out.ID)
+			} else {
+				fmt.Fprintln(w, utils.TruncateID(out.ID))
+			}
 		}
-		fmt.Fprintf(w, "%s \t%s ago\t%s\n", out.ID, utils.HumanDuration(time.Now().Sub(time.Unix(out.Created, 0))), out.CreatedBy)
 	}
 	w.Flush()
 	return nil
@@ -852,7 +883,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
 
 // 'docker kill NAME' kills a running container
 func (cli *DockerCli) CmdKill(args ...string) error {
-	cmd := Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container")
+	cmd := Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL)")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -873,7 +904,7 @@ func (cli *DockerCli) CmdKill(args ...string) error {
 }
 
 func (cli *DockerCli) CmdImport(args ...string) error {
-	cmd := Subcmd("import", "URL|- [REPOSITORY [TAG]]", "Create a new filesystem image from the contents of a tarball(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz).")
+	cmd := Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create a new filesystem image from the contents of a tarball(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz).")
 
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -882,7 +913,8 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 		cmd.Usage()
 		return nil
 	}
-	src, repository, tag := cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	src := cmd.Arg(0)
+	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
 	v := url.Values{}
 	v.Set("repo", repository)
 	v.Set("tag", tag)
@@ -1062,7 +1094,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 
 		w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 		if !*quiet {
-			fmt.Fprintln(w, "REPOSITORY\tTAG\tID\tCREATED\tSIZE")
+			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tSIZE")
 		}
 
 		for _, out := range outs {
@@ -1155,7 +1187,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 	}
 	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 	if !*quiet {
-		fmt.Fprint(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
+		fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
 		if *size {
 			fmt.Fprintln(w, "\tSIZE")
 		} else {
@@ -1199,14 +1231,16 @@ func (cli *DockerCli) CmdPs(args ...string) error {
 }
 
 func (cli *DockerCli) CmdCommit(args ...string) error {
-	cmd := Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY [TAG]]", "Create a new image from a container's changes")
+	cmd := Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
 	flComment := cmd.String("m", "", "Commit message")
 	flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
 	flConfig := cmd.String("run", "", "Config automatically applied when the image is run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
-	name, repository, tag := cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
+	name := cmd.Arg(0)
+	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
 	if name == "" {
 		cmd.Usage()
 		return nil
@@ -1316,8 +1350,18 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		return nil
 	}
 	name := cmd.Arg(0)
+	body, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
+	if err != nil {
+		return err
+	}
 
-	if err := cli.hijack("POST", "/containers/"+name+"/attach?logs=1&stdout=1&stderr=1", false, nil, cli.out, cli.err, nil); err != nil {
+	container := &Container{}
+	err = json.Unmarshal(body, container)
+	if err != nil {
+		return err
+	}
+
+	if err := cli.hijack("POST", "/containers/"+name+"/attach?logs=1&stdout=1&stderr=1", container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
 		return err
 	}
 	return nil
@@ -1379,8 +1423,10 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 }
 
 func (cli *DockerCli) CmdSearch(args ...string) error {
-	cmd := Subcmd("search", "NAME", "Search the docker index for images")
+	cmd := Subcmd("search", "TERM", "Search the docker index for images")
 	noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
+	trusted := cmd.Bool("trusted", false, "Only show trusted builds")
+	stars := cmd.Int("stars", 0, "Only displays with at least xxx stars")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1396,27 +1442,32 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
 		return err
 	}
 
-	outs := []APISearch{}
+	outs := []registry.SearchResult{}
 	err = json.Unmarshal(body, &outs)
 	if err != nil {
 		return err
 	}
-	fmt.Fprintf(cli.out, "Found %d results matching your query (\"%s\")\n", len(outs), cmd.Arg(0))
-	w := tabwriter.NewWriter(cli.out, 33, 1, 3, ' ', 0)
-	fmt.Fprintf(w, "NAME\tDESCRIPTION\n")
-	_, width := cli.getTtySize()
-	if width == 0 {
-		width = 45
-	} else {
-		width = width - 33 //remote the first column
-	}
+	w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
+	fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n")
 	for _, out := range outs {
+		if (*trusted && !out.IsTrusted) || (*stars > out.StarCount) {
+			continue
+		}
 		desc := strings.Replace(out.Description, "\n", " ", -1)
 		desc = strings.Replace(desc, "\r", " ", -1)
-		if !*noTrunc && len(desc) > width {
-			desc = utils.Trunc(desc, width-3) + "..."
+		if !*noTrunc && len(desc) > 45 {
+			desc = utils.Trunc(desc, 42) + "..."
+		}
+		fmt.Fprintf(w, "%s\t%s\t%d\t", out.Name, desc, out.StarCount)
+		if out.IsOfficial {
+			fmt.Fprint(w, "[OK]")
+
 		}
-		fmt.Fprintf(w, "%s\t%s\n", out.Name, desc)
+		fmt.Fprint(w, "\t")
+		if out.IsTrusted {
+			fmt.Fprint(w, "[OK]")
+		}
+		fmt.Fprint(w, "\n")
 	}
 	w.Flush()
 	return nil
@@ -1484,7 +1535,7 @@ func (opts PathOpts) Set(val string) error {
 }
 
 func (cli *DockerCli) CmdTag(args ...string) error {
-	cmd := Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY [TAG]", "Tag an image into a repository")
+	cmd := Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
 	force := cmd.Bool("f", false, "Force")
 	if err := cmd.Parse(args); err != nil {
 		return nil
@@ -1495,10 +1546,10 @@ func (cli *DockerCli) CmdTag(args ...string) error {
 	}
 
 	v := url.Values{}
-	v.Set("repo", cmd.Arg(1))
-	if cmd.NArg() == 3 {
-		v.Set("tag", cmd.Arg(2))
-	}
+	repository, tag := utils.ParseRepositoryTag(cmd.Arg(1))
+
+	v.Set("repo", repository)
+	v.Set("tag", tag)
 
 	if *force {
 		v.Set("force", "1")
@@ -1749,7 +1800,7 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 
 	if statusCode == 200 {
 		r := bytes.NewReader(data)
-		if err := Untar(r, copyData.HostPath); err != nil {
+		if err := archive.Untar(r, copyData.HostPath); err != nil {
 			return err
 		}
 	}

+ 54 - 0
commands_test.go

@@ -645,3 +645,57 @@ func TestRunAutoRemove(t *testing.T) {
 		t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
 	}
 }
+
+func TestCmdLogs(t *testing.T) {
+	cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
+		t.Fatal(err)
+	}
+	if err := cli.CmdRun("-t", unitTestImageID, "sh", "-c", "ls -l"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := cli.CmdLogs(globalRuntime.List()[0].ID); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Expected behaviour: using / as a bind mount source should throw an error
+func TestRunErrorBindMountRootSource(t *testing.T) {
+
+	cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		if err := cli.CmdRun("-v", "/:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
+			t.Fatal("should have failed to run when using / as a source for the bind mount")
+		}
+	}()
+
+	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+		<-c
+	})
+}
+
+// Expected behaviour: error out when attempting to bind mount non-existing source paths
+func TestRunErrorBindNonExistingSource(t *testing.T) {
+
+	cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
+	defer cleanup(globalRuntime)
+
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
+			t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount")
+		}
+	}()
+
+	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+		<-c
+	})
+}

+ 26 - 1
config.go

@@ -2,11 +2,13 @@ package docker
 
 import (
 	"net"
+	"github.com/dotcloud/docker/engine"
 )
 
+// FIXME: separate runtime configuration from http api configuration
 type DaemonConfig struct {
 	Pidfile                     string
-	GraphPath                   string
+	Root                        string
 	ProtoAddresses              []string
 	AutoRestart                 bool
 	EnableCors                  bool
@@ -16,3 +18,26 @@ type DaemonConfig struct {
 	DefaultIp                   net.IP
 	InterContainerCommunication bool
 }
+
+// ConfigFromJob creates and returns a new DaemonConfig object
+// by parsing the contents of a job's environment.
+func ConfigFromJob(job *engine.Job) *DaemonConfig {
+	var config DaemonConfig
+	config.Pidfile = job.Getenv("Pidfile")
+	config.Root = job.Getenv("Root")
+	config.AutoRestart = job.GetenvBool("AutoRestart")
+	config.EnableCors = job.GetenvBool("EnableCors")
+	if dns := job.Getenv("Dns"); dns != "" {
+		config.Dns = []string{dns}
+	}
+	config.EnableIptables = job.GetenvBool("EnableIptables")
+	if br := job.Getenv("BridgeIface"); br != "" {
+		config.BridgeIface = br
+	} else {
+		config.BridgeIface = DefaultNetworkBridge
+	}
+	config.ProtoAddresses = job.GetenvList("ProtoAddresses")
+	config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
+	config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
+	return &config
+}

+ 138 - 84
container.go

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/term"
 	"github.com/dotcloud/docker/utils"
 	"github.com/kr/pty"
@@ -59,11 +60,15 @@ type Container struct {
 	Volumes  map[string]string
 	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
 	// Easier than migrating older container configs :)
-	VolumesRW map[string]bool
+	VolumesRW  map[string]bool
+	hostConfig *HostConfig
 
 	activeLinks map[string]*Link
 }
 
+// Note: the Config structure should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
 type Config struct {
 	Hostname        string
 	Domainname      string
@@ -88,15 +93,16 @@ type Config struct {
 	WorkingDir      string
 	Entrypoint      []string
 	NetworkDisabled bool
-	Privileged      bool
 }
 
 type HostConfig struct {
 	Binds           []string
 	ContainerIDFile string
 	LxcConf         []KeyValuePair
+	Privileged      bool
 	PortBindings    map[Port][]PortBinding
 	Links           []string
+	PublishAllPorts bool
 }
 
 type BindMap struct {
@@ -168,6 +174,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	flAutoRemove := cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
 	cmd.Bool("sig-proxy", true, "Proxify all received signal to the process (even in non-tty mode)")
 	cmd.String("name", "", "Assign a name to the container")
+	flPublishAll := cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")
 
 	if capabilities != nil && *flMemory > 0 && !capabilities.MemoryLimit {
 		//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
@@ -226,12 +233,27 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		}
 	}
 
+	envs := []string{}
+
+	for _, env := range flEnv {
+		arr := strings.Split(env, "=")
+		if len(arr) > 1 {
+			envs = append(envs, env)
+		} else {
+			v := os.Getenv(env)
+			envs = append(envs, env+"="+v)
+		}
+	}
+
 	var binds []string
 
 	// add any bind targets to the list of container volumes
 	for bind := range flVolumes {
 		arr := strings.Split(bind, ":")
 		if len(arr) > 1 {
+			if arr[0] == "/" {
+				return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
+			}
 			dstDir := arr[1]
 			flVolumes[dstDir] = struct{}{}
 			binds = append(binds, bind)
@@ -285,7 +307,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	}
 
 	config := &Config{
-		Hostname:        *flHostname,
+		Hostname:        hostname,
 		Domainname:      domainname,
 		PortSpecs:       nil, // Deprecated
 		ExposedPorts:    ports,
@@ -298,14 +320,13 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		AttachStdin:     flAttach.Get("stdin"),
 		AttachStdout:    flAttach.Get("stdout"),
 		AttachStderr:    flAttach.Get("stderr"),
-		Env:             flEnv,
+		Env:             envs,
 		Cmd:             runCmd,
 		Dns:             flDns,
 		Image:           image,
 		Volumes:         flVolumes,
 		VolumesFrom:     strings.Join(flVolumesFrom, ","),
 		Entrypoint:      entrypoint,
-		Privileged:      *flPrivileged,
 		WorkingDir:      *flWorkingDir,
 	}
 
@@ -313,8 +334,10 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		Binds:           binds,
 		ContainerIDFile: *flContainerIDFile,
 		LxcConf:         lxcConf,
+		Privileged:      *flPrivileged,
 		PortBindings:    portBindings,
 		Links:           flLinks,
+		PublishAllPorts: *flPublishAll,
 	}
 
 	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
@@ -367,11 +390,22 @@ func (settings *NetworkSettings) PortMappingAPI() []APIPort {
 
 // Inject the io.Reader at the given path. Note: do not close the reader
 func (container *Container) Inject(file io.Reader, pth string) error {
+	// Return error if path exists
+	if _, err := os.Stat(path.Join(container.rwPath(), pth)); err == nil {
+		// Since err is nil, the path could be stat'd and it exists
+		return fmt.Errorf("%s exists", pth)
+	} else if !os.IsNotExist(err) {
+		// The only error we expect from Stat is "file does not exist";
+		// anything else is a real failure, so return it.
+
+		return err
+	}
+
 	// Make sure the directory exists
 	if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
 		return err
 	}
-	// FIXME: Handle permissions/already existing dest
+
 	dest, err := os.Create(path.Join(container.rwPath(), pth))
 	if err != nil {
 		return err
@@ -400,7 +434,7 @@ func (container *Container) FromDisk() error {
 	if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
 		return err
 	}
-	return nil
+	return container.readHostConfig()
 }
 
 func (container *Container) ToDisk() (err error) {
@@ -408,44 +442,53 @@ func (container *Container) ToDisk() (err error) {
 	if err != nil {
 		return
 	}
-	return ioutil.WriteFile(container.jsonPath(), data, 0666)
+	err = ioutil.WriteFile(container.jsonPath(), data, 0666)
+	if err != nil {
+		return
+	}
+	return container.writeHostConfig()
 }
 
-func (container *Container) ReadHostConfig() (*HostConfig, error) {
+func (container *Container) readHostConfig() error {
+	container.hostConfig = &HostConfig{}
+	// If the hostconfig file does not exist, do not read it.
+	// (We still have to initialize container.hostConfig,
+	// but that's OK, since we just did that above.)
+	_, err := os.Stat(container.hostConfigPath())
+	if os.IsNotExist(err) {
+		return nil
+	}
 	data, err := ioutil.ReadFile(container.hostConfigPath())
 	if err != nil {
-		return &HostConfig{}, err
-	}
-	hostConfig := &HostConfig{}
-	if err := json.Unmarshal(data, hostConfig); err != nil {
-		return &HostConfig{}, err
+		return err
 	}
-	return hostConfig, nil
+	return json.Unmarshal(data, container.hostConfig)
 }
 
-func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
-	data, err := json.Marshal(hostConfig)
+func (container *Container) writeHostConfig() (err error) {
+	data, err := json.Marshal(container.hostConfig)
 	if err != nil {
 		return
 	}
 	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
 }
 
-func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
-	fo, err := os.Create(container.lxcConfigPath())
+func (container *Container) generateEnvConfig(env []string) error {
+	data, err := json.Marshal(env)
 	if err != nil {
 		return err
 	}
-	defer fo.Close()
-	if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
+	return ioutil.WriteFile(container.EnvConfigPath(), data, 0600)
+}
+
+func (container *Container) generateLXCConfig() error {
+	fo, err := os.Create(container.lxcConfigPath())
+	if err != nil {
 		return err
 	}
-	if hostConfig != nil {
-		if err := LxcHostConfigTemplateCompiled.Execute(fo, hostConfig); err != nil {
-			return err
-		}
-	}
-	return nil
+	defer fo.Close()
+	return LxcTemplateCompiled.Execute(fo, container)
 }
 
 func (container *Container) startPty() error {
@@ -640,7 +683,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 	})
 }
 
-func (container *Container) Start(hostConfig *HostConfig) (err error) {
+func (container *Container) Start() (err error) {
 	container.State.Lock()
 	defer container.State.Unlock()
 	defer func() {
@@ -649,10 +692,6 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 		}
 	}()
 
-	if hostConfig == nil { // in docker start of docker restart we want to reuse previous HostConfigFile
-		hostConfig, _ = container.ReadHostConfig()
-	}
-
 	if container.State.Running {
 		return fmt.Errorf("The container %s is already running.", container.ID)
 	}
@@ -662,7 +701,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	if container.runtime.networkManager.disabled {
 		container.Config.NetworkDisabled = true
 	} else {
-		if err := container.allocateNetwork(hostConfig); err != nil {
+		if err := container.allocateNetwork(); err != nil {
 			return err
 		}
 	}
@@ -686,7 +725,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	// Define illegal container destinations
 	illegalDsts := []string{"/", "."}
 
-	for _, bind := range hostConfig.Binds {
+	for _, bind := range container.hostConfig.Binds {
 		// FIXME: factorize bind parsing in parseBind
 		var src, dst, mode string
 		arr := strings.Split(bind, ":")
@@ -796,7 +835,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 				}
 				if len(srcList) == 0 {
 					// If the source volume is empty copy files from the root into the volume
-					if err := CopyWithTar(rootVolPath, srcPath); err != nil {
+					if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
 						return err
 					}
 
@@ -820,7 +859,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 		}
 	}
 
-	if err := container.generateLXCConfig(hostConfig); err != nil {
+	if err := container.generateLXCConfig(); err != nil {
 		return err
 	}
 
@@ -841,17 +880,17 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 		params = append(params, "-u", container.Config.User)
 	}
 
-	if container.Config.Tty {
-		params = append(params, "-e", "TERM=xterm")
+	// Setup environment
+	env := []string{
+		"HOME=/",
+		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+		"container=lxc",
+		"HOSTNAME=" + container.Config.Hostname,
 	}
 
-	// Setup environment
-	params = append(params,
-		"-e", "HOME=/",
-		"-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-		"-e", "container=lxc",
-		"-e", "HOSTNAME="+container.Config.Hostname,
-	)
+	if container.Config.Tty {
+		env = append(env, "TERM=xterm")
+	}
 
 	// Init any links between the parent and children
 	runtime := container.runtime
@@ -887,11 +926,19 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 			}
 
 			for _, envVar := range link.ToEnv() {
-				params = append(params, "-e", envVar)
+				env = append(env, envVar)
 			}
 		}
 	}
 
+	for _, elem := range container.Config.Env {
+		env = append(env, elem)
+	}
+
+	if err := container.generateEnvConfig(env); err != nil {
+		return err
+	}
+
 	if container.Config.WorkingDir != "" {
 		workingDir := path.Clean(container.Config.WorkingDir)
 		utils.Debugf("[working dir] working dir is %s", workingDir)
@@ -905,16 +952,15 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 		)
 	}
 
-	for _, elem := range container.Config.Env {
-		params = append(params, "-e", elem)
-	}
-
 	// Program
 	params = append(params, "--", container.Path)
 	params = append(params, container.Args...)
 
-	container.cmd = exec.Command("lxc-start", params...)
-
+	var lxcStart string = "lxc-start"
+	if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
+		lxcStart = path.Join(container.runtime.config.Root, "lxc-start-unconfined")
+	}
+	container.cmd = exec.Command(lxcStart, params...)
 	// Setup logging of stdout and stderr to disk
 	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
 		return err
@@ -941,8 +987,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 	container.waitLock = make(chan struct{})
 
 	container.ToDisk()
-	container.SaveHostConfig(hostConfig)
-	go container.monitor(hostConfig)
+	go container.monitor()
 
 	defer utils.Debugf("Container running: %v", container.State.Running)
 	// We wait for the container to be fully running.
@@ -979,7 +1024,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
 }
 
 func (container *Container) Run() error {
-	if err := container.Start(&HostConfig{}); err != nil {
+	if err := container.Start(); err != nil {
 		return err
 	}
 	container.Wait()
@@ -992,8 +1037,7 @@ func (container *Container) Output() (output []byte, err error) {
 		return nil, err
 	}
 	defer pipe.Close()
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		return nil, err
 	}
 	output, err = ioutil.ReadAll(pipe)
@@ -1025,19 +1069,14 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
 	return utils.NewBufReader(reader), nil
 }
 
-func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
+func (container *Container) allocateNetwork() error {
 	if container.Config.NetworkDisabled {
 		return nil
 	}
 
 	var iface *NetworkInterface
 	var err error
-	if !container.State.Ghost {
-		iface, err = container.runtime.networkManager.Allocate()
-		if err != nil {
-			return err
-		}
-	} else {
+	if container.State.Ghost {
 		manager := container.runtime.networkManager
 		if manager.disabled {
 			iface = &NetworkInterface{disabled: true}
@@ -1047,18 +1086,30 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 				Gateway: manager.bridgeNetwork.IP,
 				manager: manager,
 			}
-			ipNum := ipToInt(iface.IPNet.IP)
-			manager.ipAllocator.inUse[ipNum] = struct{}{}
+			if iface != nil && iface.IPNet.IP != nil {
+				ipNum := ipToInt(iface.IPNet.IP)
+				manager.ipAllocator.inUse[ipNum] = struct{}{}
+			} else {
+				iface, err = container.runtime.networkManager.Allocate()
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else {
+		iface, err = container.runtime.networkManager.Allocate()
+		if err != nil {
+			return err
 		}
 	}
 
 	if container.Config.PortSpecs != nil {
 		utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", "))
-		if err := migratePortMappings(container.Config, hostConfig); err != nil {
+		if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
 			return err
 		}
 		container.Config.PortSpecs = nil
-		if err := container.SaveHostConfig(hostConfig); err != nil {
+		if err := container.writeHostConfig(); err != nil {
 			return err
 		}
 	}
@@ -1070,8 +1121,8 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 		if container.Config.ExposedPorts != nil {
 			portSpecs = container.Config.ExposedPorts
 		}
-		if hostConfig.PortBindings != nil {
-			bindings = hostConfig.PortBindings
+		if container.hostConfig.PortBindings != nil {
+			bindings = container.hostConfig.PortBindings
 		}
 	} else {
 		if container.NetworkSettings.Ports != nil {
@@ -1086,6 +1137,9 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 
 	for port := range portSpecs {
 		binding := bindings[port]
+		if container.hostConfig.PublishAllPorts && len(binding) == 0 {
+			binding = append(binding, PortBinding{})
+		}
 		for i := 0; i < len(binding); i++ {
 			b := binding[i]
 			nat, err := iface.AllocatePort(port, b)
@@ -1098,7 +1152,7 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
 		}
 		bindings[port] = binding
 	}
-	container.SaveHostConfig(hostConfig)
+	container.writeHostConfig()
 
 	container.NetworkSettings.Ports = bindings
 	container.network = iface
@@ -1134,7 +1188,7 @@ func (container *Container) waitLxc() error {
 	}
 }
 
-func (container *Container) monitor(hostConfig *HostConfig) {
+func (container *Container) monitor() {
 	// Wait for the program to exit
 
 	// If the command does not exist, try to wait via lxc
@@ -1291,11 +1345,7 @@ func (container *Container) Restart(seconds int) error {
 	if err := container.Stop(seconds); err != nil {
 		return err
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
-		return err
-	}
-	return nil
+	return container.Start()
 }
 
 // Wait blocks until the container stops running, then returns its exit code.
@@ -1312,23 +1362,23 @@ func (container *Container) Resize(h, w int) error {
 	return term.SetWinsize(pty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
 }
 
-func (container *Container) ExportRw() (Archive, error) {
-	return Tar(container.rwPath(), Uncompressed)
+func (container *Container) ExportRw() (archive.Archive, error) {
+	return archive.Tar(container.rwPath(), archive.Uncompressed)
 }
 
 func (container *Container) RwChecksum() (string, error) {
-	rwData, err := Tar(container.rwPath(), Xz)
+	rwData, err := archive.Tar(container.rwPath(), archive.Xz)
 	if err != nil {
 		return "", err
 	}
 	return utils.HashData(rwData)
 }
 
-func (container *Container) Export() (Archive, error) {
+func (container *Container) Export() (archive.Archive, error) {
 	if err := container.EnsureMounted(); err != nil {
 		return nil, err
 	}
-	return Tar(container.RootfsPath(), Uncompressed)
+	return archive.Tar(container.RootfsPath(), archive.Uncompressed)
 }
 
 func (container *Container) WaitTimeout(timeout time.Duration) error {
@@ -1416,6 +1466,10 @@ func (container *Container) jsonPath() string {
 	return path.Join(container.root, "config.json")
 }
 
+func (container *Container) EnvConfigPath() string {
+	return path.Join(container.root, "config.env")
+}
+
 func (container *Container) lxcConfigPath() string {
 	return path.Join(container.root, "config.lxc")
 }
@@ -1459,7 +1513,7 @@ func (container *Container) GetSize() (int64, int64) {
 	return sizeRw, sizeRootfs
 }
 
-func (container *Container) Copy(resource string) (Archive, error) {
+func (container *Container) Copy(resource string) (archive.Archive, error) {
 	if err := container.EnsureMounted(); err != nil {
 		return nil, err
 	}
@@ -1477,7 +1531,7 @@ func (container *Container) Copy(resource string) (Archive, error) {
 		filter = []string{path.Base(basePath)}
 		basePath = path.Dir(basePath)
 	}
-	return TarFilter(basePath, Uncompressed, filter)
+	return archive.TarFilter(basePath, archive.Uncompressed, filter)
 }
 
 // Returns true if the container exposes a certain port

+ 75 - 47
container_test.go

@@ -40,7 +40,7 @@ func TestIDFormat(t *testing.T) {
 func TestMultipleAttachRestart(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, hostConfig, _ := mkContainer(
+	container, _ := mkContainer(
 		runtime,
 		[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`;  echo hello; done"},
 		t,
@@ -61,7 +61,7 @@ func TestMultipleAttachRestart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	l1, err := bufio.NewReader(stdout1).ReadString('\n')
@@ -102,7 +102,7 @@ func TestMultipleAttachRestart(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -136,7 +136,7 @@ func TestDiff(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	// Create a container and remove a file
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
 	defer runtime.Destroy(container1)
 
 	// The changelog should be empty and not fail before run. See #1705
@@ -178,7 +178,7 @@ func TestDiff(t *testing.T) {
 	}
 
 	// Create a new container from the commited image
-	container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
+	container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
 	defer runtime.Destroy(container2)
 
 	if err := container2.Run(); err != nil {
@@ -197,7 +197,7 @@ func TestDiff(t *testing.T) {
 	}
 
 	// Create a new container
-	container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
+	container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
 	defer runtime.Destroy(container3)
 
 	if err := container3.Run(); err != nil {
@@ -223,7 +223,7 @@ func TestDiff(t *testing.T) {
 func TestCommitAutoRun(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
 	defer runtime.Destroy(container1)
 
 	if container1.State.Running {
@@ -246,7 +246,7 @@ func TestCommitAutoRun(t *testing.T) {
 	}
 
 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, hostConfig, _ := mkContainer(runtime, []string{img.ID}, t)
+	container2, _ := mkContainer(runtime, []string{img.ID}, t)
 	defer runtime.Destroy(container2)
 	stdout, err := container2.StdoutPipe()
 	if err != nil {
@@ -256,7 +256,7 @@ func TestCommitAutoRun(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
 	container2.Wait()
@@ -283,7 +283,7 @@ func TestCommitRun(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 
-	container1, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
 	defer runtime.Destroy(container1)
 
 	if container1.State.Running {
@@ -306,7 +306,7 @@ func TestCommitRun(t *testing.T) {
 	}
 
 	// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
-	container2, hostConfig, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
+	container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
 	defer runtime.Destroy(container2)
 	stdout, err := container2.StdoutPipe()
 	if err != nil {
@@ -316,7 +316,7 @@ func TestCommitRun(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
 	container2.Wait()
@@ -342,7 +342,7 @@ func TestCommitRun(t *testing.T) {
 func TestStart(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, hostConfig, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
+	container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
 	defer runtime.Destroy(container)
 
 	cStdin, err := container.StdinPipe()
@@ -350,7 +350,7 @@ func TestStart(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -360,7 +360,7 @@ func TestStart(t *testing.T) {
 	if !container.State.Running {
 		t.Errorf("Container should be running")
 	}
-	if err := container.Start(hostConfig); err == nil {
+	if err := container.Start(); err == nil {
 		t.Fatalf("A running container should be able to be started")
 	}
 
@@ -372,7 +372,7 @@ func TestStart(t *testing.T) {
 func TestRun(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container)
 
 	if container.State.Running {
@@ -452,7 +452,7 @@ func TestKillDifferentUser(t *testing.T) {
 	if container.State.Running {
 		t.Errorf("Container shouldn't be running")
 	}
-	if err := container.Start(&HostConfig{}); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -501,7 +501,8 @@ func TestCreateVolume(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(c)
-	if err := c.Start(hc); err != nil {
+	c.hostConfig = hc
+	if err := c.Start(); err != nil {
 		t.Fatal(err)
 	}
 	c.WaitTimeout(500 * time.Millisecond)
@@ -525,8 +526,7 @@ func TestKill(t *testing.T) {
 	if container.State.Running {
 		t.Errorf("Container shouldn't be running")
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -642,8 +642,7 @@ func TestRestartStdin(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := io.WriteString(stdin, "hello world"); err != nil {
@@ -673,7 +672,7 @@ func TestRestartStdin(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	if _, err := io.WriteString(stdin, "hello world #2"); err != nil {
@@ -850,11 +849,10 @@ func TestMultipleContainers(t *testing.T) {
 	defer runtime.Destroy(container2)
 
 	// Start both containers
-	hostConfig := &HostConfig{}
-	if err := container1.Start(hostConfig); err != nil {
+	if err := container1.Start(); err != nil {
 		t.Fatal(err)
 	}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -904,8 +902,7 @@ func TestStdin(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	defer stdin.Close()
@@ -950,8 +947,7 @@ func TestTty(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	defer stdin.Close()
@@ -973,14 +969,15 @@ func TestTty(t *testing.T) {
 }
 
 func TestEnv(t *testing.T) {
+	os.Setenv("TRUE", "false")
+	os.Setenv("TRICKY", "tri\ncky\n")
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
-	container, _, err := runtime.Create(&Config{
-		Image: GetTestImage(runtime).ID,
-		Cmd:   []string{"env"},
-	},
-		"",
-	)
+	config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	container, _, err := runtime.Create(config, "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -991,8 +988,7 @@ func TestEnv(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer stdout.Close()
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		t.Fatal(err)
 	}
 	container.Wait()
@@ -1010,6 +1006,11 @@ func TestEnv(t *testing.T) {
 		"HOME=/",
 		"container=lxc",
 		"HOSTNAME=" + container.ShortID(),
+		"FALSE=true",
+		"TRUE=false",
+		"TRICKY=tri",
+		"cky",
+		"",
 	}
 	sort.Strings(goodEnv)
 	if len(goodEnv) != len(actualEnv) {
@@ -1115,7 +1116,7 @@ func TestLXCConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(container)
-	container.generateLXCConfig(nil)
+	container.generateLXCConfig()
 	grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
 	grepFile(t, container.lxcConfigPath(),
 		fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
@@ -1138,7 +1139,7 @@ func TestCustomLxcConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 	defer runtime.Destroy(container)
-	hostConfig := &HostConfig{LxcConf: []KeyValuePair{
+	container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{
 		{
 			Key:   "lxc.utsname",
 			Value: "docker",
@@ -1149,7 +1150,7 @@ func TestCustomLxcConfig(t *testing.T) {
 		},
 	}}
 
-	container.generateLXCConfig(hostConfig)
+	container.generateLXCConfig()
 	grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
 	grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
 }
@@ -1202,8 +1203,7 @@ func BenchmarkRunParallel(b *testing.B) {
 				return
 			}
 			defer runtime.Destroy(container)
-			hostConfig := &HostConfig{}
-			if err := container.Start(hostConfig); err != nil {
+			if err := container.Start(); err != nil {
 				complete <- err
 				return
 			}
@@ -1247,7 +1247,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
 	defer nuke(r)
 
 	// Add directory not owned by root
-	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
+	container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
 	defer r.Destroy(container1)
 
 	if container1.State.Running {
@@ -1284,7 +1284,7 @@ func TestCopyVolumeContent(t *testing.T) {
 	defer nuke(r)
 
 	// Put some content in a directory of a container and commit it
-	container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
+	container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
 	defer r.Destroy(container1)
 
 	if container1.State.Running {
@@ -1521,9 +1521,9 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
 	defer runtime.Destroy(c)
-	if err := c.Start(hc); err != nil {
+	c.hostConfig = hc
+	if err := c.Start(); err != nil {
 		t.Fatal(err)
 	}
 	c.WaitTimeout(500 * time.Millisecond)
@@ -1652,3 +1652,31 @@ func TestMultipleVolumesFrom(t *testing.T) {
 		t.Fail()
 	}
 }
+
+func TestRestartGhost(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+
+	container, _, err := runtime.Create(
+		&Config{
+			Image:   GetTestImage(runtime).ID,
+			Cmd:     []string{"sh", "-c", "echo -n bar > /test/foo"},
+			Volumes: map[string]struct{}{"/test": {}},
+		},
+		"",
+	)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := container.Kill(); err != nil {
+		t.Fatal(err)
+	}
+
+	container.State.Ghost = true
+	_, err = container.Output()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}

+ 1 - 1
contrib/completion/bash/docker

@@ -426,7 +426,7 @@ _docker_run()
 
 _docker_search()
 {
-	COMPREPLY=( $( compgen -W "-notrunc" -- "$cur" ) )
+	COMPREPLY=( $( compgen -W "-notrunc" "-stars" "-trusted" -- "$cur" ) )
 }
 
 _docker_start()

+ 11 - 0
contrib/desktop-integration/README.txt

@@ -0,0 +1,11 @@
+Desktop Integration
+===================
+
+The ./contrib/desktop-integration directory contains examples of typical
+dockerized desktop applications.
+
+Examples
+========
+
+* Data container: ./data/Dockerfile creates a data image sharing /data volume
+* Firefox: ./firefox/Dockerfile shows a way to dockerize a common multimedia application
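
For quick reference, the commands below stitch together the USAGE comments from
the two Dockerfiles that follow into a single walkthrough. This is only a
sketch: it assumes the images are named "data" and "firefox" exactly as in
those comments, and that each "docker build" is run from the directory holding
the corresponding Dockerfile.

  # Build the data image and create a named data container (in ./data)
  docker build -t data -rm .
  docker run -name firefox-data data true

  # Build the firefox image (in ./firefox)
  docker build -t firefox -rm .

  # Run a dockerized firefox backed by the data container, with host X11 and sound
  docker run -volumes-from firefox-data -v /tmp/.X11-unix:/tmp/.X11-unix \
    -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
    -e DISPLAY=unix$DISPLAY firefox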

+ 38 - 0
contrib/desktop-integration/data/Dockerfile

@@ -0,0 +1,38 @@
+# VERSION:        0.1
+# DESCRIPTION:    Create data image sharing /data volume
+# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# COMMENTS:
+#   This image is used as base for all data containers.
+#   /data volume is owned by sysadmin.
+# USAGE:
+#   # Download data Dockerfile
+#   wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
+#
+#   # Build data image
+#   docker build -t data -rm .
+#
+#   # Create a data container. (eg: firefox-data)
+#   docker run -name firefox-data data true
+#
+#   # List data from it
+#   docker run -volumes-from firefox-data busybox ls -al /data
+
+docker-version 0.6.5
+
+# Smallest base image, just to launch a container
+from busybox
+maintainer	Daniel Mizyrycki <daniel@docker.com>
+
+# Create a regular user
+run echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd
+run echo 'sysadmin:x:1000:' >> /etc/group
+
+# Create directory for that user
+run mkdir /data
+run chown sysadmin.sysadmin /data
+
+# Add content to /data. This will keep sysadmin ownership
+run touch /data/init_volume
+
+# Create /data volume
+VOLUME /data

+ 49 - 0
contrib/desktop-integration/firefox/Dockerfile

@@ -0,0 +1,49 @@
+# VERSION:        0.7
+# DESCRIPTION:    Create firefox container with its dependencies
+# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# COMMENTS:
+#   This file describes how to build a Firefox container with all
+#   dependencies installed. It uses native X11 unix socket and alsa
+#   sound devices. Tested on Debian 7.2
+# USAGE:
+#   # Download Firefox Dockerfile
+#   wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/firefox/Dockerfile
+#
+#   # Build firefox image
+#   docker build -t firefox -rm .
+#
+#   # Run stateful data-on-host firefox. For ephemeral, remove -v /data/firefox:/data
+#   docker run -v /data/firefox:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#     -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
+#     -e DISPLAY=unix$DISPLAY firefox
+#
+#   # To run stateful dockerized data containers
+#   docker run -volumes-from firefox-data -v /tmp/.X11-unix:/tmp/.X11-unix \
+#     -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
+#     -e DISPLAY=unix$DISPLAY firefox
+
+docker-version 0.6.5
+
+# Base docker image
+from tianon/debian:wheezy
+maintainer	Daniel Mizyrycki <daniel@docker.com>
+
+# Install firefox dependencies
+run echo "deb http://ftp.debian.org/debian/ wheezy main contrib" > /etc/apt/sources.list
+run apt-get update
+run DEBIAN_FRONTEND=noninteractive apt-get install -y libXrender1 libasound2 \
+    libdbus-glib-1-2 libgtk2.0-0 libpango1.0-0 libxt6 wget bzip2 sudo
+
+# Install Firefox
+run mkdir /application
+run cd /application; wget -O - \
+    http://ftp.mozilla.org/pub/mozilla.org/firefox/releases/25.0/linux-x86_64/en-US/firefox-25.0.tar.bz2 | tar jx
+
+# create sysadmin account
+run useradd -m -d /data -p saIVpsc0EVTwA sysadmin
+run sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group
+run sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers
+
+# Autorun firefox. -no-remote is necessary to create a new container, as firefox
+# appears to communicate with itself through X11.
+cmd ["/bin/sh", "-c", "/usr/bin/sudo -u sysadmin -H -E /application/firefox/firefox -no-remote"]

+ 1 - 1
contrib/init/systemd/docker.service

@@ -1,5 +1,5 @@
 [Unit]
-Description=Easily create lightweight, portable, self-sufficient containers from any application!
+Description=Docker Application Container Engine 
 Documentation=http://docs.docker.io
 Requires=network.target
 After=multi-user.target

+ 29 - 20
contrib/init/sysvinit/docker

@@ -6,43 +6,52 @@
 # Required-Stop:      $syslog $remote_fs
 # Default-Start:      2 3 4 5
 # Default-Stop:       0 1 6
-# Short-Description:  Linux container runtime
-# Description:        Linux container runtime
+# Short-Description:  Create lightweight, portable, self-sufficient containers.
+# Description:
+#  Docker is an open-source project to easily create lightweight, portable,
+#  self-sufficient containers from any application. The same container that a
+#  developer builds and tests on a laptop can run at scale, in production, on
+#  VMs, bare metal, OpenStack clusters, public clouds and more.
 ### END INIT INFO
 
-DOCKER=/usr/bin/docker
-DOCKER_PIDFILE=/var/run/docker.pid
+BASE=$(basename $0)
+
+DOCKER=/usr/bin/$BASE
+DOCKER_PIDFILE=/var/run/$BASE.pid
 DOCKER_OPTS=
 
 PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
 
-# Check lxc-docker is present
-[ -x $DOCKER ] || (log_failure_msg "docker not present"; exit 1)
-
 # Get lsb functions
 . /lib/lsb/init-functions
 
-if [ -f /etc/default/lxc ]; then
-	. /etc/default/lxc
+if [ -f /etc/default/$BASE ]; then
+	. /etc/default/$BASE
 fi
 
 if [ "$1" = start ] && which initctl >/dev/null && initctl version | grep -q upstart; then
 	exit 1
 fi
 
-check_root_id ()
-{
-	if [ "$(id -u)" != "0" ]; then
-		log_failure_msg "Docker must be run as root"; exit 1
+# Check docker is present
+if [ ! -x $DOCKER ]; then
+	log_failure_msg "$DOCKER not present or not executable"
+	exit 1
+fi
+
+fail_unless_root() {
+	if [ "$(id -u)" != '0' ]; then
+		log_failure_msg "Docker must be run as root"
+		exit 1
 	fi
 }
 
 case "$1" in
 	start)
-		check_root_id || exit 1
-		log_begin_msg "Starting Docker"
+		fail_unless_root
+		log_begin_msg "Starting Docker: $BASE"
 		mount | grep cgroup >/dev/null || mount -t cgroup none /sys/fs/cgroup 2>/dev/null
-		start-stop-daemon --start --background $NO_CLOSE \
+		start-stop-daemon --start --background \
 			--exec "$DOCKER" \
 			--pidfile "$DOCKER_PIDFILE" \
 			-- -d -p "$DOCKER_PIDFILE" \
@@ -51,15 +60,15 @@ case "$1" in
 		;;
 
 	stop)
-		check_root_id || exit 1
-		log_begin_msg "Stopping Docker"
+		fail_unless_root
+		log_begin_msg "Stopping Docker: $BASE"
 		start-stop-daemon --stop \
 			--pidfile "$DOCKER_PIDFILE"
 		log_end_msg $?
 		;;
 
 	restart)
-		check_root_id || exit 1
+		fail_unless_root
 		docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null`
 		[ -n "$docker_pid" ] \
 			&& ps -p $docker_pid > /dev/null 2>&1 \
@@ -68,7 +77,7 @@ case "$1" in
 		;;
 
 	force-reload)
-		check_root_id || exit 1
+		fail_unless_root
 		$0 restart
 		;;
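For reference, a minimal sketch of driving the reworked script; passing extra flags through ``DOCKER_OPTS`` is an assumption based on the variable declared above, not something this hunk shows:

    # Extra daemon flags can go into the defaults file the script sources
    echo 'DOCKER_OPTS="-dns 8.8.8.8"' | sudo tee -a /etc/default/docker

    # Start or restart the daemon through the init script
    sudo service docker start
    sudo service docker restart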
 

+ 17 - 105
docker/docker.go

@@ -6,14 +6,10 @@ import (
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker/sysinit"
 	"github.com/dotcloud/docker/utils"
-	"io/ioutil"
+	"github.com/dotcloud/docker/engine"
 	"log"
-	"net"
 	"os"
-	"os/signal"
-	"strconv"
 	"strings"
-	"syscall"
 )
 
 var (
@@ -34,7 +30,7 @@ func main() {
 	flAutoRestart := flag.Bool("r", true, "Restart previously running containers")
 	bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge. Use 'none' to disable container networking")
 	pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
-	flGraphPath := flag.String("g", "/var/lib/docker", "Path to graph storage base dir.")
+	flRoot := flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime.")
 	flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
 	flDns := flag.String("dns", "", "Set custom dns servers")
 	flHosts := utils.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
@@ -61,10 +57,6 @@ func main() {
 		}
 	}
 
-	bridge := docker.DefaultNetworkBridge
-	if *bridgeName != "" {
-		bridge = *bridgeName
-	}
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
 	}
@@ -75,26 +67,22 @@ func main() {
 			flag.Usage()
 			return
 		}
-		var dns []string
-		if *flDns != "" {
-			dns = []string{*flDns}
-		}
-
-		ip := net.ParseIP(*flDefaultIp)
-
-		config := &docker.DaemonConfig{
-			Pidfile:                     *pidfile,
-			GraphPath:                   *flGraphPath,
-			AutoRestart:                 *flAutoRestart,
-			EnableCors:                  *flEnableCors,
-			Dns:                         dns,
-			EnableIptables:              *flEnableIptables,
-			BridgeIface:                 bridge,
-			ProtoAddresses:              flHosts,
-			DefaultIp:                   ip,
-			InterContainerCommunication: *flInterContainerComm,
+		eng, err := engine.New(*flRoot)
+		if err != nil {
+			log.Fatal(err)
 		}
-		if err := daemon(config); err != nil {
+		job := eng.Job("serveapi")
+		job.Setenv("Pidfile", *pidfile)
+		job.Setenv("Root", *flRoot)
+		job.SetenvBool("AutoRestart", *flAutoRestart)
+		job.SetenvBool("EnableCors", *flEnableCors)
+		job.Setenv("Dns", *flDns)
+		job.SetenvBool("EnableIptables", *flEnableIptables)
+		job.Setenv("BridgeIface", *bridgeName)
+		job.SetenvList("ProtoAddresses", flHosts)
+		job.Setenv("DefaultIp", *flDefaultIp)
+		job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
+		if err := job.Run(); err != nil {
 			log.Fatal(err)
 		}
 	} else {
@@ -114,79 +102,3 @@ func main() {
 func showVersion() {
 	fmt.Printf("Docker version %s, build %s\n", VERSION, GITCOMMIT)
 }
-
-func createPidFile(pidfile string) error {
-	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
-		pid, err := strconv.Atoi(string(pidString))
-		if err == nil {
-			if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
-				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
-			}
-		}
-	}
-
-	file, err := os.Create(pidfile)
-	if err != nil {
-		return err
-	}
-
-	defer file.Close()
-
-	_, err = fmt.Fprintf(file, "%d", os.Getpid())
-	return err
-}
-
-func removePidFile(pidfile string) {
-	if err := os.Remove(pidfile); err != nil {
-		log.Printf("Error removing %s: %s", pidfile, err)
-	}
-}
-
-func daemon(config *docker.DaemonConfig) error {
-	if err := createPidFile(config.Pidfile); err != nil {
-		log.Fatal(err)
-	}
-	defer removePidFile(config.Pidfile)
-
-	server, err := docker.NewServer(config)
-	if err != nil {
-		return err
-	}
-	defer server.Close()
-
-	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
-	go func() {
-		sig := <-c
-		log.Printf("Received signal '%v', exiting\n", sig)
-		server.Close()
-		removePidFile(config.Pidfile)
-		os.Exit(0)
-	}()
-
-	chErrors := make(chan error, len(config.ProtoAddresses))
-	for _, protoAddr := range config.ProtoAddresses {
-		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
-		if protoAddrParts[0] == "unix" {
-			syscall.Unlink(protoAddrParts[1])
-		} else if protoAddrParts[0] == "tcp" {
-			if !strings.HasPrefix(protoAddrParts[1], "127.0.0.1") {
-				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
-			}
-		} else {
-			server.Close()
-			removePidFile(config.Pidfile)
-			log.Fatal("Invalid protocol format.")
-		}
-		go func() {
-			chErrors <- docker.ListenAndServe(protoAddrParts[0], protoAddrParts[1], server, true)
-		}()
-	}
-	for i := 0; i < len(config.ProtoAddresses); i += 1 {
-		err := <-chErrors
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
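To make the flag-to-job mapping above easier to read, a hedged sketch of a daemon invocation using only flags defined in this file (values are illustrative):

    # Each flag value ends up as an environment entry on the "serveapi" job
    sudo docker -d \
        -p /var/run/docker.pid \
        -g /var/lib/docker \
        -b docker0 \
        -dns 8.8.8.8 \
        -api-enable-cors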

+ 19 - 19
docs/sources/api/docker_remote_api_v1.4.rst

@@ -943,36 +943,36 @@ Build an image from Dockerfile via stdin
 
 .. http:post:: /build
 
-   Build an image from Dockerfile via stdin
+    Build an image from Dockerfile via stdin
 
-   **Example request**:
+    **Example request**:
 
-   .. sourcecode:: http
+    .. sourcecode:: http
 
-      POST /build HTTP/1.1
+        POST /build HTTP/1.1
 
-      {{ STREAM }}
+        {{ STREAM }}
 
-   **Example response**:
+    **Example response**:
 
-   .. sourcecode:: http
+    .. sourcecode:: http
 
-      HTTP/1.1 200 OK
+        HTTP/1.1 200 OK
 
-      {{ STREAM }}
+        {{ STREAM }}
 
 
-       The stream must be a tar archive compressed with one of the following algorithms:
-       identity (no compression), gzip, bzip2, xz. The archive must include a file called
-       `Dockerfile` at its root. It may include any number of other files, which will be
-       accessible in the build context (See the ADD build command).
+    The stream must be a tar archive compressed with one of the following algorithms:
+    identity (no compression), gzip, bzip2, xz. The archive must include a file called
+    `Dockerfile` at its root. It may include any number of other files, which will be
+    accessible in the build context (See the ADD build command).
 
-       The Content-type header should be set to "application/tar".
+    The Content-type header should be set to "application/tar".
 
-	:query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
-	:query q: suppress verbose build output
+    :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
+    :query q: suppress verbose build output
     :query nocache: do not use the cache when building the image
-	:statuscode 200: no error
+    :statuscode 200: no error
     :statuscode 500: server error
 
 
@@ -1069,8 +1069,8 @@ Show the docker version information
 		"GoVersion":"go1.0.3"
 	   }
 
-        :statuscode 200: no error
-	:statuscode 500: server error
+    :statuscode 200: no error
+    :statuscode 500: server error
 
 
 Create a new image from a container's changes

+ 18 - 18
docs/sources/api/docker_remote_api_v1.6.rst

@@ -995,36 +995,36 @@ Build an image from Dockerfile via stdin
 
 .. http:post:: /build
 
-   Build an image from Dockerfile via stdin
+    Build an image from Dockerfile via stdin
 
-   **Example request**:
-
-   .. sourcecode:: http
+    **Example request**:
 
-      POST /build HTTP/1.1
+    .. sourcecode:: http
 
-      {{ STREAM }}
+        POST /build HTTP/1.1
 
-   **Example response**:
+        {{ STREAM }}
 
-   .. sourcecode:: http
+    **Example response**:
 
-      HTTP/1.1 200 OK
+    .. sourcecode:: http
 
-      {{ STREAM }}
+        HTTP/1.1 200 OK
 
+        {{ STREAM }}
 
-       The stream must be a tar archive compressed with one of the following algorithms:
-       identity (no compression), gzip, bzip2, xz. The archive must include a file called
-       `Dockerfile` at its root. It may include any number of other files, which will be
-       accessible in the build context (See the ADD build command).
 
-       The Content-type header should be set to "application/tar".
+    The stream must be a tar archive compressed with one of the following algorithms:
+    identity (no compression), gzip, bzip2, xz. The archive must include a file called
+    `Dockerfile` at its root. It may include any number of other files, which will be
+    accessible in the build context (See the ADD build command).
+    
+    The Content-type header should be set to "application/tar".
 
-	:query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
-	:query q: suppress verbose build output
+    :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
+    :query q: suppress verbose build output
     :query nocache: do not use the cache when building the image
-	:statuscode 200: no error
+    :statuscode 200: no error
     :statuscode 500: server error
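As a rough illustration of the endpoint described above, assuming a daemon listening on tcp://127.0.0.1:4243 (the address is an assumption, not part of this diff):

    # Stream a gzipped tar build context (Dockerfile at its root) and tag the result
    tar cz . | curl -X POST \
        -H "Content-Type: application/tar" \
        --data-binary @- \
        "http://127.0.0.1:4243/build?t=myrepo/myimage"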
 
 

+ 68 - 47
docs/sources/commandline/cli.rst

@@ -41,7 +41,7 @@ To stop a container, use ``docker stop``
 To kill the container, use ``docker kill``
 
 .. _cli_attach_examples:
- 
+
 Examples:
 ~~~~~~~~~
 
@@ -55,8 +55,8 @@ Examples:
      Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
      Swap:   786428k total,        0k used,   786428k free,   221740k cached
 
-     PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
-      1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top                
+     PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+      1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top
 
       top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
       Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
@@ -64,8 +64,8 @@ Examples:
       Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
       Swap:   786428k total,        0k used,   786428k free,   221776k cached
 
-        PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
-	    1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
+        PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+	    1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
 
 
       top - 02:05:58 up  3:06,  0 users,  load average: 0.01, 0.02, 0.05
@@ -74,9 +74,9 @@ Examples:
       Mem:    373572k total,   355780k used,    17792k free,    27880k buffers
       Swap:   786428k total,        0k used,   786428k free,   221776k cached
 
-      PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
-           1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
-     ^C$ 
+      PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+           1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top
+     ^C$
      $ sudo docker stop $ID
 
 .. _cli_build:
@@ -133,7 +133,6 @@ to the ``docker`` daemon.  ``ADD`` doesn't work when running in this
 mode because the absence of the context provides no source files to
 copy to the container.
 
-
 .. code-block:: bash
 
     sudo docker build github.com/creack/docker-firefox
@@ -151,16 +150,35 @@ by using the ``git://`` schema.
 
 ::
 
-    Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY [TAG]]
+    Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
 
     Create a new image from a container's changes
 
       -m="": Commit message
       -author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-      -run="": Configuration to be applied when the image is launched with `docker run`. 
+      -run="": Configuration to be applied when the image is launched with `docker run`.
                (ex: '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
 
-Full -run example (multiline is ok within a single quote ``'``)
+Simple commit of an existing container
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: bash
+
+	$ docker ps
+	ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
+	c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
+	197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
+	$ docker commit c3f279d17e0a  SvenDowideit/testimage:version3
+	f5283438590d
+	$ docker images | head
+	REPOSITORY                        TAG                 ID                  CREATED             SIZE
+	SvenDowideit/testimage            version3            f5283438590d        16 seconds ago      204.2 MB (virtual 335.7 MB)
+	S
+
+Full -run example
+.................
+
+(multiline is ok within a single quote ``'``)
 
 ::
 
@@ -239,7 +257,7 @@ Shell 1: Listening for events
 .............................
 
 .. code-block:: bash
-    
+
     $ sudo docker events
 
 Shell 2: Start and Stop a Container
@@ -282,6 +300,9 @@ Shell 1: (Again .. now showing events)
 
     Show the history of an image
 
+      -notrunc=false: Don't truncate output
+      -q=false: only show numeric IDs
+
 .. _cli_images:
 
 ``images``
@@ -314,7 +335,7 @@ Displaying images visually
 
 ::
 
-    Usage: docker import URL|- [REPOSITORY [TAG]]
+    Usage: docker import URL|- [REPOSITORY[:TAG]]
 
     Create a new filesystem image from the contents of a tarball
 
@@ -330,14 +351,16 @@ Examples
 Import from a remote location
 .............................
 
-``$ sudo docker import http://example.com/exampleimage.tgz exampleimagerepo``
+This will create a new untagged image.
+
+``$ sudo docker import http://example.com/exampleimage.tgz``
 
 Import from a local file
 ........................
 
 Import to docker via pipe and standard in
 
-``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal``
+``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new``
 
 Import from a local directory
 .............................
@@ -401,7 +424,9 @@ Insert file from github
 
     Usage: docker kill CONTAINER [CONTAINER...]
 
-    Kill a running container
+    Kill a running container (Send SIGKILL)
+
+The main process inside the container will be sent SIGKILL.
 
 .. _cli_login:
 
@@ -510,7 +535,7 @@ Insert file from github
 
     Remove one or more containers
         -link="": Remove the link instead of the actual container
- 
+
 
 Examples:
 ~~~~~~~~~
@@ -579,6 +604,7 @@ network communication.
       -expose=[]: Expose a port from the container without publishing it to your host
       -link="": Add link to another container (name:alias)
       -name="": Assign the specified name to the container. If no name is specific docker will generate a random name
+      -P=false: Publish all exposed ports to the host interfaces
 
 Examples
 --------
@@ -615,58 +641,51 @@ use-cases, like running Docker within Docker.
 
    docker  run -w /path/to/dir/ -i -t  ubuntu pwd
 
-The ``-w`` lets the command being executed inside directory given, 
-here /path/to/dir/. If the path does not exists it is created inside the 
+The ``-w`` flag runs the command inside the given directory,
+here /path/to/dir/. If the path does not exist, it is created inside the
 container.
 
 .. code-block:: bash
 
    docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
 
-The ``-v`` flag mounts the current working directory into the container. 
-The ``-w`` lets the command being executed inside the current 
+The ``-v`` flag mounts the current working directory into the container.
+The ``-w`` flag runs the command inside the current
 working directory, by changing into the directory to the value
 returned by ``pwd``. So this combination executes the command
 using the container, but inside the current working directory.
 
 .. code-block:: bash
 
-    docker run -p 127.0.0.0::80 ubuntu bash
-
-This the ``-p`` flag now allows you to bind a port to a specific
-interface of the host machine.  In this example port ``80`` of the 
-container will have a dynamically allocated port bound to 127.0.0.1 
-of the host.
-
-.. code-block:: bash
-
-    docker run -p 127.0.0.1:80:80 ubuntu bash
+    docker run -p 127.0.0.1:80:8080 ubuntu bash
 
-This will bind port ``80`` of the container to port ``80`` on 127.0.0.1 of your
-host machine.
+This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
+host machine. :ref:`port_redirection` explains in detail how to manipulate ports
+in Docker.
 
 .. code-block:: bash
 
     docker run -expose 80 ubuntu bash
 
-This will expose port ``80`` of the container for use within a link
-without publishing the port to the host system's interfaces.  
+This exposes port ``80`` of the container for use within a link without
+publishing the port to the host system's interfaces. :ref:`port_redirection`
+explains in detail how to manipulate ports in Docker.
 
 .. code-block:: bash
 
     docker run -name console -t -i ubuntu bash
 
-This will create and run a new container with the container name 
+This will create and run a new container with the container name
 being ``console``.
 
 .. code-block:: bash
 
     docker run -link /redis:redis -name console ubuntu bash
 
-The ``-link`` flag will link the container named ``/redis`` into the 
+The ``-link`` flag will link the container named ``/redis`` into the
 newly created container with the alias ``redis``.  The new container
 can access the network and environment of the redis container via
-environment variables.  The ``-name`` flag will assign the name ``console`` 
+environment variables.  The ``-name`` flag will assign the name ``console``
 to the newly created container.
 
 .. _cli_search:
@@ -678,8 +697,11 @@ to the newly created container.
 
     Usage: docker search TERM
 
-    Searches for the TERM parameter on the Docker index and prints out
-    a list of repositories that match.
+    Search the docker index for images
+
+     -notrunc=false: Don't truncate output
+     -stars=0: Only displays with at least xxx stars
+     -trusted=false: Only show trusted builds
 
 .. _cli_start:
 
@@ -704,10 +726,12 @@ to the newly created container.
 
     Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
 
-    Stop a running container
+    Stop a running container (Send SIGTERM, and then SIGKILL after grace period)
 
       -t=10: Number of seconds to wait for the container to stop before killing it.
 
+The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL.
+
 .. _cli_tag:
 
 ``tag``
@@ -715,7 +739,7 @@ to the newly created container.
 
 ::
 
-    Usage: docker tag [OPTIONS] IMAGE REPOSITORY [TAG]
+    Usage: docker tag [OPTIONS] IMAGE REPOSITORY[:TAG]
 
     Tag an image into a repository
 
@@ -728,7 +752,7 @@ to the newly created container.
 
 ::
 
-    Usage: docker top CONTAINER
+    Usage: docker top CONTAINER [ps OPTIONS]
 
     Lookup the running processes of a container
 
@@ -750,6 +774,3 @@ Show the version of the docker client, daemon, and latest released version.
     Usage: docker wait [OPTIONS] NAME
 
     Block until a container stops, then print its exit code.
-
-
-
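The new ``-P`` flag documented above has no example on this page yet; a small hedged sketch (the image name is a placeholder):

    # Publish every port the image EXPOSEs onto dynamically allocated host ports
    sudo docker run -d -P -name web <image>

    # Check which host ports were picked
    sudo docker ps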

+ 4 - 9
docs/sources/contributing/devenvironment.rst

@@ -56,7 +56,7 @@ To create the Docker binary, run this command:
 
 .. code-block:: bash
 
-	sudo docker run -lxc-conf=lxc.aa_profile=unconfined -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
+	sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
 
 This will create the Docker binary in ``./bundles/<version>-dev/binary/``
 
@@ -64,19 +64,14 @@ This will create the Docker binary in ``./bundles/<version>-dev/binary/``
 Step 5: Run the Tests
 ---------------------
 
-To run the Docker test cases you first need to disable `AppArmor <https://wiki.ubuntu.com/AppArmor>`_ using the following commands
-
-.. code-block:: bash
-
-	sudo /etc/init.d/apparmor stop
-	sudo /etc/init.d/apparmor teardown
-
 To execute the test cases, run this command:
 
 .. code-block:: bash
 
-	sudo docker run -lxc-conf=lxc.aa_profile=unconfined -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
+	sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
+
 
+Note: if you're running the tests in Vagrant, you need to specify a DNS entry in the command: `-dns 8.8.8.8`
 
 If the test are successful then the tail of the output should look something like this
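Putting the note above together with the test command, the Vagrant variant would look roughly like this (a sketch, not a command taken from the repository):

	sudo docker run -privileged -dns 8.8.8.8 -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test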
 

+ 5 - 5
docs/sources/examples/couchdb_data_volumes.rst

@@ -10,7 +10,7 @@ CouchDB Service
 .. include:: example_header.inc
 
 Here's an example of using data volumes to share the same data between
-2 CouchDB containers.  This could be used for hot upgrades, testing
+two CouchDB containers.  This could be used for hot upgrades, testing
 different versions of CouchDB on the same data, etc.
 
 Create first database
@@ -25,8 +25,8 @@ Note that we're marking ``/var/lib/couchdb`` as a data volume.
 Add data to the first database
 ------------------------------
 
-We're assuming your docker host is reachable at `localhost`. If not,
-replace `localhost` with the public IP of your docker host.
+We're assuming your Docker host is reachable at ``localhost``. If not,
+replace ``localhost`` with the public IP of your Docker host.
 
 .. code-block:: bash
 
@@ -37,7 +37,7 @@ replace `localhost` with the public IP of your docker host.
 Create second database
 ----------------------
 
-This time, we're requesting shared access to $COUCH1's volumes.
+This time, we're requesting shared access to ``$COUCH1``'s volumes.
 
 .. code-block:: bash
 
@@ -52,5 +52,5 @@ Browse data on the second database
     URL="http://$HOST:$(sudo docker port $COUCH2 5984)/_utils/"
     echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
 
-Congratulations, you are running 2 Couchdb containers, completely
+Congratulations, you are now running two CouchDB containers, completely
 isolated from each other *except* for their data.

+ 10 - 9
docs/sources/examples/hello_world.rst

@@ -12,16 +12,16 @@ Hello World
 Running the Examples
 ====================
 
-All the examples assume your machine is running the docker daemon. To
-run the docker daemon in the background, simply type:
+All the examples assume your machine is running the ``docker`` daemon. To
+run the ``docker`` daemon in the background, simply type:
 
 .. code-block:: bash
 
    sudo docker -d &
 
-Now you can run docker in client mode: by default all commands will be
+Now you can run Docker in client mode: by default all commands will be
 forwarded to the ``docker`` daemon via a protected Unix socket, so you
-must run as root.
+must run as the ``root`` user or via the ``sudo`` command.
 
 .. code-block:: bash
 
@@ -38,23 +38,24 @@ Hello World
 
 This is the most basic example available for using Docker.
 
-Download the base image (named "ubuntu"):
+Download the base image which is named ``ubuntu``:
 
 .. code-block:: bash
 
     # Download an ubuntu image
     sudo docker pull ubuntu
 
-Alternatively to the *ubuntu* image, you can select *busybox*, a bare
+Alternatively to the ``ubuntu`` image, you can select ``busybox``, a bare
 minimal Linux system. The images are retrieved from the Docker
 repository.
 
 
 .. code-block:: bash
 
-    #run a simple echo command, that will echo hello world back to the console over standard out.
     sudo docker run ubuntu /bin/echo hello world
 
+This command will run a simple ``echo`` command that echoes ``hello world`` back to the console over standard out.
+
 **Explanation:**
 
 - **"sudo"** execute the following commands as user *root* 
@@ -100,9 +101,9 @@ we stop it.
     CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
 
 We are going to run a simple hello world daemon in a new container
-made from the *ubuntu* image.
+made from the ``ubuntu`` image.
 
-- **"docker run -d "** run a command in a new container. We pass "-d"
+- **"sudo docker run -d "** run a command in a new container. We pass "-d"
   so it runs as a daemon.
 - **"ubuntu"** is the image we want to run the command inside of.
 - **"/bin/sh -c"** is the command we want to run in the container

+ 1 - 1
docs/sources/examples/index.rst

@@ -10,7 +10,7 @@ Examples
 
 Here are some examples of how to use Docker to create running
 processes, starting from a very simple *Hello World* and progressing
-to more substantial services like you might find in production.
+to more substantial services like those which you might find in production.
 
 .. toctree::
    :maxdepth: 1

+ 27 - 22
docs/sources/examples/linking_into_redis.rst

@@ -9,12 +9,14 @@ Linking Redis
 
 .. include:: example_header.inc
 
-Building a redis container to link as a child of our web application.
+Building a Redis container to link as a child of our web application.
 
-Building the redis container
+Building the Redis container
 ----------------------------
 
-Lets build a redis image with the following Dockerfile.
+Lets build a Redis image with the following Dockerfile.
+
+First checkout the Redis source code.
 
 .. code-block:: bash
 
@@ -22,7 +24,10 @@ Lets build a redis image with the following Dockerfile.
     cd redis
     git checkout 2.6
 
-    # Save this Dockerfile to the root of the redis repository.  
+
+Now let's create a Dockerfile in the root of the Redis repository.
+
+.. code-block:: bash
 
     # Build redis from source
     # Make sure you have the redis source code checked out in
@@ -51,37 +56,37 @@ Lets build a redis image with the following Dockerfile.
 
 
 We need to ``EXPOSE`` the default port of 6379 so that our link knows what ports 
-to connect to our redis container on.  If you do not expose any ports for the
+to connect to our Redis container on.  If you do not expose any ports for the
 image then docker will not be able to establish the link between containers.
 
 
-Run the redis container
+Run the Redis container
 -----------------------
 
 .. code-block:: bash
-    
-    docker run -d -e PASSWORD=docker -name redis redis-2.6 --requirepass docker
- 
-This will run our redis container wit the password docker 
+
+    sudo docker run -d -e PASSWORD=docker -name redis redis-2.6 --requirepass docker
+
+This will run our Redis container with the password docker 
 to secure our service.  By specifying the ``-name`` flag on run 
-we will assign the name ``redis`` to this container.  If we do not specify a name  for 
+we will assign the name ``redis`` to this container.  If we do not specify a name for 
 our container via the ``-name`` flag docker will automatically generate a name for us.
 We can issue all the commands that you would expect; start, stop, attach, using the name for our container.
 The name also allows us to link other containers into this one.
 
-Linking redis as a child
+Linking Redis as a child
 ------------------------
 
-Next we can start a new web application that has a dependency on redis and apply a link 
-to connect both containers.  If you noticed when running our redis server we did not use
-the ``-p`` flag to publish the redis port to the host system.  Redis exposed port 6379 via the Dockerfile 
+Next we can start a new web application that has a dependency on Redis and apply a link 
+to connect both containers.  If you noticed when running our Redis server we did not use
+the ``-p`` flag to publish the Redis port to the host system.  Redis exposed port 6379 via the Dockerfile 
 and this is all we need to establish a link.
 
-Now lets start our web application with a link into redis.
+Now let's start our web application with a link into Redis.
 
 .. code-block:: bash
-   
-    docker run -t -i -link redis:db -name webapp ubuntu bash
+
+    sudo docker run -t -i -link redis:db -name webapp ubuntu bash
 
     root@4c01db0b339c:/# env
 
@@ -95,7 +100,7 @@ Now lets start our web application with a link into redis.
     DB_PORT_6379_TCP_PORT=6379
     PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
     PWD=/
-    DB_ENV_PASSWORD=dockerpass
+    DB_ENV_PASSWORD=docker
     SHLVL=1
     HOME=/
     container=lxc
@@ -105,7 +110,7 @@ Now lets start our web application with a link into redis.
 
 When we inspect the environment of the linked container we can see a few extra environment 
 variables have been added.  When you specified ``-link redis:db`` you are telling docker
-to link the container named ``redis`` into this new container with the alias ``db``.  
+to link the container named ``redis`` into this new container with the alias ``db``.
 Environment variables are prefixed with the alias so that the parent container can access
 network and environment information from the containers that are linked into it.
 
@@ -124,9 +129,9 @@ network and environment information from the containers that are linked into it.
     DB_PORT_6379_TCP_PORT=6379
 
     # Get environment variables of the container 
-    DB_ENV_PASSWORD=dockerpass
+    DB_ENV_PASSWORD=docker
 
 
 Accessing the network information along with the environment of the child container allows
-us to easily connect to the redis service on the specific ip and port and use the password
+us to easily connect to the Redis service on the specific IP and port and use the password
 specified in the environment.
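To make that last point concrete, a hedged sketch of using those variables from inside the ``webapp`` container; it assumes ``redis-cli`` has been installed there (for example with ``apt-get install -y redis-server``), which this page does not do:

    # Inside the container started with -link redis:db
    redis-cli -h $DB_PORT_6379_TCP_ADDR -p $DB_PORT_6379_TCP_PORT -a $DB_ENV_PASSWORD ping
    PONG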

+ 9 - 9
docs/sources/examples/mongodb.rst

@@ -10,8 +10,8 @@ Building an Image with MongoDB
 .. include:: example_header.inc
 
 The goal of this example is to show how you can build your own
-docker images with MongoDB preinstalled. We will do that by
-constructing a Dockerfile that downloads a base image, adds an
+Docker images with MongoDB pre-installed. We will do that by
+constructing a ``Dockerfile`` that downloads a base image, adds an
 apt source and installs the database software on Ubuntu.
 
 Creating a ``Dockerfile``
@@ -41,7 +41,7 @@ Since we want to be running the latest version of MongoDB we'll need to add the
     RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
 
 Then, we don't want Ubuntu to complain about init not being available so we'll
-divert /sbin/initctl to /bin/true so it thinks everything is working.
+divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working.
 
 .. code-block:: bash
 
@@ -65,8 +65,8 @@ run without needing to provide a special configuration file)
     # Create the MongoDB data directory
     RUN mkdir -p /data/db
 
-Finally, we'll expose the standard port that MongoDB runs on (27107) as well as
-define an ENTRYPOINT for the container.
+Finally, we'll expose the standard port that MongoDB runs on, 27017, as well as
+define an ``ENTRYPOINT`` instruction for the container.
 
 .. code-block:: bash
 
@@ -78,7 +78,7 @@ run all of the commands.
 
 .. code-block:: bash
 
-    docker build -t <yourname>/mongodb .
+    sudo docker build -t <yourname>/mongodb .
 
 Now you should be able to run ``mongod`` as a daemon and be able to connect on
 the local port!
@@ -86,13 +86,13 @@ the local port!
 .. code-block:: bash
 
     # Regular style
-    MONGO_ID=$(docker run -d <yourname>/mongodb)
+    MONGO_ID=$(sudo docker run -d <yourname>/mongodb)
 
     # Lean and mean
-    MONGO_ID=$(docker run -d <yourname>/mongodb --noprealloc --smallfiles)
+    MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles)
 
     # Check the logs out
-    docker logs $MONGO_ID
+    sudo docker logs $MONGO_ID
 
     # Connect and play around
     mongo --port <port you get from `docker ps`>

+ 16 - 16
docs/sources/examples/nodejs_web_app.rst

@@ -10,7 +10,7 @@ Node.js Web App
 .. include:: example_header.inc
 
 The goal of this example is to show you how you can build your own
-docker images from a parent image using a ``Dockerfile`` . We will do
+Docker images from a parent image using a ``Dockerfile`` . We will do
 that by making a simple Node.js hello world web application running on
 CentOS. You can get the full source code at
 https://github.com/gasi/docker-node-hello.
@@ -55,7 +55,7 @@ Then, create an ``index.js`` file that defines a web app using the
 
 
 In the next steps, we’ll look at how you can run this app inside a CentOS
-container using docker. First, you’ll need to build a docker image of your app.
+container using Docker. First, you’ll need to build a Docker image of your app.
 
 Creating a ``Dockerfile``
 +++++++++++++++++++++++++
@@ -67,8 +67,8 @@ Create an empty file called ``Dockerfile``:
     touch Dockerfile
 
 Open the ``Dockerfile`` in your favorite text editor and add the following line
-that defines the version of docker the image requires to build
-(this example uses docker 0.3.4):
+that defines the version of Docker the image requires to build
+(this example uses Docker 0.3.4):
 
 .. code-block:: bash
 
@@ -76,7 +76,7 @@ that defines the version of docker the image requires to build
 
 Next, define the parent image you want to use to build your own image on top of.
 Here, we’ll use `CentOS <https://index.docker.io/_/centos/>`_ (tag: ``6.4``)
-available on the `docker index`_:
+available on the `Docker index`_:
 
 .. code-block:: bash
 
@@ -95,23 +95,23 @@ To install the right package for CentOS, we’ll use the instructions from the
     # Install Node.js and npm
     RUN     yum install -y npm
 
-To bundle your app’s source code inside the docker image, use the ``ADD``
-command:
+To bundle your app’s source code inside the Docker image, use the ``ADD``
+instruction:
 
 .. code-block:: bash
 
     # Bundle app source
     ADD . /src
 
-Install your app dependencies using npm:
+Install your app dependencies using the ``npm`` binary:
 
 .. code-block:: bash
 
     # Install app dependencies
     RUN cd /src; npm install
 
-Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` command
-to have it mapped by the docker daemon:
+Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` instruction
+to have it mapped by the ``docker`` daemon:
 
 .. code-block:: bash
 
@@ -152,7 +152,7 @@ Building your image
 +++++++++++++++++++
 
 Go to the directory that has your ``Dockerfile`` and run the following
-command to build a docker image. The ``-t`` flag let’s you tag your
+command to build a Docker image. The ``-t`` flag lets you tag your
 image so it’s easier to find later using the ``docker images``
 command:
 
@@ -160,7 +160,7 @@ command:
 
     sudo docker build -t <your username>/centos-node-hello .
 
-Your image will now be listed by docker:
+Your image will now be listed by Docker:
 
 .. code-block:: bash
 
@@ -199,17 +199,17 @@ Print the output of your app:
 Test
 ++++
 
-To test your app, get the the port of your app that docker mapped:
+To test your app, get the port of your app that Docker mapped:
 
 .. code-block:: bash
 
-    docker ps
+    sudo docker ps
 
     > # Example
     > ID            IMAGE                          COMMAND              ...   PORTS
     > ecce33b30ebf  gasi/centos-node-hello:latest  node /src/index.js         49160->8080
 
-In the example above, docker mapped the ``8080`` port of the container to
+In the example above, Docker mapped the ``8080`` port of the container to
 ``49160``.
 
 Now you can call your app using ``curl`` (install if needed via:
@@ -229,7 +229,7 @@ Now you can call your app using ``curl`` (install if needed via:
     > Hello World
 
 We hope this tutorial helped you get up and running with Node.js and
-CentOS on docker. You can get the full source code at
+CentOS on Docker. You can get the full source code at
 https://github.com/gasi/docker-node-hello.
 
 Continue to :ref:`running_redis_service`.

+ 4 - 5
docs/sources/examples/postgresql_service.rst

@@ -13,7 +13,7 @@ PostgreSQL Service
 
 .. note::
 
-    As of version 0.5.2, docker requires root privileges to run.
+    As of version 0.5.2, Docker requires root privileges to run.
     You have to either manually adjust your system configuration (permissions on
     /var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
     `this thread`_ for details.
@@ -24,8 +24,7 @@ PostgreSQL Service
 Installing PostgreSQL on Docker
 -------------------------------
 
-For clarity I won't be showing commands output.
-
+For clarity I won't be showing command output.
 
 Run an interactive shell in Docker container.
 
@@ -62,7 +61,7 @@ Finally, install PostgreSQL 9.2
 
 Now, create a PostgreSQL superuser role that can create databases and
 other roles.  Following Vagrant's convention the role will be named
-`docker` with `docker` password assigned to it.
+``docker`` with ``docker`` password assigned to it.
 
 .. code-block:: bash
 
@@ -108,7 +107,7 @@ Bash prompt; you can also locate it using ``docker ps -a``.
 
 .. code-block:: bash
 
-    docker commit <container_id> <your username>/postgresql
+    sudo docker commit <container_id> <your username>/postgresql
 
 Finally, run PostgreSQL server via ``docker``.
 

+ 20 - 20
docs/sources/examples/python_web_app.rst

@@ -10,9 +10,9 @@ Python Web App
 .. include:: example_header.inc
 
 The goal of this example is to show you how you can author your own
-docker images using a parent image, making changes to it, and then
+Docker images using a parent image, making changes to it, and then
 saving the results as a new image. We will do that by making a simple
-hello flask web application image.
+hello Flask web application image.
 
 **Steps:**
 
@@ -20,22 +20,22 @@ hello flask web application image.
 
     sudo docker pull shykes/pybuilder
 
-We are downloading the "shykes/pybuilder" docker image
+We are downloading the ``shykes/pybuilder`` Docker image
 
 .. code-block:: bash
 
     URL=http://github.com/shykes/helloflask/archive/master.tar.gz
 
-We set a URL variable that points to a tarball of a simple helloflask web app
+We set a ``URL`` variable that points to a tarball of a simple helloflask web app
 
 .. code-block:: bash
 
     BUILD_JOB=$(sudo docker run -d -t shykes/pybuilder:latest /usr/local/bin/buildapp $URL)
 
-Inside of the "shykes/pybuilder" image there is a command called
-buildapp, we are running that command and passing the $URL variable
+Inside of the ``shykes/pybuilder`` image there is a command called
+``buildapp``, we are running that command and passing the ``$URL`` variable
 from step 2 to it, and running the whole thing inside of a new
-container. BUILD_JOB will be set with the new container_id.
+container. The ``BUILD_JOB`` environment variable will be set with the new container ID.
 
 .. code-block:: bash
 
@@ -43,13 +43,13 @@ container. BUILD_JOB will be set with the new container_id.
     [...]
 
 While this container is running, we can attach to the new container to
-see what is going on. Ctrl-C to disconnect.
+see what is going on. You can use Ctrl-C to disconnect.
 
 .. code-block:: bash
 
     sudo docker ps -a
-    
-List all docker containers. If this container has already finished
+
+List all Docker containers. If this container has already finished
 running, it will still be listed here.
 
 .. code-block:: bash
@@ -57,8 +57,8 @@ running, it will still be listed here.
     BUILD_IMG=$(sudo docker commit $BUILD_JOB _/builds/github.com/shykes/helloflask/master)
 
 Save the changes we just made in the container to a new image called
-``_/builds/github.com/hykes/helloflask/master`` and save the image id in
-the BUILD_IMG variable name.
+``_/builds/github.com/shykes/helloflask/master`` and save the image ID in
+the ``BUILD_IMG`` variable.
 
 .. code-block:: bash
 
@@ -72,24 +72,24 @@ the BUILD_IMG variable name.
 - **/usr/local/bin/runapp** is the command which starts the web app.
 
 Use the new image we just created and create a new container with
-network port 5000, and return the container id and store in the
-WEB_WORKER variable.
+network port 5000, and return the container ID and store it in the
+``WEB_WORKER`` variable.
 
 .. code-block:: bash
 
     sudo docker logs $WEB_WORKER
      * Running on http://0.0.0.0:5000/
 
-View the logs for the new container using the WEB_WORKER variable, and
-if everything worked as planned you should see the line "Running on
-http://0.0.0.0:5000/" in the log output.
+View the logs for the new container using the ``WEB_WORKER`` variable, and
+if everything worked as planned you should see the line ``Running on
+http://0.0.0.0:5000/`` in the log output.
 
 .. code-block:: bash
 
     WEB_PORT=$(sudo docker port $WEB_WORKER 5000)
 
 Look up the public-facing port which is NAT-ed. Find the private port
-used by the container and store it inside of the WEB_PORT variable.
+used by the container and store it inside of the ``WEB_PORT`` variable.
 
 .. code-block:: bash
 
@@ -97,8 +97,8 @@ used by the container and store it inside of the WEB_PORT variable.
     curl http://127.0.0.1:$WEB_PORT
       Hello world!
 
-Access the web app using curl. If everything worked as planned you
-should see the line "Hello world!" inside of your console.
+Access the web app using the ``curl`` binary. If everything worked as planned you
+should see the line ``Hello world!`` inside of your console.
 
 **Video:**
 

+ 6 - 6
docs/sources/examples/running_redis_service.rst

@@ -9,7 +9,7 @@ Redis Service
 
 .. include:: example_header.inc
 
-Very simple, no frills, redis service.
+Very simple, no frills, Redis service.
 
 Open a docker container
 -----------------------
@@ -35,13 +35,13 @@ Snapshot the installation
 
 .. code-block:: bash
 
-    docker ps -a  # grab the container id (this will be the first one in the list)
-    docker commit <container_id> <your username>/redis
+    sudo docker ps -a  # grab the container id (this will be the first one in the list)
+    sudo docker commit <container_id> <your username>/redis
 
 Run the service
 ---------------
 
-Running the service with `-d` runs the container in detached mode, leaving the
+Running the service with ``-d`` runs the container in detached mode, leaving the
 container running in the background. Use your snapshot.
 
 .. code-block:: bash
@@ -51,7 +51,7 @@ container running in the background. Use your snapshot.
 Test 1
 ++++++
 
-Connect to the container with the redis-cli.
+Connect to the container with the ``redis-cli`` binary.
 
 .. code-block:: bash
 
@@ -67,7 +67,7 @@ Connect to the container with the redis-cli.
 Test 2
 ++++++
 
-Connect to the host os with the redis-cli.
+Connect to the host OS with the ``redis-cli`` binary.
 
 .. code-block:: bash
 

+ 1 - 1
docs/sources/examples/running_riak_service.rst

@@ -107,7 +107,7 @@ Create a ``supervisord`` configuration file
 +++++++++++++++++++++++++++++++++++++++++++
 
 Create an empty file called ``supervisord.conf``. Make sure it's at the same
-level as your ``Dockerfile``:
+directory level as your ``Dockerfile``:
 
 .. code-block:: bash
 

+ 7 - 6
docs/sources/examples/running_ssh_service.rst

@@ -12,14 +12,14 @@ SSH Daemon Service
 
 **Video:**
 
-I've create a little screencast to show how to create a sshd service
+I've created a little screencast to show how to create an SSHd service
 and connect to it. It is something like 11 minutes and not entirely
 smooth, but gives you a good idea.
 
 .. note::
-   This screencast was created before ``docker`` version 0.5.2, so the
+   This screencast was created before Docker version 0.5.2, so the
    daemon is unprotected and available via a TCP port. When you run
-   through the same steps in a newer version of ``docker``, you will
+   through the same steps in a newer version of Docker, you will
    need to add ``sudo`` in front of each ``docker`` command in order
    to reach the daemon over its protected Unix socket.
 
@@ -29,13 +29,14 @@ smooth, but gives you a good idea.
       <iframe width="800" height="400" src="http://ascii.io/a/2637/raw" frameborder="0"></iframe>
     </div>
         
-You can also get this sshd container by using
-::
+You can also get this sshd container by using:
+
+.. code-block:: bash
 
     sudo docker pull dhrp/sshd
 
 
-The password is 'screencast'
+The password is ``screencast``.
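A short sketch of actually starting the pulled image and logging in; the exposed port and the ``root`` user are assumptions about the ``dhrp/sshd`` image rather than facts stated on this page:

    # Run the image and let Docker pick a host port for container port 22
    CONTAINER=$(sudo docker run -d -p 22 dhrp/sshd)

    # Find the allocated host port, then connect (password: screencast)
    sudo docker port $CONTAINER 22
    ssh root@localhost -p <port printed above>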
 
 **Video's Transcription:**
 

+ 2 - 2
docs/sources/installation/ubuntulinux.rst

@@ -162,8 +162,8 @@ Verify it worked
 Docker and UFW
 ^^^^^^^^^^^^^^
 
-Docker uses a bridge to manage containers networking, by default UFW
-drop all `forwarding`, a first step is to enable forwarding:
+Docker uses a bridge to manage container networking. By default, UFW
+drops all `forwarding`, thus a first step is to enable UFW forwarding:
 
 .. code-block:: bash
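The body of the code block referenced above falls outside the diff context; as a hedged reminder (not the exact content of the omitted block), UFW forwarding is usually enabled like this:

    # Switch the default forward policy to ACCEPT and reload UFW
    sudo sed -i 's/DEFAULT_FORWARD_POLICY="DROP"/DEFAULT_FORWARD_POLICY="ACCEPT"/' /etc/default/ufw
    sudo ufw reload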
 

+ 1 - 1
docs/sources/installation/vagrant.rst

@@ -9,7 +9,7 @@ Using Vagrant (Mac, Linux)
 
 This guide will setup a new virtualbox virtual machine with docker
 installed on your computer. This works on most operating systems,
-including MacOX, Windows, Linux, FreeBSD and others. If you can
+including MacOSX, Windows, Linux, FreeBSD and others. If you can
 install these and have at least 400MB RAM to spare you should be good.
 
 Install Vagrant and Virtualbox

+ 4 - 0
docs/sources/installation/windows.rst

@@ -208,6 +208,10 @@ configuration / Device configuration)
 
 .. image:: images/win/hp_bios_vm.JPG
 
+On some machines the BIOS menu can only be accessed before startup.
+To access the BIOS in this scenario, restart your computer and
+press ESC/Enter when prompted to access the boot and BIOS controls. Typically
+the option to allow virtualization is contained within the BIOS/Security menu.
 
 Docker is not installed
 ```````````````````````

+ 5 - 9
docs/sources/use/basics.rst

@@ -138,22 +138,19 @@ Listing all running containers
 
   sudo docker ps
 
-Expose a service on a TCP port
+Bind a service on a TCP port
 ------------------------------
 
 .. code-block:: bash
 
-  # Expose port 4444 of this container, and tell netcat to listen on it
+  # Bind port 4444 of this container, and tell netcat to listen on it
   JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444)
 
   # Which public port is NATed to my container?
-  PORT=$(sudo docker port $JOB 4444)
+  PORT=$(sudo docker port $JOB 4444 | awk -F: '{ print $2 }')
 
-  # Connect to the public port via the host's public address
-  # Please note that because of how routing works connecting to localhost or 127.0.0.1 $PORT will not work.
-  # Replace *eth0* according to your local interface name.
-  IP=$(ip -o -4 addr list eth0 | perl -n -e 'if (m{inet\s([\d\.]+)\/\d+\s}xms) { print $1 }')
-  echo hello world | nc $IP $PORT
+  # Connect to the public port
+  echo hello world | nc 127.0.0.1 $PORT
 
   # Verify that the network connection worked
   echo "Daemon received: $(sudo docker logs $JOB)"
@@ -183,4 +180,3 @@ You now have a image state from which you can create new instances.
 
 Read more about :ref:`working_with_the_repository` or continue to the
 complete :ref:`cli`
-

+ 13 - 10
docs/sources/use/builder.rst

@@ -174,10 +174,10 @@ override the default specified in CMD.
 
     ``EXPOSE <port> [<port>...]``
 
-The ``EXPOSE`` instruction sets ports to be publicly exposed when
-running the image. This is functionally equivalent to running ``docker
-commit -run '{"PortSpecs": ["<port>", "<port2>"]}'`` outside the
-builder. Take a look at :ref:`port_redirection` for more information.
+The ``EXPOSE`` instruction exposes ports for use within links. This is
+functionally equivalent to running ``docker commit -run '{"PortSpecs":
+["<port>", "<port2>"]}'`` outside the builder. Refer to
+:ref:`port_redirection` for detailed information.
 
 3.6 ENV
 -------
@@ -208,6 +208,9 @@ a remote file URL.
 ``<dest>`` is the path at which the source will be copied in the
 destination container.
 
+All new files and directories are created with mode 0755, uid and gid
+0.
+
 The copy obeys the following rules:
 
 * If ``<src>`` is a URL and ``<dest>`` does not end with a trailing slash,
@@ -220,8 +223,9 @@ The copy obeys the following rules:
   (``http://example.com`` will not work).
 * If ``<src>`` is a directory, the entire directory is copied,
   including filesystem metadata.
-* If ``<src>`` is a tar archive in a recognized compression format
-  (identity, gzip, bzip2 or xz), it is unpacked as a directory.
+* If ``<src>`` is a *local* tar archive in a recognized compression
+  format (identity, gzip, bzip2 or xz) then it is unpacked as a
+  directory. Resources from *remote* URLs are **not** decompressed.
 
   When a directory is copied or unpacked, it has the same behavior as
   ``tar -x``: the result is the union of
@@ -229,7 +233,7 @@ The copy obeys the following rules:
   1. whatever existed at the destination path and
   2. the contents of the source tree,
 
-  with conflicts resolved in favor of 2) on a file-by-file basis.
+  with conflicts resolved in favor of "2." on a file-by-file basis.
 
 * If ``<src>`` is any other kind of file, it is copied individually
   along with its metadata. In this case, if ``<dest>`` ends with a
@@ -237,10 +241,9 @@ The copy obeys the following rules:
   contents of ``<src>`` will be written at ``<dest>/base(<src>)``.
 * If ``<dest>`` does not end with a trailing slash, it will be
   considered a regular file and the contents of ``<src>`` will be
-  written at ``<dst>``.
+  written at ``<dest>``.
 * If ``<dest>`` doesn't exist, it is created along with all missing
-  directories in its path. All new files and directories are created
-  with mode 0755, uid and gid 0.
+  directories in its path.
 
 .. _entrypoint_def:
 

+ 1 - 0
docs/sources/use/index.rst

@@ -19,3 +19,4 @@ Contents:
    port_redirection
    puppet
    host_integration
+   working_with_volumes

+ 123 - 16
docs/sources/use/port_redirection.rst

@@ -8,29 +8,136 @@
 Port redirection
 ================
 
-Docker can redirect public TCP and UDP ports to your container, so it can be
-reached over the network.  Port redirection is done on ``docker run``
-using the -p flag.
+Interacting with a service is commonly done through a connection to a
+port. When this service runs inside a container, one can connect to
+the port after finding the IP address of the container as follows:
 
-A port redirect is specified as *PUBLIC:PRIVATE*, where TCP port
-*PUBLIC* will be redirected to TCP port *PRIVATE*. As a special case,
-the public port can be omitted, in which case a random public port
-will be allocated.
+.. code-block:: bash
+
+    # Find IP address of container with ID <container_id>
+    docker inspect <container_id> | grep IPAddress | cut -d '"' -f 4
+
+However, this IP address is local to the host system and the container
+port is not reachable by the outside world. Furthermore, even if the
+port is used locally, e.g. by another container, this method is
+tedious as the IP address of the container changes every time it
+starts.
+
+Docker addresses these two problems and gives a simple and robust way
+to access services running inside containers.
+
+To allow non-local clients to reach the service running inside the
+container, Docker provides ways to bind the container port to an
+interface of the host system. To simplify communication between
+containers, Docker provides the linking mechanism.
+
+Binding a port to a host interface
+-----------------------------------
+
+To bind a port of the container to a specific interface of the host
+system, use the ``-p`` parameter of the ``docker run`` command:
 
 .. code-block:: bash
 
-    # A random PUBLIC port is redirected to PRIVATE port 80 on the container
-    sudo docker run -p 80 <image> <cmd>
+    # General syntax
+    docker run -p [([<host_interface>:[host_port]])|(<host_port>):]<container_port>[/udp] <image> <cmd>
+
+When no host interface is provided, the port is bound to all available
+interfaces of the host machine (aka INADDR_ANY, or 0.0.0.0). When no host port is
+provided, one is dynamically allocated. The possible combinations of options for
+a TCP port are the following:
+
+.. code-block:: bash
 
-    # PUBLIC port 80 is redirected to PRIVATE port 80
-    sudo docker run -p 80:80 <image> <cmd>
+    # Bind TCP port 8080 of the container to TCP port 80 on 127.0.0.1 of the host machine.
+    docker run -p 127.0.0.1:80:8080 <image> <cmd>
 
-To redirect a UDP port the redirection must be expressed as *PUBLIC:PRIVATE/udp*:
+    # Bind TCP port 8080 of the container to a dynamically allocated TCP port on 127.0.0.1 of the host machine.
+    docker run -p 127.0.0.1::8080 <image> <cmd>
+
+    # Bind TCP port 8080 of the container to TCP port 80 on all available interfaces of the host machine.
+    docker run -p 80:8080 <image> <cmd>
+
+    # Bind TCP port 8080 of the container to a dynamically allocated TCP port on all available interfaces of the host machine.
+    docker run -p 8080 <image> <cmd>
+
+UDP ports can also be bound by adding a trailing ``/udp``. All the
+combinations described for TCP work. Here is only one example:
 
 .. code-block:: bash
 
-    # PUBLIC port 5300 is redirected to the PRIVATE port 53 using UDP
-    sudo docker run -p 5300:53/udp <image> <cmd>
+    # Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine.
+    docker run -p 127.0.0.1:53:5353/udp <image> <cmd>
+
+The command ``docker port`` lists the interface and port on the host
+machine bound to a given container port. It is useful when using
+dynamically allocated ports:
+
+.. code-block:: bash
+
+   # Bind to a dynamically allocated port
+   docker run -p 127.0.0.1::8080 -name dyn-bound <image> <cmd>
+
+   # Lookup the actual port
+   docker port dyn-bound 8080
+   127.0.0.1:49160
+
+
+Linking a container
+-------------------
+
+Communication between two containers can also be established in a
+docker-specific way called linking.
+
+To briefly present the concept of linking, let us consider two
+containers: ``server``, containing the service, and ``client``,
+accessing the service.  Once ``server`` is running, ``client`` is
+started and links to server. Linking sets environment variables in
+``client`` giving it some information about ``server``. In this sense,
+linking is a method of service discovery.
+
+Let us now get back to our topic of interest: communication between
+the two containers. We mentioned that the tricky part about this
+communication was that the IP address of ``server`` was not
+fixed. Therefore, some of the environment variables are going to be
+used to inform ``client`` about this IP address. This process, called
+exposure, is possible because ``client`` is started after ``server``
+has been started.
+
+Here is a full example. On ``server``, the port of interest is
+exposed. The exposure is done either through the ``-expose`` parameter
+to the ``docker run`` command, or the ``EXPOSE`` build command in a
+Dockerfile:
+
+.. code-block:: bash
+
+    # Expose port 80
+    docker run -expose 80 -name server <image> <cmd>
+
+The ``client`` then links to the ``server``:
+
+.. code-block:: bash
+
+    # Link
+    docker run -name client -link server:linked-server <image> <cmd>
+
+``client`` locally refers to ``server`` as ``linked-server``. The
+following environment variables, among others, are available on
+``client``:
+
+.. code-block:: bash
+
+    # The default protocol, ip, and port of the service running in the container
+    LINKED-SERVER_PORT=tcp://172.17.0.8:80
+
+    # A specific protocol, ip, and port of various services
+    LINKED-SERVER_PORT_80_TCP=tcp://172.17.0.8:80
+    LINKED-SERVER_PORT_80_TCP_PROTO=tcp
+    LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8
+    LINKED-SERVER_PORT_80_TCP_PORT=80
+
+This tells ``client`` that a service is running on port 80 of
+``server`` and that ``server`` is accessible at the IP address
+172.17.0.8.
 
-Default port redirects can be built into a container with the
-``EXPOSE`` build command.
+Note: Using the ``-p`` parameter also exposes the port.
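A compact sketch tying the two halves of this section together; image names are placeholders, and the alias is kept free of dashes so the generated variables can be used directly from a shell:

    # Server side: expose the service port and name the container
    sudo docker run -d -expose 80 -name www <image> <cmd>

    # Client side: link against it under the alias "db"
    sudo docker run -t -i -link www:db -name consumer ubuntu bash

    # Inside the client shell the link variables point at the server, e.g.
    # (curl is assumed to be installed in the client image):
    env | grep DB_PORT
    curl http://$DB_PORT_80_TCP_ADDR:$DB_PORT_80_TCP_PORT/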

+ 73 - 0
docs/sources/use/working_with_volumes.rst

@@ -0,0 +1,73 @@
+:title: Working with Volumes
+:description: How to create and share volumes
+:keywords: Examples, Usage, volume, docker, documentation, examples
+
+.. _volume_def:
+
+Data Volume
+===========
+
+.. versionadded:: v0.3.0
+   Data volumes have been available since version 1 of the
+   :doc:`../api/docker_remote_api`
+
+A *data volume* is a specially-designated directory within one or more
+containers that bypasses the :ref:`ufs_def` to provide several useful
+features for persistent or shared data:
+
+* **Data volumes can be shared and reused between containers.** This
+  is the feature that makes data volumes so powerful. You can use it
+  for anything from hot database upgrades to custom backup or
+  replication tools. See the example below.
+* **Changes to a data volume are made directly**, without the overhead
+  of a copy-on-write mechanism. This is good for very large files.
+* **Changes to a data volume will not be included at the next commit**
+  because they are not recorded as regular filesystem changes in the
+  top layer of the :ref:`ufs_def`.
+
+Each container can have zero or more data volumes.
+
+Getting Started
+...............
+
+Using data volumes is as simple as adding a ``-v`` flag to ``docker run``.
+The ``-v`` flag may be given more than once to create several volumes in
+the new container. The example below creates a container with two new
+volumes::
+
+  docker run -v /var/volume1 -v /var/volume2 shykes/couchdb
+
+In a Dockerfile, the ``VOLUME`` instruction adds one or more new volumes to any container created from the image::
+
+  VOLUME ["/var/volume1", "/var/volume2"]
+
+
+Create a new container using volumes from an existing container
+---------------------------------------------------------------------------
+
+
+The command below creates a new container running as a daemon (``-d``) with one volume, ``/var/lib/couchdb``::
+
+  COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
+
+Using the container id of that first container, ``$COUCH1``, it is possible to create a new container sharing the same volume with the parameter ``-volumes-from container_id``::
+
+  COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
+
+Now, the second container has access to all the data in the first container's volume.
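+
+As a purely illustrative follow-up, the same flag can be used again to
+attach yet another container to that volume::
+
+  COUCH3=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)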
+
+
+Create a new container which mounts a host directory into it
+-------------------------------------------------------------
+
+  -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
+  If "host-dir" is missing, then docker creates a new volume.
+
+  This is not available from a Dockerfile, because Dockerfiles are meant to
+  be portable and shareable: the [host-dir] part is 100% host-dependent and
+  will break on any other machine.
+
+For example::
+
+  sudo docker run -v /var/logs:/var/host_logs:ro shykes/couchdb:2013-05-03
+
+The command above mounts the host directory ``/var/logs`` into the container as ``/var/host_logs``, with read-only permissions.
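+
+As a purely illustrative variant, omitting the ``ro`` flag would make the
+bind mount read-write (the default)::
+
+  sudo docker run -v /var/logs:/var/host_logs shykes/couchdb:2013-05-03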
+
+.. versionadded:: v0.5.0

+ 1 - 0
engine/MAINTAINERS

@@ -0,0 +1 @@
+Solomon Hykes <solomon@dotcloud.com>

+ 82 - 0
engine/engine.go

@@ -0,0 +1,82 @@
+package engine
+
+import (
+	"fmt"
+	"os"
+	"log"
+	"runtime"
+	"github.com/dotcloud/docker/utils"
+)
+
+
+type Handler func(*Job) string
+
+var globalHandlers map[string]Handler
+
+func Register(name string, handler Handler) error {
+	if globalHandlers == nil {
+		globalHandlers = make(map[string]Handler)
+	}
+	globalHandlers[name] = handler
+	return nil
+}
+
+// The Engine is the core of Docker.
+// It acts as a store for *containers*, and allows manipulation of these
+// containers by executing *jobs*.
+type Engine struct {
+	root		string
+	handlers	map[string]Handler
+}
+
+// New initializes a new engine managing the directory specified at `root`.
+// `root` is used to store containers and any other state private to the engine.
+// Changing the contents of the root without executing a job will cause unspecified
+// behavior.
+func New(root string) (*Engine, error) {
+	// Check for unsupported architectures
+	if runtime.GOARCH != "amd64" {
+		return nil, fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
+	}
+	// Check for unsupported kernel versions
+	// FIXME: it would be cleaner to not test for specific versions, but rather
+	// test for specific functionalities.
+	// Unfortunately we can't test for the feature "does not cause a kernel panic"
+	// without actually causing a kernel panic, so we need this workaround until
+	// the circumstances of pre-3.8 crashes are clearer.
+	// For details see http://github.com/dotcloud/docker/issues/407
+	if k, err := utils.GetKernelVersion(); err != nil {
+		log.Printf("WARNING: %s\n", err)
+	} else {
+		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+		}
+	}
+	if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	eng := &Engine{
+		root:		root,
+		handlers:	globalHandlers,
+	}
+	return eng, nil
+}
+
+// Job creates a new job which can later be executed.
+// This function mimics `Command` from the standard os/exec package.
+func (eng *Engine) Job(name string, args ...string) *Job {
+	job := &Job{
+		eng:		eng,
+		Name:		name,
+		Args:		args,
+		Stdin:		os.Stdin,
+		Stdout:		os.Stdout,
+		Stderr:		os.Stderr,
+	}
+	handler, exists := eng.handlers[name]
+	if exists {
+		job.handler = handler
+	}
+	return job
+}
+

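The comments above describe the Engine as a store of containers manipulated by named jobs. As a purely illustrative sketch (not part of this changeset, and assuming the `engine` package exactly as introduced here), a handler could be registered and invoked like this:

```go
package main

import (
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	// Handlers are registered globally, before any Engine is created.
	engine.Register("hello", func(job *engine.Job) string {
		fmt.Fprintf(job.Stdout, "hello %s\n", job.Args[0])
		return "0" // "0" means success; any other string is an error
	})

	// New checks the architecture and kernel, then prepares the root directory.
	eng, err := engine.New("/var/lib/docker")
	if err != nil {
		panic(err)
	}

	// Job mimics exec.Command: the handler is looked up by name,
	// and Run blocks until the handler returns.
	if err := eng.Job("hello", "world").Run(); err != nil {
		panic(err)
	}
}
```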
+ 29 - 0
engine/env_test.go

@@ -0,0 +1,29 @@
+package engine
+
+import (
+	"testing"
+)
+
+func TestNewJob(t *testing.T) {
+	job := mkJob(t, "dummy", "--level=awesome")
+	if job.Name != "dummy" {
+		t.Fatalf("Wrong job name: %s", job.Name)
+	}
+	if len(job.Args) != 1 {
+		t.Fatalf("Wrong number of job arguments: %d", len(job.Args))
+	}
+	if job.Args[0] != "--level=awesome" {
+		t.Fatalf("Wrong job arguments: %s", job.Args[0])
+	}
+}
+
+func TestSetenv(t *testing.T) {
+	job := mkJob(t, "dummy")
+	job.Setenv("foo", "bar")
+	if val := job.Getenv("foo"); val != "bar" {
+		t.Fatalf("Getenv returns incorrect value: %s", val)
+	}
+	if val := job.Getenv("nonexistent"); val != "" {
+		t.Fatalf("Getenv returns incorrect value: %s", val)
+	}
+}

+ 42 - 0
engine/init_test.go

@@ -0,0 +1,42 @@
+package engine
+
+import (
+	"testing"
+	"runtime"
+	"strings"
+	"fmt"
+	"io/ioutil"
+	"github.com/dotcloud/docker/utils"
+)
+
+var globalTestID string
+
+func init() {
+	Register("dummy", func(job *Job) string { return ""; })
+}
+
+func mkEngine(t *testing.T) *Engine {
+	// Use the caller function name as a prefix.
+	// This helps trace temp directories back to their test.
+	pc, _, _, _ := runtime.Caller(1)
+	callerLongName := runtime.FuncForPC(pc).Name()
+	parts := strings.Split(callerLongName, ".")
+	callerShortName := parts[len(parts)-1]
+	if globalTestID == "" {
+		globalTestID = utils.RandomString()[:4]
+	}
+	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
+	root, err := ioutil.TempDir("", prefix)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eng, err := New(root)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return eng
+}
+
+func mkJob(t *testing.T, name string, args ...string) *Job {
+	return mkEngine(t).Job(name, args...)
+}

+ 113 - 0
engine/job.go

@@ -0,0 +1,113 @@
+package engine
+
+import (
+	"io"
+	"strings"
+	"fmt"
+	"encoding/json"
+	"github.com/dotcloud/docker/utils"
+)
+
+// A job is the fundamental unit of work in the docker engine.
+// Everything docker can do should eventually be exposed as a job.
+// For example: execute a process in a container, create a new container,
+// download an archive from the internet, serve the http api, etc.
+//
+// The job API is designed after unix processes: a job has a name, arguments,
+// environment variables, standard streams for input, output and error, and
+// an exit status which can indicate success (0) or error (anything else).
+//
+// One slight variation is that jobs report their status as a string. The
+// string "0" indicates success, and any other string indicates an error.
+// This allows for richer error reporting.
+// 
+type Job struct {
+	eng	*Engine
+	Name	string
+	Args	[]string
+	env	[]string
+	Stdin	io.ReadCloser
+	Stdout	io.WriteCloser
+	Stderr	io.WriteCloser
+	handler	func(*Job) string
+	status	string
+}
+
+// Run executes the job and blocks until the job completes.
+// If the job returns a failure status, an error is returned
+// which includes the status.
+func (job *Job) Run() error {
+	randId := utils.RandomString()[:4]
+	fmt.Printf("Job #%s: %s\n", randId, job)
+	defer fmt.Printf("Job #%s: %s = '%s'", randId, job, job.status)
+	if job.handler == nil {
+		job.status = "command not found"
+	} else {
+		job.status = job.handler(job)
+	}
+	if job.status != "0" {
+		return fmt.Errorf("%s: %s", job.Name, job.status)
+	}
+	return nil
+}
+
+// String returns a human-readable description of `job`
+func (job *Job) String() string {
+	return strings.Join(append([]string{job.Name}, job.Args...), " ")
+}
+
+func (job *Job) Getenv(key string) (value string) {
+        for _, kv := range job.env {
+                if strings.Index(kv, "=") == -1 {
+                        continue
+                }
+                parts := strings.SplitN(kv, "=", 2)
+                if parts[0] != key {
+                        continue
+                }
+                if len(parts) < 2 {
+                        value = ""
+                } else {
+                        value = parts[1]
+                }
+        }
+        return
+}
+
+func (job *Job) GetenvBool(key string) (value bool) {
+	s := strings.ToLower(strings.Trim(job.Getenv(key), " \t"))
+	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+		return false
+	}
+	return true
+}
+
+func (job *Job) SetenvBool(key string, value bool) {
+	if value {
+		job.Setenv(key, "1")
+	} else {
+		job.Setenv(key, "0")
+	}
+}
+
+func (job *Job) GetenvList(key string) []string {
+	sval := job.Getenv(key)
+	l := make([]string, 0, 1)
+	if err := json.Unmarshal([]byte(sval), &l); err != nil {
+		l = append(l, sval)
+	}
+	return l
+}
+
+func (job *Job) SetenvList(key string, value []string) error {
+	sval, err := json.Marshal(value)
+	if err != nil {
+		return err
+	}
+	job.Setenv(key, string(sval))
+	return nil
+}
+
+func (job *Job) Setenv(key, value string) {
+	job.env = append(job.env, key + "=" + value)
+}

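The environment helpers above store values as "key=value" strings, normalize booleans, and round-trip lists through JSON. A minimal, hypothetical sketch (not part of this changeset) of how a handler and its caller might use them:

```go
package main

import (
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	engine.Register("dump", func(job *engine.Job) string {
		// GetenvBool treats "", "0", "no", "false" and "none" as false.
		fmt.Fprintf(job.Stdout, "Tty=%v Cmd=%v\n",
			job.GetenvBool("Tty"), job.GetenvList("Cmd"))
		return "0"
	})

	eng, err := engine.New("/var/lib/docker")
	if err != nil {
		panic(err)
	}

	job := eng.Job("dump")
	job.SetenvBool("Tty", true)
	// SetenvList marshals the slice to JSON inside a single variable.
	if err := job.SetenvList("Cmd", []string{"echo", "hello"}); err != nil {
		panic(err)
	}
	if err := job.Run(); err != nil {
		panic(err)
	}
}
```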
+ 15 - 2
gograph/gograph.go

@@ -4,6 +4,7 @@ import (
 	"database/sql"
 	"fmt"
 	"path"
+	"sync"
 )
 
 const (
@@ -47,6 +48,7 @@ type WalkFunc func(fullPath string, entity *Entity) error
 // Graph database for storing entities and their relationships
 type Database struct {
 	conn *sql.DB
+	mux  sync.Mutex
 }
 
 // Create a new graph database initialized with a root entity
@@ -54,7 +56,7 @@ func NewDatabase(conn *sql.DB, init bool) (*Database, error) {
 	if conn == nil {
 		return nil, fmt.Errorf("Database connection cannot be nil")
 	}
-	db := &Database{conn}
+	db := &Database{conn: conn}
 
 	if init {
 		if _, err := conn.Exec(createEntityTable); err != nil {
@@ -99,7 +101,9 @@ func (db *Database) Close() error {
 
 // Set the entity id for a given path
 func (db *Database) Set(fullPath, id string) (*Entity, error) {
-	// FIXME: is rollback implicit when closing the connection?
+	db.mux.Lock()
+	defer db.mux.Unlock()
+
 	rollback := func() {
 		db.conn.Exec("ROLLBACK")
 	}
@@ -256,6 +260,9 @@ func (db *Database) RefPaths(id string) Edges {
 
 // Delete the reference to an entity at a given path
 func (db *Database) Delete(name string) error {
+	db.mux.Lock()
+	defer db.mux.Unlock()
+
 	if name == "/" {
 		return fmt.Errorf("Cannot delete root entity")
 	}
@@ -276,6 +283,9 @@ func (db *Database) Delete(name string) error {
 // Walk the graph to make sure all references to the entity
 // are removed and return the number of references removed
 func (db *Database) Purge(id string) (int, error) {
+	db.mux.Lock()
+	defer db.mux.Unlock()
+
 	rollback := func() {
 		db.conn.Exec("ROLLBACK")
 	}
@@ -310,6 +320,9 @@ func (db *Database) Purge(id string) (int, error) {
 
 // Rename an edge for a given path
 func (db *Database) Rename(currentName, newName string) error {
+	db.mux.Lock()
+	defer db.mux.Unlock()
+
 	parentPath, name := splitPath(currentName)
 	newParentPath, newEdgeName := splitPath(newName)
 

+ 37 - 0
gograph/gograph_test.go

@@ -3,6 +3,7 @@ package gograph
 import (
 	_ "code.google.com/p/gosqlite/sqlite3"
 	"database/sql"
+	"fmt"
 	"os"
 	"path"
 	"strconv"
@@ -501,3 +502,39 @@ func TestGetNameWithTrailingSlash(t *testing.T) {
 		t.Fatalf("Entity should not be nil")
 	}
 }
+
+func TestConcurrentWrites(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	errs := make(chan error, 2)
+
+	save := func(name string, id string) {
+		if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil {
+			errs <- err
+		}
+		errs <- nil
+	}
+	purge := func(id string) {
+		if _, err := db.Purge(id); err != nil {
+			errs <- err
+		}
+		errs <- nil
+	}
+
+	save("/1", "1")
+
+	go purge("1")
+	go save("/2", "2")
+
+	any := false
+	for i := 0; i < 2; i++ {
+		if err := <-errs; err != nil {
+			any = true
+			t.Log(err)
+		}
+	}
+	if any {
+		t.Fatal()
+	}
+}

+ 7 - 5
graph.go

@@ -2,6 +2,7 @@ package docker
 
 import (
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -94,7 +95,7 @@ func (graph *Graph) Get(name string) (*Image, error) {
 }
 
 // Create creates a new image and registers it in the graph.
-func (graph *Graph) Create(layerData Archive, container *Container, comment, author string, config *Config) (*Image, error) {
+func (graph *Graph) Create(layerData archive.Archive, container *Container, comment, author string, config *Config) (*Image, error) {
 	img := &Image{
 		ID:            GenerateID(),
 		Comment:       comment,
@@ -117,7 +118,7 @@ func (graph *Graph) Create(layerData Archive, container *Container, comment, aut
 
 // Register imports a pre-existing image into the graph.
 // FIXME: pass img as first argument
-func (graph *Graph) Register(jsonData []byte, layerData Archive, img *Image) error {
+func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) error {
 	if err := ValidateID(img.ID); err != nil {
 		return err
 	}
@@ -146,7 +147,7 @@ func (graph *Graph) Register(jsonData []byte, layerData Archive, img *Image) err
 //   The archive is stored on disk and will be automatically deleted as soon as has been read.
 //   If output is not nil, a human-readable progress bar will be written to it.
 //   FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
-func (graph *Graph) TempLayerArchive(id string, compression Compression, sf *utils.StreamFormatter, output io.Writer) (*TempArchive, error) {
+func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) {
 	image, err := graph.Get(id)
 	if err != nil {
 		return nil, err
@@ -155,11 +156,11 @@ func (graph *Graph) TempLayerArchive(id string, compression Compression, sf *uti
 	if err != nil {
 		return nil, err
 	}
-	archive, err := image.TarLayer(compression)
+	a, err := image.TarLayer(compression)
 	if err != nil {
 		return nil, err
 	}
-	return NewTempArchive(utils.ProgressReader(ioutil.NopCloser(archive), 0, output, sf.FormatProgress("", "Buffering to disk", "%v/%v (%v)"), sf, true), tmp.Root)
+	return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf.FormatProgress("", "Buffering to disk", "%v/%v (%v)"), sf, true), tmp.Root)
 }
 
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
@@ -201,6 +202,7 @@ func (graph *Graph) getDockerInitLayer() (string, error) {
 		"/proc":            "dir",
 		"/sys":             "dir",
 		"/.dockerinit":     "file",
+		"/.dockerenv":      "file",
 		"/etc/resolv.conf": "file",
 		"/etc/hosts":       "file",
 		"/etc/hostname":    "file",

+ 2 - 1
graph_test.go

@@ -4,6 +4,7 @@ import (
 	"archive/tar"
 	"bytes"
 	"errors"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -301,7 +302,7 @@ func tempGraph(t *testing.T) *Graph {
 	return graph
 }
 
-func testArchive(t *testing.T) Archive {
+func testArchive(t *testing.T) archive.Archive {
 	archive, err := fakeTar()
 	if err != nil {
 		t.Fatal(err)

+ 23 - 17
hack/RELEASE-CHECKLIST.md

@@ -9,7 +9,8 @@ to keep it up-to-date.
 ### 1. Pull from master and create a release branch
 
 ```bash
-git checkout master
+export VERSION=vXXX
+git checkout release
 git pull
 git checkout -b bump_$VERSION
 ```
@@ -55,7 +56,9 @@ EXAMPLES:
 
 ### 4. Run all tests
 
-FIXME
+```bash
+docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test
+```
 
 ### 5. Test the docs
 
@@ -66,21 +69,17 @@ the docs are in ``docs/README.md``
 ### 6. Commit and create a pull request to the "release" branch
 
 ```bash
-git add CHANGELOG.md
+git add VERSION CHANGELOG.md
 git commit -m "Bump version to $VERSION"
 git push origin bump_$VERSION
 ```
 
 ### 7. Get 2 other maintainers to validate the pull request
 
-### 8. Merge the pull request and apply tags
+### 8. Apply tag
 
 ```bash
-git checkout release
-git merge bump_$VERSION
 git tag -a v$VERSION # Don't forget the v!
-git tag -f -a latest
-git push
 git push --tags
 ```
 
@@ -90,28 +89,35 @@ should see the updated docs 5-10 minutes after the merge. The docs
 will appear on http://docs.docker.io/. For more information about
 documentation releases, see ``docs/README.md``
 
-### 9. Publish binaries
+### 9. Go to GitHub and merge the bump_$VERSION branch into release
+
+### 10. Publish binaries
 
 To run this you will need access to the release credentials.
 Get them from [the infrastructure maintainers](
 https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).
 
 ```bash
+git checkout release
+git fetch
+git reset --hard origin/release
 docker build -t docker .
 docker run  \
-	-e AWS_S3_BUCKET=get-nightly.docker.io \
-	-e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
-	-e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
-	-e GPG_PASSPHRASE=supersecretsesame \
-	docker
-	hack/release.sh
+       -e AWS_S3_BUCKET=test.docker.io \
+       -e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
+       -e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
+       -e GPG_PASSPHRASE=supersecretsesame \
+       -privileged -lxc-conf=lxc.aa_profile=unconfined \
+       -t -i \
+       docker \
+       hack/release.sh
 ```
 
 It will build and upload the binaries on the specified bucket (you should
-use get-nightly.docker.io for general testing, and once everything is fine,
+use test.docker.io for general testing, and once everything is fine,
 switch to get.docker.io).
 
 
-### 10. Rejoice!
+### 11. Rejoice!
 
 Congratulations! You're done.

+ 8 - 0
hack/dind

@@ -21,6 +21,14 @@ mountpoint -q $CGROUP ||
 		exit 1
 	}
 
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+    mount -t securityfs none /sys/kernel/security || {
+	echo "Could not mount /sys/kernel/security."
+	echo "AppArmor detection and -privileged mode might break."
+    }
+fi
+
 # Mount the cgroup hierarchies exactly as they are in the parent system.
 for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
 do

+ 18 - 21
hack/infrastructure/docker-ci/buildbot/master.cfg

@@ -17,15 +17,12 @@ PORT_GITHUB = 8011      # Buildbot github hook port
 PORT_MASTER = 9989      # Port where buildbot master listen buildworkers
 TEST_USER = 'buildbot'  # Credential to authenticate build triggers
 TEST_PWD = 'docker'     # Credential to authenticate build triggers
-BUILDER_NAME = 'docker'
 GITHUB_DOCKER = 'github.com/dotcloud/docker'
 BUILDBOT_PATH = '/data/buildbot'
 DOCKER_PATH = '/go/src/github.com/dotcloud/docker'
 DOCKER_CI_PATH = '/docker-ci'
-BUILDER_PATH = '/data/buildbot/slave/{0}/build'.format(BUILDER_NAME)
-PULL_REQUEST_PATH = '/data/buildbot/slave/pullrequest/build'
 
-# Credentials set by setup.sh and Vagrantfile
+# Credentials set by setup.sh from deployment.py
 BUILDBOT_PWD = ''
 IRC_PWD = ''
 IRC_CHANNEL = ''
@@ -45,34 +42,35 @@ c['slavePortnum'] = PORT_MASTER
 
 
 # Schedulers
-c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[BUILDER_NAME,
+c['schedulers'] = [ForceScheduler(name='trigger', builderNames=['docker',
     'index','registry','coverage','nightlyrelease'])]
-c['schedulers'] += [SingleBranchScheduler(name="all",
-    change_filter=filter.ChangeFilter(branch='master'), treeStableTimer=None,
-    builderNames=[BUILDER_NAME])]
+c['schedulers'] += [SingleBranchScheduler(name="all", treeStableTimer=None,
+    change_filter=filter.ChangeFilter(branch='master',
+    repository='https://github.com/dotcloud/docker'), builderNames=['docker'])]
 c['schedulers'] += [SingleBranchScheduler(name='pullrequest',
     change_filter=filter.ChangeFilter(category='github_pullrequest'), treeStableTimer=None,
     builderNames=['pullrequest'])]
-c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease'],
-    hour=7, minute=00)]
+c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=['nightlyrelease',
+    'coverage'], hour=7, minute=00)]
 c['schedulers'] += [Nightly(name='every4hrs', branch=None, builderNames=['registry','index'],
     hour=range(0,24,4), minute=15)]
 
 # Builders
 # Docker commit test
+test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind'
+    ' test_docker.sh %(src::revision)s')
 factory = BuildFactory()
 factory.addStep(ShellCommand(description='Docker', logEnviron=False,
-    usePTY=True, command=['sh', '-c', Interpolate(
-    '{0}/docker-test/test_docker.sh %(src::revision)s'.format(DOCKER_CI_PATH))]))
+    usePTY=True, command=["sh", "-c", Interpolate(test_cmd)]))
 c['builders'] = [BuilderConfig(name='docker',slavenames=['buildworker'],
     factory=factory)]
 
 # Docker pull request test
+test_cmd = ('docker run -privileged mzdaniel/test_docker hack/dind'
+    ' test_docker.sh %(src::revision)s %(src::repository)s %(src::branch)s')
 factory = BuildFactory()
 factory.addStep(ShellCommand(description='pull_request', logEnviron=False,
-    usePTY=True, command=['sh', '-c', Interpolate(
-    '{0}/docker-test/test_docker.sh %(src::revision)s %(src::repository)s'
-    ' %(src::branch)s'.format(DOCKER_CI_PATH))]))
+    usePTY=True, command=["sh", "-c", Interpolate(test_cmd)]))
 c['builders'] += [BuilderConfig(name='pullrequest',slavenames=['buildworker'],
     factory=factory)]
 
@@ -97,17 +95,16 @@ c['builders'] += [BuilderConfig(name='registry',slavenames=['buildworker'],
 factory = BuildFactory()
 factory.addStep(ShellCommand(description='index', logEnviron=False,
     command='. {0}/master/credentials.cfg; '
-    '{1}/testing/functionaltests/test_index.py'.format(BUILDBOT_PATH,
-    DOCKER_PATH), usePTY=True))
+    '/docker-ci/functionaltests/test_index.py'.format(BUILDBOT_PATH),
+    usePTY=True))
 c['builders'] += [BuilderConfig(name='index',slavenames=['buildworker'],
     factory=factory)]
 
 # Docker nightly release
-nightlyrelease_cmd = ('docker run -i -t -privileged -lxc-conf=lxc.aa_profile=unconfined'
-    ' -e AWS_S3_BUCKET=test.docker.io dockerbuilder')
 factory = BuildFactory()
-factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,usePTY=True,
-    command=nightlyrelease_cmd))
+factory.addStep(ShellCommand(description='NightlyRelease', logEnviron=False,
+    usePTY=True, command='docker run -privileged'
+    ' -e AWS_S3_BUCKET=test.docker.io dockerbuilder'))
 c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
     factory=factory)]
 

+ 5 - 4
hack/infrastructure/docker-ci/deployment.py

@@ -135,16 +135,17 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
     'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin'
     ' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs')
 
-#### FIXME. Temporarily install docker with proper apparmor handling
-sudo('stop docker')
-sudo('wget -q -O /usr/bin/docker http://test.docker.io/test/docker')
-sudo('start docker')
+# Preventively reboot docker-ci daily
+sudo('ln -s /sbin/reboot /etc/cron.daily')
 
 # Build docker-ci containers
 sudo('cd {}; docker build -t docker .'.format(DOCKER_PATH))
 sudo('cd {}/nightlyrelease; docker build -t dockerbuilder .'.format(
     DOCKER_CI_PATH))
 
+# Download docker-ci testing container
+sudo('docker pull mzdaniel/test_docker')
+
 # Setup buildbot
 sudo('mkdir /data')
 sudo('{0}/setup.sh root {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}'

+ 30 - 0
hack/infrastructure/docker-ci/docker-test/Dockerfile

@@ -0,0 +1,30 @@
+# VERSION:        0.3
+# DOCKER-VERSION  0.6.3
+# AUTHOR:         Daniel Mizyrycki <daniel@dotcloud.com>
+# DESCRIPTION:    Testing docker PRs and commits on top of master using
+# REFERENCES:     This code reuses the excellent implementation of
+#                 Docker in Docker made by Jerome Petazzoni.
+#                 https://github.com/jpetazzo/dind
+# COMMENTS:
+#   This Dockerfile adapts /Dockerfile to enable docker PRs and commits testing
+#   Optional arguments:
+#       [commit]  (default: 'HEAD')
+#       [repo]    (default: 'http://github.com/dotcloud/docker')
+#       [branch]  (default: 'master')
+# TO_BUILD:       docker build -t test_docker .
+# TO_RUN:         docker run -privileged test_docker hack/dind test_docker.sh [commit] [repo] [branch]
+
+from docker
+maintainer Daniel Mizyrycki <daniel@dotcloud.com>
+
+# Setup go environment. Extracted from /Dockerfile
+env     CGO_ENABLED 0
+env     GOROOT  /goroot
+env     PATH    $PATH:/goroot/bin
+env     GOPATH  /go:/go/src/github.com/dotcloud/docker/vendor
+volume  /var/lib/docker
+workdir /go/src/github.com/dotcloud/docker
+
+# Add test_docker.sh
+add test_docker.sh /usr/bin/test_docker.sh
+run chmod +x /usr/bin/test_docker.sh

+ 11 - 8
hack/infrastructure/docker-ci/docker-test/test_docker.sh

@@ -5,17 +5,11 @@ COMMIT=${1-HEAD}
 REPO=${2-http://github.com/dotcloud/docker}
 BRANCH=${3-master}
 
-# Generate a random string of $1 characters
-function random {
-    cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1
-}
-
 # Compute test paths
-BASE_PATH=`pwd`/test_docker_$(random 12)
-DOCKER_PATH=$BASE_PATH/go/src/github.com/dotcloud/docker
-export GOPATH=$BASE_PATH/go:$DOCKER_PATH/vendor
+DOCKER_PATH=/go/src/github.com/dotcloud/docker
 
 # Fetch latest master
+rm -rf /go
 mkdir -p $DOCKER_PATH
 cd $DOCKER_PATH
 git init .
@@ -23,12 +17,21 @@ git fetch -q http://github.com/dotcloud/docker master
 git reset --hard FETCH_HEAD
 
 # Merge commit
+#echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS
+git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1
+
+# Merge commit in top of master
 git fetch -q "$REPO" "$BRANCH"
 git merge --no-edit $COMMIT || exit 1
 
 # Test commit
 go test -v; exit_status=$?
 
+# Display load if test fails
+if [ $exit_status -eq 1 ] ; then
+    uptime; echo; free
+fi
+
 # Cleanup testing directory
 rm -rf $BASE_PATH
 

+ 1 - 4
hack/infrastructure/docker-ci/nightlyrelease/Dockerfile

@@ -11,7 +11,7 @@
 #         "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature'
 #         "INDEX_AUTH='Encripted_index_authentication' }
 # TO_BUILD:       docker build -t dockerbuilder .
-# TO_RELEASE:     docker run -i -t -privileged -lxc-conf="lxc.aa_profile = unconfined" -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
+# TO_RELEASE:     docker run -i -t -privileged  -e AWS_S3_BUCKET="test.docker.io" dockerbuilder
 
 from docker
 maintainer Daniel Mizyrycki <daniel@dotcloud.com>
@@ -23,9 +23,6 @@ run apt-get update; apt-get install -y -q wget python2.7
 # Add production docker binary
 run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker
 
-#### FIXME. Temporarily install docker with proper apparmor handling
-run wget -q -O /usr/bin/docker http://test.docker.io/test/docker; chmod +x /usr/bin/docker
-
 # Add proto docker builder
 add ./dockerbuild /usr/bin/dockerbuild
 run chmod +x /usr/bin/dockerbuild

+ 11 - 6
hack/infrastructure/docker-ci/nightlyrelease/dockerbuild

@@ -13,10 +13,6 @@ cd /
 git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker
 cd /go/src/github.com/dotcloud/docker
 
-echo FIXME. Temporarily add Jerome changeset with proper apparmor handling
-git fetch  http://github.com/jpetazzo/docker escape-apparmor-confinement:escape-apparmor-confinement
-git rebase --onto master master escape-apparmor-confinement
-
 # Launch docker daemon using dind inside the container
 ./hack/dind /usr/bin/docker -d &
 sleep 5
@@ -28,7 +24,17 @@ date > timestamp
 docker build -t docker .
 
 # Run Docker unittests binary and Ubuntu package
-docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh || exit 1
+docker run -privileged docker hack/make.sh
+exit_status=$?
+
+# Display load if test fails
+if [ $exit_status -eq 1 ] ; then
+    uptime; echo; free
+    exit 1
+fi
+
+# Commit binary and ubuntu bundles for release
+docker commit -run '{"Env": ["PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"], "WorkingDir": "/go/src/github.com/dotcloud/docker"}' $(docker ps -l -q) release
 
 # Turn debug off to load credentials from the environment
 set +x
@@ -36,7 +42,6 @@ eval $(cat /root/release_credentials.json  | python -c '
 import sys,json,base64;
 d=json.loads(base64.b64decode(sys.stdin.read()));
 exec("""for k in d: print "export {0}=\\"{1}\\"".format(k,d[k])""")')
-echo '{"https://index.docker.io/v1/":{"auth":"'$INDEX_AUTH'","email":"engineering@dotcloud.com"}}' > /.dockercfg
 set -x
 
 # Push docker nightly

+ 5 - 4
image.go

@@ -5,6 +5,7 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io/ioutil"
@@ -72,7 +73,7 @@ func LoadImage(root string) (*Image, error) {
 	return img, nil
 }
 
-func StoreImage(img *Image, jsonData []byte, layerData Archive, root string) error {
+func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root string) error {
 	// Check that root doesn't already exist
 	if _, err := os.Stat(root); err == nil {
 		return fmt.Errorf("Image %s already exists", img.ID)
@@ -89,7 +90,7 @@ func StoreImage(img *Image, jsonData []byte, layerData Archive, root string) err
 	if layerData != nil {
 		start := time.Now()
 		utils.Debugf("Start untar layer")
-		if err := Untar(layerData, layer); err != nil {
+		if err := archive.Untar(layerData, layer); err != nil {
 			return err
 		}
 		utils.Debugf("Untar time: %vs", time.Now().Sub(start).Seconds())
@@ -162,12 +163,12 @@ func MountAUFS(ro []string, rw string, target string) error {
 }
 
 // TarLayer returns a tar archive of the image's filesystem layer.
-func (image *Image) TarLayer(compression Compression) (Archive, error) {
+func (image *Image) TarLayer(compression archive.Compression) (archive.Archive, error) {
 	layerPath, err := image.layer()
 	if err != nil {
 		return nil, err
 	}
-	return Tar(layerPath, compression)
+	return archive.Tar(layerPath, compression)
 }
 
 func (image *Image) Mount(root, rw string) error {

+ 24 - 14
lxc_template.go

@@ -11,7 +11,6 @@ lxc.utsname = {{.Config.Hostname}}
 {{else}}
 lxc.utsname = {{.Id}}
 {{end}}
-#lxc.aa_profile = unconfined
 
 {{if .Config.NetworkDisabled}}
 # network is disabled (-n=false)
@@ -46,7 +45,7 @@ lxc.console = none
 # no controlling tty at all
 lxc.tty = 1
 
-{{if .Config.Privileged}}
+{{if (getHostConfig .).Privileged}}
 lxc.cgroup.devices.allow = a 
 {{else}}
 # no implicit access to devices
@@ -66,7 +65,7 @@ lxc.cgroup.devices.allow = c 4:1 rwm
 lxc.cgroup.devices.allow = c 1:9 rwm
 lxc.cgroup.devices.allow = c 1:8 rwm
 
-# /dev/pts/* - pts namespaces are "coming soon"
+# /dev/pts/ - pts namespaces are "coming soon"
 lxc.cgroup.devices.allow = c 136:* rwm
 lxc.cgroup.devices.allow = c 5:2 rwm
 
@@ -97,6 +96,9 @@ lxc.mount.entry = shm {{$ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec
 # Inject dockerinit
 lxc.mount.entry = {{.SysInitPath}} {{$ROOTFS}}/.dockerinit none bind,ro 0 0
 
+# Inject env
+lxc.mount.entry = {{.EnvConfigPath}} {{$ROOTFS}}/.dockerenv none bind,ro 0 0
+
 # In order to get a working DNS environment, mount bind (ro) the host's /etc/resolv.conf into the container
 lxc.mount.entry = {{.ResolvConfPath}} {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0
 {{if .Volumes}}
@@ -106,8 +108,13 @@ lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,{{ if ind
 {{end}}
 {{end}}
 
-{{if .Config.Privileged}}
+{{if (getHostConfig .).Privileged}}
 # retain all capabilities; no lxc.cap.drop line
+{{if (getCapabilities .).AppArmor}}
+lxc.aa_profile = unconfined
+{{else}}
+#lxc.aa_profile = unconfined
+{{end}}
 {{else}}
 # drop linux capabilities (apply mainly to the user root in the container)
 #  (Note: 'lxc.cap.keep' is coming soon and should replace this under the
@@ -127,18 +134,15 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
 {{if .Config.CpuShares}}
 lxc.cgroup.cpu.shares = {{.Config.CpuShares}}
 {{end}}
-`
 
-const LxcHostConfigTemplate = `
-{{if .LxcConf}}
-{{range $pair := .LxcConf}}
+{{if (getHostConfig .).LxcConf}}
+{{range $pair := (getHostConfig .).LxcConf}}
 {{$pair.Key}} = {{$pair.Value}}
 {{end}}
 {{end}}
 `
 
 var LxcTemplateCompiled *template.Template
-var LxcHostConfigTemplateCompiled *template.Template
 
 func getMemorySwap(config *Config) int64 {
 	// By default, MemorySwap is set to twice the size of RAM.
@@ -149,17 +153,23 @@ func getMemorySwap(config *Config) int64 {
 	return config.Memory * 2
 }
 
+func getHostConfig(container *Container) *HostConfig {
+	return container.hostConfig
+}
+
+func getCapabilities(container *Container) *Capabilities {
+	return container.runtime.capabilities
+}
+
 func init() {
 	var err error
 	funcMap := template.FuncMap{
-		"getMemorySwap": getMemorySwap,
+		"getMemorySwap":   getMemorySwap,
+		"getHostConfig":   getHostConfig,
+		"getCapabilities": getCapabilities,
 	}
 	LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
 	if err != nil {
 		panic(err)
 	}
-	LxcHostConfigTemplateCompiled, err = template.New("lxc-hostconfig").Funcs(funcMap).Parse(LxcHostConfigTemplate)
-	if err != nil {
-		panic(err)
-	}
 }

+ 2 - 2
namesgenerator/names-generator.go

@@ -11,8 +11,8 @@ type NameChecker interface {
 }
 
 var (
-	colors  = [...]string{"white", "silver", "gray", "black", "blue", "green", "cyan", "yellow", "gold", "orange", "brown", "red", "violet", "pink", "magenta", "purple"}
-	animals = [...]string{"ant", "bird", "cat", "chicken", "cow", "dog", "fish", "fox", "horse", "lion", "monkey", "pig", "sheep", "tiger", "whale", "wolf"}
+	colors  = [...]string{"white", "silver", "gray", "black", "blue", "green", "cyan", "yellow", "gold", "orange", "brown", "red", "violet", "pink", "magenta", "purple", "maroon", "crimson", "plum", "fuchsia", "lavender", "slate", "navy", "azure", "aqua", "olive", "teal", "lime", "beige", "tan", "sienna"}
+  animals = [...]string{"ant", "bear", "bird", "cat", "chicken", "cow", "deer", "dog", "donkey", "duck", "fish", "fox", "frog", "horse", "kangaroo", "koala", "lemur", "lion", "lizard", "monkey", "octopus", "pig", "shark", "sheep", "sloth", "spider", "squirrel", "tiger", "toad", "weasel", "whale", "wolf"}
 )
 
 func GenerateRandomName(checker NameChecker) (string, error) {

+ 29 - 0
netlink/netlink_darwin.go

@@ -0,0 +1,29 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+)
+
+func NetworkGetRoutes() ([]*net.IPNet, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+
+func NetworkLinkAdd(name string, linkType string) error {
+	return fmt.Errorf("Not implemented")
+}
+
+func NetworkLinkUp(iface *net.Interface) error {
+	return fmt.Errorf("Not implemented")
+}
+
+
+func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error {
+	return fmt.Errorf("Not implemented")
+}
+
+func AddDefaultGw(ip net.IP) error {
+	return fmt.Errorf("Not implemented")
+
+}

+ 0 - 0
netlink/netlink.go → netlink/netlink_linux.go


+ 29 - 2
network.go

@@ -76,6 +76,21 @@ func checkRouteOverlaps(networks []*net.IPNet, dockerNetwork *net.IPNet) error {
 	return nil
 }
 
+func checkNameserverOverlaps(nameservers []string, dockerNetwork *net.IPNet) error {
+	if len(nameservers) > 0 {
+		for _, ns := range nameservers {
+			_, nsNetwork, err := net.ParseCIDR(ns)
+			if err != nil {
+				return err
+			}
+			if networkOverlaps(dockerNetwork, nsNetwork) {
+				return fmt.Errorf("%s overlaps nameserver %s", dockerNetwork, nsNetwork)
+			}
+		}
+	}
+	return nil
+}
+
 // CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`,
 // and attempts to configure it with an address which doesn't conflict with any other interface on the host.
 // If it can't find an address which doesn't conflict, it will return an error.
@@ -100,6 +115,16 @@ func CreateBridgeIface(config *DaemonConfig) error {
 		"192.168.44.1/24",
 	}
 
+	nameservers := []string{}
+	resolvConf, _ := utils.GetResolvConf()
+	// we don't check for an error here, because we don't really care
+	// if we can't read /etc/resolv.conf. So instead we skip the append
+	// if resolvConf is nil. It either doesn't exist, or we can't read it
+	// for some reason.
+	if resolvConf != nil {
+		nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...)
+	}
+
 	var ifaceAddr string
 	for _, addr := range addrs {
 		_, dockerNetwork, err := net.ParseCIDR(addr)
@@ -111,8 +136,10 @@ func CreateBridgeIface(config *DaemonConfig) error {
 			return err
 		}
 		if err := checkRouteOverlaps(routes, dockerNetwork); err == nil {
-			ifaceAddr = addr
-			break
+			if err := checkNameserverOverlaps(nameservers, dockerNetwork); err == nil {
+				ifaceAddr = addr
+				break
+			}
 		} else {
 			utils.Debugf("%s: %s", addr, err)
 		}

+ 16 - 0
network_test.go

@@ -295,3 +295,19 @@ func TestCheckRouteOverlaps(t *testing.T) {
 		t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't")
 	}
 }
+
+func TestCheckNameserverOverlaps(t *testing.T) {
+	nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"}
+
+	_, netX, _ := net.ParseCIDR("10.0.2.3/32")
+
+	if err := checkNameserverOverlaps(nameservers, netX); err == nil {
+		t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX)
+	}
+
+	_, netX, _ = net.ParseCIDR("192.168.102.2/32")
+
+	if err := checkNameserverOverlaps(nameservers, netX); err != nil {
+		t.Fatalf("%s should not overlap %v but it does", netX, nameservers)
+	}
+}

+ 11 - 3
registry/registry.go

@@ -615,10 +615,18 @@ func (r *Registry) GetAuthConfig(withPasswd bool) *auth.AuthConfig {
 	}
 }
 
+type SearchResult struct {
+	StarCount   int    `json:"star_count"`
+	IsOfficial  bool   `json:"is_official"`
+	Name        string `json:"name"`
+	IsTrusted   bool   `json:"is_trusted"`
+	Description string `json:"description"`
+}
+
 type SearchResults struct {
-	Query      string              `json:"query"`
-	NumResults int                 `json:"num_results"`
-	Results    []map[string]string `json:"results"`
+	Query      string         `json:"query"`
+	NumResults int            `json:"num_results"`
+	Results    []SearchResult `json:"results"`
 }
 
 type RepositoryData struct {

+ 84 - 25
runtime.go

@@ -24,6 +24,7 @@ type Capabilities struct {
 	MemoryLimit            bool
 	SwapLimit              bool
 	IPv4ForwardingDisabled bool
+	AppArmor               bool
 }
 
 type Runtime struct {
@@ -112,6 +113,9 @@ func (runtime *Runtime) Register(container *Container) error {
 	if err := validateID(container.ID); err != nil {
 		return err
 	}
+	if err := runtime.ensureName(container); err != nil {
+		return err
+	}
 
 	// init the wait lock
 	container.waitLock = make(chan struct{})
@@ -149,8 +153,7 @@ func (runtime *Runtime) Register(container *Container) error {
 				utils.Debugf("Restarting")
 				container.State.Ghost = false
 				container.State.setStopped(0)
-				hostConfig, _ := container.ReadHostConfig()
-				if err := container.Start(hostConfig); err != nil {
+				if err := container.Start(); err != nil {
 					return err
 				}
 				nomonitor = true
@@ -169,9 +172,27 @@ func (runtime *Runtime) Register(container *Container) error {
 	if !container.State.Running {
 		close(container.waitLock)
 	} else if !nomonitor {
-		hostConfig, _ := container.ReadHostConfig()
-		container.allocateNetwork(hostConfig)
-		go container.monitor(hostConfig)
+		go container.monitor()
+	}
+	return nil
+}
+
+func (runtime *Runtime) ensureName(container *Container) error {
+	if container.Name == "" {
+		name, err := generateRandomName(runtime)
+		if err != nil {
+			name = container.ShortID()
+		}
+		container.Name = name
+
+		if err := container.ToDisk(); err != nil {
+			utils.Debugf("Error saving container name %s", err)
+		}
+		if !runtime.containerGraph.Exists(name) {
+			if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
+				utils.Debugf("Setting default id - %s", err)
+			}
+		}
 	}
 	return nil
 }
@@ -265,7 +286,10 @@ func (runtime *Runtime) restore() error {
 	// Any containers that are left over do not exist in the graph
 	for _, container := range containers {
 		// Try to set the default name for a container if it exists prior to links
-		name := generateRandomName(runtime)
+		name, err := generateRandomName(runtime)
+		if err != nil {
+			container.Name = container.ShortID()
+		}
 		container.Name = name
 
 		if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
@@ -307,6 +331,15 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
 	if runtime.capabilities.IPv4ForwardingDisabled && !quiet {
 		log.Printf("WARNING: IPv4 forwarding is disabled.")
 	}
+
+	// Check if AppArmor seems to be enabled on this system.
+	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
+		utils.Debugf("/sys/kernel/security/apparmor not found; assuming AppArmor is not enabled.")
+		runtime.capabilities.AppArmor = false
+	} else {
+		utils.Debugf("/sys/kernel/security/apparmor found; assuming AppArmor is enabled.")
+		runtime.capabilities.AppArmor = true
+	}
 }
 
 // Create creates a new container from the given configuration with a given name.
@@ -356,7 +389,10 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 	id := GenerateID()
 
 	if name == "" {
-		name = generateRandomName(runtime)
+		name, err = generateRandomName(runtime)
+		if err != nil {
+			name = utils.TruncateID(id)
+		}
 	}
 	if name[0] != '/' {
 		name = "/" + name
@@ -394,6 +430,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 		Path:            entrypoint,
 		Args:            args, //FIXME: de-duplicate from config
 		Config:          config,
+		hostConfig:      &HostConfig{},
 		Image:           img.ID, // Always use the resolved image id
 		NetworkSettings: &NetworkSettings{},
 		// FIXME: do we need to store this in the container?
@@ -504,15 +541,22 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
 	return img, nil
 }
 
-func (runtime *Runtime) getFullName(name string) string {
+func (runtime *Runtime) getFullName(name string) (string, error) {
+	if name == "" {
+		return "", fmt.Errorf("Container name cannot be empty")
+	}
 	if name[0] != '/' {
 		name = "/" + name
 	}
-	return name
+	return name, nil
 }
 
 func (runtime *Runtime) GetByName(name string) (*Container, error) {
-	entity := runtime.containerGraph.Get(runtime.getFullName(name))
+	fullName, err := runtime.getFullName(name)
+	if err != nil {
+		return nil, err
+	}
+	entity := runtime.containerGraph.Get(fullName)
 	if entity == nil {
 		return nil, fmt.Errorf("Could not find entity for %s", name)
 	}
@@ -524,10 +568,13 @@ func (runtime *Runtime) GetByName(name string) (*Container, error) {
 }
 
 func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
-	name = runtime.getFullName(name)
+	name, err := runtime.getFullName(name)
+	if err != nil {
+		return nil, err
+	}
 	children := make(map[string]*Container)
 
-	err := runtime.containerGraph.Walk(name, func(p string, e *gograph.Entity) error {
+	err = runtime.containerGraph.Walk(name, func(p string, e *gograph.Entity) error {
 		c := runtime.Get(e.ID())
 		if c == nil {
 			return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p)
@@ -557,34 +604,29 @@ func NewRuntime(config *DaemonConfig) (*Runtime, error) {
 	if err != nil {
 		return nil, err
 	}
-
-	if k, err := utils.GetKernelVersion(); err != nil {
-		log.Printf("WARNING: %s\n", err)
-	} else {
-		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
-			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
-		}
-	}
 	runtime.UpdateCapabilities(false)
 	return runtime, nil
 }
 
 func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) {
-	runtimeRepo := path.Join(config.GraphPath, "containers")
+	runtimeRepo := path.Join(config.Root, "containers")
 
 	if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
 		return nil, err
 	}
 
-	g, err := NewGraph(path.Join(config.GraphPath, "graph"))
+	if err := linkLxcStart(config.Root); err != nil {
+		return nil, err
+	}
+	g, err := NewGraph(path.Join(config.Root, "graph"))
 	if err != nil {
 		return nil, err
 	}
-	volumes, err := NewGraph(path.Join(config.GraphPath, "volumes"))
+	volumes, err := NewGraph(path.Join(config.Root, "volumes"))
 	if err != nil {
 		return nil, err
 	}
-	repositories, err := NewTagStore(path.Join(config.GraphPath, "repositories"), g)
+	repositories, err := NewTagStore(path.Join(config.Root, "repositories"), g)
 	if err != nil {
 		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
 	}
@@ -596,7 +638,7 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) {
 		return nil, err
 	}
 
-	gographPath := path.Join(config.GraphPath, "linkgraph.db")
+	gographPath := path.Join(config.Root, "linkgraph.db")
 	initDatabase := false
 	if _, err := os.Stat(gographPath); err != nil {
 		if os.IsNotExist(err) {
@@ -638,6 +680,23 @@ func (runtime *Runtime) Close() error {
 	return runtime.containerGraph.Close()
 }
 
+func linkLxcStart(root string) error {
+	sourcePath, err := exec.LookPath("lxc-start")
+	if err != nil {
+		return err
+	}
+	targetPath := path.Join(root, "lxc-start-unconfined")
+
+	if _, err := os.Stat(targetPath); err != nil && !os.IsNotExist(err) {
+		return err
+	} else if err == nil {
+		if err := os.Remove(targetPath); err != nil {
+			return err
+		}
+	}
+	return os.Symlink(sourcePath, targetPath)
+}
+
 // History is a convenience type for storing a list of containers,
 // ordered by creation date.
 type History []*Container

+ 33 - 22
runtime_test.go

@@ -46,8 +46,8 @@ func nuke(runtime *Runtime) error {
 	wg.Wait()
 	runtime.Close()
 
-	os.Remove(filepath.Join(runtime.config.GraphPath, "linkgraph.db"))
-	return os.RemoveAll(runtime.config.GraphPath)
+	os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db"))
+	return os.RemoveAll(runtime.config.Root)
 }
 
 func cleanup(runtime *Runtime) error {
@@ -119,7 +119,7 @@ func init() {
 
 func setupBaseImage() {
 	config := &DaemonConfig{
-		GraphPath:   unitTestStoreBase,
+		Root:        unitTestStoreBase,
 		AutoRestart: false,
 		BridgeIface: unitTestNetworkBridge,
 	}
@@ -325,13 +325,13 @@ func TestGet(t *testing.T) {
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 
-	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container1)
 
-	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container2)
 
-	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
+	container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
 	defer runtime.Destroy(container3)
 
 	if runtime.Get(container1.ID) != container1 {
@@ -390,13 +390,13 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container,
 		t.Logf("Port %v already in use, trying another one", strPort)
 	}
 
-	hostConfig := &HostConfig{
+	container.hostConfig = &HostConfig{
 		PortBindings: make(map[Port][]PortBinding),
 	}
-	hostConfig.PortBindings[p] = []PortBinding{
+	container.hostConfig.PortBindings[p] = []PortBinding{
 		{},
 	}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		nuke(runtime)
 		t.Fatal(err)
 	}
@@ -503,16 +503,15 @@ func TestRestore(t *testing.T) {
 	runtime1 := mkRuntime(t)
 	defer nuke(runtime1)
 	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
+	container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
 	defer runtime1.Destroy(container1)
 
 	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
+	container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
 	defer runtime1.Destroy(container2)
 
 	// Start the container non blocking
-	hostConfig := &HostConfig{}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -575,25 +574,23 @@ func TestReloadContainerLinks(t *testing.T) {
 	runtime1 := mkRuntime(t)
 	defer nuke(runtime1)
 	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
+	container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
 	defer runtime1.Destroy(container1)
 
 	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
+	container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
 	defer runtime1.Destroy(container2)
 
 	// Start the container non blocking
-	hostConfig := &HostConfig{}
-	if err := container2.Start(hostConfig); err != nil {
+	if err := container2.Start(); err != nil {
 		t.Fatal(err)
 	}
-	h1 := &HostConfig{}
 	// Add a link to container 2
-	h1.Links = []string{"/" + container2.ID + ":first"}
+	container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
 	if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
 		t.Fatal(err)
 	}
-	if err := container1.Start(h1); err != nil {
+	if err := container1.Start(); err != nil {
 		t.Fatal(err)
 	}
 
@@ -623,7 +620,6 @@ func TestReloadContainerLinks(t *testing.T) {
 	runningCount := 0
 	for _, c := range runtime2.List() {
 		if c.State.Running {
-			t.Logf("Running container found: %v (%v)", c.ID, c.Path)
 			runningCount++
 		}
 	}
@@ -638,7 +634,6 @@ func TestReloadContainerLinks(t *testing.T) {
 		t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID)
 	}
 
-	t.Logf("Number of links: %d", runtime2.containerGraph.Refs("0"))
 	// Verify that the link is still registered in the runtime
 	entity := runtime2.containerGraph.Get(container1.Name)
 	if entity == nil {
@@ -833,3 +828,19 @@ func TestGetAllChildren(t *testing.T) {
 		}
 	}
 }
+
+func TestGetFullName(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+
+	name, err := runtime.getFullName("testing")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if name != "/testing" {
+		t.Fatalf("Expected /testing got %s", name)
+	}
+	if _, err := runtime.getFullName(""); err == nil {
+		t.Fatal("Error should not be nil")
+	}
+}

+ 110 - 23
server.go

@@ -5,7 +5,9 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/gograph"
 	"github.com/dotcloud/docker/registry"
 	"github.com/dotcloud/docker/utils"
@@ -16,11 +18,13 @@ import (
 	"net/url"
 	"os"
 	"os/exec"
+	"os/signal"
 	"path"
 	"path/filepath"
 	"runtime"
 	"strings"
 	"sync"
+	"syscall"
 	"time"
 )
 
@@ -28,6 +32,70 @@ func (srv *Server) Close() error {
 	return srv.runtime.Close()
 }
 
+func init() {
+	engine.Register("serveapi", JobServeApi)
+}
+
+func JobServeApi(job *engine.Job) string {
+	srv, err := NewServer(ConfigFromJob(job))
+	if err != nil {
+		return err.Error()
+	}
+	defer srv.Close()
+	if err := srv.Daemon(); err != nil {
+		return err.Error()
+	}
+	return "0"
+}
+
+// Daemon runs the remote api server `srv` as a daemon,
+// Only one api server can run at the same time - this is enforced by a pidfile.
+// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
+func (srv *Server) Daemon() error {
+	if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
+		log.Fatal(err)
+	}
+	defer utils.RemovePidFile(srv.runtime.config.Pidfile)
+
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
+	go func() {
+		sig := <-c
+		log.Printf("Received signal '%v', exiting\n", sig)
+		utils.RemovePidFile(srv.runtime.config.Pidfile)
+		srv.Close()
+		os.Exit(0)
+	}()
+
+	protoAddrs := srv.runtime.config.ProtoAddresses
+	chErrors := make(chan error, len(protoAddrs))
+	for _, protoAddr := range protoAddrs {
+		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
+		switch protoAddrParts[0] {
+		case "unix":
+			if err := syscall.Unlink(protoAddrParts[1]); err != nil && !os.IsNotExist(err) {
+				log.Fatal(err)
+			}
+		case "tcp":
+			if !strings.HasPrefix(protoAddrParts[1], "127.0.0.1") {
+				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+			}
+		default:
+			return fmt.Errorf("Invalid protocol format.")
+		}
+		go func() {
+			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], srv, true)
+		}()
+	}
+	for i := 0; i < len(protoAddrs); i += 1 {
+		err := <-chErrors
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func (srv *Server) DockerVersion() APIVersion {
 	return APIVersion{
 		Version:   VERSION,
@@ -118,8 +186,8 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
 	return fmt.Errorf("No such container: %s", name)
 }
 
-func (srv *Server) ImagesSearch(term string) ([]APISearch, error) {
-	r, err := registry.NewRegistry(srv.runtime.config.GraphPath, nil, srv.HTTPRequestFactory(nil))
+func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
+	r, err := registry.NewRegistry(srv.runtime.config.Root, nil, srv.HTTPRequestFactory(nil))
 	if err != nil {
 		return nil, err
 	}
@@ -127,15 +195,7 @@ func (srv *Server) ImagesSearch(term string) ([]APISearch, error) {
 	if err != nil {
 		return nil, err
 	}
-
-	var outs []APISearch
-	for _, repo := range results.Results {
-		var out APISearch
-		out.Description = repo["description"]
-		out.Name = repo["name"]
-		outs = append(outs, out)
-	}
-	return outs, nil
+	return results.Results, nil
 }
 
 func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.StreamFormatter) (string, error) {
@@ -320,10 +380,11 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) {
 	outs := []APIHistory{} //produce [] when empty instead of 'null'
 	err = image.WalkHistory(func(img *Image) error {
 		var out APIHistory
-		out.ID = srv.runtime.repositories.ImageName(img.ShortID())
+		out.ID = img.ID
 		out.Created = img.Created.Unix()
 		out.CreatedBy = strings.Join(img.ContainerConfig.Cmd, " ")
 		out.Tags = lookupMap[img.ID]
+		out.Size = img.Size
 		outs = append(outs, out)
 		return nil
 	})
@@ -350,7 +411,11 @@ func (srv *Server) ContainerTop(name, ps_args string) (*APITop, error) {
 			}
 			// no scanner.Text because we skip container id
 			for scanner.Scan() {
-				words = append(words, scanner.Text())
+				if i != 0 && len(words) == len(procs.Titles) {
+					words[len(words)-1] = fmt.Sprintf("%s %s", words[len(words)-1], scanner.Text())
+				} else {
+					words = append(words, scanner.Text())
+				}
 			}
 			if i == 0 {
 				procs.Titles = words
@@ -663,7 +728,7 @@ func (srv *Server) poolRemove(kind, key string) error {
 }
 
 func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error {
-	r, err := registry.NewRegistry(srv.runtime.config.GraphPath, authConfig, srv.HTTPRequestFactory(metaHeaders))
+	r, err := registry.NewRegistry(srv.runtime.config.Root, authConfig, srv.HTTPRequestFactory(metaHeaders))
 	if err != nil {
 		return err
 	}
@@ -836,7 +901,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 		return "", err
 	}
 
-	layerData, err := srv.runtime.graph.TempLayerArchive(imgID, Uncompressed, sf, out)
+	layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
 	if err != nil {
 		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
 	}
@@ -872,7 +937,7 @@ func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFo
 
 	out = utils.NewWriteFlusher(out)
 	img, err := srv.runtime.graph.Get(localName)
-	r, err2 := registry.NewRegistry(srv.runtime.config.GraphPath, authConfig, srv.HTTPRequestFactory(metaHeaders))
+	r, err2 := registry.NewRegistry(srv.runtime.config.Root, authConfig, srv.HTTPRequestFactory(metaHeaders))
 	if err2 != nil {
 		return err2
 	}
@@ -985,7 +1050,10 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
 		if container == nil {
 			return fmt.Errorf("No such link: %s", name)
 		}
-		name = srv.runtime.getFullName(name)
+		name, err := srv.runtime.getFullName(name)
+		if err != nil {
+			return err
+		}
 		parent, n := path.Split(name)
 		if parent == "/" {
 			return fmt.Errorf("Conflict, cannot remove the default name of the container")
@@ -1238,7 +1306,7 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 		// After we load all the links into the runtime
 		// set them to nil on the hostconfig
 		hostConfig.Links = nil
-		if err := container.SaveHostConfig(hostConfig); err != nil {
+		if err := container.writeHostConfig(); err != nil {
 			return err
 		}
 	}
@@ -1248,11 +1316,33 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
 	runtime := srv.runtime
 	container := runtime.Get(name)
+
+	if hostConfig != nil {
+		for _, bind := range hostConfig.Binds {
+			splitBind := strings.Split(bind, ":")
+			source := splitBind[0]
+
+			// refuse to bind mount "/" to the container
+			if source == "/" {
+				return fmt.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
+			}
+
+			// ensure the source exists on the host
+			_, err := os.Stat(source)
+			if err != nil && os.IsNotExist(err) {
+				return fmt.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
+			}
+		}
+	}
+
 	if container == nil {
 		return fmt.Errorf("No such container: %s", name)
 	}
-
-	if err := container.Start(hostConfig); err != nil {
+	if hostConfig != nil {
+		container.hostConfig = hostConfig
+		container.ToDisk()
+	}
+	if err := container.Start(); err != nil {
 		return fmt.Errorf("Cannot start container %s: %s", name, err)
 	}
 	srv.LogEvent("start", container.ShortID(), runtime.repositories.ImageName(container.Image))
@@ -1409,9 +1499,6 @@ func (srv *Server) ContainerCopy(name string, resource string, out io.Writer) er
 }
 
 func NewServer(config *DaemonConfig) (*Server, error) {
-	if runtime.GOARCH != "amd64" {
-		log.Fatalf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
-	}
 	runtime, err := NewRuntime(config)
 	if err != nil {
 		return nil, err

+ 3 - 3
server_test.go

@@ -246,14 +246,14 @@ func TestContainerTop(t *testing.T) {
 
 	srv := &Server{runtime: runtime}
 
-	c, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
-	c, hostConfig, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
+	c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
+	c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	defer runtime.Destroy(c)
-	if err := c.Start(hostConfig); err != nil {
+	if err := c.Start(); err != nil {
 		t.Fatal(err)
 	}
 

+ 14 - 6
sysinit/sysinit.go

@@ -1,10 +1,12 @@
 package sysinit
 
 import (
+	"encoding/json"
 	"flag"
 	"fmt"
 	"github.com/dotcloud/docker/netlink"
 	"github.com/dotcloud/docker/utils"
+	"io/ioutil"
 	"log"
 	"net"
 	"os"
@@ -69,9 +71,18 @@ func changeUser(u string) {
 }
 
 // Clear environment pollution introduced by lxc-start
-func cleanupEnv(env utils.ListOpts) {
+func cleanupEnv() {
 	os.Clearenv()
-	for _, kv := range env {
+	var lines []string
+	content, err := ioutil.ReadFile("/.dockerenv")
+	if err != nil {
+		log.Fatalf("Unable to load environment variables: %v", err)
+	}
+	err = json.Unmarshal(content, &lines)
+	if err != nil {
+		log.Fatalf("Unable to unmarshal environment variables: %v", err)
+	}
+	for _, kv := range lines {
 		parts := strings.SplitN(kv, "=", 2)
 		if len(parts) == 1 {
 			parts = append(parts, "")
@@ -104,12 +115,9 @@ func SysInit() {
 	var gw = flag.String("g", "", "gateway address")
 	var workdir = flag.String("w", "", "workdir")
 
-	var flEnv utils.ListOpts
-	flag.Var(&flEnv, "e", "Set environment variables")
-
 	flag.Parse()
 
-	cleanupEnv(flEnv)
+	cleanupEnv()
 	setupNetworking(*gw)
 	setupWorkingDirectory(*workdir)
 	changeUser(*u)

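sysinit no longer receives the container environment through repeated -e flags; it assumes the daemon has written a JSON array of KEY=value strings to /.dockerenv, clears whatever lxc-start leaked in, and re-applies that list. A sketch of the same bootstrap, reading from a temporary file so the example can run outside a container:

package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

// restoreEnv mirrors the new cleanupEnv: load the JSON list, wipe the current
// environment, and set each KEY=value pair (missing values become empty).
func restoreEnv(path string) {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatalf("Unable to load environment variables: %v", err)
	}
	var lines []string
	if err := json.Unmarshal(content, &lines); err != nil {
		log.Fatalf("Unable to unmarshal environment variables: %v", err)
	}
	os.Clearenv()
	for _, kv := range lines {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 1 {
			parts = append(parts, "")
		}
		os.Setenv(parts[0], parts[1])
	}
}

func main() {
	// Stand-in for /.dockerenv so the sketch runs on any host.
	f, err := ioutil.TempFile("", "dockerenv")
	if err != nil {
		log.Fatal(err)
	}
	f.WriteString(`["HOME=/","PATH=/usr/bin:/bin","DEBUG="]`)
	f.Close()
	restoreEnv(f.Name())
	log.Println(os.Getenv("PATH")) // /usr/bin:/bin
}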
+ 8 - 6
utils.go

@@ -225,6 +225,12 @@ func parsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding,
 		if containerPort == "" {
 			return nil, nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
 		}
+		if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil {
+			return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+		}
+		if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil {
+			return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
+		}
 
 		port := NewPort(proto, containerPort)
 		if _, exists := exposedPorts[port]; !exists {
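parsePortSpecs now validates both sides of a port mapping as unsigned 16-bit integers, while an empty host port remains allowed so the daemon can allocate one. A small sketch of that check in isolation:

package main

import (
	"fmt"
	"strconv"
)

func validatePorts(hostPort, containerPort string) error {
	// The container port is mandatory and must fit in 16 bits.
	if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil {
		return fmt.Errorf("Invalid containerPort: %s", containerPort)
	}
	// The host port may be empty; if given, it must also fit in 16 bits.
	if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil {
		return fmt.Errorf("Invalid hostPort: %s", hostPort)
	}
	return nil
}

func main() {
	fmt.Println(validatePorts("8080", "80"))   // nil
	fmt.Println(validatePorts("", "80"))       // nil: empty host port is allowed
	fmt.Println(validatePorts("8080", "blah")) // Invalid containerPort: blah
	fmt.Println(validatePorts("99999", "80"))  // Invalid hostPort: 99999 (out of uint16 range)
}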
@@ -304,10 +310,6 @@ func (c *checker) Exists(name string) bool {
 }
 
 // Generate a random and unique name
-func generateRandomName(runtime *Runtime) string {
-	n, err := namesgenerator.GenerateRandomName(&checker{runtime})
-	if err != nil {
-		panic(err)
-	}
-	return n
+func generateRandomName(runtime *Runtime) (string, error) {
+	return namesgenerator.GenerateRandomName(&checker{runtime})
 }

+ 36 - 0
utils/daemon.go

@@ -0,0 +1,36 @@
+package utils
+
+import (
+	"os"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"strconv"
+)
+
+func CreatePidFile(pidfile string) error {
+	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
+		pid, err := strconv.Atoi(string(pidString))
+		if err == nil {
+			if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
+				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
+			}
+		}
+	}
+
+	file, err := os.Create(pidfile)
+	if err != nil {
+		return err
+	}
+
+	defer file.Close()
+
+	_, err = fmt.Fprintf(file, "%d", os.Getpid())
+	return err
+}
+
+func RemovePidFile(pidfile string) {
+	if err := os.Remove(pidfile); err != nil {
+		log.Printf("Error removing %s: %s", pidfile, err)
+	}
+}

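A rough usage sketch for the new pidfile helpers: create the file at daemon startup, refuse to start if the recorded pid still has a /proc entry, and remove the file on shutdown. The /var/run/docker.pid path is only illustrative (writing it needs root), and the daemon's actual wiring may differ:

package main

import (
	"github.com/dotcloud/docker/utils"
	"log"
)

func main() {
	pidfile := "/var/run/docker.pid" // illustrative path, not necessarily what the daemon uses
	if err := utils.CreatePidFile(pidfile); err != nil {
		// A live process already owns the pidfile (or it could not be written).
		log.Fatal(err)
	}
	defer utils.RemovePidFile(pidfile)

	// ... run the daemon's main loop here ...
}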
+ 16 - 0
utils/random.go

@@ -0,0 +1,16 @@
+package utils
+
+import (
+	"io"
+	"crypto/rand"
+	"encoding/hex"
+)
+
+func RandomString() string {
+	id := make([]byte, 32)
+	_, err := io.ReadFull(rand.Reader, id)
+	if err != nil {
+		panic(err) // This shouldn't happen
+	}
+	return hex.EncodeToString(id)
+}

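For reference, RandomString reads 32 bytes from crypto/rand and hex-encodes them, so callers always get a 64-character identifier (it panics only if the system's random source fails). A quick usage sketch:

package main

import (
	"fmt"
	"github.com/dotcloud/docker/utils"
)

func main() {
	id := utils.RandomString()
	fmt.Println(len(id), id) // 64, followed by the hex string
}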
+ 25 - 0
utils/utils.go

@@ -16,6 +16,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"runtime"
+	"regexp"
 	"strconv"
 	"strings"
 	"sync"
@@ -695,6 +696,13 @@ func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
 	return n, err
 }
 
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	wf.Lock()
+	defer wf.Unlock()
+	wf.flusher.Flush()
+}
+
 func NewWriteFlusher(w io.Writer) *WriteFlusher {
 	var flusher http.Flusher
 	if f, ok := w.(http.Flusher); ok {
@@ -896,6 +904,23 @@ func StripComments(input []byte, commentMarker []byte) []byte {
 	return output
 }
 
+// GetNameserversAsCIDR returns nameservers (if any) listed in 
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func GetNameserversAsCIDR(resolvConf []byte) []string {
+	var parsedResolvConf = StripComments(resolvConf, []byte("#"))
+	nameservers := []string{}
+	re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
+	for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
+		var ns = re.FindSubmatch(line)
+		if len(ns) > 0 {
+			nameservers = append(nameservers, string(ns[1])+"/32")
+		}
+	}
+
+	return nameservers
+}
+
 func ParseHost(host string, port int, addr string) (string, error) {
 	var proto string
 	switch {

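As the comment on GetNameserversAsCIDR says, each returned string carries a /32 suffix so it can be handed directly to net.ParseCIDR. A usage sketch with the resolv.conf content inlined rather than read from /etc/resolv.conf:

package main

import (
	"fmt"
	"github.com/dotcloud/docker/utils"
	"net"
)

func main() {
	resolvConf := []byte("# generated by example\nnameserver 8.8.8.8\nsearch example.com\n")
	for _, nsCIDR := range utils.GetNameserversAsCIDR(resolvConf) {
		// Each entry is already a valid CIDR block such as "8.8.8.8/32".
		ip, ipnet, err := net.ParseCIDR(nsCIDR)
		if err != nil {
			panic(err)
		}
		fmt.Println(ip, ipnet) // 8.8.8.8 8.8.8.8/32
	}
}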
+ 38 - 0
utils/utils_test.go

@@ -444,3 +444,41 @@ func TestParsePortMapping(t *testing.T) {
 		t.Fail()
 	}
 }
+
+func TestGetNameserversAsCIDR(t *testing.T) {
+	for resolv, result := range map[string][]string{`
+nameserver 1.2.3.4
+nameserver 4.3.2.1
+search example.com`: {"1.2.3.4/32", "4.3.2.1/32"},
+		`search example.com`: {},
+		`nameserver 1.2.3.4
+search example.com
+nameserver 4.3.2.1`: []string{"1.2.3.4/32", "4.3.2.1/32"},
+		``: []string{},
+		`  nameserver 1.2.3.4   `: []string{"1.2.3.4/32"},
+		`search example.com
+nameserver 1.2.3.4
+#nameserver 4.3.2.1`: []string{"1.2.3.4/32"},
+		`search example.com
+nameserver 1.2.3.4 # not 4.3.2.1`: []string{"1.2.3.4/32"},
+	} {
+		test := GetNameserversAsCIDR([]byte(resolv))
+		if !StrSlicesEqual(test, result) {
+			t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
+		}
+	}
+}
+
+func StrSlicesEqual(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+
+	return true
+}

+ 8 - 7
utils_test.go

@@ -67,7 +67,7 @@ func newTestRuntime(prefix string) (runtime *Runtime, err error) {
 	}
 
 	config := &DaemonConfig{
-		GraphPath:   root,
+		Root:   root,
 		AutoRestart: false,
 	}
 	runtime, err = NewRuntimeFromDirectory(config)
@@ -116,7 +116,7 @@ func readFile(src string, t *testing.T) (content string) {
 // dynamically replaced by the current test image.
 // The caller is responsible for destroying the container.
 // Call t.Fatal() at the first error.
-func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConfig, error) {
+func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
 	config, hostConfig, _, err := ParseRun(args, nil)
 	defer func() {
 		if err != nil && t != nil {
@@ -124,16 +124,17 @@ func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConf
 		}
 	}()
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 	if config.Image == "_" {
 		config.Image = GetTestImage(r).ID
 	}
 	c, _, err := r.Create(config, "")
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
-	return c, hostConfig, nil
+	c.hostConfig = hostConfig
+	return c, nil
 }
 
 // Create a test container, start it, wait for it to complete, destroy it,
@@ -146,7 +147,7 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
 			t.Fatal(err)
 		}
 	}()
-	container, hostConfig, err := mkContainer(r, args, t)
+	container, err := mkContainer(r, args, t)
 	if err != nil {
 		return "", err
 	}
@@ -156,7 +157,7 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e
 		return "", err
 	}
 	defer stdout.Close()
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(); err != nil {
 		return "", err
 	}
 	container.Wait()