Explorar o código

Merge branch 'master' of https://github.com/dotcloud/docker

Kyle Conroy hai 12 anos
pai
achega
9a60f36ccc
Modificáronse 66 ficheiros con 1524 adicións e 710 borrados
  1. 100 0
      CHANGELOG.md
  2. 1 1
      CONTRIBUTING.md
  3. 17 14
      Dockerfile
  4. 0 95
      Makefile
  5. 1 25
      README.md
  6. 1 0
      VERSION
  7. 5 4
      Vagrantfile
  8. 16 4
      api.go
  9. 4 4
      auth/auth.go
  10. 6 1
      buildfile.go
  11. 11 0
      buildfile_test.go
  12. 53 23
      commands.go
  13. 2 4
      commands_test.go
  14. 60 8
      container.go
  15. 31 1
      container_test.go
  16. 107 31
      contrib/docker.bash
  17. 5 5
      contrib/install.sh
  18. 11 0
      docker/docker.go
  19. 1 1
      docs/sources/api/docker_remote_api.rst
  20. 2 1
      docs/sources/api/docker_remote_api_v1.4.rst
  21. 1 1
      docs/sources/commandline/cli.rst
  22. 47 1
      docs/sources/commandline/command/attach.rst
  23. 4 3
      docs/sources/commandline/command/cp.rst
  24. 1 1
      docs/sources/commandline/command/run.rst
  25. BIN=BIN
      docs/sources/concepts/images/lego_docker.jpg
  26. 0 16
      docs/sources/concepts/index.rst
  27. 0 129
      docs/sources/concepts/manifesto.rst
  28. 1 0
      docs/sources/examples/index.rst
  29. 100 0
      docs/sources/examples/mongodb.rst
  30. 2 2
      docs/sources/examples/nodejs_web_app.rst
  31. 4 4
      docs/sources/index.rst
  32. BIN=BIN
      docs/sources/installation/images/win/hp_bios_vm.JPG
  33. BIN=BIN
      docs/sources/installation/images/win/ts_go_bios.JPG
  34. BIN=BIN
      docs/sources/installation/images/win/ts_no_docker.JPG
  35. 23 24
      docs/sources/installation/ubuntulinux.rst
  36. 2 0
      docs/sources/installation/vagrant.rst
  37. 28 0
      docs/sources/installation/windows.rst
  38. 0 0
      docs/sources/static_files/dockerlogo-h.png
  39. 0 0
      docs/sources/static_files/dockerlogo-v.png
  40. 1 4
      docs/sources/toctree.rst
  41. 43 0
      docs/sources/use/baseimages.rst
  42. 5 1
      docs/sources/use/basics.rst
  43. 112 21
      docs/sources/use/builder.rst
  44. 2 2
      docs/sources/use/index.rst
  45. 2 0
      docs/sources/use/port_redirection.rst
  46. 42 28
      docs/sources/use/workingwithrepository.rst
  47. 3 1
      docs/theme/docker/layout.html
  48. 0 133
      hack/RELEASE.md
  49. 0 36
      hack/dockerbuilder/Dockerfile
  50. 0 1
      hack/dockerbuilder/MAINTAINERS
  51. 0 42
      hack/dockerbuilder/dockerbuilder
  52. 106 0
      hack/release/README.md
  53. 179 0
      hack/release/make.sh
  54. 175 0
      hack/release/release.sh
  55. 13 0
      lxc_template.go
  56. 16 1
      network.go
  57. 12 0
      packaging/README.md
  58. 0 8
      packaging/README.rst
  59. 16 6
      runtime.go
  60. 1 1
      runtime_test.go
  61. 66 17
      server.go
  62. 21 0
      utils.go
  63. 14 0
      utils/http.go
  64. 13 5
      utils/utils.go
  65. 18 0
      utils/utils_test.go
  66. 17 0
      utils_test.go

+ 100 - 0
CHANGELOG.md

@@ -1,5 +1,105 @@
 # Changelog
 # Changelog
 
 
+## 0.6.1 (2013-08-23)
+* Registry: Pass "meta" headers in API calls to the registry
+- Packaging: Use correct upstart script with new build tool
+- Packaging: Use libffi-dev, don't build it from sources
+- Packaging: Removed duplicate mercurial install command
+
+## 0.6.0 (2013-08-22)
+- Runtime: Load authConfig only when needed and fix useless WARNING
++ Runtime: Add lxc-conf flag to allow custom lxc options
+- Runtime: Fix race conditions in parallel pull
+- Runtime: Improve CMD, ENTRYPOINT, and attach docs.
+* Documentation: Small fix to docs regarding adding docker groups
+* Documentation: Add MongoDB image example
++ Builder: Add USER instruction to Dockerfile
+* Documentation: updated default -H docs
+* Remote API: Sort Images by most recent creation date.
++ Builder: Add workdir support for the Buildfile
++ Runtime: Add an option to set the working directory
+- Runtime: Show tag used when image is missing
+* Documentation: Update readme with dependencies for building
+* Documentation: Add instructions for creating and using the docker group
+* Remote API: Reworking opaque requests in registry module
+- Runtime: Fix Graph ByParent() to generate list of child images per parent image.
+* Runtime: Add Image name to LogEvent tests
+* Documentation: Add sudo to examples and installation to documentation
++ Hack: Bash Completion: Limit commands to containers of a relevant state
+* Remote API: Add image name in /events
+* Runtime: Apply volumes-from before creating volumes
+- Runtime: Make docker run handle SIGINT/SIGTERM
+- Runtime: Prevent crash when .dockercfg not readable
+* Hack: Add docker dependencies coverage testing into docker-ci
++ Runtime: Add -privileged flag and relevant tests, docs, and examples
++ Packaging: Docker-brew 0.5.2 support and memory footprint reduction
+- Runtime: Install script should be fetched over https, not http.
+* Packaging: Add new docker dependencies into docker-ci
+* Runtime: Use Go 1.1.2 for dockerbuilder
+* Registry: Improve auth push
+* Runtime: API, issue 1471: Use groups for socket permissions
+* Documentation: PostgreSQL service example in documentation
+* Contrib: bash completion script
+* Tests: Improve TestKillDifferentUser to prevent timeout on buildbot
+* Documentation: Fix typo in docs for docker run -dns
+* Documentation: Adding a reference to ps -a
+- Runtime: Correctly detect IPv4 forwarding
+- Packaging: Revert "docker.upstart: avoid spawning a `sh` process"
+* Runtime: Use ranged for loop on channels
+- Runtime: Fix typo: fmt.Sprint -> fmt.Sprintf
+- Tests: Fix typo in TestBindMounts (runContainer called without image)
+* Runtime: add websocket support to /container/<name>/attach/ws
+* Runtime: Mount /dev/shm as a tmpfs
+- Builder: Only count known instructions as build steps
+- Builder: Fix docker build and docker events output
+- Runtime: switch from http to https for get.docker.io
+* Tests: Improve TestGetContainersTop so it does not rely on sleep
++ Packaging: Docker-brew and Docker standard library
+* Testing: Add some tests in server and utils
++ Packaging: Release docker with docker
+- Builder: Make sure ENV instruction within build perform a commit each time
+* Packaging: Fix the upstart script generated by get.docker.io
+- Runtime: fix small \n error in docker build
+* Runtime: Let userland proxy handle container-bound traffic
+* Runtime: Updated the Docker CLI to specify a value for the "Host" header.
+* Runtime: Add warning when net.ipv4.ip_forwarding = 0
+* Registry: Registry unit tests + mock registry
+* Runtime: fixed #910. print user name to docker info output
+- Builder: Forbid certain paths within docker build ADD
+- Runtime: change network range to avoid conflict with EC2 DNS
+* Tests: Relax the lo interface test to allow iface index != 1
+* Documentation: Suggest installing linux-headers by default.
+* Documentation: Change the twitter handle
+* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
+* Remote API: Use mime pkg to parse Content-Type
+- Runtime: Reduce connect and read timeout when pinging the registry
+* Documentation: Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
+* Packaging: Enabled the docs to generate manpages.
+* Runtime: Parallel pull
+- Runtime: Handle ip route showing mask-less IP addresses
+* Documentation: Clarify Amazon EC2 installation
+* Documentation: 'Base' image is deprecated and should no longer be referenced in the docs.
+* Runtime: Fix to "Inject dockerinit at /.dockerinit"
+* Runtime: Allow ENTRYPOINT without CMD
+- Runtime: Always consider localhost as a domain name when parsing the FQN repos name
+* Remote API: 650 http utils and user agent field
+* Documentation: fix a typo in the ubuntu installation guide
+- Builder: Repository name (and optionally a tag) in build usage
+* Documentation: Move note about officially supported kernel
+* Packaging: Revert "Bind daemon to 0.0.0.0 in Vagrant."
+* Builder: Add no cache for docker build
+* Runtime: Add hostname to environment
+* Runtime: Add last stable version in `docker version`
+- Builder: Make sure ADD will create everything in 0755
+* Documentation: Add ufw doc
+* Tests: Add registry functional test to docker-ci
+- Documentation: Solved the logo being squished in Safari
+- Runtime: Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+* Runtime: Refactor checksum
+- Runtime: Improve connect message with socket error
+* Documentation: Added information about Docker's high level tools over LXC.
+* Don't read from stdout when only attached to stdin
+
 ## 0.5.3 (2013-08-13)
 ## 0.5.3 (2013-08-13)
 * Runtime: Use docker group for socket permissions
 * Runtime: Use docker group for socket permissions
 - Runtime: Spawn shell within upstart script
 - Runtime: Spawn shell within upstart script

+ 1 - 1
CONTRIBUTING.md

@@ -23,7 +23,7 @@ that feature *on top of* docker.
 ### Discuss your design on the mailing list
 ### Discuss your design on the mailing list
 
 
 We recommend discussing your plans [on the mailing
 We recommend discussing your plans [on the mailing
-list](https://groups.google.com/forum/?fromgroups#!forum/docker-club)
+list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev)
 before starting to code - especially for more ambitious contributions.
 before starting to code - especially for more ambitious contributions.
 This gives other contributors a chance to point you in the right
 This gives other contributors a chance to point you in the right
 direction, give feedback on your design, and maybe point out if someone
 direction, give feedback on your design, and maybe point out if someone

+ 17 - 14
Dockerfile

@@ -3,32 +3,35 @@ docker-version 0.4.2
 from	ubuntu:12.04
 from	ubuntu:12.04
 maintainer	Solomon Hykes <solomon@dotcloud.com>
 maintainer	Solomon Hykes <solomon@dotcloud.com>
 # Build dependencies
 # Build dependencies
+run	echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
+run	apt-get update
 run	apt-get install -y -q curl
 run	apt-get install -y -q curl
 run	apt-get install -y -q git
 run	apt-get install -y -q git
+run	apt-get install -y -q mercurial
 # Install Go
 # Install Go
-run	curl -s https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz | tar -v -C /usr/local -xz
+run	curl -s https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | tar -v -C /usr/local -xz
 env	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
 env	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
 env	GOPATH	/go
 env	GOPATH	/go
 env	CGO_ENABLED 0
 env	CGO_ENABLED 0
 run	cd /tmp && echo 'package main' > t.go && go test -a -i -v
 run	cd /tmp && echo 'package main' > t.go && go test -a -i -v
+# Ubuntu stuff
+run	apt-get install -y -q ruby1.9.3 rubygems libffi-dev
+run	gem install fpm
+run	apt-get install -y -q reprepro dpkg-sig
+# Install s3cmd 1.0.1 (earlier versions don't support env variables in the config)
+run	apt-get install -y -q python-pip
+run	pip install s3cmd
+run	pip install python-magic
+run	/bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
 # Download dependencies
 # Download dependencies
 run	PKG=github.com/kr/pty REV=27435c699;		 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/kr/pty REV=27435c699;		 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/gorilla/mux/ REV=9b36453141c;	 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/gorilla/mux/ REV=9b36453141c;	 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/dotcloud/tar/ REV=d06045a6d9;	 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
 run	PKG=github.com/dotcloud/tar/ REV=d06045a6d9;	 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
-# Run dependencies
-run	apt-get install -y iptables
-# lxc requires updating ubuntu sources
-run	echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-run	apt-get update
-run	apt-get install -y lxc
-run	apt-get install -y aufs-tools
-# Docker requires code.google.com/p/go.net/websocket
-run	apt-get install -y -q mercurial
-run	PKG=code.google.com/p/go.net REV=78ad7f42aa2e;	 hg clone https://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout -r $REV
+run	PKG=code.google.com/p/go.net/ REV=84a4013f96e0;  hg  clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && hg  checkout    $REV
 # Upload docker source
 # Upload docker source
 add	.       /go/src/github.com/dotcloud/docker
 add	.       /go/src/github.com/dotcloud/docker
+run	ln -s	/go/src/github.com/dotcloud/docker /src
 # Build the binary
 # Build the binary
-run	cd /go/src/github.com/dotcloud/docker/docker && go install -ldflags "-X main.GITCOMMIT '??' -d -w"
-env	PATH	/usr/local/go/bin:/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
-cmd	["docker"]
+run	cd /go/src/github.com/dotcloud/docker && hack/release/make.sh
+cmd	cd /go/src/github.com/dotcloud/docker && hack/release/release.sh

+ 0 - 95
Makefile

@@ -1,95 +0,0 @@
-DOCKER_PACKAGE := github.com/dotcloud/docker
-RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
-SRCRELEASE := docker-$(RELEASE_VERSION)
-BINRELEASE := docker-$(RELEASE_VERSION).tgz
-BUILD_SRC := build_src
-BUILD_PATH := ${BUILD_SRC}/src/${DOCKER_PACKAGE}
-
-GIT_ROOT := $(shell git rev-parse --show-toplevel)
-BUILD_DIR := $(CURDIR)/.gopath
-
-GOPATH ?= $(BUILD_DIR)
-export GOPATH
-
-GO_OPTIONS ?= -a -ldflags='-w -d'
-ifeq ($(VERBOSE), 1)
-GO_OPTIONS += -v
-endif
-
-GIT_COMMIT = $(shell git rev-parse --short HEAD)
-GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
-
-BUILD_OPTIONS = -a -ldflags "-X main.GITCOMMIT $(GIT_COMMIT)$(GIT_STATUS) -d -w"
-
-SRC_DIR := $(GOPATH)/src
-
-DOCKER_DIR := $(SRC_DIR)/$(DOCKER_PACKAGE)
-DOCKER_MAIN := $(DOCKER_DIR)/docker
-
-DOCKER_BIN_RELATIVE := bin/docker
-DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
-
-.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
-
-all: $(DOCKER_BIN)
-
-$(DOCKER_BIN): $(DOCKER_DIR)
-	@mkdir -p  $(dir $@)
-	@(cd $(DOCKER_MAIN); CGO_ENABLED=0 go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
-	@echo $(DOCKER_BIN_RELATIVE) is created.
-
-$(DOCKER_DIR):
-	@mkdir -p $(dir $@)
-	@if [ -h $@ ]; then rm -f $@; fi; ln -sf $(CURDIR)/ $@
-	@(cd $(DOCKER_MAIN); go get -d $(GO_OPTIONS))
-
-whichrelease:
-	echo $(RELEASE_VERSION)
-
-release: $(BINRELEASE)
-	s3cmd -P put $(BINRELEASE) s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-$(RELEASE_VERSION).tgz
-	s3cmd -P put docker-latest.tgz s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-latest.tgz
-	s3cmd -P put $(SRCRELEASE)/bin/docker s3://get.docker.io/builds/`uname -s`/`uname -m`/docker
-	echo $(RELEASE_VERSION) > latest ; s3cmd -P put latest s3://get.docker.io/latest ; rm latest
-
-srcrelease: $(SRCRELEASE)
-deps: $(DOCKER_DIR)
-
-# A clean checkout of $RELEASE_VERSION, with vendored dependencies
-$(SRCRELEASE):
-	rm -fr $(SRCRELEASE)
-	git clone $(GIT_ROOT) $(SRCRELEASE)
-	cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
-
-# A binary release ready to be uploaded to a mirror
-$(BINRELEASE): $(SRCRELEASE)
-	rm -f $(BINRELEASE)
-	cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
-	cd $(SRCRELEASE); cp -R bin docker-latest; tar -f ../docker-latest.tgz -zv -c docker-latest
-clean:
-	@rm -rf $(dir $(DOCKER_BIN))
-ifeq ($(GOPATH), $(BUILD_DIR))
-	@rm -rf $(BUILD_DIR)
-else ifneq ($(DOCKER_DIR), $(realpath $(DOCKER_DIR)))
-	@rm -f $(DOCKER_DIR)
-endif
-
-test:
-	# Copy docker source and dependencies for testing
-	rm -rf ${BUILD_SRC}; mkdir -p ${BUILD_PATH}
-	tar --exclude=${BUILD_SRC} -cz . | tar -xz -C ${BUILD_PATH}
-	GOPATH=${CURDIR}/${BUILD_SRC} go get -d
-	# Do the test
-	sudo -E GOPATH=${CURDIR}/${BUILD_SRC} CGO_ENABLED=0 go test ${GO_OPTIONS}
-
-testall: all
-	@(cd $(DOCKER_DIR); CGO_ENABLED=0 sudo -E go test ./... $(GO_OPTIONS))
-
-fmt:
-	@gofmt -s -l -w .
-
-hack:
-	cd $(CURDIR)/hack && vagrant up
-
-ssh-dev:
-	cd $(CURDIR)/hack && vagrant ssh

+ 1 - 25
README.md

@@ -163,29 +163,6 @@ supported.
 * [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
 * [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
 * [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
 * [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
 
 
-Installing from source
-----------------------
-
-1. Install Dependencies
-    * [Go language 1.1.x](http://golang.org/doc/install)
-    * [git](http://git-scm.com)
-    * [lxc](http://lxc.sourceforge.net)
-    * [aufs-tools](http://aufs.sourceforge.net)
-
-2. Checkout the source code
-
-   ```bash
-   git clone http://github.com/dotcloud/docker
-   ```
-
-3. Build the ``docker`` binary
-
-   ```bash
-   cd docker
-   make VERBOSE=1
-   sudo cp ./bin/docker /usr/local/bin/docker
-   ```
-
 Usage examples
 Usage examples
 ==============
 ==============
 
 
@@ -305,8 +282,7 @@ Contributing to Docker
 ======================
 ======================
 
 
 Want to hack on Docker? Awesome! There are instructions to get you
 Want to hack on Docker? Awesome! There are instructions to get you
-started on the website:
-http://docs.docker.io/en/latest/contributing/contributing/
+started [here](CONTRIBUTING.md).
 
 
 They are probably not perfect, please let us know if anything feels
 They are probably not perfect, please let us know if anything feels
 wrong or incomplete.
 wrong or incomplete.

+ 1 - 0
VERSION

@@ -0,0 +1 @@
+0.6.1-dev

+ 5 - 4
Vagrantfile

@@ -17,11 +17,12 @@ Vagrant::Config.run do |config|
   # Provision docker and new kernel if deployment was not done
   # Provision docker and new kernel if deployment was not done
   if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
   if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
     # Add lxc-docker package
     # Add lxc-docker package
-    pkg_cmd = "apt-get update -qq; apt-get install -q -y python-software-properties; " \
-      "add-apt-repository -y ppa:dotcloud/lxc-docker; apt-get update -qq; " \
-      "apt-get install -q -y lxc-docker; "
+    pkg_cmd = "wget -q -O - http://get.docker.io/gpg | apt-key add -;" \
+      "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
+      "apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
     # Add X.org Ubuntu backported 3.8 kernel
     # Add X.org Ubuntu backported 3.8 kernel
-    pkg_cmd << "add-apt-repository -y ppa:ubuntu-x-swat/r-lts-backport; " \
+    pkg_cmd << "apt-get update -qq; apt-get install -q -y python-software-properties; " \
+      "add-apt-repository -y ppa:ubuntu-x-swat/r-lts-backport; " \
       "apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; "
       "apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; "
     # Add guest additions if local vbox VM
     # Add guest additions if local vbox VM
     is_vbox = true
     is_vbox = true

+ 16 - 4
api.go

@@ -101,7 +101,7 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Reque
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	status, err := auth.Login(authConfig, srv.HTTPRequestFactory())
+	status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -399,7 +399,13 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht
 	}
 	}
 	sf := utils.NewStreamFormatter(version > 1.0)
 	sf := utils.NewStreamFormatter(version > 1.0)
 	if image != "" { //pull
 	if image != "" { //pull
-		if err := srv.ImagePull(image, tag, w, sf, &auth.AuthConfig{}, version > 1.3); err != nil {
+		metaHeaders := map[string][]string{}
+		for k, v := range r.Header {
+			if strings.HasPrefix(k, "X-Meta-") {
+				metaHeaders[k] = v
+			}
+		}
+		if err := srv.ImagePull(image, tag, w, sf, &auth.AuthConfig{}, metaHeaders, version > 1.3); err != nil {
 			if sf.Used() {
 			if sf.Used() {
 				w.Write(sf.FormatError(err))
 				w.Write(sf.FormatError(err))
 				return nil
 				return nil
@@ -468,6 +474,12 @@ func postImagesInsert(srv *Server, version float64, w http.ResponseWriter, r *ht
 
 
 func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	authConfig := &auth.AuthConfig{}
 	authConfig := &auth.AuthConfig{}
+	metaHeaders := map[string][]string{}
+	for k, v := range r.Header {
+		if strings.HasPrefix(k, "X-Meta-") {
+			metaHeaders[k] = v
+		}
+	}
 	if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
 	if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
 		return err
 		return err
 	}
 	}
@@ -483,7 +495,7 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
 		w.Header().Set("Content-Type", "application/json")
 		w.Header().Set("Content-Type", "application/json")
 	}
 	}
 	sf := utils.NewStreamFormatter(version > 1.0)
 	sf := utils.NewStreamFormatter(version > 1.0)
-	if err := srv.ImagePush(name, w, sf, authConfig); err != nil {
+	if err := srv.ImagePush(name, w, sf, authConfig, metaHeaders); err != nil {
 		if sf.Used() {
 		if sf.Used() {
 			w.Write(sf.FormatError(err))
 			w.Write(sf.FormatError(err))
 			return nil
 			return nil
@@ -526,7 +538,7 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory swap capabilities. Limitation discarded.")
 	}
 	}
 
 
-	if !srv.runtime.capabilities.IPv4Forwarding {
+	if srv.runtime.capabilities.IPv4ForwardingDisabled {
 		log.Println("Warning: IPv4 forwarding is disabled.")
 		log.Println("Warning: IPv4 forwarding is disabled.")
 		out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
 		out.Warnings = append(out.Warnings, "IPv4 forwarding is disabled.")
 	}
 	}

+ 4 - 4
auth/auth.go

@@ -76,7 +76,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
 	configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath}
 	configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath}
 	confFile := path.Join(rootPath, CONFIGFILE)
 	confFile := path.Join(rootPath, CONFIGFILE)
 	if _, err := os.Stat(confFile); err != nil {
 	if _, err := os.Stat(confFile); err != nil {
-		return &configFile, ErrConfigFileMissing
+		return &configFile, nil //missing file is not an error
 	}
 	}
 	b, err := ioutil.ReadFile(confFile)
 	b, err := ioutil.ReadFile(confFile)
 	if err != nil {
 	if err != nil {
@@ -86,13 +86,13 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
 	if err := json.Unmarshal(b, &configFile.Configs); err != nil {
 	if err := json.Unmarshal(b, &configFile.Configs); err != nil {
 		arr := strings.Split(string(b), "\n")
 		arr := strings.Split(string(b), "\n")
 		if len(arr) < 2 {
 		if len(arr) < 2 {
-			return nil, fmt.Errorf("The Auth config file is empty")
+			return &configFile, fmt.Errorf("The Auth config file is empty")
 		}
 		}
 		authConfig := AuthConfig{}
 		authConfig := AuthConfig{}
 		origAuth := strings.Split(arr[0], " = ")
 		origAuth := strings.Split(arr[0], " = ")
 		authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
 		authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
 		if err != nil {
 		if err != nil {
-			return nil, err
+			return &configFile, err
 		}
 		}
 		origEmail := strings.Split(arr[1], " = ")
 		origEmail := strings.Split(arr[1], " = ")
 		authConfig.Email = origEmail[1]
 		authConfig.Email = origEmail[1]
@@ -101,7 +101,7 @@ func LoadConfig(rootPath string) (*ConfigFile, error) {
 		for k, authConfig := range configFile.Configs {
 		for k, authConfig := range configFile.Configs {
 			authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
 			authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
 			if err != nil {
 			if err != nil {
-				return nil, err
+				return &configFile, err
 			}
 			}
 			authConfig.Auth = ""
 			authConfig.Auth = ""
 			configFile.Configs[k] = authConfig
 			configFile.Configs[k] = authConfig

+ 6 - 1
buildfile.go

@@ -56,7 +56,7 @@ func (b *buildFile) CmdFrom(name string) error {
 	if err != nil {
 	if err != nil {
 		if b.runtime.graph.IsNotExist(err) {
 		if b.runtime.graph.IsNotExist(err) {
 			remote, tag := utils.ParseRepositoryTag(name)
 			remote, tag := utils.ParseRepositoryTag(name)
-			if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, true); err != nil {
+			if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
 				return err
 				return err
 			}
 			}
 			image, err = b.runtime.repositories.LookupImage(name)
 			image, err = b.runtime.repositories.LookupImage(name)
@@ -197,6 +197,11 @@ func (b *buildFile) CmdExpose(args string) error {
 	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
 	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
 }
 }
 
 
+func (b *buildFile) CmdUser(args string) error {
+	b.config.User = args
+	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
+}
+
 func (b *buildFile) CmdInsert(args string) error {
 func (b *buildFile) CmdInsert(args string) error {
 	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
 	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
 }
 }

+ 11 - 0
buildfile_test.go

@@ -270,6 +270,17 @@ func TestBuildMaintainer(t *testing.T) {
 	}
 	}
 }
 }
 
 
+func TestBuildUser(t *testing.T) {
+	img := buildImage(testContextTemplate{`
+        from {IMAGE}
+        user dockerio
+    `, nil, nil}, t, nil, true)
+
+	if img.Config.User != "dockerio" {
+		t.Fail()
+	}
+}
+
 func TestBuildEnv(t *testing.T) {
 func TestBuildEnv(t *testing.T) {
 	img := buildImage(testContextTemplate{`
 	img := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}

+ 53 - 23
commands.go

@@ -19,6 +19,7 @@ import (
 	"os/signal"
 	"os/signal"
 	"path/filepath"
 	"path/filepath"
 	"reflect"
 	"reflect"
+	"runtime"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 	"syscall"
 	"syscall"
@@ -27,10 +28,9 @@ import (
 	"unicode"
 	"unicode"
 )
 )
 
 
-const VERSION = "0.5.3-dev"
-
 var (
 var (
 	GITCOMMIT string
 	GITCOMMIT string
+	VERSION string
 )
 )
 
 
 func (cli *DockerCli) getMethod(name string) (reflect.Method, bool) {
 func (cli *DockerCli) getMethod(name string) (reflect.Method, bool) {
@@ -72,7 +72,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 			return nil
 			return nil
 		}
 		}
 	}
 	}
-	help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n  -H=[tcp://%s:%d]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTHTTPHOST, DEFAULTHTTPPORT)
+	help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
 	for _, command := range [][]string{
 	for _, command := range [][]string{
 		{"attach", "Attach to a running container"},
 		{"attach", "Attach to a running container"},
 		{"build", "Build a container from a Dockerfile"},
 		{"build", "Build a container from a Dockerfile"},
@@ -303,6 +303,8 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
+	cli.LoadConfigFile()
+
 	var oldState *term.State
 	var oldState *term.State
 	if *flUsername == "" || *flPassword == "" || *flEmail == "" {
 	if *flUsername == "" || *flPassword == "" || *flEmail == "" {
 		oldState, err = term.SetRawTerminal(cli.terminalFd)
 		oldState, err = term.SetRawTerminal(cli.terminalFd)
@@ -433,6 +435,12 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
+	fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
+	fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
+	if GITCOMMIT != "" {
+		fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT)
+	}
+
 	body, _, err := cli.call("GET", "/version", nil)
 	body, _, err := cli.call("GET", "/version", nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -444,13 +452,12 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
 		utils.Debugf("Error unmarshal: body: %s, err: %s\n", body, err)
 		utils.Debugf("Error unmarshal: body: %s, err: %s\n", body, err)
 		return err
 		return err
 	}
 	}
-	fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
 	fmt.Fprintf(cli.out, "Server version: %s\n", out.Version)
 	fmt.Fprintf(cli.out, "Server version: %s\n", out.Version)
 	if out.GitCommit != "" {
 	if out.GitCommit != "" {
-		fmt.Fprintf(cli.out, "Git commit: %s\n", out.GitCommit)
+		fmt.Fprintf(cli.out, "Git commit (server): %s\n", out.GitCommit)
 	}
 	}
 	if out.GoVersion != "" {
 	if out.GoVersion != "" {
-		fmt.Fprintf(cli.out, "Go version: %s\n", out.GoVersion)
+		fmt.Fprintf(cli.out, "Go version (server): %s\n", out.GoVersion)
 	}
 	}
 
 
 	release := utils.GetReleaseVersion()
 	release := utils.GetReleaseVersion()
@@ -498,6 +505,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
 	}
 	}
 
 
 	if len(out.IndexServerAddress) != 0 {
 	if len(out.IndexServerAddress) != 0 {
+		cli.LoadConfigFile()
 		u := cli.configFile.Configs[out.IndexServerAddress].Username
 		u := cli.configFile.Configs[out.IndexServerAddress].Username
 		if len(u) > 0 {
 		if len(u) > 0 {
 			fmt.Fprintf(cli.out, "Username: %v\n", u)
 			fmt.Fprintf(cli.out, "Username: %v\n", u)
@@ -576,15 +584,17 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
+	var encounteredError error
 	for _, name := range args {
 	for _, name := range args {
 		_, _, err := cli.call("POST", "/containers/"+name+"/start", nil)
 		_, _, err := cli.call("POST", "/containers/"+name+"/start", nil)
 		if err != nil {
 		if err != nil {
 			fmt.Fprintf(cli.err, "%s\n", err)
 			fmt.Fprintf(cli.err, "%s\n", err)
+			encounteredError = fmt.Errorf("Error: failed to start one or more containers")
 		} else {
 		} else {
 			fmt.Fprintf(cli.out, "%s\n", name)
 			fmt.Fprintf(cli.out, "%s\n", name)
 		}
 		}
 	}
 	}
-	return nil
+	return encounteredError
 }
 }
 
 
 func (cli *DockerCli) CmdInspect(args ...string) error {
 func (cli *DockerCli) CmdInspect(args ...string) error {
@@ -838,12 +848,18 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
+	cli.LoadConfigFile()
+
 	// If we're not using a custom registry, we know the restrictions
 	// If we're not using a custom registry, we know the restrictions
 	// applied to repository names and can warn the user in advance.
 	// applied to repository names and can warn the user in advance.
 	// Custom repositories can have different rules, and we must also
 	// Custom repositories can have different rules, and we must also
 	// allow pushing by image ID.
 	// allow pushing by image ID.
 	if len(strings.SplitN(name, "/", 2)) == 1 {
 	if len(strings.SplitN(name, "/", 2)) == 1 {
-		return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", cli.configFile.Configs[auth.IndexServerAddress()].Username, name)
+		username := cli.configFile.Configs[auth.IndexServerAddress()].Username
+		if username == "" {
+			username = "<user>"
+		}
+		return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
 	}
 	}
 
 
 	v := url.Values{}
 	v := url.Values{}
@@ -1467,15 +1483,18 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		v := url.Values{}
 		v := url.Values{}
 		v.Set("logs", "1")
 		v.Set("logs", "1")
 		v.Set("stream", "1")
 		v.Set("stream", "1")
+		var out io.Writer
 
 
 		if config.AttachStdin {
 		if config.AttachStdin {
 			v.Set("stdin", "1")
 			v.Set("stdin", "1")
 		}
 		}
 		if config.AttachStdout {
 		if config.AttachStdout {
 			v.Set("stdout", "1")
 			v.Set("stdout", "1")
+			out = cli.out
 		}
 		}
 		if config.AttachStderr {
 		if config.AttachStderr {
 			v.Set("stderr", "1")
 			v.Set("stderr", "1")
+			out = cli.out
 		}
 		}
 
 
 		signals := make(chan os.Signal, 1)
 		signals := make(chan os.Signal, 1)
@@ -1489,7 +1508,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			}
 			}
 		}()
 		}()
 
 
-		if err := cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, cli.in, cli.out); err != nil {
+		if err := cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, cli.in, out); err != nil {
 			utils.Debugf("Error hijack: %s", err)
 			utils.Debugf("Error hijack: %s", err)
 			return err
 			return err
 		}
 		}
@@ -1515,6 +1534,10 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 	var copyData APICopy
 	var copyData APICopy
 	info := strings.Split(cmd.Arg(0), ":")
 	info := strings.Split(cmd.Arg(0), ":")
 
 
+	if len(info) != 2 {
+		return fmt.Errorf("Error: Resource not specified")
+	}
+
 	copyData.Resource = info[1]
 	copyData.Resource = info[1]
 	copyData.HostPath = cmd.Arg(1)
 	copyData.HostPath = cmd.Arg(1)
 
 
@@ -1661,11 +1684,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 	rwc, br := clientconn.Hijack()
 	rwc, br := clientconn.Hijack()
 	defer rwc.Close()
 	defer rwc.Close()
 
 
-	receiveStdout := utils.Go(func() error {
-		_, err := io.Copy(out, br)
-		utils.Debugf("[hijack] End of stdout")
-		return err
-	})
+	var receiveStdout (chan error)
+	if out != nil {
+		receiveStdout = utils.Go(func() error {
+			_, err := io.Copy(out, br)
+			utils.Debugf("[hijack] End of stdout")
+			return err
+		})
+	}
 
 
 	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
 	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
 		oldState, err := term.SetRawTerminal(cli.terminalFd)
 		oldState, err := term.SetRawTerminal(cli.terminalFd)
@@ -1693,9 +1719,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 		return nil
 		return nil
 	})
 	})
 
 
-	if err := <-receiveStdout; err != nil {
-		utils.Debugf("Error receiveStdout: %s", err)
-		return err
+	if out != nil {
+		if err := <-receiveStdout; err != nil {
+			utils.Debugf("Error receiveStdout: %s", err)
+			return err
+		}
 	}
 	}
 
 
 	if !cli.isTerminal {
 	if !cli.isTerminal {
@@ -1761,6 +1789,14 @@ func Subcmd(name, signature, description string) *flag.FlagSet {
 	return flags
 	return flags
 }
 }
 
 
+func (cli *DockerCli) LoadConfigFile() (err error) {
+	cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
+	if err != nil {
+		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
+	}
+	return err
+}
+
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
 	var (
 	var (
 		isTerminal = false
 		isTerminal = false
@@ -1777,15 +1813,9 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc
 	if err == nil {
 	if err == nil {
 		err = out
 		err = out
 	}
 	}
-
-	configFile, e := auth.LoadConfig(os.Getenv("HOME"))
-	if e != nil {
-		fmt.Fprintf(err, "WARNING: %s\n", e)
-	}
 	return &DockerCli{
 	return &DockerCli{
 		proto:      proto,
 		proto:      proto,
 		addr:       addr,
 		addr:       addr,
-		configFile: configFile,
 		in:         in,
 		in:         in,
 		out:        out,
 		out:        out,
 		err:        err,
 		err:        err,

+ 2 - 4
commands_test.go

@@ -318,7 +318,7 @@ func TestRunAttachStdin(t *testing.T) {
 	ch := make(chan struct{})
 	ch := make(chan struct{})
 	go func() {
 	go func() {
 		defer close(ch)
 		defer close(ch)
-		cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat")
+		cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat && sleep 5")
 	}()
 	}()
 
 
 	// Send input to the command, close stdin
 	// Send input to the command, close stdin
@@ -346,12 +346,10 @@ func TestRunAttachStdin(t *testing.T) {
 
 
 	// wait for CmdRun to return
 	// wait for CmdRun to return
 	setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() {
 	setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() {
-		// Unblock hijack end
-		stdout.Read([]byte{})
 		<-ch
 		<-ch
 	})
 	})
 
 
-	setTimeout(t, "Waiting for command to exit timed out", 5*time.Second, func() {
+	setTimeout(t, "Waiting for command to exit timed out", 10*time.Second, func() {
 		container.Wait()
 		container.Wait()
 	})
 	})
 
 

+ 60 - 8
container.go

@@ -20,6 +20,7 @@ import (
 	"strings"
 	"strings"
 	"syscall"
 	"syscall"
 	"time"
 	"time"
+	"net"
 )
 )
 
 
 type Container struct {
 type Container struct {
@@ -86,6 +87,7 @@ type Config struct {
 type HostConfig struct {
 type HostConfig struct {
 	Binds           []string
 	Binds           []string
 	ContainerIDFile string
 	ContainerIDFile string
+	LxcConf         []KeyValuePair
 }
 }
 
 
 type BindMap struct {
 type BindMap struct {
@@ -98,6 +100,11 @@ var (
 	ErrInvaidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
 	ErrInvaidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
 )
 )
 
 
+type KeyValuePair struct {
+	Key   string
+	Value string
+}
+
 func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
 	cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
 	if len(args) > 0 && args[0] != "--help" {
 	if len(args) > 0 && args[0] != "--help" {
@@ -140,6 +147,9 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
 	flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
 	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
 
 
+	var flLxcOpts ListOpts
+	cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
+
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil, nil, cmd, err
 		return nil, nil, cmd, err
 	}
 	}
@@ -187,6 +197,12 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		entrypoint = []string{*flEntrypoint}
 		entrypoint = []string{*flEntrypoint}
 	}
 	}
 
 
+	var lxcConf []KeyValuePair
+	lxcConf, err := parseLxcConfOpts(flLxcOpts)
+	if err != nil {
+		return nil, nil, cmd, err
+	}
+
 	config := &Config{
 	config := &Config{
 		Hostname:        *flHostname,
 		Hostname:        *flHostname,
 		PortSpecs:       flPorts,
 		PortSpecs:       flPorts,
@@ -212,6 +228,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	hostConfig := &HostConfig{
 	hostConfig := &HostConfig{
 		Binds:           binds,
 		Binds:           binds,
 		ContainerIDFile: *flContainerIDFile,
 		ContainerIDFile: *flContainerIDFile,
+		LxcConf:         lxcConf,
 	}
 	}
 
 
 	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
 	if capabilities != nil && *flMemory > 0 && !capabilities.SwapLimit {
@@ -315,7 +332,7 @@ func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
 	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
 	return ioutil.WriteFile(container.hostConfigPath(), data, 0666)
 }
 }
 
 
-func (container *Container) generateLXCConfig() error {
+func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
 	fo, err := os.Create(container.lxcConfigPath())
 	fo, err := os.Create(container.lxcConfigPath())
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -324,6 +341,11 @@ func (container *Container) generateLXCConfig() error {
 	if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
 	if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
 		return err
 		return err
 	}
 	}
+	if hostConfig != nil {
+		if err := LxcHostConfigTemplateCompiled.Execute(fo, hostConfig); err != nil {
+			return err
+		}
+	}
 	return nil
 	return nil
 }
 }
 
 
@@ -520,7 +542,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 	container.State.Lock()
 	container.State.Lock()
 	defer container.State.Unlock()
 	defer container.State.Unlock()
 
 
-	if len(hostConfig.Binds) == 0 {
+	if len(hostConfig.Binds) == 0 && len(hostConfig.LxcConf) == 0 {
 		hostConfig, _ = container.ReadHostConfig()
 		hostConfig, _ = container.ReadHostConfig()
 	}
 	}
 
 
@@ -548,7 +570,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		container.Config.MemorySwap = -1
 		container.Config.MemorySwap = -1
 	}
 	}
 
 
-	if !container.runtime.capabilities.IPv4Forwarding {
+	if container.runtime.capabilities.IPv4ForwardingDisabled {
 		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
 		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
 	}
 	}
 
 
@@ -645,7 +667,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		}
 		}
 	}
 	}
 
 
-	if err := container.generateLXCConfig(); err != nil {
+	if err := container.generateLXCConfig(hostConfig); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -778,14 +800,44 @@ func (container *Container) allocateNetwork() error {
 		return nil
 		return nil
 	}
 	}
 
 
-	iface, err := container.runtime.networkManager.Allocate()
-	if err != nil {
-		return err
+	var iface *NetworkInterface
+	var err error
+	if !container.State.Ghost {
+		iface, err = container.runtime.networkManager.Allocate()
+		if err != nil {
+			return err
+		}
+	} else {
+		manager := container.runtime.networkManager
+		if manager.disabled {
+			iface = &NetworkInterface{disabled: true}
+		} else {
+			iface = &NetworkInterface{
+				IPNet: net.IPNet{IP: net.ParseIP(container.NetworkSettings.IPAddress), Mask: manager.bridgeNetwork.Mask},
+				Gateway: manager.bridgeNetwork.IP,
+				manager: manager,
+				}
+			ipNum := ipToInt(iface.IPNet.IP)
+			manager.ipAllocator.inUse[ipNum] = struct{}{}
+		}
 	}
 	}
+
+	var portSpecs []string
+	if !container.State.Ghost {
+		portSpecs = container.Config.PortSpecs
+	} else {
+		for backend, frontend := range container.NetworkSettings.PortMapping["Tcp"] {
+			portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/tcp",frontend, backend))
+		}
+		for backend, frontend := range container.NetworkSettings.PortMapping["Udp"] {
+			portSpecs = append(portSpecs, fmt.Sprintf("%s:%s/udp",frontend, backend))
+		}
+	}
+
 	container.NetworkSettings.PortMapping = make(map[string]PortMapping)
 	container.NetworkSettings.PortMapping = make(map[string]PortMapping)
 	container.NetworkSettings.PortMapping["Tcp"] = make(PortMapping)
 	container.NetworkSettings.PortMapping["Tcp"] = make(PortMapping)
 	container.NetworkSettings.PortMapping["Udp"] = make(PortMapping)
 	container.NetworkSettings.PortMapping["Udp"] = make(PortMapping)
-	for _, spec := range container.Config.PortSpecs {
+	for _, spec := range portSpecs {
 		nat, err := iface.AllocatePort(spec)
 		nat, err := iface.AllocatePort(spec)
 		if err != nil {
 		if err != nil {
 			iface.Release()
 			iface.Release()

+ 31 - 1
container_test.go

@@ -1070,7 +1070,7 @@ func TestLXCConfig(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	defer runtime.Destroy(container)
 	defer runtime.Destroy(container)
-	container.generateLXCConfig()
+	container.generateLXCConfig(nil)
 	grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
 	grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
 	grepFile(t, container.lxcConfigPath(),
 	grepFile(t, container.lxcConfigPath(),
 		fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
 		fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
@@ -1078,6 +1078,36 @@ func TestLXCConfig(t *testing.T) {
 		fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
 		fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
 }
 }
 
 
+func TestCustomLxcConfig(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	container, err := NewBuilder(runtime).Create(&Config{
+		Image: GetTestImage(runtime).ID,
+		Cmd:   []string{"/bin/true"},
+
+		Hostname: "foobar",
+	},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container)
+	hostConfig := &HostConfig{LxcConf: []KeyValuePair{
+		{
+			Key:   "lxc.utsname",
+			Value: "docker",
+		},
+		{
+			Key:   "lxc.cgroup.cpuset.cpus",
+			Value: "0,1",
+		},
+	}}
+
+	container.generateLXCConfig(hostConfig)
+	grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker")
+	grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1")
+}
+
 func BenchmarkRunSequencial(b *testing.B) {
 func BenchmarkRunSequencial(b *testing.B) {
 	runtime := mkRuntime(b)
 	runtime := mkRuntime(b)
 	defer nuke(runtime)
 	defer nuke(runtime)

+ 107 - 31
contrib/docker.bash

@@ -115,7 +115,7 @@ _docker_build()
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "-t -q" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "-no-cache -t -q" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			_filedir
 			_filedir
@@ -138,11 +138,37 @@ _docker_commit()
 			COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) )
 			COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
-			__docker_containers_all
+			local counter=$cpos
+			while [ $counter -le $cword ]; do
+				case "${words[$counter]}" in
+					-author|-m|-run)
+						(( counter++ ))
+						;;
+					-*)
+						;;
+					*)
+						break
+						;;
+				esac
+				(( counter++ ))
+			done
+
+			if [ $counter -eq $cword ]; then
+				__docker_containers_all
+			fi
 			;;
 			;;
 	esac
 	esac
 }
 }
 
 
+_docker_cp()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers_all
+	else
+		_filedir
+	fi
+}
+
 _docker_diff()
 _docker_diff()
 {
 {
 	if [ $cpos -eq $cword ]; then
 	if [ $cpos -eq $cword ]; then
@@ -152,7 +178,21 @@ _docker_diff()
 
 
 _docker_events()
 _docker_events()
 {
 {
-	COMPREPLY=( $( compgen -W "-since" -- "$cur" ) )
+	case "$prev" in
+		-since)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-since" -- "$cur" ) )
+			;;
+		*)
+			;;
+	esac
 }
 }
 
 
 _docker_export()
 _docker_export()
@@ -231,7 +271,21 @@ _docker_kill()
 
 
 _docker_login()
 _docker_login()
 {
 {
-	COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) )
+	case "$prev" in
+		-e|-p|-u)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) )
+			;;
+		*)
+			;;
+	esac
 }
 }
 
 
 _docker_logs()
 _docker_logs()
@@ -250,12 +304,40 @@ _docker_port()
 
 
 _docker_ps()
 _docker_ps()
 {
 {
-	COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) )
+	case "$prev" in
+		-beforeId|-n|-sinceId)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) )
+			;;
+		*)
+			;;
+	esac
 }
 }
 
 
 _docker_pull()
 _docker_pull()
 {
 {
-	COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
+	case "$prev" in
+		-t)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
+			;;
+		*)
+			;;
+	esac
 }
 }
 
 
 _docker_push()
 _docker_push()
@@ -309,7 +391,7 @@ _docker_run()
 		-volumes-from)
 		-volumes-from)
 			__docker_containers_all
 			__docker_containers_all
 			;;
 			;;
-		-a|-c|-dns|-e|-entrypoint|-h|-m|-p|-u|-v)
+		-a|-c|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-w)
 			return
 			return
 			;;
 			;;
 		*)
 		*)
@@ -318,34 +400,27 @@ _docker_run()
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -m -n -p -t -u -v -volumes-from" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -lxc-conf -m -n -p -privileged -t -u -v -volumes-from -w" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
-			case "$cur" in
-				-*)
-					COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) )
-					;;
-				*)
-					local counter=$cpos
-					while [ $counter -le $cword ]; do
-						case "${words[$counter]}" in
-							-a|-c|-cidfile|-dns|-e|-entrypoint|-h|-m|-p|-u|-v|-volumes-from)
-								(( counter++ ))
-								;;
-							-*)
-								;;
-							*)
-								break
-								;;
-						esac
+			local counter=$cpos
+			while [ $counter -le $cword ]; do
+				case "${words[$counter]}" in
+					-a|-c|-cidfile|-dns|-e|-entrypoint|-h|-lxc-conf|-m|-p|-u|-v|-volumes-from|-w)
 						(( counter++ ))
 						(( counter++ ))
-					done
+						;;
+					-*)
+						;;
+					*)
+						break
+						;;
+				esac
+				(( counter++ ))
+			done
 
 
-					if [ $counter -eq $cword ]; then
-						__docker_image_repos_and_tags
-					fi
-					;;
-			esac
+			if [ $counter -eq $cword ]; then
+				__docker_image_repos_and_tags
+			fi
 			;;
 			;;
 	esac
 	esac
 }
 }
@@ -409,6 +484,7 @@ _docker()
 			attach
 			attach
 			build
 			build
 			commit
 			commit
+			cp
 			diff
 			diff
 			events
 			events
 			export
 			export

+ 5 - 5
contrib/install.sh

@@ -35,10 +35,10 @@ else
     fi
     fi
 fi
 fi
 
 
-echo "Downloading docker binary and uncompressing into /usr/local/bin..."
-curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest.tgz |
-tar -C /usr/local/bin --strip-components=1 -zxf- \
-docker-latest/docker
+echo "Downloading docker binary to /usr/local/bin..."
+curl -s https://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-latest \
+    > /usr/local/bin/docker
+chmod +x /usr/local/bin/docker
 
 
 if [ -f /etc/init/dockerd.conf ]
 if [ -f /etc/init/dockerd.conf ]
 then
 then
@@ -50,7 +50,7 @@ description "Docker daemon"
 start on filesystem or runlevel [2345]
 start on filesystem or runlevel [2345]
 stop on runlevel [!2345]
 stop on runlevel [!2345]
 respawn
 respawn
-exec env LANG="en_US.UTF-8" /usr/local/bin/docker -d
+exec /usr/local/bin/docker -d
 EOF
 EOF
 fi
 fi
 
 

+ 11 - 0
docker/docker.go

@@ -16,6 +16,7 @@ import (
 
 
 var (
 var (
 	GITCOMMIT string
 	GITCOMMIT string
+	VERSION string
 )
 )
 
 
 func main() {
 func main() {
@@ -25,6 +26,7 @@ func main() {
 		return
 		return
 	}
 	}
 	// FIXME: Switch d and D ? (to be more sshd like)
 	// FIXME: Switch d and D ? (to be more sshd like)
+	flVersion := flag.Bool("v", false, "Print version information and quit")
 	flDaemon := flag.Bool("d", false, "Daemon mode")
 	flDaemon := flag.Bool("d", false, "Daemon mode")
 	flDebug := flag.Bool("D", false, "Debug mode")
 	flDebug := flag.Bool("D", false, "Debug mode")
 	flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
 	flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
@@ -36,6 +38,10 @@ func main() {
 	flHosts := docker.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
 	flHosts := docker.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Parse()
 	flag.Parse()
+	if *flVersion {
+		showVersion()
+		return
+	}
 	if len(flHosts) > 1 {
 	if len(flHosts) > 1 {
 		flHosts = flHosts[1:] //trick to display a nice default value in the usage
 		flHosts = flHosts[1:] //trick to display a nice default value in the usage
 	}
 	}
@@ -52,6 +58,7 @@ func main() {
 		os.Setenv("DEBUG", "1")
 		os.Setenv("DEBUG", "1")
 	}
 	}
 	docker.GITCOMMIT = GITCOMMIT
 	docker.GITCOMMIT = GITCOMMIT
+	docker.VERSION = VERSION
 	if *flDaemon {
 	if *flDaemon {
 		if flag.NArg() != 0 {
 		if flag.NArg() != 0 {
 			flag.Usage()
 			flag.Usage()
@@ -74,6 +81,10 @@ func main() {
 	}
 	}
 }
 }
 
 
+func showVersion() {
+	fmt.Printf("Docker version %s, build %s\n", VERSION, GITCOMMIT)
+}
+
 func createPidFile(pidfile string) error {
 func createPidFile(pidfile string) error {
 	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
 	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
 		pid, err := strconv.Atoi(string(pidString))
 		pid, err := strconv.Atoi(string(pidString))

+ 1 - 1
docs/sources/api/docker_remote_api.rst

@@ -165,7 +165,7 @@ Initial version
 Docker Remote API Client Libraries
 Docker Remote API Client Libraries
 ==================================
 ==================================
 
 
-These libraries have been not tested by the Docker Maintainers for
+These libraries have not been tested by the Docker Maintainers for
 compatibility. Please file issues with the library owners.  If you
 compatibility. Please file issues with the library owners.  If you
 find more library implementations, please list them in Docker doc bugs
 find more library implementations, please list them in Docker doc bugs
 and we will add the libraries here.
 and we will add the libraries here.

+ 2 - 1
docs/sources/api/docker_remote_api_v1.4.rst

@@ -356,7 +356,8 @@ Start a container
            Content-Type: application/json
            Content-Type: application/json
 
 
            {
            {
-                "Binds":["/tmp:/tmp"]
+                "Binds":["/tmp:/tmp"],
+                "LxcConf":{"lxc.utsname":"docker"}
            }
            }
 
 
         **Example response**:
         **Example response**:

+ 1 - 1
docs/sources/commandline/cli.rst

@@ -15,7 +15,7 @@ To list available commands, either run ``docker`` with no parameters or execute
 
 
   $ sudo docker
   $ sudo docker
     Usage: docker [OPTIONS] COMMAND [arg...]
     Usage: docker [OPTIONS] COMMAND [arg...]
-      -H=[tcp://127.0.0.1:4243]: tcp://host:port to bind/connect to or unix://path/to/socket to use
+      -H=[unix:///var/run/docker.sock]: tcp://host:port to bind/connect to or unix://path/to/socket to use
 
 
     A self-sufficient runtime for linux containers.
     A self-sufficient runtime for linux containers.
 
 

+ 47 - 1
docs/sources/commandline/command/attach.rst

@@ -10,4 +10,50 @@
 
 
     Usage: docker attach CONTAINER
     Usage: docker attach CONTAINER
 
 
-    Attach to a running container
+    Attach to a running container.
+
+You can detach from the container again (and leave it running) with
+``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
+the Docker client when it quits.
+
+To stop a container, use ``docker stop``
+
+To kill the container, use ``docker kill``
+ 
+Examples:
+---------
+
+.. code-block:: bash
+
+     $ ID=$(sudo docker run -d ubuntu /usr/bin/top -b)
+     $ sudo docker attach $ID
+     top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+     Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+     Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+     Mem:    373572k total,   355560k used,    18012k free,    27872k buffers
+     Swap:   786428k total,        0k used,   786428k free,   221740k cached
+
+     PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
+      1 root      20   0 17200 1116  912 R    0  0.3   0:00.03 top                
+
+      top - 02:05:55 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
+      Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+      Cpu(s):  0.0%us,  0.2%sy,  0.0%ni, 99.8%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+      Mem:    373572k total,   355244k used,    18328k free,    27872k buffers
+      Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+        PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
+	    1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
+
+
+      top - 02:05:58 up  3:06,  0 users,  load average: 0.01, 0.02, 0.05
+      Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+      Cpu(s):  0.2%us,  0.3%sy,  0.0%ni, 99.5%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+      Mem:    373572k total,   355780k used,    17792k free,    27880k buffers
+      Swap:   786428k total,        0k used,   786428k free,   221776k cached
+
+      PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND            
+           1 root      20   0 17208 1144  932 R    0  0.3   0:00.03 top                
+     ^C$ 
+     $ sudo docker stop $ID
+

+ 4 - 3
docs/sources/commandline/command/cp.rst

@@ -2,12 +2,13 @@
 :description: Copy files/folders from the containers filesystem to the host path
 :description: Copy files/folders from the containers filesystem to the host path
 :keywords: cp, docker, container, documentation, copy
 :keywords: cp, docker, container, documentation, copy
 
 
-===========================================================
+============================================================================
 ``cp`` -- Copy files/folders from the containers filesystem to the host path
 ``cp`` -- Copy files/folders from the containers filesystem to the host path
-===========================================================
+============================================================================
 
 
 ::
 ::
 
 
     Usage: docker cp CONTAINER:RESOURCE HOSTPATH
     Usage: docker cp CONTAINER:RESOURCE HOSTPATH
 
 
-    Copy files/folders from the containers filesystem to the host path.  Paths are relative to the root of the filesystem.
+    Copy files/folders from the containers filesystem to the host
+    path.  Paths are relative to the root of the filesystem.

+ 1 - 1
docs/sources/commandline/command/run.rst

@@ -30,7 +30,7 @@
       -volumes-from="": Mount all volumes from the given container.
       -volumes-from="": Mount all volumes from the given container.
       -entrypoint="": Overwrite the default entrypoint set by the image.
       -entrypoint="": Overwrite the default entrypoint set by the image.
       -w="": Working directory inside the container
       -w="": Working directory inside the container
-
+      -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
 
 
 Examples
 Examples
 --------
 --------

BIN=BIN
docs/sources/concepts/images/lego_docker.jpg


+ 0 - 16
docs/sources/concepts/index.rst

@@ -1,16 +0,0 @@
-:title: Overview
-:description: Docker documentation summary
-:keywords: concepts, documentation, docker, containers
-
-
-
-Overview
-========
-
-Contents:
-
-.. toctree::
-   :maxdepth: 1
-
-   ../index
-   manifesto

+ 0 - 129
docs/sources/concepts/manifesto.rst

@@ -1,129 +0,0 @@
-:title: Manifesto
-:description: An overview of Docker and standard containers
-:keywords: containers, lxc, concepts, explanation
-
-.. _dockermanifesto:
-
-Docker Manifesto
-----------------
-
-Docker complements LXC with a high-level API which operates at the
-process level. It runs unix processes with strong guarantees of
-isolation and repeatability across servers.
-
-Docker is a great building block for automating distributed systems:
-large-scale web deployments, database clusters, continuous deployment
-systems, private PaaS, service-oriented architectures, etc.
-
-- **Heterogeneous payloads** Any combination of binaries, libraries,
-  configuration files, scripts, virtualenvs, jars, gems, tarballs, you
-  name it. No more juggling between domain-specific tools. Docker can
-  deploy and run them all.
-- **Any server** Docker can run on any x64 machine with a modern linux
-  kernel - whether it's a laptop, a bare metal server or a VM. This
-  makes it perfect for multi-cloud deployments.
-- **Isolation** docker isolates processes from each other and from the
-  underlying host, using lightweight containers.
-- **Repeatability** Because containers are isolated in their own
-  filesystem, they behave the same regardless of where, when, and
-  alongside what they run.
-
-.. image:: images/lego_docker.jpg
-   :target: http://bricks.argz.com/ins/7823-1/12
-
-What is a Standard Container?
-.............................
-
-Docker defines a unit of software delivery called a Standard
-Container. The goal of a Standard Container is to encapsulate a
-software component and all its dependencies in a format that is
-self-describing and portable, so that any compliant runtime can run it
-without extra dependency, regardless of the underlying machine and the
-contents of the container.
-
-The spec for Standard Containers is currently work in progress, but it
-is very straightforward. It mostly defines 1) an image format, 2) a
-set of standard operations, and 3) an execution environment.
-
-A great analogy for this is the shipping container. Just like Standard
-Containers are a fundamental unit of software delivery, shipping
-containers are a fundamental unit of physical delivery.
-
-Standard operations
-~~~~~~~~~~~~~~~~~~~
-
-Just like shipping containers, Standard Containers define a set of
-STANDARD OPERATIONS. Shipping containers can be lifted, stacked,
-locked, loaded, unloaded and labelled. Similarly, standard containers
-can be started, stopped, copied, snapshotted, downloaded, uploaded and
-tagged.
-
-
-Content-agnostic
-~~~~~~~~~~~~~~~~~~~
-
-Just like shipping containers, Standard Containers are
-CONTENT-AGNOSTIC: all standard operations have the same effect
-regardless of the contents. A shipping container will be stacked in
-exactly the same way whether it contains Vietnamese powder coffee or
-spare Maserati parts. Similarly, Standard Containers are started or
-uploaded in the same way whether they contain a postgres database, a
-php application with its dependencies and application server, or Java
-build artifacts.
-
-Infrastructure-agnostic
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be
-transported to thousands of facilities around the world, and
-manipulated by a wide variety of equipment. A shipping container can
-be packed in a factory in Ukraine, transported by truck to the nearest
-routing center, stacked onto a train, loaded into a German boat by an
-Australian-built crane, stored in a warehouse at a US facility,
-etc. Similarly, a standard container can be bundled on my laptop,
-uploaded to S3, downloaded, run and snapshotted by a build server at
-Equinix in Virginia, uploaded to 10 staging servers in a home-made
-Openstack cluster, then sent to 30 production instances across 3 EC2
-regions.
-
-
-Designed for automation
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Because they offer the same standard operations regardless of content
-and infrastructure, Standard Containers, just like their physical
-counterpart, are extremely well-suited for automation. In fact, you
-could say automation is their secret weapon.
-
-Many things that once required time-consuming and error-prone human
-effort can now be programmed. Before shipping containers, a bag of
-powder coffee was hauled, dragged, dropped, rolled and stacked by 10
-different people in 10 different locations by the time it reached its
-destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The
-process was slow, inefficient and cost a fortune - and was entirely
-different depending on the facility and the type of goods.
-
-Similarly, before Standard Containers, by the time a software
-component ran in production, it had been individually built,
-configured, bundled, documented, patched, vendored, templated, tweaked
-and instrumented by 10 different people on 10 different
-computers. Builds failed, libraries conflicted, mirrors crashed,
-post-it notes were lost, logs were misplaced, cluster updates were
-half-broken. The process was slow, inefficient and cost a fortune -
-and was entirely different depending on the language and
-infrastructure provider.
-
-Industrial-grade delivery
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are 17 million shipping containers in existence, packed with
-every physical good imaginable. Every single one of them can be loaded
-on the same boats, by the same cranes, in the same facilities, and
-sent anywhere in the World with incredible efficiency. It is
-embarrassing to think that a 30 ton shipment of coffee can safely
-travel half-way across the World in *less time* than it takes a
-software team to deliver its code from one datacenter to another
-sitting 10 miles away.
-
-With Standard Containers we can put an end to that embarrassment, by
-making INDUSTRIAL-GRADE DELIVERY of software a reality.

+ 1 - 0
docs/sources/examples/index.rst

@@ -21,3 +21,4 @@ Contents:
    running_ssh_service
    running_ssh_service
    couchdb_data_volumes
    couchdb_data_volumes
    postgresql_service
    postgresql_service
+   mongodb

+ 100 - 0
docs/sources/examples/mongodb.rst

@@ -0,0 +1,100 @@
+:title: Building a Docker Image with MongoDB
+:description: How to build a Docker image with MongoDB pre-installed
+:keywords: docker, example, package installation, networking, mongodb
+
+.. _mongodb_image:
+
+Building an Image with MongoDB
+==============================
+
+.. include:: example_header.inc
+
+The goal of this example is to show how you can build your own
+docker images with MongoDB preinstalled. We will do that by
+constructing a Dockerfile that downloads a base image, adds an
+apt source and installs the database software on Ubuntu.
+
+Creating a ``Dockerfile``
++++++++++++++++++++++++++
+
+Create an empty file called ``Dockerfile``:
+
+.. code-block:: bash
+
+    touch Dockerfile
+
+Next, define the parent image you want to use to build your own image on top of.
+Here, we’ll use `Ubuntu <https://index.docker.io/_/ubuntu/>`_ (tag: ``latest``)
+available on the `docker index <http://index.docker.io>`_:
+
+.. code-block:: bash
+
+    FROM    ubuntu:latest
+
+Since we want to be running the latest version of MongoDB we'll need to add the
+10gen repo to our apt sources list.
+
+.. code-block:: bash
+
+    # Add 10gen official apt source to the sources list
+    RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+    RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
+
+Then, we don't want Ubuntu to complain about init not being available so we'll
+divert /sbin/initctl to /bin/true so it thinks everything is working.
+
+.. code-block:: bash
+
+    # Hack for initctl not being available in Ubuntu
+    RUN dpkg-divert --local --rename --add /sbin/initctl
+    RUN ln -s /bin/true /sbin/initctl
+
+Afterwards we'll be able to update our apt repositories and install MongoDB
+
+.. code-block:: bash
+
+    # Install MongoDB
+    RUN apt-get update
+    RUN apt-get install -y mongodb-10gen
+
+To run MongoDB we'll have to create the default data directory (because we want it to
+run without needing to provide a special configuration file)
+
+.. code-block:: bash
+
+    # Create the MongoDB data directory
+    RUN mkdir -p /data/db
+
+Finally, we'll expose the standard port that MongoDB runs on (27017) as well as
+define an ENTRYPOINT for the container.
+
+.. code-block:: bash
+
+    EXPOSE 27017
+    ENTRYPOINT ["/usr/bin/mongod"]
+
+Now, let's build the image which will go through the ``Dockerfile`` we made and
+run all of the commands.
+
+.. code-block:: bash
+
+    docker build -t <yourname>/mongodb .
+
+Now you should be able to run ``mongod`` as a daemon and be able to connect on
+the local port!
+
+.. code-block:: bash
+
+    # Regular style
+    MONGO_ID=$(docker run -d <yourname>/mongodb)
+
+    # Lean and mean
+    MONGO_ID=$(docker run -d <yourname>/mongodb --noprealloc --smallfiles)
+
+    # Check the logs out
+    docker logs $MONGO_ID
+
+    # Connect and play around
+    mongo --port <port you get from `docker ps`>
+
+Sweet!

+ 2 - 2
docs/sources/examples/nodejs_web_app.rst

@@ -93,7 +93,7 @@ To install the right package for CentOS, we’ll use the instructions from the
     # Enable EPEL for Node.js
     # Enable EPEL for Node.js
     RUN     rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
     RUN     rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
     # Install Node.js and npm
     # Install Node.js and npm
-    RUN     yum install -y npm-1.2.17-5.el6
+    RUN     yum install -y npm
 
 
 To bundle your app’s source code inside the docker image, use the ``ADD``
 To bundle your app’s source code inside the docker image, use the ``ADD``
 command:
 command:
@@ -137,7 +137,7 @@ Your ``Dockerfile`` should now look like this:
     # Enable EPEL for Node.js
     # Enable EPEL for Node.js
     RUN     rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
     RUN     rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
     # Install Node.js and npm
     # Install Node.js and npm
-    RUN     yum install -y npm-1.2.17-5.el6
+    RUN     yum install -y npm
 
 
     # Bundle app source
     # Bundle app source
     ADD . /src
     ADD . /src

+ 4 - 4
docs/sources/index.rst

@@ -1,11 +1,11 @@
-:title: Welcome to the Docker Documentation
+:title: Docker Documentation
 :description: An overview of the Docker Documentation
 :description: An overview of the Docker Documentation
 :keywords: containers, lxc, concepts, explanation
 :keywords: containers, lxc, concepts, explanation
 
 
-Welcome
-=======
+.. image:: static_files/dockerlogo-h.png
 
 
-.. image:: concepts/images/dockerlogo-h.png
+Introduction
+------------
 
 
 ``docker``, the Linux Container Runtime, runs Unix processes with
 ``docker``, the Linux Container Runtime, runs Unix processes with
 strong guarantees of isolation across servers. Your software runs
 strong guarantees of isolation across servers. Your software runs

BIN=BIN
docs/sources/installation/images/win/hp_bios_vm.JPG


BIN=BIN
docs/sources/installation/images/win/ts_go_bios.JPG


BIN=BIN
docs/sources/installation/images/win/ts_no_docker.JPG


+ 23 - 24
docs/sources/installation/ubuntulinux.rst

@@ -2,12 +2,15 @@
 :description: Please note this project is currently under heavy development. It should not be used in production.
 :description: Please note this project is currently under heavy development. It should not be used in production.
 :keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
 :keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
 
 
+**These instructions have changed for 0.6. If you are upgrading from an earlier version, you will need to follow them again.**
+
 .. _ubuntu_linux:
 .. _ubuntu_linux:
 
 
 Ubuntu Linux
 Ubuntu Linux
 ============
 ============
 
 
-  **Please note this project is currently under heavy development. It should not be used in production.**
+   **Please note this project is currently under heavy development. It should not be used in production.**
+
 
 
 Right now, the officially supported distributions are:
 Right now, the officially supported distributions are:
 
 
@@ -35,8 +38,8 @@ Dependencies
 **Linux kernel 3.8**
 **Linux kernel 3.8**
 
 
 Due to a bug in LXC, docker works best on the 3.8 kernel. Precise
 Due to a bug in LXC, docker works best on the 3.8 kernel. Precise
-comes with a 3.2 kernel, so we need to upgrade it. The kernel we
-install comes with AUFS built in. We also include the generic headers
+comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll install when following these steps
+comes with AUFS built in. We also include the generic headers
 to enable packages that depend on them, like ZFS and the VirtualBox
 to enable packages that depend on them, like ZFS and the VirtualBox
 guest additions. If you didn't install the headers for your "precise"
 guest additions. If you didn't install the headers for your "precise"
 kernel, then you can skip these headers for the "raring" kernel. But
 kernel, then you can skip these headers for the "raring" kernel. But
@@ -56,14 +59,18 @@ it is safer to include them if you're not sure.
 Installation
 Installation
 ------------
 ------------
 
 
-Docker is available as a Ubuntu PPA (Personal Package Archive),
-`hosted on launchpad  <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
-which makes installing Docker on Ubuntu very easy.
+Docker is available as a Debian package, which makes installation easy.
+
+*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
+to follow them again.*
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   # Add the PPA sources to your apt sources list.
-   sudo apt-get install python-software-properties && sudo add-apt-repository ppa:dotcloud/lxc-docker
+   # Add the Docker repository key to your local keychain
+   sudo sh -c "curl https://get.docker.io/gpg | apt-key add -"
+
+   # Add the Docker repository to your apt sources list.
+   sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
 
 
    # Update your sources
    # Update your sources
    sudo apt-get update
    sudo apt-get update
@@ -101,30 +108,22 @@ have AUFS filesystem support enabled, so we need to install it.
    sudo apt-get update
    sudo apt-get update
    sudo apt-get install linux-image-extra-`uname -r`
    sudo apt-get install linux-image-extra-`uname -r`
 
 
-**add-apt-repository support**
-
-Some installations of Ubuntu 13.04 require ``software-properties-common`` to be
-installed before being able to use add-apt-repository.
-
-.. code-block:: bash
-
-  sudo apt-get install software-properties-common
-
 
 
 Installation
 Installation
 ------------
 ------------
 
 
-Docker is available as a Ubuntu PPA (Personal Package Archive),
-`hosted on launchpad  <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
-which makes installing Docker on Ubuntu very easy.
+Docker is available as a Debian package, which makes installation easy.
 
 
-
-Add the custom package sources to your apt sources list.
+*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
+to follow them again.*
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   # add the sources to your apt
-   sudo add-apt-repository ppa:dotcloud/lxc-docker
+   # Add the Docker repository key to your local keychain
+   sudo sh -c "curl https://get.docker.io/gpg | apt-key add -"
+
+   # Add the Docker repository to your apt sources list.
+   sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
 
 
    # update
    # update
    sudo apt-get update
    sudo apt-get update

+ 2 - 0
docs/sources/installation/vagrant.rst

@@ -2,6 +2,8 @@
 :description: This guide will setup a new virtualbox virtual machine with docker installed on your computer.
 :description: This guide will setup a new virtualbox virtual machine with docker installed on your computer.
 :keywords: Docker, Docker documentation, virtualbox, vagrant, git, ssh, putty, cygwin
 :keywords: Docker, Docker documentation, virtualbox, vagrant, git, ssh, putty, cygwin
 
 
+**Vagrant installation is temporarily out of date, it will be updated for 0.6 soon.**
+
 .. _install_using_vagrant:
 .. _install_using_vagrant:
 
 
 Using Vagrant (Mac, Linux)
 Using Vagrant (Mac, Linux)

+ 28 - 0
docs/sources/installation/windows.rst

@@ -2,6 +2,8 @@
 :description: Docker's tutorial to run docker on Windows
 :description: Docker's tutorial to run docker on Windows
 :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
 :keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
 
 
+**Vagrant installation is temporarily out of date, it will be updated for 0.6 soon.**
+
 .. _windows:
 .. _windows:
 
 
 Using Vagrant (Windows)
 Using Vagrant (Windows)
@@ -47,6 +49,8 @@ This should open a cmd prompt window.
 
 
 Alternatively, you can also use a Cygwin terminal, or Git Bash (or any other command line program you are usually using). The next steps would be the same.
 Alternatively, you can also use a Cygwin terminal, or Git Bash (or any other command line program you are usually using). The next steps would be the same.
 
 
+.. _launch_ubuntu:
+
 Launch an Ubuntu virtual server
 Launch an Ubuntu virtual server
 -------------------------------
 -------------------------------
 
 
@@ -166,3 +170,27 @@ You are now ready for the docker’s “hello world” example. Run
 All done!
 All done!
 
 
 Now you can continue with the :ref:`hello_world` example.
 Now you can continue with the :ref:`hello_world` example.
+
+Troubleshooting
+---------------
+
+VM does not boot
+````````````````
+
+.. image:: images/win/ts_go_bios.JPG
+
+If you run into this error message "The VM failed to remain in the 'running'
+state while attempting to boot", please check that your computer has virtualization
+technology available and activated by going to the BIOS. Here's an example for an HP
+computer (System configuration / Device configuration)
+
+.. image:: images/win/hp_bios_vm.JPG
+
+
+Docker is not installed
+```````````````````````
+
+.. image:: images/win/ts_no_docker.JPG
+
+If you run into this error message "The program 'docker' is currently not installed",
+try deleting the docker folder and restarting from :ref:`launch_ubuntu`

+ 0 - 0
docs/sources/concepts/images/dockerlogo-h.png → docs/sources/static_files/dockerlogo-h.png


+ 0 - 0
docs/sources/concepts/images/dockerlogo-v.png → docs/sources/static_files/dockerlogo-v.png


+ 1 - 4
docs/sources/toctree.rst

@@ -10,7 +10,7 @@ This documentation has the following resources:
 .. toctree::
 .. toctree::
    :titlesonly:
    :titlesonly:
 
 
-   concepts/index
+   Introduction <index>
    installation/index
    installation/index
    use/index
    use/index
    examples/index
    examples/index
@@ -19,6 +19,3 @@ This documentation has the following resources:
    api/index
    api/index
    terms/index
    terms/index
    faq
    faq
-
-
-

+ 43 - 0
docs/sources/use/baseimages.rst

@@ -0,0 +1,43 @@
+:title: Base Image Creation
+:description: How to create base images
+:keywords: Examples, Usage, base image, docker, documentation, examples
+
+.. _base_image_creation:
+
+Base Image Creation
+===================
+
+So you want to create your own :ref:`base_image_def`? Great!
+
+The specific process will depend heavily on the Linux distribution you
+want to package. We have some examples below, and you are encouraged
+to submit pull requests to contribute new ones.
+
+Getting Started
+...............
+
+In general, you'll want to start with a working machine that is
+running the distribution you'd like to package as a base image, though
+that is not required for some tools like Debian's `Debootstrap
+<https://wiki.debian.org/Debootstrap>`_, which you can also use to
+build Ubuntu images.
+
+It can be as simple as this to create an Ubuntu base image::
+
+  $ sudo debootstrap raring raring > /dev/null
+  $ sudo tar -C raring -c . | sudo docker import - raring
+  a29c15f1bf7a
+  $ sudo docker run raring cat /etc/lsb-release                     
+  DISTRIB_ID=Ubuntu
+  DISTRIB_RELEASE=13.04
+  DISTRIB_CODENAME=raring
+  DISTRIB_DESCRIPTION="Ubuntu 13.04"
+
+There are more example scripts for creating base images in the
+Docker Github Repo:
+
+* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
+* `CentOS
+  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-centos.sh>`_
+* `Debian
+  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debian.sh>`_

+ 5 - 1
docs/sources/use/basics.rst

@@ -33,6 +33,8 @@ Running an interactive shell
 
 
   # Run an interactive shell in the ubuntu image,
   # Run an interactive shell in the ubuntu image,
   # allocate a tty, attach stdin and stdout
   # allocate a tty, attach stdin and stdout
+  # To detach the tty without exiting the shell,
+  # use the escape sequence Ctrl-p + Ctrl-q
   sudo docker run -i -t ubuntu /bin/bash
   sudo docker run -i -t ubuntu /bin/bash
 
 
 Why ``sudo``?
 Why ``sudo``?
@@ -41,7 +43,7 @@ Why ``sudo``?
 The ``docker`` daemon always runs as root, and since ``docker``
 The ``docker`` daemon always runs as root, and since ``docker``
 version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
 version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
 port. By default that Unix socket is owned by the user *root*, and so,
 port. By default that Unix socket is owned by the user *root*, and so,
-by default, you can access it with ``sudo``. 
+by default, you can access it with ``sudo``.
 
 
 Starting in version 0.5.3, if you create a Unix group called *docker*
 Starting in version 0.5.3, if you create a Unix group called *docker*
 and add users to it, then the ``docker`` daemon will make the
 and add users to it, then the ``docker`` daemon will make the
@@ -56,6 +58,8 @@ you don't need to add ``sudo`` to all the client commands.
   sudo groupadd docker
   sudo groupadd docker
 
 
   # Add the ubuntu user to the docker group
   # Add the ubuntu user to the docker group
+  # You may have to logout and log back in again for
+  # this to take effect
   sudo gpasswd -a ubuntu docker
   sudo gpasswd -a ubuntu docker
 
 
   # Restart the docker daemon
   # Restart the docker daemon

+ 112 - 21
docs/sources/use/builder.rst

@@ -4,13 +4,13 @@
 
 
 .. _dockerbuilder:
 .. _dockerbuilder:
 
 
-==================
-Dockerfile Builder
-==================
+======================
+Dockerfiles for Images
+======================
 
 
 **Docker can act as a builder** and read instructions from a text
 **Docker can act as a builder** and read instructions from a text
-Dockerfile to automate the steps you would otherwise make manually to
-create an image. Executing ``docker build`` will run your steps and
+``Dockerfile`` to automate the steps you would otherwise take manually
+to create an image. Executing ``docker build`` will run your steps and
 commit them along the way, giving you a final image.
 commit them along the way, giving you a final image.
 
 
 .. contents:: Table of Contents
 .. contents:: Table of Contents
@@ -35,6 +35,8 @@ build succeeds:
 Docker will run your steps one-by-one, committing the result if necessary,
 Docker will run your steps one-by-one, committing the result if necessary,
 before finally outputting the ID of your new image.
 before finally outputting the ID of your new image.
 
 
+When you're done with your build, you're ready to look into :ref:`image_push`.
+
 2. Format
 2. Format
 =========
 =========
 
 
@@ -48,9 +50,9 @@ The Dockerfile format is quite simple:
 The Instruction is not case-sensitive, however convention is for them to be
 The Instruction is not case-sensitive, however convention is for them to be
 UPPERCASE in order to distinguish them from arguments more easily.
 UPPERCASE in order to distinguish them from arguments more easily.
 
 
-Docker evaluates the instructions in a Dockerfile in order. **The first
-instruction must be `FROM`** in order to specify the base image from
-which you are building.
+Docker evaluates the instructions in a Dockerfile in order. **The
+first instruction must be `FROM`** in order to specify the
+:ref:`base_image_def` from which you are building.
 
 
 Docker will ignore **comment lines** *beginning* with ``#``. A comment
 Docker will ignore **comment lines** *beginning* with ``#``. A comment
 marker anywhere in the rest of the line will be treated as an argument.
 marker anywhere in the rest of the line will be treated as an argument.
@@ -68,7 +70,9 @@ building images.
 
 
 The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent
 The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent
 instructions. As such, a valid Dockerfile must have ``FROM`` as its
 instructions. As such, a valid Dockerfile must have ``FROM`` as its
-first instruction.
+first instruction. The image can be any valid image -- it is
+especially easy to start by **pulling an image** from the
+:ref:`using_public_repositories`.
 
 
 ``FROM`` must be the first non-comment instruction in the
 ``FROM`` must be the first non-comment instruction in the
 ``Dockerfile``.
 ``Dockerfile``.
@@ -102,11 +106,50 @@ control.
 3.4 CMD
 3.4 CMD
 -------
 -------
 
 
-    ``CMD <command>``
+CMD has three forms:
+
+* ``CMD ["executable","param1","param2"]`` (like an *exec*, preferred form)
+* ``CMD ["param1","param2"]`` (as *default parameters to ENTRYPOINT*)
+* ``CMD command param1 param2`` (as a *shell*)
+
+There can only be one CMD in a Dockerfile. If you list more than one
+CMD then only the last CMD will take effect.
+
+**The main purpose of a CMD is to provide defaults for an executing
+container.** These defaults can include an executable, or they can
+omit the executable, in which case you must specify an ENTRYPOINT as
+well.
+
+When used in the shell or exec formats, the ``CMD`` instruction sets
+the command to be executed when running the image.  This is
+functionally equivalent to running ``docker commit -run '{"Cmd":
+<command>}'`` outside the builder.
+
+If you use the *shell* form of the CMD, then the ``<command>`` will
+execute in ``/bin/sh -c``:
+
+.. code-block:: bash
+
+    FROM ubuntu
+    CMD echo "This is a test." | wc -
+
+If you want to **run your** ``<command>`` **without a shell** then you
+must express the command as a JSON array and give the full path to the
+executable. **This array form is the preferred format of CMD.** Any
+additional parameters must be individually expressed as strings in the
+array:
+
+.. code-block:: bash
+
+    FROM ubuntu
+    CMD ["/usr/bin/wc","--help"]
+
+If you would like your container to run the same executable every
+time, then you should consider using ``ENTRYPOINT`` in combination
+with ``CMD``. See :ref:`entrypoint_def`.
 
 
-The ``CMD`` instruction sets the command to be executed when running
-the image.  This is functionally equivalent to running ``docker commit
--run '{"Cmd": <command>}'`` outside the builder.
+If the user specifies arguments to ``docker run`` then they will
+override the default specified in CMD.
 
 
 .. note::
 .. note::
     Don't confuse ``RUN`` with ``CMD``. ``RUN`` actually runs a
     Don't confuse ``RUN`` with ``CMD``. ``RUN`` actually runs a
@@ -121,7 +164,7 @@ the image.  This is functionally equivalent to running ``docker commit
 The ``EXPOSE`` instruction sets ports to be publicly exposed when
 The ``EXPOSE`` instruction sets ports to be publicly exposed when
 running the image. This is functionally equivalent to running ``docker
 running the image. This is functionally equivalent to running ``docker
 commit -run '{"PortSpecs": ["<port>", "<port2>"]}'`` outside the
 commit -run '{"PortSpecs": ["<port>", "<port2>"]}'`` outside the
-builder.
+builder. Take a look at :ref:`port_redirection` for more information.
 
 
 3.6 ENV
 3.6 ENV
 -------
 -------
@@ -186,16 +229,55 @@ The copy obeys the following rules:
   directories in its path. All new files and directories are created
   directories in its path. All new files and directories are created
   with mode 0755, uid and gid 0.
   with mode 0755, uid and gid 0.
 
 
+.. _entrypoint_def:
+
 3.8 ENTRYPOINT
 3.8 ENTRYPOINT
 --------------
 --------------
 
 
-    ``ENTRYPOINT ["/bin/echo"]``
+ENTRYPOINT has two forms:
+
+* ``ENTRYPOINT ["executable", "param1", "param2"]`` (like an *exec*,
+  preferred form)
+* ``ENTRYPOINT command param1 param2`` (as a *shell*)
 
 
-The ``ENTRYPOINT`` instruction adds an entry command that will not be
-overwritten when arguments are passed to docker run, unlike the
+There can only be one ``ENTRYPOINT`` in a Dockerfile. If you have more
+than one ``ENTRYPOINT``, then only the last one in the Dockerfile will
+have an effect.
+
+An ``ENTRYPOINT`` helps you to configure a container that you can run
+as an executable. That is, when you specify an ``ENTRYPOINT``, then
+the whole container runs as if it was just that executable.
+
+The ``ENTRYPOINT`` instruction adds an entry command that will **not**
+be overwritten when arguments are passed to ``docker run``, unlike the
 behavior of ``CMD``.  This allows arguments to be passed to the
 behavior of ``CMD``.  This allows arguments to be passed to the
-entrypoint.  i.e. ``docker run <image> -d`` will pass the "-d" argument
-to the entrypoint.
+entrypoint.  i.e. ``docker run <image> -d`` will pass the "-d"
+argument to the ENTRYPOINT.
+
+You can specify parameters either in the ENTRYPOINT JSON array (as in
+"like an exec" above), or by using a CMD statement. Parameters in the
+ENTRYPOINT will not be overridden by the ``docker run`` arguments, but
+parameters specified via CMD will be overridden by ``docker run``
+arguments.
+
+Like a ``CMD``, you can specify a plain string for the ENTRYPOINT and
+it will execute in ``/bin/sh -c``:
+
+.. code-block:: bash
+
+    FROM ubuntu
+    ENTRYPOINT wc -l -
+
+For example, that Dockerfile's image will *always* take stdin as input
+("-") and print the number of lines ("-l"). If you wanted to make
+this optional but default, you could use a CMD:
+
+.. code-block:: bash
+
+    FROM ubuntu
+    CMD ["-l", "-"]
+    ENTRYPOINT ["/usr/bin/wc"]
+
 
 
 3.9 VOLUME
 3.9 VOLUME
 ----------
 ----------
@@ -205,14 +287,23 @@ to the entrypoint.
 The ``VOLUME`` instruction will add one or more new volumes to any
 The ``VOLUME`` instruction will add one or more new volumes to any
 container created from the image.
 container created from the image.
 
 
-3.10 WORKDIR
---------------
+3.10 USER
+---------
+
+    ``USER daemon``
+
+The ``USER`` instruction sets the username or UID to use when running
+the image.
+
+3.11 WORKDIR
+------------
 
 
     ``WORKDIR /path/to/workdir``
     ``WORKDIR /path/to/workdir``
 
 
 The ``WORKDIR`` instruction sets the working directory in which
 The ``WORKDIR`` instruction sets the working directory in which
 the command given by ``CMD`` is executed.
 the command given by ``CMD`` is executed.
 
 
+
 4. Dockerfile Examples
 4. Dockerfile Examples
 ======================
 ======================
 
 

+ 2 - 2
docs/sources/use/index.rst

@@ -13,8 +13,8 @@ Contents:
    :maxdepth: 1
    :maxdepth: 1
 
 
    basics
    basics
+   builder
    workingwithrepository
    workingwithrepository
+   baseimages
    port_redirection
    port_redirection
-   builder
    puppet
    puppet
-

+ 2 - 0
docs/sources/use/port_redirection.rst

@@ -3,6 +3,8 @@
 :keywords: Usage, basic port, docker, documentation, examples
 :keywords: Usage, basic port, docker, documentation, examples
 
 
 
 
+.. _port_redirection:
+
 Port redirection
 Port redirection
 ================
 ================
 
 

+ 42 - 28
docs/sources/use/workingwithrepository.rst

@@ -28,12 +28,18 @@ repositories. You can host your own Registry too! Docker acts as a
 client for these services via ``docker search, pull, login`` and
 client for these services via ``docker search, pull, login`` and
 ``push``.
 ``push``.
 
 
-Top-level, User, and Your Own Repositories
-------------------------------------------
+.. _using_public_repositories:
+
+Public Repositories
+-------------------
 
 
 There are two types of public repositories: *top-level* repositories
 There are two types of public repositories: *top-level* repositories
 which are controlled by the Docker team, and *user* repositories
 which are controlled by the Docker team, and *user* repositories
-created by individual contributors.
+created by individual contributors. Anyone can read from these
+repositories -- they really help people get started quickly! You could
+also use :ref:`using_private_repositories` if you need to keep control
+of who accesses your images, but we will only refer to public
+repositories in these examples.
 
 
 * Top-level repositories can easily be recognized by **not** having a
 * Top-level repositories can easily be recognized by **not** having a
   ``/`` (slash) in their name. These repositories can generally be
   ``/`` (slash) in their name. These repositories can generally be
@@ -46,28 +52,6 @@ created by individual contributors.
 * User images are not checked, it is therefore up to you whether or
 * User images are not checked, it is therefore up to you whether or
   not you trust the creator of this image.
   not you trust the creator of this image.
 
 
-Right now (version 0.5), private repositories are only possible by
-hosting `your own registry
-<https://github.com/dotcloud/docker-registry>`_.  To push or pull to a
-repository on your own registry, you must prefix the tag with the
-address of the registry's host, like this:
-
-.. code-block:: bash
-
-    # Tag to create a repository with the full registry location.
-    # The location (e.g. localhost.localdomain:5000) becomes
-    # a permanent part of the repository name
-    sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name
-
-    # Push the new repository to its home location on localhost
-    sudo docker push localhost.localdomain:5000/repo_name
-
-Once a repository has your registry's host name as part of the tag,
-you can push and pull it like any other repository, but it will
-**not** be searchable (or indexed at all) in the Central Index, and
-there will be no user name checking performed. Your registry will
-function completely independently from the Central Index.
-
 Find public images available on the Central Index
 Find public images available on the Central Index
 -------------------------------------------------
 -------------------------------------------------
 
 
@@ -105,6 +89,7 @@ If your username does not exist it will prompt you to also enter a
 password and your e-mail address. It will then automatically log you
 password and your e-mail address. It will then automatically log you
 in.
 in.
 
 
+.. _container_commit:
 
 
 Committing a container to a named image
 Committing a container to a named image
 ---------------------------------------
 ---------------------------------------
@@ -117,16 +102,45 @@ your container to an image within your username namespace.
     # for example docker commit $CONTAINER_ID dhrp/kickassapp
     # for example docker commit $CONTAINER_ID dhrp/kickassapp
     sudo docker commit <container_id> <username>/<repo_name>
     sudo docker commit <container_id> <username>/<repo_name>
 
 
+.. _image_push:
 
 
-Pushing a container to its repository
--------------------------------------
+Pushing an image to its repository
+----------------------------------
 
 
 In order to push an image to its repository you need to have committed
 In order to push an image to its repository you need to have committed
 your container to a named image (see above)
 your container to a named image (see above)
 
 
-Now you can commit this image to the repository
+Now you can commit this image to the repository designated by its name
+or tag.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
     # for example docker push dhrp/kickassapp
     # for example docker push dhrp/kickassapp
     sudo docker push <username>/<repo_name>
     sudo docker push <username>/<repo_name>
+
+.. _using_private_repositories:
+
+Private Repositories
+--------------------
+
+Right now (version 0.5), private repositories are only possible by
+hosting `your own registry
+<https://github.com/dotcloud/docker-registry>`_.  To push or pull to a
+repository on your own registry, you must prefix the tag with the
+address of the registry's host, like this:
+
+.. code-block:: bash
+
+    # Tag to create a repository with the full registry location.
+    # The location (e.g. localhost.localdomain:5000) becomes
+    # a permanent part of the repository name
+    sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name
+
+    # Push the new repository to its home location on localhost
+    sudo docker push localhost.localdomain:5000/repo_name
+
+Once a repository has your registry's host name as part of the tag,
+you can push and pull it like any other repository, but it will
+**not** be searchable (or indexed at all) in the Central Index, and
+there will be no user name checking performed. Your registry will
+function completely independently from the Central Index.

+ 3 - 1
docs/theme/docker/layout.html

@@ -213,7 +213,9 @@
         }
         }
 
 
         // attached handler on click
         // attached handler on click
-        $('.sidebar > ul > li > a').not(':last').click(function(){
+        // Do not attach to first element or last (intro, faq) so that
+        // first and last link directly instead of accordian
+        $('.sidebar > ul > li > a').not(':last').not(':first').click(function(){
 
 
             var index = $.inArray(this.href, openmenus)
             var index = $.inArray(this.href, openmenus)
 
 

+ 0 - 133
hack/RELEASE.md

@@ -1,133 +0,0 @@
-## A maintainer's guide to releasing Docker
-
-So you're in charge of a docker release? Cool. Here's what to do.
-
-If your experience deviates from this document, please document the changes to keep it
-up-to-date.
-
-
-### 1. Pull from master and create a release branch
-
-	```bash
-	$ git checkout master
-	$ git pull
-	$ git checkout -b bump_$VERSION
-	```
-
-### 2. Update CHANGELOG.md
-
-	You can run this command for reference:
-
-	```bash
-	LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
-	git log $LAST_VERSION..HEAD
-	```
-
-	Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
-
-	* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
-	new feature or upgrade, respectively.
-
-	* CATEGORY should describe which part of the project is affected.
-	Valid categories are:
-		* Builder
-		* Documentation
-		* Hack
-		* Packaging
-		* Remote API
-		* Runtime
-
-	* DESCRIPTION: a concise description of the change that is relevant to the end-user,
-	using the present tense.
-	Changes should be described in terms of how they affect the user, for example "new feature
-	X which allows Y", "fixed bug which caused X", "increased performance of Y".
-
-	EXAMPLES:
-
-		```
-		 + Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
-		 * Runtime: improve detection of kernel version
-		 - Remote API: fix a bug in the optional unix socket transport
-		 ```
-
-### 3. Change VERSION in commands.go
-
-### 4. Run all tests
-
-	```bash
-	$ make test
-	```
-
-### 5. Commit and create a pull request
-
-	```bash
-	$ git add commands.go CHANGELOG.md
-	$ git commit -m "Bump version to $VERSION"
-	$ git push origin bump_$VERSION
-	```
-
-### 6. Get 2 other maintainers to validate the pull request
-
-### 7. Merge the pull request and apply tags
-
-	```bash
-	$ git checkout master
-	$ git merge bump_$VERSION
-	$ git tag -a v$VERSION # Don't forget the v!
-	$ git tag -f -a latest
-	$ git push
-	$ git push --tags
-	```
-
-### 8. Publish binaries
-
-	To run this you will need access to the release credentials.
-	Get them from [the infrastructure maintainers](https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).
-
-	```bash
-	$ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
-	$ BUILD=$(docker run -d -e RELEASE_PPA=0 $RELEASE_IMAGE)
-	```
-
-	This will do 2 things:
-	
-	* It will build and upload the binaries on http://get.docker.io
-	* It will *test* the release on our Ubuntu PPA (a PPA is a community repository for ubuntu packages)
-
-	Wait for the build to complete.
-
-	```bash
-	$ docker wait $BUILD # This should print 0. If it doesn't, your build failed.
-	```
-
-	Check that the output looks OK. Here's an example of a correct output:
-
-	```bash
-	$ docker logs 2>&1 b4e7c8299d73 | grep -e 'Public URL' -e 'Successfully uploaded'
-	Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-v0.4.7.tgz
-	Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-latest.tgz
-	Successfully uploaded packages.
-	```
-
-	If you don't see 3 lines similar to this, something might be wrong. Check the full logs and try again.
-	
-
-### 9. Publish Ubuntu packages
-
-	If everything went well in the previous step, you can finalize the release by submitting the Ubuntu
-	packages.
-
-	```bash
-	$ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
-	$ docker run -e RELEASE_PPA=1 $RELEASE_IMAGE
-	```
-
-	If that goes well, Ubuntu Precise package is in its way. It will take anywhere from 0.5 to 30 hours
-	for the builders to complete their job depending on builder demand at this time. At this point, Quantal
-	and Raring packages need to be created using the Launchpad interface:
-	  https://launchpad.net/~dotcloud/+archive/lxc-docker/+packages
-
-	Notify [the packager maintainers](https://github.com/dotcloud/docker/blob/master/packaging/MAINTAINERS)
-	who will ensure PPA is ready.
-
-	Congratulations! You're done

+ 0 - 36
hack/dockerbuilder/Dockerfile

@@ -1,36 +0,0 @@
-# DESCRIPTION     Build a container capable of producing official binary and
-#                 PPA packages and uploading them to S3 and Launchpad
-# VERSION         1.2
-# DOCKER_VERSION  0.4
-# AUTHOR          Solomon Hykes <solomon@dotcloud.com>
-#                 Daniel Mizyrycki <daniel@dotcloud.net>
-# BUILD_CMD       docker build -t dockerbuilder .
-# RUN_CMD         docker run -e AWS_ID="$AWS_ID" -e AWS_KEY="$AWS_KEY" -e GPG_KEY="$GPG_KEY" -e PUBLISH_PPA="$PUBLISH_PPA" dockerbuilder
-#
-# ENV_VARIABLES   AWS_ID, AWS_KEY: S3 credentials for uploading Docker binary and tarball
-#                 GPG_KEY: Signing key for docker package
-#                 PUBLISH_PPA: 0 for staging release, 1 for production release
-#
-from	ubuntu:12.04
-maintainer	Solomon Hykes <solomon@dotcloud.com>
-# Workaround the upstart issue
-run dpkg-divert --local --rename --add /sbin/initctl
-run ln -s /bin/true /sbin/initctl
-# Enable universe and gophers PPA
-run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q python-software-properties
-run	add-apt-repository "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe"
-run	add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu
-run	apt-get update
-# Packages required to checkout, build and upload docker
-run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl
-run	curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz
-run	tar -C /usr/local -xzf /go.tar.gz
-run	echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc
-run	echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile
-run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q git build-essential
-# Packages required to build an ubuntu package
-run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang-stable debhelper autotools-dev devscripts
-# Copy dockerbuilder files into the container
-add	.       /src
-run	cp /src/dockerbuilder /usr/local/bin/ && chmod +x /usr/local/bin/dockerbuilder
-cmd	["dockerbuilder"]

+ 0 - 1
hack/dockerbuilder/MAINTAINERS

@@ -1 +0,0 @@
-Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)

+ 0 - 42
hack/dockerbuilder/dockerbuilder

@@ -1,42 +0,0 @@
-#!/bin/sh
-set -x
-set -e
-
-export PATH=/usr/local/go/bin:$PATH
-
-PACKAGE=github.com/dotcloud/docker
-
-if [ $# -gt 1 ]; then
-	echo "Usage: $0 [REVISION]"
-	exit 1
-fi
-
-export REVISION=$1
-
-if [ -z "$AWS_ID" -o -z "$AWS_KEY" ]; then
-	echo "Warning: either AWS_ID or AWS_KEY environment variable not set. Won't upload to S3."
-else
-	/bin/echo -e "[default]\naccess_key = $AWS_ID\nsecret_key = $AWS_KEY\n" > /.s3cfg
-fi
-
-if [ -z "$GPG_KEY" ]; then
-	echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed."
-	NO_UBUNTU=1
-fi
-
-rm -fr docker-release
-git clone https://github.com/dotcloud/docker docker-release
-cd docker-release
-if [ -z "$REVISION" ]; then
-	make release
-else
-	make release RELEASE_VERSION=$REVISION
-fi
-
-# Remove credentials from container
-rm -f /.s3cfg
-
-if [ -z "$NO_UBUNTU" ]; then
-	export PATH=`echo $PATH | sed 's#/usr/local/go/bin:##g'`
-	(cd packaging/ubuntu && make ubuntu)
-fi

+ 106 - 0
hack/release/README.md

@@ -0,0 +1,106 @@
+## A maintainer's guide to releasing Docker
+
+So you're in charge of a Docker release? Cool. Here's what to do.
+
+If your experience deviates from this document, please document the changes
+to keep it up-to-date.
+
+
+### 1. Pull from master and create a release branch
+
+```bash
+git checkout master
+git pull
+git checkout -b bump_$VERSION
+```
+
+### 2. Update CHANGELOG.md
+
+You can run this command for reference:
+
+```bash
+LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
+git log $LAST_VERSION..HEAD
+```
+
+Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
+
+* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
+  new feature or upgrade, respectively.
+
+* CATEGORY should describe which part of the project is affected.
+  Valid categories are:
+  * Builder
+  * Documentation
+  * Hack
+  * Packaging
+  * Remote API
+  * Runtime
+
+* DESCRIPTION: a concise description of the change that is relevant to the 
+  end-user, using the present tense. Changes should be described in terms 
+  of how they affect the user, for example "new feature X which allows Y", 
+  "fixed bug which caused X", "increased performance of Y".
+
+EXAMPLES:
+
+```
++ Builder: 'docker build -t FOO' applies the tag FOO to the newly built
+  container.
+* Runtime: improve detection of kernel version
+- Remote API: fix a bug in the optional unix socket transport
+```
+
+### 3. Change the contents of the VERSION file
+
+### 4. Run all tests
+
+```bash
+go test
+```
+
+### 5. Commit and create a pull request
+
+```bash
+git add CHANGELOG.md
+git commit -m "Bump version to $VERSION"
+git push origin bump_$VERSION
+```
+
+### 6. Get 2 other maintainers to validate the pull request
+
+### 7. Merge the pull request and apply tags
+
+```bash
+git checkout master
+git merge bump_$VERSION
+git tag -a v$VERSION # Don't forget the v!
+git tag -f -a latest
+git push
+git push --tags
+```
+
+### 8. Publish binaries
+
+To run this you will need access to the release credentials.
+Get them from [the infrastructure maintainers](
+https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).
+
+```bash
+docker build -t releasedocker .
+docker run  \
+	-e AWS_S3_BUCKET=get-nightly.docker.io \
+	-e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
+	-e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
+	-e GPG_PASSPHRASE=supersecretsesame \
+	releasedocker
+```
+
+It will build and upload the binaries on the specified bucket (you should
+use get-nightly.docker.io for general testing, and once everything is fine,
+switch to get.docker.io).
+
+
+### 9. Rejoice!
+
+Congratulations! You're done.

+ 179 - 0
hack/release/make.sh

@@ -0,0 +1,179 @@
+#!/bin/sh
+
+# This script builds various binary artifacts from a checkout of the docker
+# source code.
+#
+# Requirements:
+# - The current directory should be a checkout of the docker source code
+#   (http://github.com/dotcloud/docker). Whatever version is checked out
+#   will be built.
+# - The VERSION file, at the root of the repository, should exist, and
+#   will be used as Docker binary version and package version.
+# - The hash of the git commit will also be included in the Docker binary,
+#   with the suffix -dirty if the repository isn't clean.
+# - The script is intented to be run as part of a docker build, as defined
+#   in the Dockerfile at the root of the source. In other words:
+#   DO NOT CALL THIS SCRIPT DIRECTLY.
+# - The right way to call this script is to invoke "docker build ." from
+#   your checkout of the Docker repository.
+# 
+
+set -e
+
+# We're a nice, sexy, little shell script, and people might try to run us;
+# but really, they shouldn't. We want to be in a container!
+RESOLVCONF=$(readlink --canonicalize /etc/resolv.conf)
+grep -q "$RESOLVCONF" /proc/mounts || {
+	echo "# I will only run within a container."
+	echo "# Try this instead:"
+	echo "docker build ."
+	exit 1
+}
+
+VERSION=$(cat ./VERSION)
+PKGVERSION="$VERSION"
+GITCOMMIT=$(git rev-parse --short HEAD)
+if test -n "$(git status --porcelain)"
+then
+	GITCOMMIT="$GITCOMMIT-dirty"
+	PKGVERSION="$PKGVERSION-$(date +%Y%m%d%H%M%S)-$GITCOMMIT"
+fi
+
+PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
+PACKAGE_URL="http://www.docker.io/"
+PACKAGE_MAINTAINER="docker@dotcloud.com"
+PACKAGE_DESCRIPTION="lxc-docker is a Linux container runtime
+Docker complements LXC with a high-level API which operates at the process
+level. It runs unix processes with strong guarantees of isolation and
+repeatability across servers.
+Docker is a great building block for automating distributed systems:
+large-scale web deployments, database clusters, continuous deployment systems,
+private PaaS, service-oriented architectures, etc."
+
+UPSTART_SCRIPT='description     "Docker daemon"
+
+start on filesystem or runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+script
+    /usr/bin/docker -d
+end script
+'
+
+# Each "bundle" is a different type of build artefact: static binary, Ubuntu
+# package, etc.
+
+# Build Docker as a static binary file
+bundle_binary() {
+	mkdir -p bundles/$VERSION/binary
+	go build -o bundles/$VERSION/binary/docker-$VERSION \
+		-ldflags "-X main.GITCOMMIT $GITCOMMIT -X main.VERSION $VERSION -d -w" \
+		./docker
+}
+
+
+# Build Docker's test suite as a collection of binary files (one per
+# sub-package to test)
+bundle_test() {
+	mkdir -p bundles/$VERSION/test
+	for test_dir in $(find_test_dirs); do
+		test_binary=$(
+			cd $test_dir
+			go test -c -v -ldflags "-X main.GITCOMMIT $GITCOMMIT -X main.VERSION $VERSION -d -w" >&2
+			find . -maxdepth 1 -type f -name '*.test' -executable
+		)
+		cp $test_dir/$test_binary bundles/$VERSION/test/
+	done
+}
+
+# Build docker as an ubuntu package using FPM and REPREPRO (sue me).
+# bundle_binary must be called first.
+bundle_ubuntu() {
+	mkdir -p bundles/$VERSION/ubuntu
+
+	DIR=$(pwd)/bundles/$VERSION/ubuntu/build
+
+	# Generate an upstart config file (ubuntu-specific)
+	mkdir -p $DIR/etc/init
+	echo "$UPSTART_SCRIPT" > $DIR/etc/init/docker.conf
+
+	# Copy the binary
+	mkdir -p $DIR/usr/bin
+	cp bundles/$VERSION/binary/docker-$VERSION $DIR/usr/bin/docker
+
+	# Generate postinstall/prerm scripts
+	cat >/tmp/postinstall <<EOF
+#!/bin/sh
+/sbin/stop docker || true
+/sbin/start docker
+EOF
+	cat >/tmp/prerm <<EOF
+#!/bin/sh
+/sbin/stop docker || true
+EOF
+	chmod +x /tmp/postinstall /tmp/prerm
+
+	(
+		cd bundles/$VERSION/ubuntu
+		fpm -s dir -C $DIR \
+		    --name lxc-docker-$VERSION --version $PKGVERSION \
+		    --after-install /tmp/postinstall \
+		    --before-remove /tmp/prerm \
+		    --architecture "$PACKAGE_ARCHITECTURE" \
+		    --prefix / \
+		    --depends lxc --depends aufs-tools \
+		    --description "$PACKAGE_DESCRIPTION" \
+		    --maintainer "$PACKAGE_MAINTAINER" \
+		    --conflicts lxc-docker-virtual-package \
+		    --provides lxc-docker \
+		    --provides lxc-docker-virtual-package \
+		    --replaces lxc-docker \
+		    --replaces lxc-docker-virtual-package \
+		    --url "$PACKAGE_URL" \
+		    --vendor "$PACKAGE_VENDOR" \
+		    -t deb .
+		mkdir empty
+		fpm -s dir -C empty \
+		    --name lxc-docker --version $PKGVERSION \
+		    --architecture "$PACKAGE_ARCHITECTURE" \
+		    --depends lxc-docker-$VERSION \
+		    --description "$PACKAGE_DESCRIPTION" \
+		    --maintainer "$PACKAGE_MAINTAINER" \
+		    --url "$PACKAGE_URL" \
+		    --vendor "$PACKAGE_VENDOR" \
+		    -t deb .
+	)
+}
+
+
+# This helper function walks the current directory looking for directories
+# holding Go test files, and prints their paths on standard output, one per
+# line.
+find_test_dirs() {
+	find . -name '*_test.go' | 
+		{ while read f; do dirname $f; done; } | 
+		sort -u
+}
+
+
+main() {
+	bundle_binary
+	bundle_ubuntu
+	#bundle_test
+	cat <<EOF
+###############################################################################
+Now run the resulting image, making sure that you set AWS_S3_BUCKET,
+AWS_ACCESS_KEY, and AWS_SECRET_KEY environment variables:
+
+docker run -e AWS_S3_BUCKET=get-staging.docker.io \\
+              AWS_ACCESS_KEY=AKI1234... \\
+              AWS_SECRET_KEY=sEs3mE... \\
+              GPG_PASSPHRASE=sesame... \\
+              image_id_or_name
+###############################################################################
+EOF
+}
+
+main

+ 175 - 0
hack/release/release.sh

@@ -0,0 +1,175 @@
+#!/bin/sh
+
+# This script looks for bundles built by make.sh, and releases them on a
+# public S3 bucket.
+#
+# Bundles should be available for the VERSION string passed as argument.
+#
+# The correct way to call this script is inside a container built by the
+# official Dockerfile at the root of the Docker source code. The Dockerfile,
+# make.sh and release.sh should all be from the same source code revision.
+
+set -e
+
+# Print a usage message and exit.
+usage() {
+	cat <<EOF
+To run, I need:
+- to be in a container generated by the Dockerfile at the top of the Docker
+  repository;
+- to be provided with the name of an S3 bucket, in environment variable
+  AWS_S3_BUCKET;
+- to be provided with AWS credentials for this S3 bucket, in environment
+  variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
+- the passphrase to unlock the GPG key which will sign the deb packages
+  (passed as environment variable GPG_PASSPHRASE);
+- a generous amount of good will and nice manners.
+The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
+
+docker run -e AWS_S3_BUCKET=get-staging.docker.io \\
+              AWS_ACCESS_KEY=AKI1234... \\
+              AWS_SECRET_KEY=sEs4mE... \\
+              GPG_PASSPHRASE=m0resEs4mE... \\
+              f0058411
+EOF
+	exit 1
+}
+
+[ "$AWS_S3_BUCKET" ] || usage
+[ "$AWS_ACCESS_KEY" ] || usage
+[ "$AWS_SECRET_KEY" ] || usage
+[ "$GPG_PASSPHRASE" ] || usage
+[ -d /go/src/github.com/dotcloud/docker/ ] || usage
+cd /go/src/github.com/dotcloud/docker/ 
+
+VERSION=$(cat VERSION)
+BUCKET=$AWS_S3_BUCKET
+
+setup_s3() {
+	# Try creating the bucket. Ignore errors (it might already exist).
+	s3cmd mb s3://$BUCKET 2>/dev/null || true
+	# Check access to the bucket.
+	# s3cmd has no useful exit status, so we cannot check that.
+	# Instead, we check if it outputs anything on standard output.
+	# (When there are problems, it uses standard error instead.)
+	s3cmd info s3://$BUCKET | grep -q .
+	# Make the bucket accessible through website endpoints.
+	s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
+}
+
+# write_to_s3 uploads the contents of standard input to the specified S3 url.
+write_to_s3() {
+	DEST=$1
+	F=`mktemp`
+	cat > $F
+	s3cmd --acl-public put $F $DEST
+	rm -f $F
+}
+
+s3_url() {
+	echo "http://$BUCKET.s3.amazonaws.com"
+}
+
+# Upload the 'ubuntu' bundle to S3:
+# 1. A full APT repository is published at $BUCKET/ubuntu/
+# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
+release_ubuntu() {
+	# Make sure that we have our keys
+	mkdir -p /.gnupg/
+	s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
+	gpg --list-keys releasedocker >/dev/null || {
+		gpg --gen-key --batch <<EOF   
+Key-Type: RSA
+Key-Length: 2048
+Passphrase: $GPG_PASSPHRASE
+Name-Real: Docker Release Tool
+Name-Email: docker@dotcloud.com
+Name-Comment: releasedocker
+Expire-Date: 0
+%commit
+EOF
+	}
+
+	# Sign our packages
+	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
+		 --sign builder bundles/$VERSION/ubuntu/*.deb
+
+	# Setup the APT repo
+	APTDIR=bundles/$VERSION/ubuntu/apt
+	mkdir -p $APTDIR/conf $APTDIR/db
+	s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true
+	cat > $APTDIR/conf/distributions <<EOF
+Codename: docker
+Components: main
+Architectures: amd64 i386
+EOF
+
+	# Add the DEB package to the APT repo
+	DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
+	reprepro -b $APTDIR includedeb docker $DEBFILE
+
+	# Sign
+	for F in $(find $APTDIR -name Release)
+	do
+		gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
+			--armor --sign --detach-sign \
+			--output $F.gpg $F
+	done
+
+	# Upload keys
+	s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
+	gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
+	s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg
+
+	# Upload repo
+	s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
+	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/info
+# Add the repository to your APT sources
+echo deb $(s3_url $BUCKET)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
+# Then import the repository key
+curl $(s3_url $BUCKET)/gpg | apt-key add -
+# Install docker
+apt-get update ; apt-get install -y lxc-docker
+EOF
+	echo "APT repository uploaded. Instructions available at $(s3_url $BUCKET)/ubuntu/info"
+}
+
+# Upload a static binary to S3
+release_binary() {
+	[ -e bundles/$VERSION ]
+	S3DIR=s3://$BUCKET/builds/Linux/x86_64
+	s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
+	cat <<EOF | write_to_s3 s3://$BUCKET/builds/info
+# To install, run the following command as root:
+curl -O http://$BUCKET.s3.amazonaws.com/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
+# Then start docker in daemon mode:
+sudo /usr/local/bin/docker -d
+EOF
+	if [ -z "$NOLATEST" ]; then
+		echo "Copying docker-$VERSION to docker-latest"
+		s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
+		echo "Advertising $VERSION on $BUCKET as most recent version"
+		echo $VERSION | write_to_s3 s3://$BUCKET/latest
+	fi
+}
+
+# Upload the index script
+release_index() {
+	(
+	if [ "$BUCKET" != "get.docker.io" ]
+	then
+		sed s,https://get.docker.io/,http://$BUCKET.s3.amazonaws.com/, contrib/install.sh
+	else
+		cat contrib/install.sh
+	fi
+	) | write_to_s3 s3://$BUCKET/index
+}
+
+main() {
+	setup_s3
+	release_binary
+	release_ubuntu
+	release_index
+}
+
+main

+ 13 - 0
lxc_template.go

@@ -121,7 +121,16 @@ lxc.cgroup.cpu.shares = {{.Config.CpuShares}}
 {{end}}
 {{end}}
 `
 `
 
 
+const LxcHostConfigTemplate = `
+{{if .LxcConf}}
+{{range $pair := .LxcConf}}
+{{$pair.Key}} = {{$pair.Value}}
+{{end}}
+{{end}}
+`
+
 var LxcTemplateCompiled *template.Template
 var LxcTemplateCompiled *template.Template
+var LxcHostConfigTemplateCompiled *template.Template
 
 
 func getMemorySwap(config *Config) int64 {
 func getMemorySwap(config *Config) int64 {
 	// By default, MemorySwap is set to twice the size of RAM.
 	// By default, MemorySwap is set to twice the size of RAM.
@@ -141,4 +150,8 @@ func init() {
 	if err != nil {
 	if err != nil {
 		panic(err)
 		panic(err)
 	}
 	}
+	LxcHostConfigTemplateCompiled, err = template.New("lxc-hostconfig").Funcs(funcMap).Parse(LxcHostConfigTemplate)
+	if err != nil {
+		panic(err)
+	}
 }
 }

+ 16 - 1
network.go

@@ -635,10 +635,25 @@ func (manager *NetworkManager) Allocate() (*NetworkInterface, error) {
 		return &NetworkInterface{disabled: true}, nil
 		return &NetworkInterface{disabled: true}, nil
 	}
 	}
 
 
-	ip, err := manager.ipAllocator.Acquire()
+	var ip net.IP
+	var err error
+
+	ip, err = manager.ipAllocator.Acquire()
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+	// avoid duplicate IP 
+	ipNum := ipToInt(ip)
+	firstIP := manager.ipAllocator.network.IP.To4().Mask(manager.ipAllocator.network.Mask)
+	firstIPNum := ipToInt(firstIP) + 1
+
+	if firstIPNum == ipNum {
+		ip, err = manager.ipAllocator.Acquire()
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	iface := &NetworkInterface{
 	iface := &NetworkInterface{
 		IPNet:   net.IPNet{IP: ip, Mask: manager.bridgeNetwork.Mask},
 		IPNet:   net.IPNet{IP: ip, Mask: manager.bridgeNetwork.Mask},
 		Gateway: manager.bridgeNetwork.IP,
 		Gateway: manager.bridgeNetwork.IP,

+ 12 - 0
packaging/README.md

@@ -0,0 +1,12 @@
+# Docker packaging
+
+This directory has one subdirectory per packaging distribution.
+At minimum, each of these subdirectories should contain a
+README.$DISTRIBUTION explaining how to create the native
+docker package and how to install it.
+
+**Important:** the debian and ubuntu directories are here for
+reference only. Since we experienced many issues with Launchpad,
+we gave up on using it to have a Docker PPA (at least, for now!)
+and we are using a simpler process.
+See [/hack/release](../hack/release) for details.

+ 0 - 8
packaging/README.rst

@@ -1,8 +0,0 @@
-Docker packaging
-================
-
-This directory has one subdirectory per packaging distribution.
-At minimum, each of these subdirectories should contain a
-README.$DISTRIBUTION explaining how to create the native
-docker package and how to install it.
-

+ 16 - 6
runtime.go

@@ -15,9 +15,9 @@ import (
 )
 )
 
 
 type Capabilities struct {
 type Capabilities struct {
-	MemoryLimit    bool
-	SwapLimit      bool
-	IPv4Forwarding bool
+	MemoryLimit            bool
+	SwapLimit              bool
+	IPv4ForwardingDisabled bool
 }
 }
 
 
 type Runtime struct {
 type Runtime struct {
@@ -207,19 +207,29 @@ func (runtime *Runtime) Destroy(container *Container) error {
 }
 }
 
 
 func (runtime *Runtime) restore() error {
 func (runtime *Runtime) restore() error {
+	wheel := "-\\|/"
+	if os.Getenv("DEBUG") == "" {
+		fmt.Printf("Loading containers:  ")
+	}
 	dir, err := ioutil.ReadDir(runtime.repository)
 	dir, err := ioutil.ReadDir(runtime.repository)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	for _, v := range dir {
+	for i, v := range dir {
 		id := v.Name()
 		id := v.Name()
 		container, err := runtime.Load(id)
 		container, err := runtime.Load(id)
+		if i%21 == 0 && os.Getenv("DEBUG") == "" {
+			fmt.Printf("\b%c", wheel[i%4])
+		}
 		if err != nil {
 		if err != nil {
 			utils.Debugf("Failed to load container %v: %v", id, err)
 			utils.Debugf("Failed to load container %v: %v", id, err)
 			continue
 			continue
 		}
 		}
 		utils.Debugf("Loaded container %v", container.ID)
 		utils.Debugf("Loaded container %v", container.ID)
 	}
 	}
+	if os.Getenv("DEBUG") == "" {
+		fmt.Printf("\bdone.\n")
+	}
 	return nil
 	return nil
 }
 }
 
 
@@ -244,8 +254,8 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
 	}
 	}
 
 
 	content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward")
 	content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward")
-	runtime.capabilities.IPv4Forwarding = err3 == nil && len(content) > 0 && content[0] == '1'
-	if !runtime.capabilities.IPv4Forwarding && !quiet {
+	runtime.capabilities.IPv4ForwardingDisabled = err3 != nil || len(content) == 0 || content[0] != '1'
+	if runtime.capabilities.IPv4ForwardingDisabled && !quiet {
 		log.Printf("WARNING: IPv4 forwarding is disabled.")
 		log.Printf("WARNING: IPv4 forwarding is disabled.")
 	}
 	}
 }
 }

+ 1 - 1
runtime_test.go

@@ -101,7 +101,7 @@ func init() {
 	// If the unit test is not found, try to download it.
 	// If the unit test is not found, try to download it.
 	if img, err := globalRuntime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
 	if img, err := globalRuntime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
 		// Retrieve the Image
 		// Retrieve the Image
-		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, true); err != nil {
+		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
 			panic(err)
 			panic(err)
 		}
 		}
 	}
 	}

+ 66 - 17
server.go

@@ -102,7 +102,7 @@ func (srv *Server) ContainerExport(name string, out io.Writer) error {
 }
 }
 
 
 func (srv *Server) ImagesSearch(term string) ([]APISearch, error) {
 func (srv *Server) ImagesSearch(term string) ([]APISearch, error) {
-	r, err := registry.NewRegistry(srv.runtime.root, nil, srv.HTTPRequestFactory())
+	r, err := registry.NewRegistry(srv.runtime.root, nil, srv.HTTPRequestFactory(nil))
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -271,7 +271,7 @@ func (srv *Server) DockerInfo() *APIInfo {
 		Images:             imgcount,
 		Images:             imgcount,
 		MemoryLimit:        srv.runtime.capabilities.MemoryLimit,
 		MemoryLimit:        srv.runtime.capabilities.MemoryLimit,
 		SwapLimit:          srv.runtime.capabilities.SwapLimit,
 		SwapLimit:          srv.runtime.capabilities.SwapLimit,
-		IPv4Forwarding:     srv.runtime.capabilities.IPv4Forwarding,
+		IPv4Forwarding:     !srv.runtime.capabilities.IPv4ForwardingDisabled,
 		Debug:              os.Getenv("DEBUG") != "",
 		Debug:              os.Getenv("DEBUG") != "",
 		NFd:                utils.GetTotalUsedFds(),
 		NFd:                utils.GetTotalUsedFds(),
 		NGoroutines:        runtime.NumGoroutine(),
 		NGoroutines:        runtime.NumGoroutine(),
@@ -419,19 +419,30 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-
+	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling", "dependend layers"))
 	// FIXME: Try to stream the images?
 	// FIXME: Try to stream the images?
 	// FIXME: Launch the getRemoteImage() in goroutines
 	// FIXME: Launch the getRemoteImage() in goroutines
+
 	for _, id := range history {
 	for _, id := range history {
+
+		// ensure no two downloads of the same layer happen at the same time
+		if err := srv.poolAdd("pull", "layer:"+id); err != nil {
+			utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
+			return nil
+		}
+		defer srv.poolRemove("pull", "layer:"+id)
+
 		if !srv.runtime.graph.Exists(id) {
 		if !srv.runtime.graph.Exists(id) {
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
 			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 			if err != nil {
 			if err != nil {
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
 				// FIXME: Keep going in case of error?
 				// FIXME: Keep going in case of error?
 				return err
 				return err
 			}
 			}
 			img, err := NewImgJSON(imgJSON)
 			img, err := NewImgJSON(imgJSON)
 			if err != nil {
 			if err != nil {
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
 				return fmt.Errorf("Failed to parse json: %s", err)
 				return fmt.Errorf("Failed to parse json: %s", err)
 			}
 			}
 
 
@@ -439,13 +450,17 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "fs layer"))
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "fs layer"))
 			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
 			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
 			if err != nil {
 			if err != nil {
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependend layers"))
 				return err
 				return err
 			}
 			}
 			defer layer.Close()
 			defer layer.Close()
 			if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf.FormatProgress(utils.TruncateID(id), "Downloading", "%8v/%v (%v)"), sf, false), img); err != nil {
 			if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf.FormatProgress(utils.TruncateID(id), "Downloading", "%8v/%v (%v)"), sf, false), img); err != nil {
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "downloading dependend layers"))
 				return err
 				return err
 			}
 			}
 		}
 		}
+		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download", "complete"))
+
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -493,29 +508,57 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 		downloadImage := func(img *registry.ImgData) {
 		downloadImage := func(img *registry.ImgData) {
 			if askedTag != "" && img.Tag != askedTag {
 			if askedTag != "" && img.Tag != askedTag {
 				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
 				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
-				errors <- nil
+				if parallel {
+					errors <- nil
+				}
 				return
 				return
 			}
 			}
 
 
 			if img.Tag == "" {
 			if img.Tag == "" {
 				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
 				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
-				errors <- nil
+				if parallel {
+					errors <- nil
+				}
+				return
+			}
+
+			// ensure no two downloads of the same image happen at the same time
+			if err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
+				utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+				if parallel {
+					errors <- nil
+				}
 				return
 				return
 			}
 			}
+			defer srv.poolRemove("pull", "img:"+img.ID)
+
 			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s", img.Tag, localName)))
 			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s", img.Tag, localName)))
 			success := false
 			success := false
+			var lastErr error
 			for _, ep := range repoData.Endpoints {
 			for _, ep := range repoData.Endpoints {
+				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s", img.Tag, localName, ep)))
 				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
-					out.Write(sf.FormatStatus(utils.TruncateID(img.ID), "Error while retrieving image for tag: %s (%s); checking next endpoint", askedTag, err))
+					// Its not ideal that only the last error  is returned, it would be better to concatenate the errors.
+					// As the error is also given to the output stream the user will see the error.
+					lastErr = err
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err)))
 					continue
 					continue
 				}
 				}
 				success = true
 				success = true
 				break
 				break
 			}
 			}
 			if !success {
 			if !success {
-				errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
+				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, %s", img.Tag, localName, lastErr)))
+				if parallel {
+					errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
+					return
+				}
+			}
+			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download", "complete"))
+
+			if parallel {
+				errors <- nil
 			}
 			}
-			errors <- nil
 		}
 		}
 
 
 		if parallel {
 		if parallel {
@@ -524,15 +567,18 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 			downloadImage(image)
 			downloadImage(image)
 		}
 		}
 	}
 	}
-
 	if parallel {
 	if parallel {
+		var lastError error
 		for i := 0; i < len(repoData.ImgList); i++ {
 		for i := 0; i < len(repoData.ImgList); i++ {
 			if err := <-errors; err != nil {
 			if err := <-errors; err != nil {
-				return err
+				lastError = err
 			}
 			}
 		}
 		}
-	}
+		if lastError != nil {
+			return lastError
+		}
 
 
+	}
 	for tag, id := range tagsList {
 	for tag, id := range tagsList {
 		if askedTag != "" && tag != askedTag {
 		if askedTag != "" && tag != askedTag {
 			continue
 			continue
@@ -586,8 +632,8 @@ func (srv *Server) poolRemove(kind, key string) error {
 	return nil
 	return nil
 }
 }
 
 
-func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, parallel bool) error {
-	r, err := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory())
+func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error {
+	r, err := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory(metaHeaders))
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -734,7 +780,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 }
 }
 
 
 // FIXME: Allow to interrupt current push when new push of same image is done.
 // FIXME: Allow to interrupt current push when new push of same image is done.
-func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
+func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string) error {
 	if err := srv.poolAdd("push", localName); err != nil {
 	if err := srv.poolAdd("push", localName); err != nil {
 		return err
 		return err
 	}
 	}
@@ -748,7 +794,7 @@ func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFo
 
 
 	out = utils.NewWriteFlusher(out)
 	out = utils.NewWriteFlusher(out)
 	img, err := srv.runtime.graph.Get(localName)
 	img, err := srv.runtime.graph.Get(localName)
-	r, err2 := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory())
+	r, err2 := registry.NewRegistry(srv.runtime.root, authConfig, srv.HTTPRequestFactory(metaHeaders))
 	if err2 != nil {
 	if err2 != nil {
 		return err2
 		return err2
 	}
 	}
@@ -1221,10 +1267,13 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 	return srv, nil
 	return srv, nil
 }
 }
 
 
-func (srv *Server) HTTPRequestFactory() *utils.HTTPRequestFactory {
+func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
 	if srv.reqFactory == nil {
 	if srv.reqFactory == nil {
 		ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...)
 		ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...)
-		factory := utils.NewHTTPRequestFactory(ud)
+		md := &utils.HTTPMetaHeadersDecorator{
+			Headers: metaHeaders,
+		}
+		factory := utils.NewHTTPRequestFactory(ud, md)
 		srv.reqFactory = factory
 		srv.reqFactory = factory
 	}
 	}
 	return srv.reqFactory
 	return srv.reqFactory

+ 21 - 0
utils.go

@@ -1,6 +1,7 @@
 package docker
 package docker
 
 
 import (
 import (
+	"fmt"
 	"strings"
 	"strings"
 )
 )
 
 
@@ -146,3 +147,23 @@ func MergeConfig(userConf, imageConf *Config) {
 		}
 		}
 	}
 	}
 }
 }
+
+func parseLxcConfOpts(opts ListOpts) ([]KeyValuePair, error) {
+	out := make([]KeyValuePair, len(opts))
+	for i, o := range opts {
+		k, v, err := parseLxcOpt(o)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = KeyValuePair{Key: k, Value: v}
+	}
+	return out, nil
+}
+
// parseLxcOpt splits a single lxc conf option of the form "key=value" and
// returns the key and the value with surrounding whitespace trimmed. It
// returns an error when the option contains no '=' separator. Only the
// first '=' is significant, so values may themselves contain '='.
func parseLxcOpt(opt string) (string, string, error) {
	sep := strings.Index(opt, "=")
	if sep < 0 {
		return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt)
	}
	key := strings.TrimSpace(opt[:sep])
	value := strings.TrimSpace(opt[sep+1:])
	return key, value, nil
}

+ 14 - 0
utils/http.go

@@ -93,6 +93,20 @@ func (self *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *ht
 	return req, nil
 	return req, nil
 }
 }
 
 
+type HTTPMetaHeadersDecorator struct {
+	Headers map[string][]string
+}
+
+func (self *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
+	if self.Headers == nil {
+		return req, nil
+	}
+	for k, v := range self.Headers {
+		req.Header[k] = v
+	}
+	return req, nil
+}
+
 // HTTPRequestFactory creates an HTTP request
 // HTTPRequestFactory creates an HTTP request
 // and applies a list of decorators on the request.
 // and applies a list of decorators on the request.
 type HTTPRequestFactory struct {
 type HTTPRequestFactory struct {

+ 13 - 5
utils/utils.go

@@ -515,9 +515,7 @@ func FindCgroupMountpoint(cgroupType string) (string, error) {
 
 
 func GetKernelVersion() (*KernelVersionInfo, error) {
 func GetKernelVersion() (*KernelVersionInfo, error) {
 	var (
 	var (
-		flavor               string
-		kernel, major, minor int
-		err                  error
+		err error
 	)
 	)
 
 
 	uts, err := uname()
 	uts, err := uname()
@@ -536,8 +534,18 @@ func GetKernelVersion() (*KernelVersionInfo, error) {
 	// Remove the \x00 from the release for Atoi to parse correctly
 	// Remove the \x00 from the release for Atoi to parse correctly
 	release = release[:bytes.IndexByte(release, 0)]
 	release = release[:bytes.IndexByte(release, 0)]
 
 
-	tmp := strings.SplitN(string(release), "-", 2)
-	tmp2 := strings.SplitN(tmp[0], ".", 3)
+	return ParseRelease(string(release))
+}
+
+func ParseRelease(release string) (*KernelVersionInfo, error) {
+	var (
+		flavor               string
+		kernel, major, minor int
+		err                  error
+	)
+
+	tmp := strings.SplitN(release, "-", 2)
+	tmp2 := strings.Split(tmp[0], ".")
 
 
 	if len(tmp2) > 0 {
 	if len(tmp2) > 0 {
 		kernel, err = strconv.Atoi(tmp2[0])
 		kernel, err = strconv.Atoi(tmp2[0])

+ 18 - 0
utils/utils_test.go

@@ -337,3 +337,21 @@ search dotcloud.net`: true,
 		}
 		}
 	}
 	}
 }
 }
+
+func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) {
+	var (
+		a *KernelVersionInfo
+	)
+	a, _ = ParseRelease(release)
+
+	if r := CompareKernelVersion(a, b); r != result {
+		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+	}
+}
+
+func TestParseRelease(t *testing.T) {
+	assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: "1"}, 0)
+	assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, 0)
+}

+ 17 - 0
utils_test.go

@@ -301,3 +301,20 @@ func TestMergeConfigPublicPortNotHonored(t *testing.T) {
 		t.Fail()
 		t.Fail()
 	}
 	}
 }
 }
+
+func TestParseLxcConfOpt(t *testing.T) {
+	opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
+
+	for _, o := range opts {
+		k, v, err := parseLxcOpt(o)
+		if err != nil {
+			t.FailNow()
+		}
+		if k != "lxc.utsname" {
+			t.Fail()
+		}
+		if v != "docker" {
+			t.Fail()
+		}
+	}
+}