Bläddra i källkod

Merge pull request #3081 from creack/bump_0.7.1

Bump to 0.7.1
Guillaume J. Charmes 11 år sedan
förälder
incheckning
bc9b239d74
100 ändrade filer med 5596 tillägg och 1407 borttagningar
  1. 3 0
      .gitignore
  2. 58 2
      CHANGELOG.md
  3. 9 1
      CONTRIBUTING.md
  4. 28 28
      Dockerfile
  5. 26 0
      Makefile
  6. 1 1
      VERSION
  7. 70 9
      api.go
  8. 4 0
      api_params.go
  9. 1 1
      archive/changes.go
  10. 42 42
      archive/changes_test.go
  11. 4 2
      archive/diff.go
  12. 11 0
      archive/stat_darwin.go
  13. 11 0
      archive/stat_linux.go
  14. 23 1
      auth/auth.go
  15. 59 19
      buildfile.go
  16. 133 164
      commands.go
  17. 157 0
      commands_unit_test.go
  18. 2 2
      config.go
  19. 217 148
      container.go
  20. 1 3
      contrib/init/systemd/docker.service
  21. 0 15
      contrib/mkimage-centos.sh
  22. 112 0
      contrib/mkimage-rinse.sh
  23. 77 0
      contrib/mkseccomp.pl
  24. 444 0
      contrib/mkseccomp.sample
  25. 3 0
      contrib/udev/80-docker.rules
  26. 31 0
      contrib/vagrant-docker/README.md
  27. 1 0
      contrib/zfs/MAINTAINERS
  28. 22 0
      contrib/zfs/README.md
  29. 29 30
      docker/docker.go
  30. 2 0
      docs/MAINTAINERS
  31. 22 0
      docs/sources/api/docker_remote_api.rst
  32. 9 3
      docs/sources/api/docker_remote_api_v1.7.rst
  33. 1273 0
      docs/sources/api/docker_remote_api_v1.8.rst
  34. 1 5
      docs/sources/api/registry_index_spec.rst
  35. 118 52
      docs/sources/commandline/cli.rst
  36. 23 8
      docs/sources/contributing/devenvironment.rst
  37. 0 2
      docs/sources/examples/hello_world.rst
  38. 30 39
      docs/sources/examples/postgresql_service.rst
  39. 28 11
      docs/sources/installation/amazon.rst
  40. 7 12
      docs/sources/installation/archlinux.rst
  41. 37 4
      docs/sources/installation/fedora.rst
  42. 65 0
      docs/sources/installation/google.rst
  43. 2 0
      docs/sources/installation/index.rst
  44. 65 0
      docs/sources/installation/rhel.rst
  45. 8 0
      docs/sources/installation/ubuntulinux.rst
  46. 182 0
      docs/sources/use/ambassador_pattern_linking.rst
  47. 3 3
      docs/sources/use/baseimages.rst
  48. 1 0
      docs/sources/use/index.rst
  49. 11 2
      docs/sources/use/working_with_links_names.rst
  50. 9 5
      engine/engine.go
  51. 51 3
      engine/engine_test.go
  52. 3 3
      engine/env_test.go
  53. 2 16
      engine/helpers_test.go
  54. 47 98
      engine/job.go
  55. 80 0
      engine/job_test.go
  56. 166 0
      engine/streams.go
  57. 274 0
      engine/streams_test.go
  58. 19 5
      graph.go
  59. 126 0
      graphdriver/devmapper/attach_loopback.go
  60. 11 8
      graphdriver/devmapper/deviceset.go
  61. 15 20
      graphdriver/devmapper/devmapper.go
  62. 2 0
      graphdriver/devmapper/devmapper_log.go
  63. 2 0
      graphdriver/devmapper/devmapper_test.go
  64. 70 189
      graphdriver/devmapper/devmapper_wrapper.go
  65. 2 0
      graphdriver/devmapper/driver.go
  66. 66 60
      graphdriver/devmapper/driver_test.go
  67. 60 0
      graphdriver/devmapper/ioctl.go
  68. 2 0
      graphdriver/devmapper/mount.go
  69. 13 6
      graphdriver/devmapper/sys.go
  70. 3 3
      hack/PACKAGERS.md
  71. 20 11
      hack/RELEASE-CHECKLIST.md
  72. 1 1
      hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh
  73. 1 1
      hack/make/binary
  74. 1 1
      hack/make/dynbinary
  75. 29 12
      hack/make/dyntest
  76. 41 13
      hack/make/test
  77. 1 1
      hack/make/tgz
  78. 2 1
      hack/make/ubuntu
  79. 0 2
      hack/vendor.sh
  80. 17 15
      image.go
  81. 44 3
      integration/api_test.go
  82. 96 27
      integration/buildfile_test.go
  83. 96 67
      integration/commands_test.go
  84. 40 3
      integration/container_test.go
  85. 13 13
      integration/graph_test.go
  86. 43 3
      integration/runtime_test.go
  87. 1 1
      integration/server_test.go
  88. 1 1
      integration/utils_test.go
  89. 23 15
      lxc_template.go
  90. 18 0
      lxc_template_unit_test.go
  91. 115 68
      network.go
  92. 80 7
      network_test.go
  93. 146 0
      opts.go
  94. 24 0
      opts_unit_test.go
  95. 14 0
      reflink_copy_darwin.go
  96. 53 0
      reflink_copy_linux.go
  97. 36 15
      registry/registry.go
  98. 4 5
      registry/registry_test.go
  99. 25 5
      runtime.go
  100. 122 91
      server.go

+ 3 - 0
.gitignore

@@ -1,3 +1,6 @@
+# Docker project generated files to ignore
+#  if you want to ignore files created by your editor/tools,
+#  please consider a global .gitignore https://help.github.com/articles/ignoring-files
 .vagrant*
 .vagrant*
 bin
 bin
 docker/docker
 docker/docker

+ 58 - 2
CHANGELOG.md

@@ -1,11 +1,67 @@
 # Changelog
 # Changelog
 
 
+## 0.7.1 (2013-12-05)
+
+#### Documentation
+
++ Add @SvenDowideit as documentation maintainer
++ Add links example
++ Add documentation regarding ambassador pattern
++ Add Google Cloud Platform docs
++ Add dockerfile best practices
+* Update doc for RHEL
+* Update doc for registry
+* Update Postgres examples
+* Update doc for Ubuntu install
+* Improve remote api doc
+
+#### Runtime
+
++ Add hostconfig to docker inspect
++ Implement `docker log -f` to stream logs
++ Add env variable to disable kernel version warning
++ Add -format to `docker inspect`
++ Support bind-mount for files
+- Fix bridge creation on RHEL
+- Fix image size calculation
+- Make sure iptables are called even if the bridge already exists
+- Fix issue with stderr only attach
+- Remove init layer when destroying a container
+- Fix same port binding on different interfaces
+- `docker build` now returns the correct exit code
+- Fix `docker port` to display correct port
+- `docker build` now check that the dockerfile exists client side
+- `docker attach` now returns the correct exit code
+- Remove the name entry when the container does not exist
+
+#### Registry
+
+* Improve progress bars, add ETA for downloads
+* Simultaneous pulls now waits for the first to finish instead of failing
+- Tag only the top-layer image when pushing to registry
+- Fix issue with offline image transfer
+- Fix issue preventing using ':' in password for registry
+
+#### Other
+
++ Add pprof handler for debug
++ Create a Makefile
+* Use stdlib tar that now includes fix
+* Improve make.sh test script
+* Handle SIGQUIT on the daemon
+* Disable verbose during tests
+* Upgrade to go1.2 for official build
+* Improve unit tests
+* The test suite now runs all tests even if one fails
+* Refactor C in Go (Devmapper)
+- Fix OSX compilation
+
 ## 0.7.0 (2013-11-25)
 ## 0.7.0 (2013-11-25)
 
 
 #### Notable features since 0.6.0
 #### Notable features since 0.6.0
 
 
-* Storage drivers: choose from aufs, device mapper, vfs or btrfs.
-* Standard Linux support: docker now runs on unmodified linux kernels and all major distributions.
+* Storage drivers: choose from aufs, device-mapper, or vfs.
+* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
 * Links: compose complex software stacks by connecting containers to each other.
 * Links: compose complex software stacks by connecting containers to each other.
 * Container naming: organize your containers by giving them memorable names.
 * Container naming: organize your containers by giving them memorable names.
 * Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
 * Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.

+ 9 - 1
CONTRIBUTING.md

@@ -4,6 +4,13 @@ Want to hack on Docker? Awesome! Here are instructions to get you
 started. They are probably not perfect, please let us know if anything
 started. They are probably not perfect, please let us know if anything
 feels wrong or incomplete.
 feels wrong or incomplete.
 
 
+## Reporting Issues
+
+When reporting [issues](https://github.com/dotcloud/docker/issues) 
+on Github please include your host OS ( Ubuntu 12.04, Fedora 19, etc... )
+and the output of `docker version` along with the output of `docker info` if possible.  
+This information will help us review and fix your issue faster.
+
 ## Build Environment
 ## Build Environment
 
 
 For instructions on setting up your development environment, please
 For instructions on setting up your development environment, please
@@ -64,7 +71,7 @@ your branch before submitting a pull request.
 
 
 Update the documentation when creating or modifying features. Test
 Update the documentation when creating or modifying features. Test
 your documentation changes for clarity, concision, and correctness, as
 your documentation changes for clarity, concision, and correctness, as
-well as a clean docmuent build. See ``docs/README.md`` for more
+well as a clean documentation build. See ``docs/README.md`` for more
 information on building the docs and how docs get released.
 information on building the docs and how docs get released.
 
 
 Write clean code. Universally formatted code promotes ease of writing, reading,
 Write clean code. Universally formatted code promotes ease of writing, reading,
@@ -115,6 +122,7 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
 * Step 1: learn the component inside out
 * Step 1: learn the component inside out
 * Step 2: make yourself useful by contributing code, bugfixes, support etc.
 * Step 2: make yourself useful by contributing code, bugfixes, support etc.
 * Step 3: volunteer on the irc channel (#docker@freenode)
 * Step 3: volunteer on the irc channel (#docker@freenode)
+* Step 4: propose yourself at a scheduled #docker-meeting
 
 
 Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
 Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
 You don't have to be a maintainer to make a difference on the project!
 You don't have to be a maintainer to make a difference on the project!

+ 28 - 28
Dockerfile

@@ -24,52 +24,52 @@
 #
 #
 
 
 docker-version	0.6.1
 docker-version	0.6.1
-from	ubuntu:12.04
-maintainer	Solomon Hykes <solomon@dotcloud.com>
+FROM	ubuntu:12.04
+MAINTAINER	Solomon Hykes <solomon@dotcloud.com>
 
 
 # Build dependencies
 # Build dependencies
-run	echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
-run	apt-get update
-run	apt-get install -y -q curl
-run	apt-get install -y -q git
-run	apt-get install -y -q mercurial
-run	apt-get install -y -q build-essential libsqlite3-dev
+RUN	echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
+RUN	apt-get update
+RUN	apt-get install -y -q curl
+RUN	apt-get install -y -q git
+RUN	apt-get install -y -q mercurial
+RUN	apt-get install -y -q build-essential libsqlite3-dev
 
 
 # Install Go
 # Install Go
-run	curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz
-env	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
-env	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
-run	cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
+RUN	curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz
+ENV	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+ENV	GOPATH	/go:/go/src/github.com/dotcloud/docker/vendor
+RUN	cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
 
 
 # Ubuntu stuff
 # Ubuntu stuff
-run	apt-get install -y -q ruby1.9.3 rubygems libffi-dev
-run	gem install --no-rdoc --no-ri fpm
-run	apt-get install -y -q reprepro dpkg-sig
+RUN	apt-get install -y -q ruby1.9.3 rubygems libffi-dev
+RUN	gem install --no-rdoc --no-ri fpm
+RUN	apt-get install -y -q reprepro dpkg-sig
 
 
-run	apt-get install -y -q python-pip
-run	pip install s3cmd==1.1.0-beta3
-run	pip install python-magic==0.4.6
-run	/bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
+RUN	apt-get install -y -q python-pip
+RUN	pip install s3cmd==1.1.0-beta3
+RUN	pip install python-magic==0.4.6
+RUN	/bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
 
 
 # Runtime dependencies
 # Runtime dependencies
-run	apt-get install -y -q iptables
-run	apt-get install -y -q lxc
-run	apt-get install -y -q aufs-tools
+RUN	apt-get install -y -q iptables
+RUN	apt-get install -y -q lxc
+RUN	apt-get install -y -q aufs-tools
 
 
 # Get lvm2 source for compiling statically
 # Get lvm2 source for compiling statically
-run	git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
+RUN	git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
 # note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5
 # note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5
 
 
 # Compile and install lvm2
 # Compile and install lvm2
-run	cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
+RUN	cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
 
-volume	/var/lib/docker
-workdir	/go/src/github.com/dotcloud/docker
+VOLUME	/var/lib/docker
+WORKDIR	/go/src/github.com/dotcloud/docker
 
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
-entrypoint	["hack/dind"]
+ENTRYPOINT	["hack/dind"]
 
 
 # Upload docker source
 # Upload docker source
-add	.	/go/src/github.com/dotcloud/docker
+ADD	.	/go/src/github.com/dotcloud/docker

+ 26 - 0
Makefile

@@ -0,0 +1,26 @@
+.PHONY: all binary build default doc shell test
+
+DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker
+
+default: binary
+
+all: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh
+
+binary: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary
+
+doc:
+	docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
+
+test: build
+	$(DOCKER_RUN_DOCKER) hack/make.sh test
+
+shell: build
+	$(DOCKER_RUN_DOCKER) bash
+
+build: bundles
+	docker build -t docker .
+
+bundles:
+	mkdir bundles

+ 1 - 1
VERSION

@@ -1 +1 @@
-0.7.0
+0.7.1

+ 70 - 9
api.go

@@ -1,12 +1,16 @@
 package docker
 package docker
 
 
 import (
 import (
+	"bufio"
+	"bytes"
 	"code.google.com/p/go.net/websocket"
 	"code.google.com/p/go.net/websocket"
 	"encoding/base64"
 	"encoding/base64"
 	"encoding/json"
 	"encoding/json"
+	"expvar"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/auth"
+	"github.com/dotcloud/docker/systemd"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"github.com/gorilla/mux"
 	"github.com/gorilla/mux"
 	"io"
 	"io"
@@ -15,6 +19,7 @@ import (
 	"mime"
 	"mime"
 	"net"
 	"net"
 	"net/http"
 	"net/http"
+	"net/http/pprof"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
 	"regexp"
 	"regexp"
@@ -23,7 +28,7 @@ import (
 )
 )
 
 
 const (
 const (
-	APIVERSION        = 1.7
+	APIVERSION        = 1.8
 	DEFAULTHTTPHOST   = "127.0.0.1"
 	DEFAULTHTTPHOST   = "127.0.0.1"
 	DEFAULTHTTPPORT   = 4243
 	DEFAULTHTTPPORT   = 4243
 	DEFAULTUNIXSOCKET = "/var/run/docker.sock"
 	DEFAULTUNIXSOCKET = "/var/run/docker.sock"
@@ -564,12 +569,18 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r
 		job.SetenvList("Dns", defaultDns)
 		job.SetenvList("Dns", defaultDns)
 	}
 	}
 	// Read container ID from the first line of stdout
 	// Read container ID from the first line of stdout
-	job.StdoutParseString(&out.ID)
+	job.Stdout.AddString(&out.ID)
 	// Read warnings from stderr
 	// Read warnings from stderr
-	job.StderrParseLines(&out.Warnings, 0)
+	warnings := &bytes.Buffer{}
+	job.Stderr.Add(warnings)
 	if err := job.Run(); err != nil {
 	if err := job.Run(); err != nil {
 		return err
 		return err
 	}
 	}
+	// Parse warnings from stderr
+	scanner := bufio.NewScanner(warnings)
+	for scanner.Scan() {
+		out.Warnings = append(out.Warnings, scanner.Text())
+	}
 	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
 	if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
 		out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
@@ -865,7 +876,10 @@ func getContainersByName(srv *Server, version float64, w http.ResponseWriter, r
 		return fmt.Errorf("Conflict between containers and images")
 		return fmt.Errorf("Conflict between containers and images")
 	}
 	}
 
 
-	return writeJSON(w, http.StatusOK, container)
+	container.readHostConfig()
+	c := APIContainer{container, container.hostConfig}
+
+	return writeJSON(w, http.StatusOK, c)
 }
 }
 
 
 func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -922,7 +936,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 		}
 		}
 		context = c
 		context = c
 	} else if utils.IsURL(remoteURL) {
 	} else if utils.IsURL(remoteURL) {
-		f, err := utils.Download(remoteURL, ioutil.Discard)
+		f, err := utils.Download(remoteURL)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -951,9 +965,26 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 		return err
 		return err
 	}
 	}
 
 
-	b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
+	if version >= 1.8 {
+		w.Header().Set("Content-Type", "application/json")
+	}
+	sf := utils.NewStreamFormatter(version >= 1.8)
+	b := NewBuildFile(srv,
+		&StdoutFormater{
+			Writer:          utils.NewWriteFlusher(w),
+			StreamFormatter: sf,
+		},
+		&StderrFormater{
+			Writer:          utils.NewWriteFlusher(w),
+			StreamFormatter: sf,
+		},
+		!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf)
 	id, err := b.Build(context)
 	id, err := b.Build(context)
 	if err != nil {
 	if err != nil {
+		if sf.Used() {
+			w.Write(sf.FormatError(err))
+			return nil
+		}
 		return fmt.Errorf("Error build: %s", err)
 		return fmt.Errorf("Error build: %s", err)
 	}
 	}
 	if repoName != "" {
 	if repoName != "" {
@@ -1037,9 +1068,37 @@ func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute s
 	}
 	}
 }
 }
 
 
+// Replicated from expvar.go as not public.
+func expvarHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	fmt.Fprintf(w, "{\n")
+	first := true
+	expvar.Do(func(kv expvar.KeyValue) {
+		if !first {
+			fmt.Fprintf(w, ",\n")
+		}
+		first = false
+		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+	})
+	fmt.Fprintf(w, "\n}\n")
+}
+
+func AttachProfiler(router *mux.Router) {
+	router.HandleFunc("/debug/vars", expvarHandler)
+	router.HandleFunc("/debug/pprof/", pprof.Index)
+	router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+	router.HandleFunc("/debug/pprof/profile", pprof.Profile)
+	router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+	router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
+	router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
+	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
+}
+
 func createRouter(srv *Server, logging bool) (*mux.Router, error) {
 func createRouter(srv *Server, logging bool) (*mux.Router, error) {
 	r := mux.NewRouter()
 	r := mux.NewRouter()
-
+	if os.Getenv("DEBUG") != "" {
+		AttachProfiler(r)
+	}
 	m := map[string]map[string]HttpApiFunc{
 	m := map[string]map[string]HttpApiFunc{
 		"GET": {
 		"GET": {
 			"/events":                         getEvents,
 			"/events":                         getEvents,
@@ -1126,8 +1185,6 @@ func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *h
 }
 }
 
 
 func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
 func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
-	log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
-
 	r, err := createRouter(srv, logging)
 	r, err := createRouter(srv, logging)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -1158,5 +1215,9 @@ func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
 		}
 		}
 	}
 	}
 	httpSrv := http.Server{Addr: addr, Handler: r}
 	httpSrv := http.Server{Addr: addr, Handler: r}
+
+	log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
+	// Tell the init daemon we are accepting requests
+	go systemd.SdNotify("READY=1")
 	return httpSrv.Serve(l)
 	return httpSrv.Serve(l)
 }
 }

+ 4 - 0
api_params.go

@@ -118,6 +118,10 @@ type (
 		Resource string
 		Resource string
 		HostPath string
 		HostPath string
 	}
 	}
+	APIContainer struct {
+		*Container
+		HostConfig *HostConfig
+	}
 )
 )
 
 
 func (api APIImages) ToLegacy() []APIImagesOld {
 func (api APIImages) ToLegacy() []APIImagesOld {

+ 1 - 1
archive/changes.go

@@ -181,7 +181,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 				oldStat.Rdev != newStat.Rdev ||
 				oldStat.Rdev != newStat.Rdev ||
 				// Don't look at size for dirs, its not a good measure of change
 				// Don't look at size for dirs, its not a good measure of change
 				(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
 				(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
-				oldStat.Mtim != newStat.Mtim {
+				getLastModification(oldStat) != getLastModification(newStat) {
 				change := Change{
 				change := Change{
 					Path: newChild.path(),
 					Path: newChild.path(),
 					Kind: ChangeModify,
 					Kind: ChangeModify,

+ 42 - 42
archive/changes_test.go

@@ -247,7 +247,7 @@ func TestChangesDirsMutated(t *testing.T) {
 		}
 		}
 		if changes[i].Path == expectedChanges[i].Path {
 		if changes[i].Path == expectedChanges[i].Path {
 			if changes[i] != expectedChanges[i] {
 			if changes[i] != expectedChanges[i] {
-				t.Fatalf("Wrong change for %s, expected %s, got %d\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
 			}
 			}
 		} else if changes[i].Path < expectedChanges[i].Path {
 		} else if changes[i].Path < expectedChanges[i].Path {
 			t.Fatalf("unexpected change %s\n", changes[i].String())
 			t.Fatalf("unexpected change %s\n", changes[i].String())
@@ -261,45 +261,45 @@ func TestApplyLayer(t *testing.T) {
 	t.Skip("Skipping TestApplyLayer due to known failures") // Disable this for now as it is broken
 	t.Skip("Skipping TestApplyLayer due to known failures") // Disable this for now as it is broken
 	return
 	return
 
 
-	src, err := ioutil.TempDir("", "docker-changes-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	createSampleDir(t, src)
-	dst := src + "-copy"
-	if err := copyDir(src, dst); err != nil {
-		t.Fatal(err)
-	}
-	mutateSampleDir(t, dst)
-
-	changes, err := ChangesDirs(dst, src)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	layer, err := ExportChanges(dst, changes)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	layerCopy, err := NewTempArchive(layer, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := ApplyLayer(src, layerCopy); err != nil {
-		t.Fatal(err)
-	}
-
-	changes2, err := ChangesDirs(src, dst)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(changes2) != 0 {
-		t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
-	}
-
-	os.RemoveAll(src)
-	os.RemoveAll(dst)
+	// src, err := ioutil.TempDir("", "docker-changes-test")
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+	// createSampleDir(t, src)
+	// dst := src + "-copy"
+	// if err := copyDir(src, dst); err != nil {
+	// 	t.Fatal(err)
+	// }
+	// mutateSampleDir(t, dst)
+
+	// changes, err := ChangesDirs(dst, src)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// layer, err := ExportChanges(dst, changes)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// layerCopy, err := NewTempArchive(layer, "")
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// if err := ApplyLayer(src, layerCopy); err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// changes2, err := ChangesDirs(src, dst)
+	// if err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// if len(changes2) != 0 {
+	// 	t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
+	// }
+
+	// os.RemoveAll(src)
+	// os.RemoveAll(dst)
 }
 }

+ 4 - 2
archive/diff.go

@@ -83,8 +83,10 @@ func ApplyLayer(dest string, layer Archive) error {
 	}
 	}
 
 
 	for k, v := range modifiedDirs {
 	for k, v := range modifiedDirs {
-		aTime := time.Unix(v.Atim.Unix())
-		mTime := time.Unix(v.Mtim.Unix())
+		lastAccess := getLastAccess(v)
+		lastModification := getLastModification(v)
+		aTime := time.Unix(lastAccess.Unix())
+		mTime := time.Unix(lastModification.Unix())
 
 
 		if err := os.Chtimes(k, aTime, mTime); err != nil {
 		if err := os.Chtimes(k, aTime, mTime); err != nil {
 			return err
 			return err

+ 11 - 0
archive/stat_darwin.go

@@ -0,0 +1,11 @@
+package archive
+
+import "syscall"
+
+func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Atimespec
+}
+
+func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Mtimespec
+}

+ 11 - 0
archive/stat_linux.go

@@ -0,0 +1,11 @@
+package archive
+
+import "syscall"
+
+func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Atim
+}
+
+func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
+	return stat.Mtim
+}

+ 23 - 1
auth/auth.go

@@ -63,7 +63,7 @@ func decodeAuth(authStr string) (string, string, error) {
 	if n > decLen {
 	if n > decLen {
 		return "", "", fmt.Errorf("Something went wrong decoding auth config")
 		return "", "", fmt.Errorf("Something went wrong decoding auth config")
 	}
 	}
-	arr := strings.Split(string(decoded), ":")
+	arr := strings.SplitN(string(decoded), ":", 2)
 	if len(arr) != 2 {
 	if len(arr) != 2 {
 		return "", "", fmt.Errorf("Invalid auth configuration file")
 		return "", "", fmt.Errorf("Invalid auth configuration file")
 	}
 	}
@@ -223,6 +223,28 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
 		} else {
 		} else {
 			return "", fmt.Errorf("Registration: %s", reqBody)
 			return "", fmt.Errorf("Registration: %s", reqBody)
 		}
 		}
+	} else if reqStatusCode == 401 {
+		// This case would happen with private registries where /v1/users is
+		// protected, so people can use `docker login` as an auth check.
+		req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+		req.SetBasicAuth(authConfig.Username, authConfig.Password)
+		resp, err := client.Do(req)
+		if err != nil {
+			return "", err
+		}
+		defer resp.Body.Close()
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return "", err
+    	}
+		if resp.StatusCode == 200 {
+			status = "Login Succeeded"
+		} else if resp.StatusCode == 401 {
+			return "", fmt.Errorf("Wrong login/password, please try again")
+		} else {
+			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+				resp.StatusCode, resp.Header)
+		}
 	} else {
 	} else {
 		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
 		return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
 	}
 	}

+ 59 - 19
buildfile.go

@@ -36,14 +36,19 @@ type buildFile struct {
 	tmpContainers map[string]struct{}
 	tmpContainers map[string]struct{}
 	tmpImages     map[string]struct{}
 	tmpImages     map[string]struct{}
 
 
-	out io.Writer
+	outStream io.Writer
+	errStream io.Writer
+
+	// Deprecated, original writer used for ImagePull. To be removed.
+	outOld io.Writer
+	sf     *utils.StreamFormatter
 }
 }
 
 
 func (b *buildFile) clearTmp(containers map[string]struct{}) {
 func (b *buildFile) clearTmp(containers map[string]struct{}) {
 	for c := range containers {
 	for c := range containers {
 		tmp := b.runtime.Get(c)
 		tmp := b.runtime.Get(c)
 		b.runtime.Destroy(tmp)
 		b.runtime.Destroy(tmp)
-		fmt.Fprintf(b.out, "Removing intermediate container %s\n", utils.TruncateID(c))
+		fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
 	}
 	}
 }
 }
 
 
@@ -52,7 +57,7 @@ func (b *buildFile) CmdFrom(name string) error {
 	if err != nil {
 	if err != nil {
 		if b.runtime.graph.IsNotExist(err) {
 		if b.runtime.graph.IsNotExist(err) {
 			remote, tag := utils.ParseRepositoryTag(name)
 			remote, tag := utils.ParseRepositoryTag(name)
-			if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
+			if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, nil, nil, true); err != nil {
 				return err
 				return err
 			}
 			}
 			image, err = b.runtime.repositories.LookupImage(name)
 			image, err = b.runtime.repositories.LookupImage(name)
@@ -100,7 +105,7 @@ func (b *buildFile) CmdRun(args string) error {
 		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
 		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
 			return err
 			return err
 		} else if cache != nil {
 		} else if cache != nil {
-			fmt.Fprintf(b.out, " ---> Using cache\n")
+			fmt.Fprintf(b.outStream, " ---> Using cache\n")
 			utils.Debugf("[BUILDER] Use cached version")
 			utils.Debugf("[BUILDER] Use cached version")
 			b.image = cache.ID
 			b.image = cache.ID
 			return nil
 			return nil
@@ -241,7 +246,7 @@ func (b *buildFile) CmdVolume(args string) error {
 		volume = []string{args}
 		volume = []string{args}
 	}
 	}
 	if b.config.Volumes == nil {
 	if b.config.Volumes == nil {
-		b.config.Volumes = PathOpts{}
+		b.config.Volumes = map[string]struct{}{}
 	}
 	}
 	for _, v := range volume {
 	for _, v := range volume {
 		b.config.Volumes[v] = struct{}{}
 		b.config.Volumes[v] = struct{}{}
@@ -253,7 +258,7 @@ func (b *buildFile) CmdVolume(args string) error {
 }
 }
 
 
 func (b *buildFile) addRemote(container *Container, orig, dest string) error {
 func (b *buildFile) addRemote(container *Container, orig, dest string) error {
-	file, err := utils.Download(orig, ioutil.Discard)
+	file, err := utils.Download(orig)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -288,7 +293,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
 		destPath = destPath + "/"
 		destPath = destPath + "/"
 	}
 	}
 	if !strings.HasPrefix(origPath, b.context) {
 	if !strings.HasPrefix(origPath, b.context) {
-		return fmt.Errorf("Forbidden path: %s", origPath)
+		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
 	}
 	}
 	fi, err := os.Stat(origPath)
 	fi, err := os.Stat(origPath)
 	if err != nil {
 	if err != nil {
@@ -364,6 +369,34 @@ func (b *buildFile) CmdAdd(args string) error {
 	return nil
 	return nil
 }
 }
 
 
+type StdoutFormater struct {
+	io.Writer
+	*utils.StreamFormatter
+}
+
+func (sf *StdoutFormater) Write(buf []byte) (int, error) {
+	formattedBuf := sf.StreamFormatter.FormatStatus("", "%s", string(buf))
+	n, err := sf.Writer.Write(formattedBuf)
+	if n != len(formattedBuf) {
+		return n, io.ErrShortWrite
+	}
+	return len(buf), err
+}
+
+type StderrFormater struct {
+	io.Writer
+	*utils.StreamFormatter
+}
+
+func (sf *StderrFormater) Write(buf []byte) (int, error) {
+	formattedBuf := sf.StreamFormatter.FormatStatus("", "%s", "\033[91m"+string(buf)+"\033[0m")
+	n, err := sf.Writer.Write(formattedBuf)
+	if n != len(formattedBuf) {
+		return n, io.ErrShortWrite
+	}
+	return len(buf), err
+}
+
 func (b *buildFile) run() (string, error) {
 func (b *buildFile) run() (string, error) {
 	if b.image == "" {
 	if b.image == "" {
 		return "", fmt.Errorf("Please provide a source image with `from` prior to run")
 		return "", fmt.Errorf("Please provide a source image with `from` prior to run")
@@ -376,7 +409,7 @@ func (b *buildFile) run() (string, error) {
 		return "", err
 		return "", err
 	}
 	}
 	b.tmpContainers[c.ID] = struct{}{}
 	b.tmpContainers[c.ID] = struct{}{}
-	fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
+	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
 
 
 	// override the entry point that may have been picked up from the base image
 	// override the entry point that may have been picked up from the base image
 	c.Path = b.config.Cmd[0]
 	c.Path = b.config.Cmd[0]
@@ -386,7 +419,7 @@ func (b *buildFile) run() (string, error) {
 
 
 	if b.verbose {
 	if b.verbose {
 		errCh = utils.Go(func() error {
 		errCh = utils.Go(func() error {
-			return <-c.Attach(nil, nil, b.out, b.out)
+			return <-c.Attach(nil, nil, b.outStream, b.errStream)
 		})
 		})
 	}
 	}
 
 
@@ -403,7 +436,11 @@ func (b *buildFile) run() (string, error) {
 
 
 	// Wait for it to finish
 	// Wait for it to finish
 	if ret := c.Wait(); ret != 0 {
 	if ret := c.Wait(); ret != 0 {
-		return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
+		err := &utils.JSONError{
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
+			Code:    ret,
+		}
+		return "", err
 	}
 	}
 
 
 	return c.ID, nil
 	return c.ID, nil
@@ -424,7 +461,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
 			if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
 				return err
 				return err
 			} else if cache != nil {
 			} else if cache != nil {
-				fmt.Fprintf(b.out, " ---> Using cache\n")
+				fmt.Fprintf(b.outStream, " ---> Using cache\n")
 				utils.Debugf("[BUILDER] Use cached version")
 				utils.Debugf("[BUILDER] Use cached version")
 				b.image = cache.ID
 				b.image = cache.ID
 				return nil
 				return nil
@@ -438,10 +475,10 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			return err
 			return err
 		}
 		}
 		for _, warning := range warnings {
 		for _, warning := range warnings {
-			fmt.Fprintf(b.out, " ---> [Warning] %s\n", warning)
+			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
 		}
 		}
 		b.tmpContainers[container.ID] = struct{}{}
 		b.tmpContainers[container.ID] = struct{}{}
-		fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
+		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
 		id = container.ID
 		id = container.ID
 		if err := container.EnsureMounted(); err != nil {
 		if err := container.EnsureMounted(); err != nil {
 			return err
 			return err
@@ -507,22 +544,22 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 
 
 		method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
 		method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
 		if !exists {
 		if !exists {
-			fmt.Fprintf(b.out, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
+			fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
 			continue
 			continue
 		}
 		}
 
 
 		stepN += 1
 		stepN += 1
-		fmt.Fprintf(b.out, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
+		fmt.Fprintf(b.outStream, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
 
 
 		ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
 		ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
 		if ret != nil {
 		if ret != nil {
 			return "", ret.(error)
 			return "", ret.(error)
 		}
 		}
 
 
-		fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
+		fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
 	}
 	}
 	if b.image != "" {
 	if b.image != "" {
-		fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
+		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
 		if b.rm {
 		if b.rm {
 			b.clearTmp(b.tmpContainers)
 			b.clearTmp(b.tmpContainers)
 		}
 		}
@@ -531,16 +568,19 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 	return "", fmt.Errorf("An error occurred during the build\n")
 	return "", fmt.Errorf("An error occurred during the build\n")
 }
 }
 
 
-func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache, rm bool) BuildFile {
+func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter) BuildFile {
 	return &buildFile{
 	return &buildFile{
 		runtime:       srv.runtime,
 		runtime:       srv.runtime,
 		srv:           srv,
 		srv:           srv,
 		config:        &Config{},
 		config:        &Config{},
-		out:           out,
+		outStream:     outStream,
+		errStream:     errStream,
 		tmpContainers: make(map[string]struct{}),
 		tmpContainers: make(map[string]struct{}),
 		tmpImages:     make(map[string]struct{}),
 		tmpImages:     make(map[string]struct{}),
 		verbose:       verbose,
 		verbose:       verbose,
 		utilizeCache:  utilizeCache,
 		utilizeCache:  utilizeCache,
 		rm:            rm,
 		rm:            rm,
+		sf:            sf,
+		outOld:        outOld,
 	}
 	}
 }
 }

+ 133 - 164
commands.go

@@ -23,7 +23,6 @@ import (
 	"os"
 	"os"
 	"os/signal"
 	"os/signal"
 	"path"
 	"path"
-	"path/filepath"
 	"reflect"
 	"reflect"
 	"regexp"
 	"regexp"
 	"runtime"
 	"runtime"
@@ -32,6 +31,7 @@ import (
 	"strings"
 	"strings"
 	"syscall"
 	"syscall"
 	"text/tabwriter"
 	"text/tabwriter"
+	"text/template"
 	"time"
 	"time"
 )
 )
 
 
@@ -195,6 +195,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 		if _, err := os.Stat(cmd.Arg(0)); err != nil {
 		if _, err := os.Stat(cmd.Arg(0)); err != nil {
 			return err
 			return err
 		}
 		}
+		filename := path.Join(cmd.Arg(0), "Dockerfile")
+		if _, err = os.Stat(filename); os.IsNotExist(err) {
+			return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
+		}
 		context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
 		context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
 	}
 	}
 	var body io.Reader
 	var body io.Reader
@@ -202,7 +206,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	// FIXME: ProgressReader shouldn't be this annoying to use
 	if context != nil {
 	if context != nil {
 		sf := utils.NewStreamFormatter(false)
 		sf := utils.NewStreamFormatter(false)
-		body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf.FormatProgress("", "Uploading context", "%v bytes%0.0s%0.0s"), sf, true)
+		body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context")
 	}
 	}
 	// Upload the build context
 	// Upload the build context
 	v := &url.Values{}
 	v := &url.Values{}
@@ -220,42 +224,18 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	if *rm {
 	if *rm {
 		v.Set("rm", "1")
 		v.Set("rm", "1")
 	}
 	}
-	req, err := http.NewRequest("POST", fmt.Sprintf("/v%g/build?%s", APIVERSION, v.Encode()), body)
-	if err != nil {
-		return err
-	}
+
+	headers := http.Header(make(map[string][]string))
 	if context != nil {
 	if context != nil {
-		req.Header.Set("Content-Type", "application/tar")
-	}
-	dial, err := net.Dial(cli.proto, cli.addr)
-	if err != nil {
-		return err
-	}
-	clientconn := httputil.NewClientConn(dial, nil)
-	resp, err := clientconn.Do(req)
-	defer clientconn.Close()
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	// Check for errors
-	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return err
-		}
-		if len(body) == 0 {
-			return fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
-		}
-		return fmt.Errorf("Error: %s", body)
+		headers.Set("Content-Type", "application/tar")
 	}
 	}
-
-	// Output the result
-	if _, err := io.Copy(cli.out, resp.Body); err != nil {
-		return err
+	// Temporary hack to fix displayJSON behavior
+	cli.isTerminal = false
+	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
+	if jerr, ok := err.(*utils.JSONError); ok {
+		return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
 	}
 	}
-
-	return nil
+	return err
 }
 }
 
 
 // 'docker login': login / register a user to registry service.
 // 'docker login': login / register a user to registry service.
@@ -655,6 +635,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
 
 
 func (cli *DockerCli) CmdInspect(args ...string) error {
 func (cli *DockerCli) CmdInspect(args ...string) error {
 	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
 	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
+	tmplStr := cmd.String("format", "", "Format the output using the given go template.")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
@@ -663,10 +644,21 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 		return nil
 		return nil
 	}
 	}
 
 
+	var tmpl *template.Template
+	if *tmplStr != "" {
+		var err error
+		if tmpl, err = template.New("").Parse(*tmplStr); err != nil {
+			fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
+			return &utils.StatusError{StatusCode: 64,
+				Status: "Template parsing error: " + err.Error()}
+		}
+	}
+
 	indented := new(bytes.Buffer)
 	indented := new(bytes.Buffer)
+	indented.WriteByte('[')
 	status := 0
 	status := 0
 
 
-	for _, name := range args {
+	for _, name := range cmd.Args() {
 		obj, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
 		obj, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
 		if err != nil {
 		if err != nil {
 			obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
 			obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
@@ -681,25 +673,42 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
 			}
 			}
 		}
 		}
 
 
-		if err = json.Indent(indented, obj, "", "    "); err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			status = 1
-			continue
+		if tmpl == nil {
+			if err = json.Indent(indented, obj, "", "    "); err != nil {
+				fmt.Fprintf(cli.err, "%s\n", err)
+				status = 1
+				continue
+			}
+		} else {
+			// Has template, will render
+			var value interface{}
+			if err := json.Unmarshal(obj, &value); err != nil {
+				fmt.Fprintf(cli.err, "%s\n", err)
+				status = 1
+				continue
+			}
+			if err := tmpl.Execute(cli.out, value); err != nil {
+				return err
+			}
+			cli.out.Write([]byte{'\n'})
 		}
 		}
 		indented.WriteString(",")
 		indented.WriteString(",")
 	}
 	}
 
 
-	if indented.Len() > 0 {
+	if indented.Len() > 1 {
 		// Remove trailing ','
 		// Remove trailing ','
 		indented.Truncate(indented.Len() - 1)
 		indented.Truncate(indented.Len() - 1)
 	}
 	}
-	fmt.Fprintf(cli.out, "[")
-	if _, err := io.Copy(cli.out, indented); err != nil {
-		return err
+	indented.WriteByte(']')
+
+	if tmpl == nil {
+		if _, err := io.Copy(cli.out, indented); err != nil {
+			return err
+		}
 	}
 	}
-	fmt.Fprintf(cli.out, "]")
+
 	if status != 0 {
 	if status != 0 {
-		return &utils.StatusError{Status: status}
+		return &utils.StatusError{StatusCode: status}
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -763,16 +772,12 @@ func (cli *DockerCli) CmdPort(args ...string) error {
 		return err
 		return err
 	}
 	}
 
 
-	if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists {
-		if frontends == nil {
-			fmt.Fprintf(cli.out, "%s\n", port)
-		} else {
-			for _, frontend := range frontends {
-				fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
-			}
+	if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists && frontends != nil {
+		for _, frontend := range frontends {
+			fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
 		}
 		}
 	} else {
 	} else {
-		return fmt.Errorf("Error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
+		return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -928,7 +933,7 @@ func (cli *DockerCli) CmdKill(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdImport(args ...string) error {
 func (cli *DockerCli) CmdImport(args ...string) error {
-	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create a new filesystem image from the contents of a tarball(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz).")
+	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
 
 
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
@@ -1195,7 +1200,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 
 
 		w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 		w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
 		if !*quiet {
 		if !*quiet {
-			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tSIZE")
+			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
 		}
 		}
 
 
 		for _, out := range outs {
 		for _, out := range outs {
@@ -1208,12 +1213,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
 				}
 				}
 
 
 				if !*quiet {
 				if !*quiet {
-					fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t", repo, tag, out.ID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))))
-					if out.VirtualSize > 0 {
-						fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.Size), utils.HumanSize(out.VirtualSize))
-					} else {
-						fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
-					}
+					fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, out.ID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))), utils.HumanSize(out.VirtualSize))
 				} else {
 				} else {
 					fmt.Fprintln(w, out.ID)
 					fmt.Fprintln(w, out.ID)
 				}
 				}
@@ -1501,6 +1501,7 @@ func (cli *DockerCli) CmdDiff(args ...string) error {
 
 
 func (cli *DockerCli) CmdLogs(args ...string) error {
 func (cli *DockerCli) CmdLogs(args ...string) error {
 	cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
 	cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
+	follow := cmd.Bool("f", false, "Follow log output")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return nil
 		return nil
 	}
 	}
@@ -1520,7 +1521,15 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
 		return err
 		return err
 	}
 	}
 
 
-	if err := cli.hijack("POST", "/containers/"+name+"/attach?logs=1&stdout=1&stderr=1", container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
+	v := url.Values{}
+	v.Set("logs", "1")
+	v.Set("stdout", "1")
+	v.Set("stderr", "1")
+	if *follow && container.State.Running {
+		v.Set("stream", "1")
+	}
+
+	if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
 		return err
 		return err
 	}
 	}
 	return nil
 	return nil
@@ -1578,6 +1587,15 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
 	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
 		return err
 		return err
 	}
 	}
+
+	_, status, err := getExitCode(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+	if status != 0 {
+		return &utils.StatusError{StatusCode: status}
+	}
+
 	return nil
 	return nil
 }
 }
 
 
@@ -1635,54 +1653,6 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
 // Ports type - Used to parse multiple -p flags
 // Ports type - Used to parse multiple -p flags
 type ports []int
 type ports []int
 
 
-// AttachOpts stores arguments to 'docker run -a', eg. which streams to attach to
-type AttachOpts map[string]bool
-
-func (opts AttachOpts) String() string { return fmt.Sprintf("%v", map[string]bool(opts)) }
-func (opts AttachOpts) Set(val string) error {
-	if val != "stdin" && val != "stdout" && val != "stderr" {
-		return fmt.Errorf("Unsupported stream name: %s", val)
-	}
-	opts[val] = true
-	return nil
-}
-
-// LinkOpts stores arguments to `docker run -link`
-type LinkOpts []string
-
-func (link *LinkOpts) String() string { return fmt.Sprintf("%v", []string(*link)) }
-func (link *LinkOpts) Set(val string) error {
-	if _, err := parseLink(val); err != nil {
-		return err
-	}
-	*link = append(*link, val)
-	return nil
-}
-
-// PathOpts stores a unique set of absolute paths
-type PathOpts map[string]struct{}
-
-func (opts PathOpts) String() string { return fmt.Sprintf("%v", map[string]struct{}(opts)) }
-func (opts PathOpts) Set(val string) error {
-	var containerPath string
-
-	splited := strings.SplitN(val, ":", 2)
-	if len(splited) == 1 {
-		containerPath = splited[0]
-		val = filepath.Clean(splited[0])
-	} else {
-		containerPath = splited[1]
-		val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1]))
-	}
-
-	if !filepath.IsAbs(containerPath) {
-		utils.Debugf("%s is not an absolute path", containerPath)
-		return fmt.Errorf("%s is not an absolute path", containerPath)
-	}
-	opts[val] = struct{}{}
-	return nil
-}
-
 func (cli *DockerCli) CmdTag(args ...string) error {
 func (cli *DockerCli) CmdTag(args ...string) error {
 	cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
 	cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
 	force := cmd.Bool("f", false, "Force")
 	force := cmd.Bool("f", false, "Force")
@@ -1728,16 +1698,16 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
 	var (
 	var (
 		// FIXME: use utils.ListOpts for attach and volumes?
 		// FIXME: use utils.ListOpts for attach and volumes?
-		flAttach  = AttachOpts{}
-		flVolumes = PathOpts{}
-		flLinks   = LinkOpts{}
+		flAttach  = NewListOpts(ValidateAttach)
+		flVolumes = NewListOpts(ValidatePath)
+		flLinks   = NewListOpts(ValidateLink)
+		flEnv     = NewListOpts(ValidateEnv)
 
 
-		flPublish     utils.ListOpts
-		flExpose      utils.ListOpts
-		flEnv         utils.ListOpts
-		flDns         utils.ListOpts
-		flVolumesFrom utils.ListOpts
-		flLxcOpts     utils.ListOpts
+		flPublish     ListOpts
+		flExpose      ListOpts
+		flDns         ListOpts
+		flVolumesFrom ListOpts
+		flLxcOpts     ListOpts
 
 
 		flAutoRemove      = cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
 		flAutoRemove      = cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
 		flDetach          = cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
 		flDetach          = cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
@@ -1759,13 +1729,13 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 		_ = cmd.String("name", "", "Assign a name to the container")
 		_ = cmd.String("name", "", "Assign a name to the container")
 	)
 	)
 
 
-	cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
-	cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
+	cmd.Var(&flAttach, "a", "Attach to stdin, stdout or stderr.")
+	cmd.Var(&flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
 	cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")
 	cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")
+	cmd.Var(&flEnv, "e", "Set environment variables")
 
 
 	cmd.Var(&flPublish, "p", fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat))
 	cmd.Var(&flPublish, "p", fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat))
 	cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")
 	cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")
-	cmd.Var(&flEnv, "e", "Set environment variables")
 	cmd.Var(&flDns, "dns", "Set custom dns servers")
 	cmd.Var(&flDns, "dns", "Set custom dns servers")
 	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
 	cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
 	cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
 	cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
@@ -1780,7 +1750,7 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 	}
 	}
 
 
 	// Validate input params
 	// Validate input params
-	if *flDetach && len(flAttach) > 0 {
+	if *flDetach && flAttach.Len() > 0 {
 		return nil, nil, cmd, ErrConflictAttachDetach
 		return nil, nil, cmd, ErrConflictAttachDetach
 	}
 	}
 	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
 	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
@@ -1791,7 +1761,7 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 	}
 	}
 
 
 	// If neither -d or -a are set, attach to everything by default
 	// If neither -d or -a are set, attach to everything by default
-	if len(flAttach) == 0 && !*flDetach {
+	if flAttach.Len() == 0 && !*flDetach {
 		if !*flDetach {
 		if !*flDetach {
 			flAttach.Set("stdout")
 			flAttach.Set("stdout")
 			flAttach.Set("stderr")
 			flAttach.Set("stderr")
@@ -1801,17 +1771,6 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 		}
 		}
 	}
 	}
 
 
-	var envs []string
-	for _, env := range flEnv {
-		arr := strings.Split(env, "=")
-		if len(arr) > 1 {
-			envs = append(envs, env)
-		} else {
-			v := os.Getenv(env)
-			envs = append(envs, env+"="+v)
-		}
-	}
-
 	var flMemory int64
 	var flMemory int64
 	if *flMemoryString != "" {
 	if *flMemoryString != "" {
 		parsedMemory, err := utils.RAMInBytes(*flMemoryString)
 		parsedMemory, err := utils.RAMInBytes(*flMemoryString)
@@ -1823,16 +1782,15 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 
 
 	var binds []string
 	var binds []string
 	// add any bind targets to the list of container volumes
 	// add any bind targets to the list of container volumes
-	for bind := range flVolumes {
-		arr := strings.Split(bind, ":")
-		if len(arr) > 1 {
+	for bind := range flVolumes.GetMap() {
+		if arr := strings.Split(bind, ":"); len(arr) > 1 {
 			if arr[0] == "/" {
 			if arr[0] == "/" {
 				return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
 				return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
 			}
 			}
 			dstDir := arr[1]
 			dstDir := arr[1]
-			flVolumes[dstDir] = struct{}{}
+			flVolumes.Set(dstDir)
 			binds = append(binds, bind)
 			binds = append(binds, bind)
-			delete(flVolumes, bind)
+			flVolumes.Delete(bind)
 		}
 		}
 	}
 	}
 
 
@@ -1867,13 +1825,13 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 		domainname = parts[1]
 		domainname = parts[1]
 	}
 	}
 
 
-	ports, portBindings, err := parsePortSpecs(flPublish)
+	ports, portBindings, err := parsePortSpecs(flPublish.GetAll())
 	if err != nil {
 	if err != nil {
 		return nil, nil, cmd, err
 		return nil, nil, cmd, err
 	}
 	}
 
 
 	// Merge in exposed ports to the map of published ports
 	// Merge in exposed ports to the map of published ports
-	for _, e := range flExpose {
+	for _, e := range flExpose.GetAll() {
 		if strings.Contains(e, ":") {
 		if strings.Contains(e, ":") {
 			return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
 			return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
 		}
 		}
@@ -1894,15 +1852,15 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 		OpenStdin:       *flStdin,
 		OpenStdin:       *flStdin,
 		Memory:          flMemory,
 		Memory:          flMemory,
 		CpuShares:       *flCpuShares,
 		CpuShares:       *flCpuShares,
-		AttachStdin:     flAttach["stdin"],
-		AttachStdout:    flAttach["stdout"],
-		AttachStderr:    flAttach["stderr"],
-		Env:             envs,
+		AttachStdin:     flAttach.Get("stdin"),
+		AttachStdout:    flAttach.Get("stdout"),
+		AttachStderr:    flAttach.Get("stderr"),
+		Env:             flEnv.GetAll(),
 		Cmd:             runCmd,
 		Cmd:             runCmd,
-		Dns:             flDns,
+		Dns:             flDns.GetAll(),
 		Image:           image,
 		Image:           image,
-		Volumes:         flVolumes,
-		VolumesFrom:     strings.Join(flVolumesFrom, ","),
+		Volumes:         flVolumes.GetMap(),
+		VolumesFrom:     strings.Join(flVolumesFrom.GetAll(), ","),
 		Entrypoint:      entrypoint,
 		Entrypoint:      entrypoint,
 		WorkingDir:      *flWorkingDir,
 		WorkingDir:      *flWorkingDir,
 	}
 	}
@@ -1913,7 +1871,7 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
 		LxcConf:         lxcConf,
 		LxcConf:         lxcConf,
 		Privileged:      *flPrivileged,
 		Privileged:      *flPrivileged,
 		PortBindings:    portBindings,
 		PortBindings:    portBindings,
-		Links:           flLinks,
+		Links:           flLinks.GetAll(),
 		PublishAllPorts: *flPublishAll,
 		PublishAllPorts: *flPublishAll,
 	}
 	}
 
 
@@ -2154,7 +2112,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		}
 		}
 	}
 	}
 	if status != 0 {
 	if status != 0 {
-		return &utils.StatusError{Status: status}
+		return &utils.StatusError{StatusCode: status}
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -2195,7 +2153,7 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 }
 }
 
 
 func (cli *DockerCli) CmdSave(args ...string) error {
 func (cli *DockerCli) CmdSave(args ...string) error {
-	cmd := cli.Subcmd("save", "IMAGE DESTINATION", "Save an image to a tar archive")
+	cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
 	if err := cmd.Parse(args); err != nil {
 	if err := cmd.Parse(args); err != nil {
 		return err
 		return err
 	}
 	}
@@ -2340,7 +2298,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h
 	}
 	}
 
 
 	if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
 	if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
-		return utils.DisplayJSONMessagesStream(resp.Body, out, cli.isTerminal)
+		return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
 	}
 	}
 	if _, err := io.Copy(out, resp.Body); err != nil {
 	if _, err := io.Copy(out, resp.Body); err != nil {
 		return err
 		return err
@@ -2388,8 +2346,27 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 
 
 	var receiveStdout chan error
 	var receiveStdout chan error
 
 
-	if stdout != nil {
+	var oldState *term.State
+
+	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
+		oldState, err = term.SetRawTerminal(cli.terminalFd)
+		if err != nil {
+			return err
+		}
+		defer term.RestoreTerminal(cli.terminalFd, oldState)
+	}
+
+	if stdout != nil || stderr != nil {
 		receiveStdout = utils.Go(func() (err error) {
 		receiveStdout = utils.Go(func() (err error) {
+			defer func() {
+				if in != nil {
+					if setRawTerminal && cli.isTerminal {
+						term.RestoreTerminal(cli.terminalFd, oldState)
+					}
+					in.Close()
+				}
+			}()
+
 			// When TTY is ON, use regular copy
 			// When TTY is ON, use regular copy
 			if setRawTerminal {
 			if setRawTerminal {
 				_, err = io.Copy(stdout, br)
 				_, err = io.Copy(stdout, br)
@@ -2401,14 +2378,6 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 		})
 		})
 	}
 	}
 
 
-	if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
-		oldState, err := term.SetRawTerminal(cli.terminalFd)
-		if err != nil {
-			return err
-		}
-		defer term.RestoreTerminal(cli.terminalFd, oldState)
-	}
-
 	sendStdin := utils.Go(func() error {
 	sendStdin := utils.Go(func() error {
 		if in != nil {
 		if in != nil {
 			io.Copy(rwc, in)
 			io.Copy(rwc, in)
@@ -2427,7 +2396,7 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 		return nil
 		return nil
 	})
 	})
 
 
-	if stdout != nil {
+	if stdout != nil || stderr != nil {
 		if err := <-receiveStdout; err != nil {
 		if err := <-receiveStdout; err != nil {
 			utils.Errorf("Error receiveStdout: %s", err)
 			utils.Errorf("Error receiveStdout: %s", err)
 			return err
 			return err

+ 157 - 0
commands_unit_test.go

@@ -0,0 +1,157 @@
+package docker
+
+import (
+	"strings"
+	"testing"
+)
+
+func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
+	config, hostConfig, _, err := ParseRun(strings.Split(args+" ubuntu bash", " "), nil)
+	return config, hostConfig, err
+}
+
+func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
+	config, hostConfig, err := parse(t, args)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return config, hostConfig
+}
+
+func TestParseRunLinks(t *testing.T) {
+	if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
+		t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
+	}
+	if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
+		t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
+	}
+	if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
+		t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
+	}
+
+	if _, _, err := parse(t, "-link a"); err == nil {
+		t.Fatalf("Error parsing links. `-link a` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-link"); err == nil {
+		t.Fatalf("Error parsing links. `-link` should be an error but is not")
+	}
+}
+
+func TestParseRunAttach(t *testing.T) {
+	if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
+		t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+	}
+	if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
+		t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+	}
+	if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
+		t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+	}
+	if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
+		t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+	}
+
+	if _, _, err := parse(t, "-a"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a invalid"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stdin -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stdout -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-a stderr -d"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
+	}
+	if _, _, err := parse(t, "-d -rm"); err == nil {
+		t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
+	}
+}
+
+func TestParseRunVolumes(t *testing.T) {
+	if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/tmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/tmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes)
+	} else if _, exists := config.Volumes["/var"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
+	} else if _, exists := config.Volumes["/containerVar"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
+	} else if _, exists := config.Volumes["/containerVar"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
+	} else if _, exists := config.Volumes["/containerTmp"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
+	} else if _, exists := config.Volumes["/containerVar"]; !exists {
+		t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+	}
+
+	if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
+		t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
+	} else if len(config.Volumes) != 0 {
+		t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
+	}
+
+	mustParse(t, "-v /")
+
+	if _, _, err := parse(t, "-v /:/"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v /tmp:"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v /tmp::"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v :"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v ::"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
+	}
+	if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
+		t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
+	}
+}

+ 2 - 2
config.go

@@ -27,8 +27,8 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
 	config.Root = job.Getenv("Root")
 	config.Root = job.Getenv("Root")
 	config.AutoRestart = job.GetenvBool("AutoRestart")
 	config.AutoRestart = job.GetenvBool("AutoRestart")
 	config.EnableCors = job.GetenvBool("EnableCors")
 	config.EnableCors = job.GetenvBool("EnableCors")
-	if dns := job.Getenv("Dns"); dns != "" {
-		config.Dns = []string{dns}
+	if dns := job.GetenvList("Dns"); dns != nil {
+		config.Dns = dns
 	}
 	}
 	config.EnableIptables = job.GetenvBool("EnableIptables")
 	config.EnableIptables = job.GetenvBool("EnableIptables")
 	if br := job.Getenv("BridgeIface"); br != "" {
 	if br := job.Getenv("BridgeIface"); br != "" {

+ 217 - 148
container.go

@@ -24,6 +24,11 @@ import (
 	"time"
 	"time"
 )
 )
 
 
+var (
+	ErrNotATTY = errors.New("The PTY is not a file")
+	ErrNoTTY   = errors.New("No PTY found")
+)
+
 type Container struct {
 type Container struct {
 	sync.Mutex
 	sync.Mutex
 	root   string // Path to the "home" of the container, including metadata.
 	root   string // Path to the "home" of the container, including metadata.
@@ -536,162 +541,18 @@ func (container *Container) Start() (err error) {
 		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
 		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
 	}
 	}
 
 
-	// Create the requested bind mounts
-	binds := make(map[string]BindMap)
-	// Define illegal container destinations
-	illegalDsts := []string{"/", "."}
-
-	for _, bind := range container.hostConfig.Binds {
-		// FIXME: factorize bind parsing in parseBind
-		var src, dst, mode string
-		arr := strings.Split(bind, ":")
-		if len(arr) == 2 {
-			src = arr[0]
-			dst = arr[1]
-			mode = "rw"
-		} else if len(arr) == 3 {
-			src = arr[0]
-			dst = arr[1]
-			mode = arr[2]
-		} else {
-			return fmt.Errorf("Invalid bind specification: %s", bind)
-		}
-
-		// Bail if trying to mount to an illegal destination
-		for _, illegal := range illegalDsts {
-			if dst == illegal {
-				return fmt.Errorf("Illegal bind destination: %s", dst)
-			}
-		}
-
-		bindMap := BindMap{
-			SrcPath: src,
-			DstPath: dst,
-			Mode:    mode,
-		}
-		binds[path.Clean(dst)] = bindMap
-	}
-
 	if container.Volumes == nil || len(container.Volumes) == 0 {
 	if container.Volumes == nil || len(container.Volumes) == 0 {
 		container.Volumes = make(map[string]string)
 		container.Volumes = make(map[string]string)
 		container.VolumesRW = make(map[string]bool)
 		container.VolumesRW = make(map[string]bool)
 	}
 	}
 
 
 	// Apply volumes from another container if requested
 	// Apply volumes from another container if requested
-	if container.Config.VolumesFrom != "" {
-		containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
-		for _, containerSpec := range containerSpecs {
-			mountRW := true
-			specParts := strings.SplitN(containerSpec, ":", 2)
-			switch len(specParts) {
-			case 0:
-				return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
-			case 2:
-				switch specParts[1] {
-				case "ro":
-					mountRW = false
-				case "rw": // mountRW is already true
-				default:
-					return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
-				}
-			}
-			c := container.runtime.Get(specParts[0])
-			if c == nil {
-				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
-			}
-			for volPath, id := range c.Volumes {
-				if _, exists := container.Volumes[volPath]; exists {
-					continue
-				}
-				if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
-					return err
-				}
-				container.Volumes[volPath] = id
-				if isRW, exists := c.VolumesRW[volPath]; exists {
-					container.VolumesRW[volPath] = isRW && mountRW
-				}
-			}
-
-		}
+	if err := container.applyExternalVolumes(); err != nil {
+		return err
 	}
 	}
 
 
-	volumesDriver := container.runtime.volumes.driver
-	// Create the requested volumes if they don't exist
-	for volPath := range container.Config.Volumes {
-		volPath = path.Clean(volPath)
-		// Skip existing volumes
-		if _, exists := container.Volumes[volPath]; exists {
-			continue
-		}
-		var srcPath string
-		var isBindMount bool
-		srcRW := false
-		// If an external bind is defined for this volume, use that as a source
-		if bindMap, exists := binds[volPath]; exists {
-			isBindMount = true
-			srcPath = bindMap.SrcPath
-			if strings.ToLower(bindMap.Mode) == "rw" {
-				srcRW = true
-			}
-			// Otherwise create an directory in $ROOT/volumes/ and use that
-		} else {
-
-			// Do not pass a container as the parameter for the volume creation.
-			// The graph driver using the container's information ( Image ) to
-			// create the parent.
-			c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
-			if err != nil {
-				return err
-			}
-			srcPath, err = volumesDriver.Get(c.ID)
-			if err != nil {
-				return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
-			}
-			srcRW = true // RW by default
-		}
-		container.Volumes[volPath] = srcPath
-		container.VolumesRW[volPath] = srcRW
-		// Create the mountpoint
-		rootVolPath := path.Join(container.RootfsPath(), volPath)
-		if err := os.MkdirAll(rootVolPath, 0755); err != nil {
-			return err
-		}
-
-		// Do not copy or change permissions if we are mounting from the host
-		if srcRW && !isBindMount {
-			volList, err := ioutil.ReadDir(rootVolPath)
-			if err != nil {
-				return err
-			}
-			if len(volList) > 0 {
-				srcList, err := ioutil.ReadDir(srcPath)
-				if err != nil {
-					return err
-				}
-				if len(srcList) == 0 {
-					// If the source volume is empty copy files from the root into the volume
-					if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
-						return err
-					}
-
-					var stat syscall.Stat_t
-					if err := syscall.Stat(rootVolPath, &stat); err != nil {
-						return err
-					}
-					var srcStat syscall.Stat_t
-					if err := syscall.Stat(srcPath, &srcStat); err != nil {
-						return err
-					}
-					// Change the source volume's ownership if it differs from the root
-					// files that where just copied
-					if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
-						if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
-							return err
-						}
-					}
-				}
-			}
-		}
+	if err := container.createVolumes(); err != nil {
+		return err
 	}
 	}
 
 
 	if err := container.generateLXCConfig(); err != nil {
 	if err := container.generateLXCConfig(); err != nil {
@@ -877,6 +738,204 @@ func (container *Container) Start() (err error) {
 	return ErrContainerStart
 	return ErrContainerStart
 }
 }
 
 
+func (container *Container) getBindMap() (map[string]BindMap, error) {
+	// Create the requested bind mounts
+	binds := make(map[string]BindMap)
+	// Define illegal container destinations
+	illegalDsts := []string{"/", "."}
+
+	for _, bind := range container.hostConfig.Binds {
+		// FIXME: factorize bind parsing in parseBind
+		var src, dst, mode string
+		arr := strings.Split(bind, ":")
+		if len(arr) == 2 {
+			src = arr[0]
+			dst = arr[1]
+			mode = "rw"
+		} else if len(arr) == 3 {
+			src = arr[0]
+			dst = arr[1]
+			mode = arr[2]
+		} else {
+			return nil, fmt.Errorf("Invalid bind specification: %s", bind)
+		}
+
+		// Bail if trying to mount to an illegal destination
+		for _, illegal := range illegalDsts {
+			if dst == illegal {
+				return nil, fmt.Errorf("Illegal bind destination: %s", dst)
+			}
+		}
+
+		bindMap := BindMap{
+			SrcPath: src,
+			DstPath: dst,
+			Mode:    mode,
+		}
+		binds[path.Clean(dst)] = bindMap
+	}
+  return binds, nil
+}
+
+func (container *Container) createVolumes() error {
+  binds, err := container.getBindMap()
+  if err != nil {
+    return err
+  }
+	volumesDriver := container.runtime.volumes.driver
+	// Create the requested volumes if they don't exist
+	for volPath := range container.Config.Volumes {
+		volPath = path.Clean(volPath)
+		volIsDir := true
+		// Skip existing volumes
+		if _, exists := container.Volumes[volPath]; exists {
+			continue
+		}
+		var srcPath string
+		var isBindMount bool
+		srcRW := false
+		// If an external bind is defined for this volume, use that as a source
+		if bindMap, exists := binds[volPath]; exists {
+			isBindMount = true
+			srcPath = bindMap.SrcPath
+			if strings.ToLower(bindMap.Mode) == "rw" {
+				srcRW = true
+			}
+			if file, err := os.Open(bindMap.SrcPath); err != nil {
+				return err
+			} else {
+				defer file.Close()
+				if stat, err := file.Stat(); err != nil {
+					return err
+				} else {
+					volIsDir = stat.IsDir()
+				}
+			}
+			// Otherwise create an directory in $ROOT/volumes/ and use that
+		} else {
+
+			// Do not pass a container as the parameter for the volume creation.
+			// The graph driver using the container's information ( Image ) to
+			// create the parent.
+			c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
+			if err != nil {
+				return err
+			}
+			srcPath, err = volumesDriver.Get(c.ID)
+			if err != nil {
+				return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
+			}
+			srcRW = true // RW by default
+		}
+		container.Volumes[volPath] = srcPath
+		container.VolumesRW[volPath] = srcRW
+		// Create the mountpoint
+		rootVolPath := path.Join(container.RootfsPath(), volPath)
+		if volIsDir {
+			if err := os.MkdirAll(rootVolPath, 0755); err != nil {
+				return err
+			}
+		}
+
+		volPath = path.Join(container.RootfsPath(), volPath)
+		if _, err := os.Stat(volPath); err != nil {
+			if os.IsNotExist(err) {
+				if volIsDir {
+					if err := os.MkdirAll(volPath, 0755); err != nil {
+						return err
+					}
+				} else {
+					if err := os.MkdirAll(path.Dir(volPath), 0755); err != nil {
+						return err
+					}
+					if f, err := os.OpenFile(volPath, os.O_CREATE, 0755); err != nil {
+						return err
+					} else {
+						f.Close()
+					}
+				}
+			}
+		}
+
+		// Do not copy or change permissions if we are mounting from the host
+		if srcRW && !isBindMount {
+			volList, err := ioutil.ReadDir(rootVolPath)
+			if err != nil {
+				return err
+			}
+			if len(volList) > 0 {
+				srcList, err := ioutil.ReadDir(srcPath)
+				if err != nil {
+					return err
+				}
+				if len(srcList) == 0 {
+					// If the source volume is empty copy files from the root into the volume
+					if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
+						return err
+					}
+
+					var stat syscall.Stat_t
+					if err := syscall.Stat(rootVolPath, &stat); err != nil {
+						return err
+					}
+					var srcStat syscall.Stat_t
+					if err := syscall.Stat(srcPath, &srcStat); err != nil {
+						return err
+					}
+					// Change the source volume's ownership if it differs from the root
+					// files that where just copied
+					if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
+						if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
+							return err
+						}
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (container *Container) applyExternalVolumes() error {
+	if container.Config.VolumesFrom != "" {
+		containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
+		for _, containerSpec := range containerSpecs {
+			mountRW := true
+			specParts := strings.SplitN(containerSpec, ":", 2)
+			switch len(specParts) {
+			case 0:
+				return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
+			case 2:
+				switch specParts[1] {
+				case "ro":
+					mountRW = false
+				case "rw": // mountRW is already true
+				default:
+					return fmt.Errorf("Malformed volumes-from speficication: %s", containerSpec)
+				}
+			}
+			c := container.runtime.Get(specParts[0])
+			if c == nil {
+				return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
+			}
+			for volPath, id := range c.Volumes {
+				if _, exists := container.Volumes[volPath]; exists {
+					continue
+				}
+				if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
+					return err
+				}
+				container.Volumes[volPath] = id
+				if isRW, exists := c.VolumesRW[volPath]; exists {
+					container.VolumesRW[volPath] = isRW && mountRW
+				}
+			}
+
+		}
+	}
+	return nil
+}
+
 func (container *Container) Run() error {
 func (container *Container) Run() error {
 	if err := container.Start(); err != nil {
 	if err := container.Start(); err != nil {
 		return err
 		return err
@@ -1405,3 +1464,13 @@ func (container *Container) Exposes(p Port) bool {
 	_, exists := container.Config.ExposedPorts[p]
 	_, exists := container.Config.ExposedPorts[p]
 	return exists
 	return exists
 }
 }
+
+func (container *Container) GetPtyMaster() (*os.File, error) {
+	if container.ptyMaster == nil {
+		return nil, ErrNoTTY
+	}
+	if pty, ok := container.ptyMaster.(*os.File); ok {
+		return pty, nil
+	}
+	return nil, ErrNotATTY
+}

+ 1 - 3
contrib/init/systemd/docker.service

@@ -1,11 +1,9 @@
 [Unit]
 [Unit]
 Description=Docker Application Container Engine 
 Description=Docker Application Container Engine 
 Documentation=http://docs.docker.io
 Documentation=http://docs.docker.io
-Requires=network.target
-After=multi-user.target
+After=network.target
 
 
 [Service]
 [Service]
-Type=simple
 ExecStartPre=/bin/mount --make-rprivate /
 ExecStartPre=/bin/mount --make-rprivate /
 ExecStart=/usr/bin/docker -d
 ExecStart=/usr/bin/docker -d
 
 

+ 0 - 15
contrib/mkimage-centos.sh

@@ -1,15 +0,0 @@
-#!/bin/bash
-# Create a CentOS base image for Docker
-# From unclejack https://github.com/dotcloud/docker/issues/290
-set -e
-
-MIRROR_URL="http://centos.netnitco.net/6.4/os/x86_64/"
-MIRROR_URL_UPDATES="http://centos.netnitco.net/6.4/updates/x86_64/"
-
-yum install -y febootstrap xz
-
-febootstrap -i bash -i coreutils -i tar -i bzip2 -i gzip -i vim-minimal -i wget -i patch -i diffutils -i iproute -i yum centos centos64  $MIRROR_URL -u $MIRROR_URL_UPDATES
-touch centos64/etc/resolv.conf
-touch centos64/sbin/init
-
-tar --numeric-owner -Jcpf centos-64.tar.xz -C centos64 .

+ 112 - 0
contrib/mkimage-rinse.sh

@@ -0,0 +1,112 @@
+#!/bin/bash
+set -e
+
+repo="$1"
+distro="$2"
+mirror="$3"
+
+if [ ! "$repo" ] || [ ! "$distro" ]; then
+	self="$(basename $0)"
+	echo >&2 "usage: $self repo distro [mirror]"
+	echo >&2
+	echo >&2 "   ie: $self username/centos centos-5"
+	echo >&2 "       $self username/centos centos-6"
+	echo >&2
+	echo >&2 "   ie: $self username/slc slc-5"
+	echo >&2 "       $self username/slc slc-6"
+	echo >&2
+	echo >&2 "   ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
+	echo >&2 "       $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
+	echo >&2
+	echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
+	echo >&2 '  expected values of "mirror".'
+	echo >&2
+	echo >&2 'This script is tested to work with the original upstream version of rinse,'
+	echo >&2 '  found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
+	echo >&2 '  http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
+	echo >&2
+	exit 1
+fi
+
+target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+returnTo="$(pwd -P)"
+
+rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
+if [ "$mirror" ]; then
+	rinseArgs+=( --mirror "$mirror" )
+fi
+
+set -x
+
+mkdir -p "$target"
+
+sudo rinse "${rinseArgs[@]}"
+
+cd "$target"
+
+# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
+sudo rm -rf dev
+sudo mkdir -m 755 dev
+(
+	cd dev
+	sudo ln -sf /proc/self/fd ./
+	sudo mkdir -m 755 pts
+	sudo mkdir -m 1777 shm
+	sudo mknod -m 600 console c 5 1
+	sudo mknod -m 600 initctl p
+	sudo mknod -m 666 full c 1 7
+	sudo mknod -m 666 null c 1 3
+	sudo mknod -m 666 ptmx c 5 2
+	sudo mknod -m 666 random c 1 8
+	sudo mknod -m 666 tty c 5 0
+	sudo mknod -m 666 tty0 c 4 0
+	sudo mknod -m 666 urandom c 1 9
+	sudo mknod -m 666 zero c 1 5
+)
+
+# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
+#  locales
+sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
+#  docs
+sudo rm -rf usr/share/{man,doc,info,gnome/help}
+#  cracklib
+sudo rm -rf usr/share/cracklib
+#  i18n
+sudo rm -rf usr/share/i18n
+#  yum cache
+sudo rm -rf var/cache/yum
+sudo mkdir -p --mode=0755 var/cache/yum
+#  sln
+sudo rm -rf sbin/sln
+#  ldconfig
+#sudo rm -rf sbin/ldconfig
+sudo rm -rf etc/ld.so.cache var/cache/ldconfig
+sudo mkdir -p --mode=0755 var/cache/ldconfig
+
+# allow networking init scripts inside the container to work without extra steps
+echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null
+
+# to restore locales later:
+#  yum reinstall glibc-common
+
+version=
+if [ -r etc/redhat-release ]; then
+	version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
+elif [ -r etc/SuSE-release ]; then
+	version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
+fi
+
+if [ -z "$version" ]; then
+	echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
+	sleep 20
+	version="$distro"
+fi
+
+sudo tar --numeric-owner -c . | docker import - $repo:$version
+
+docker run -i -t $repo:$version echo success
+
+cd "$returnTo"
+sudo rm -rf "$target"

+ 77 - 0
contrib/mkseccomp.pl

@@ -0,0 +1,77 @@
+#!/usr/bin/perl
+#
+# A simple helper script to help people build seccomp profiles for
+# Docker/LXC.  The goal is mostly to reduce the attack surface to the
+# kernel, by restricting access to rarely used, recently added or not used
+# syscalls.
+#
+# This script processes one or more files which contain the list of system
+# calls to be allowed.  See mkseccomp.sample for more information how you
+# can configure the list of syscalls.  When run, this script produces output
+# which, when stored in a file, can be passed to docker as follows:
+#
+# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
+#
+# The included sample file shows how to cut about a quarter of all syscalls,
+# which affecting most applications.
+#
+# For specific situations it is possible to reduce the list further. By
+# reducing the list to just those syscalls required by a certain application
+# you can make it difficult for unknown/unexpected code to run.
+#
+# Run this script as follows:
+#
+# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
+# or
+# ./mkseccomp.pl mkseccomp.sample >syscalls.list
+#
+# Multiple files can be specified, in which case the lists of syscalls are
+# combined.
+#
+# By Martijn van Oosterhout <kleptog@svana.org> Nov 2013
+
+# How it works:
+#
+# This program basically spawns two processes to form a chain like:
+#
+# <process data section to prefix __NR_> | cpp | <add header and filter unknown syscalls>
+
+use strict;
+use warnings;
+
+if( -t ) {
+    print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
+    print STDERR "Usage: mkseccomp.pl [files...]\n";
+    exit 1;
+}
+
+my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";
+
+if($pid == 0) {  # Child
+    $pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";
+
+    if($pid == 0) { # Child, which execs cpp
+        exec "cpp" or die "Couldn't exec cpp ($!)\n";
+        exit 1;
+    }
+
+    # Process the DATA section and output to cpp
+    print $out "#include <sys/syscall.h>\n";
+    while(<>) {
+        if(/^\w/) {
+            print $out "__NR_$_";
+        }
+    }
+    close $out;
+    exit 0;
+
+}
+
+# Print header and then process output from cpp.
+print "1\n";
+print "whitelist\n";
+
+while(<$in>) {
+    print if( /^[0-9]/ );
+}
+

+ 444 - 0
contrib/mkseccomp.sample

@@ -0,0 +1,444 @@
+/* This sample file is an example for mkseccomp.pl to produce a seccomp file
+ * which restricts syscalls that are only useful for an admin but allows the
+ * vast majority of normal userspace programs to run normally.
+ *
+ * The format of this file is one line per syscall.  This is then processed
+ * and passed to 'cpp' to convert the names to numbers using whatever is
+ * correct for your platform.  As such C-style comments are permitted.  Note
+ * this also means that C preprocessor macros are also allowed.  So it is
+ * possible to create groups surrounded by #ifdef/#endif and control their
+ * inclusion via #define (not #include).
+ *
+ * Syscalls that don't exist on your architecture are silently filtered out.
+ * Syscalls marked with (*) are required for a container to spawn a bash
+ * shell successfully (not necessarily full featured).  Listing the same
+ * syscall multiple times is no problem.
+ *
+ * If you want to make a list specifically for one application the easiest
+ * way is to run the application under strace, like so:
+ *
+ * $ strace -f -q -c -o strace.out application args...
+ *
+ * Once you have a reasonable sample of the execution of the program, exit
+ * it.  The file strace.out will have a summary of the syscalls used.  Copy
+ * that list into this file, comment out everything else except the starred
+ * syscalls (which you need for the container to start) and you're done.
+ *
+ * To get the list of syscalls from the strace output this works well for
+ * me
+ *
+ * $ cut -c52 < strace.out
+ *
+ * This sample list was compiled as a combination of all the syscalls
+ * available on i386 and amd64 on Ubuntu Precise, as such it may not contain
+ * everything and not everything may be relevant for your system.  This
+ * shouldn't be a problem.
+ */
+
+// Filesystem/File descriptor related
+access                 // (*)
+chdir                  // (*)
+chmod
+chown
+chown32
+close                  // (*)
+creat
+dup                    // (*)
+dup2                   // (*)
+dup3
+epoll_create
+epoll_create1
+epoll_ctl
+epoll_ctl_old
+epoll_pwait
+epoll_wait
+epoll_wait_old
+eventfd
+eventfd2
+faccessat              // (*)
+fadvise64
+fadvise64_64
+fallocate
+fanotify_init
+fanotify_mark
+ioctl                  // (*)
+fchdir
+fchmod
+fchmodat
+fchown
+fchown32
+fchownat
+fcntl                  // (*)
+fcntl64
+fdatasync
+fgetxattr
+flistxattr
+flock
+fremovexattr
+fsetxattr
+fstat                  // (*)
+fstat64
+fstatat64
+fstatfs
+fstatfs64
+fsync
+ftruncate
+ftruncate64
+getcwd                 // (*)
+getdents               // (*)
+getdents64
+getxattr
+inotify_add_watch
+inotify_init
+inotify_init1
+inotify_rm_watch
+io_cancel
+io_destroy
+io_getevents
+io_setup
+io_submit
+lchown
+lchown32
+lgetxattr
+link
+linkat
+listxattr
+llistxattr
+llseek
+_llseek
+lremovexattr
+lseek                  // (*)
+lsetxattr
+lstat
+lstat64
+mkdir
+mkdirat
+mknod
+mknodat
+newfstatat
+_newselect
+oldfstat
+oldlstat
+oldolduname
+oldstat
+olduname
+oldwait4
+open                   // (*)
+openat                 // (*)
+pipe                   // (*)
+pipe2
+poll
+ppoll
+pread64
+preadv
+futimesat
+pselect6
+pwrite64
+pwritev
+read                   // (*)
+readahead
+readdir
+readlink
+readlinkat
+readv
+removexattr
+rename
+renameat
+rmdir
+select
+sendfile
+sendfile64
+setxattr
+splice
+stat                   // (*)
+stat64
+statfs                 // (*)
+statfs64
+symlink
+symlinkat
+sync
+sync_file_range
+sync_file_range2
+syncfs
+tee
+truncate
+truncate64
+umask
+unlink
+unlinkat
+ustat
+utime
+utimensat
+utimes
+write                  // (*)
+writev
+
+// Network related
+accept
+accept4
+bind                   // (*)
+connect                // (*)
+getpeername
+getsockname            // (*)
+getsockopt
+listen
+recv
+recvfrom               // (*)
+recvmmsg
+recvmsg
+send
+sendmmsg
+sendmsg
+sendto                 // (*)
+setsockopt
+shutdown
+socket                 // (*)
+socketcall
+socketpair
+
+// Signal related
+pause
+rt_sigaction           // (*)
+rt_sigpending
+rt_sigprocmask         // (*)
+rt_sigqueueinfo
+rt_sigreturn           // (*)
+rt_sigsuspend
+rt_sigtimedwait
+rt_tgsigqueueinfo
+sigaction
+sigaltstack            // (*)
+signal
+signalfd
+signalfd4
+sigpending
+sigprocmask
+sigreturn
+sigsuspend
+
+// Other needed POSIX
+alarm
+brk                    // (*)
+clock_adjtime
+clock_getres
+clock_gettime
+clock_nanosleep
+//clock_settime
+gettimeofday
+nanosleep
+nice
+sysinfo
+syslog
+time
+timer_create
+timer_delete
+timerfd_create
+timerfd_gettime
+timerfd_settime
+timer_getoverrun
+timer_gettime
+timer_settime
+times
+uname                  // (*)
+
+// Memory control
+madvise
+mbind
+mincore
+mlock
+mlockall
+mmap                   // (*)
+mmap2
+mprotect               // (*)
+mremap
+msync
+munlock
+munlockall
+munmap                 // (*)
+remap_file_pages
+set_mempolicy
+vmsplice
+
+// Process control
+capget
+//capset
+clone                  // (*)
+execve                 // (*)
+exit                   // (*)
+exit_group             // (*)
+fork
+getcpu
+getpgid
+getpgrp                // (*)
+getpid                 // (*)
+getppid                // (*)
+getpriority
+getresgid
+getresgid32
+getresuid
+getresuid32
+getrlimit              // (*)
+getrusage
+getsid
+getuid                 // (*)
+getuid32
+getegid                // (*)
+getegid32
+geteuid                // (*)
+geteuid32
+getgid                 // (*)
+getgid32
+getgroups
+getgroups32
+getitimer
+get_mempolicy
+kill
+//personality
+prctl
+prlimit64
+sched_getaffinity
+sched_getparam
+sched_get_priority_max
+sched_get_priority_min
+sched_getscheduler
+sched_rr_get_interval
+//sched_setaffinity
+//sched_setparam
+//sched_setscheduler
+sched_yield
+setfsgid
+setfsgid32
+setfsuid
+setfsuid32
+setgid
+setgid32
+setgroups
+setgroups32
+setitimer
+setpgid                // (*)
+setpriority
+setregid
+setregid32
+setresgid
+setresgid32
+setresuid
+setresuid32
+setreuid
+setreuid32
+setrlimit
+setsid
+setuid
+setuid32
+ugetrlimit
+vfork
+wait4                  // (*)
+waitid
+waitpid
+
+// IPC
+ipc
+mq_getsetattr
+mq_notify
+mq_open
+mq_timedreceive
+mq_timedsend
+mq_unlink
+msgctl
+msgget
+msgrcv
+msgsnd
+semctl
+semget
+semop
+semtimedop
+shmat
+shmctl
+shmdt
+shmget
+
+// Linux specific, mostly needed for thread-related stuff
+arch_prctl             // (*)
+get_robust_list
+get_thread_area
+gettid
+futex                  // (*)
+restart_syscall        // (*)
+set_robust_list        // (*)
+set_thread_area
+set_tid_address        // (*)
+tgkill
+tkill
+
+// Admin syscalls, these are blocked
+//acct
+//adjtimex
+//bdflush
+//chroot
+//create_module
+//delete_module
+//get_kernel_syms      // Obsolete
+//idle                 // Obsolete
+//init_module
+//ioperm
+//iopl
+//ioprio_get
+//ioprio_set
+//kexec_load
+//lookup_dcookie       // oprofile only?
+//migrate_pages        // NUMA
+//modify_ldt
+//mount
+//move_pages           // NUMA
+//name_to_handle_at    // NFS server
+//nfsservctl           // NFS server
+//open_by_handle_at    // NFS server
+//perf_event_open
+//pivot_root
+//process_vm_readv     // For debugger
+//process_vm_writev    // For debugger
+//ptrace               // For debugger
+//query_module
+//quotactl
+//reboot
+//setdomainname
+//sethostname
+//setns
+//settimeofday
+//sgetmask             // Obsolete
+//ssetmask             // Obsolete
+//stime
+//swapoff
+//swapon
+//_sysctl
+//sysfs
+//sys_setaltroot
+//umount
+//umount2
+//unshare
+//uselib
+//vhangup
+//vm86
+//vm86old
+
+// Kernel key management
+//add_key
+//keyctl
+//request_key
+
+// Unimplemented
+//afs_syscall
+//break
+//ftime
+//getpmsg
+//gtty
+//lock
+//madvise1
+//mpx
+//prof
+//profil
+//putpmsg
+//security
+//stty
+//tuxcall
+//ulimit
+//vserver

+ 3 - 0
contrib/udev/80-docker.rules

@@ -0,0 +1,3 @@
+# hide docker's loopback devices from udisks, and thus from user desktops
+SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"

+ 31 - 0
contrib/vagrant-docker/README.md

@@ -17,3 +17,34 @@ meaning you can use Vagrant to control Docker containers.
 
 
 * [docker-provider](https://github.com/fgrehm/docker-provider)
 * [docker-provider](https://github.com/fgrehm/docker-provider)
 * [vagrant-shell](https://github.com/destructuring/vagrant-shell)
 * [vagrant-shell](https://github.com/destructuring/vagrant-shell)
+
+## Setting up Vagrant-docker with the Remote API
+
+The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
+
+```
+description     "Docker daemon"
+
+start on filesystem and started lxc-net
+stop on runlevel [!2345]
+
+respawn
+
+script
+    /usr/bin/docker -d -H=tcp://0.0.0.0:4243/
+end script
+```
+
+Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
+
+```
+ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost
+```
+
+(The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
+
+Note that because the port has been changed, to run docker commands from within the command line you must run them like this:
+
+```
+sudo docker -H 0.0.0.0:4243 < commands for docker >
+```

+ 1 - 0
contrib/zfs/MAINTAINERS

@@ -0,0 +1 @@
+Gurjeet Singh <gurjeet@singh.im> (gurjeet.singh.im)

+ 22 - 0
contrib/zfs/README.md

@@ -0,0 +1,22 @@
+# ZFS Storage Driver
+
+This is a placeholder to declare the presence and status of ZFS storage driver
+for containers.
+
+The current development is done in Gurjeet Singh's fork of Docker, under the
+branch named [zfs_driver].
+
+[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver
+
+
+# Status
+
+Pre-alpha
+
+The code is under development. Contributions in the form of suggestions,
+code-reviews, and patches are welcome.
+
+Please send the communication to gurjeet@singh.im and CC at least one Docker
+mailing list.
+
+

+ 29 - 30
docker/docker.go

@@ -23,22 +23,25 @@ func main() {
 		sysinit.SysInit()
 		sysinit.SysInit()
 		return
 		return
 	}
 	}
-	// FIXME: Switch d and D ? (to be more sshd like)
-	flVersion := flag.Bool("v", false, "Print version information and quit")
-	flDaemon := flag.Bool("d", false, "Enable daemon mode")
-	flDebug := flag.Bool("D", false, "Enable debug mode")
-	flAutoRestart := flag.Bool("r", true, "Restart previously running containers")
-	bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
-	pidfile := flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
-	flRoot := flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
-	flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
-	flDns := flag.String("dns", "", "Force docker to use specific DNS servers")
-	flHosts := utils.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
+
+	var (
+		flVersion            = flag.Bool("v", false, "Print version information and quit")
+		flDaemon             = flag.Bool("d", false, "Enable daemon mode")
+		flDebug              = flag.Bool("D", false, "Enable debug mode")
+		flAutoRestart        = flag.Bool("r", true, "Restart previously running containers")
+		bridgeName           = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
+		pidfile              = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
+		flRoot               = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
+		flEnableCors         = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
+		flDns                = docker.NewListOpts(docker.ValidateIp4Address)
+		flEnableIptables     = flag.Bool("iptables", true, "Disable docker's addition of iptables rules")
+		flDefaultIp          = flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
+		flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication")
+		flGraphDriver        = flag.String("s", "", "Force the docker runtime to use a specific storage driver")
+		flHosts              = docker.NewListOpts(docker.ValidateHost)
+	)
+	flag.Var(&flDns, "dns", "Force docker to use specific DNS servers")
 	flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")
 	flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")
-	flEnableIptables := flag.Bool("iptables", true, "Disable docker's addition of iptables rules")
-	flDefaultIp := flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
-	flInterContainerComm := flag.Bool("icc", true, "Enable inter-container communication")
-	flGraphDriver := flag.String("s", "", "Force the docker runtime to use a specific storage driver")
 
 
 	flag.Parse()
 	flag.Parse()
 
 
@@ -46,16 +49,9 @@ func main() {
 		showVersion()
 		showVersion()
 		return
 		return
 	}
 	}
-	if len(flHosts) > 1 {
-		flHosts = flHosts[1:] //trick to display a nice default value in the usage
-	}
-	for i, flHost := range flHosts {
-		host, err := utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
-		if err == nil {
-			flHosts[i] = host
-		} else {
-			log.Fatal(err)
-		}
+	if flHosts.Len() == 0 {
+		// If we do not have a host, default to unix socket
+		flHosts.Set(fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET))
 	}
 	}
 
 
 	if *flDebug {
 	if *flDebug {
@@ -78,7 +74,7 @@ func main() {
 		job.Setenv("Root", *flRoot)
 		job.Setenv("Root", *flRoot)
 		job.SetenvBool("AutoRestart", *flAutoRestart)
 		job.SetenvBool("AutoRestart", *flAutoRestart)
 		job.SetenvBool("EnableCors", *flEnableCors)
 		job.SetenvBool("EnableCors", *flEnableCors)
-		job.Setenv("Dns", *flDns)
+		job.SetenvList("Dns", flDns.GetAll())
 		job.SetenvBool("EnableIptables", *flEnableIptables)
 		job.SetenvBool("EnableIptables", *flEnableIptables)
 		job.Setenv("BridgeIface", *bridgeName)
 		job.Setenv("BridgeIface", *bridgeName)
 		job.Setenv("DefaultIp", *flDefaultIp)
 		job.Setenv("DefaultIp", *flDefaultIp)
@@ -88,19 +84,22 @@ func main() {
 			log.Fatal(err)
 			log.Fatal(err)
 		}
 		}
 		// Serve api
 		// Serve api
-		job = eng.Job("serveapi", flHosts...)
+		job = eng.Job("serveapi", flHosts.GetAll()...)
 		job.SetenvBool("Logging", true)
 		job.SetenvBool("Logging", true)
 		if err := job.Run(); err != nil {
 		if err := job.Run(); err != nil {
 			log.Fatal(err)
 			log.Fatal(err)
 		}
 		}
 	} else {
 	} else {
-		if len(flHosts) > 1 {
+		if flHosts.Len() > 1 {
 			log.Fatal("Please specify only one -H")
 			log.Fatal("Please specify only one -H")
 		}
 		}
-		protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
+		protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
 		if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
 		if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
 			if sterr, ok := err.(*utils.StatusError); ok {
 			if sterr, ok := err.(*utils.StatusError); ok {
-				os.Exit(sterr.Status)
+				if sterr.Status != "" {
+					log.Println(sterr.Status)
+				}
+				os.Exit(sterr.StatusCode)
 			}
 			}
 			log.Fatal(err)
 			log.Fatal(err)
 		}
 		}

+ 2 - 0
docs/MAINTAINERS

@@ -1,2 +1,4 @@
 Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
 Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
 Ken Cochrane <ken@dotcloud.com> (@kencochrane)
 Ken Cochrane <ken@dotcloud.com> (@kencochrane)
+James Turnbull <james@lovedthanlost.net> (@jamesturnbull)
+Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)

+ 22 - 0
docs/sources/api/docker_remote_api.rst

@@ -34,6 +34,28 @@ Calling /images/<name>/insert is the same as calling
 You can still call an old version of the api using
 You can still call an old version of the api using
 /v1.0/images/<name>/insert
 /v1.0/images/<name>/insert
 
 
+
+v1.8
+****
+
+Full Documentation
+------------------
+
+:doc:`docker_remote_api_v1.8`
+
+What's new
+----------
+
+.. http:post:: /build
+
+   **New!** This endpoint now returns build status as json stream. In case
+   of a build error, it returns the exit status of the failed command.
+
+.. http:get:: /containers/(id)/json
+
+    **New!** This endpoint now returns the host config for the container.
+
+
 v1.7
 v1.7
 ****
 ****
 
 

+ 9 - 3
docs/sources/api/docker_remote_api_v1.7.rst

@@ -132,7 +132,9 @@ Create a container
 		],
 		],
 		"Dns":null,
 		"Dns":null,
 		"Image":"base",
 		"Image":"base",
-		"Volumes":{},
+		"Volumes":{
+			"/tmp": {}
+		},
 		"VolumesFrom":"",
 		"VolumesFrom":"",
 		"WorkingDir":""
 		"WorkingDir":""
 
 
@@ -361,8 +363,12 @@ Start a container
 
 
            {
            {
                 "Binds":["/tmp:/tmp"],
                 "Binds":["/tmp:/tmp"],
-                "LxcConf":{"lxc.utsname":"docker"}
+                "LxcConf":{"lxc.utsname":"docker"},
+                "PortBindings":null
+                "PublishAllPorts":false
            }
            }
+           
+        Binds need to reference Volumes that were defined during container creation.
 
 
         **Example response**:
         **Example response**:
 
 
@@ -990,10 +996,10 @@ Build an image from Dockerfile via stdin
    .. sourcecode:: http
    .. sourcecode:: http
 
 
       HTTP/1.1 200 OK
       HTTP/1.1 200 OK
+      Content-Type: application/json
 
 
       {{ STREAM }}
       {{ STREAM }}
 
 
-
    The stream must be a tar archive compressed with one of the
    The stream must be a tar archive compressed with one of the
    following algorithms: identity (no compression), gzip, bzip2,
    following algorithms: identity (no compression), gzip, bzip2,
    xz. 
    xz. 

+ 1273 - 0
docs/sources/api/docker_remote_api_v1.8.rst

@@ -0,0 +1,1273 @@
+:title: Remote API v1.8
+:description: API Documentation for Docker
+:keywords: API, Docker, rcli, REST, documentation
+
+:orphan:
+
+======================
+Docker Remote API v1.8
+======================
+
+.. contents:: Table of Contents
+
+1. Brief introduction
+=====================
+
+- The Remote API has replaced rcli
+- The daemon listens on ``unix:///var/run/docker.sock``, but you can
+  :ref:`bind_docker`.
+- The API tends to be REST, but for some complex commands, like
+  ``attach`` or ``pull``, the HTTP connection is hijacked to transport
+  ``stdout, stdin`` and ``stderr``
+
+2. Endpoints
+============
+
+2.1 Containers
+--------------
+
+List containers
+***************
+
+.. http:get:: /containers/json
+
+	List containers
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+	   
+	   [
+		{
+			"Id": "8dfafdbc3a40",
+			"Image": "base:latest",
+			"Command": "echo 1",
+			"Created": 1367854155,
+			"Status": "Exit 0",
+			"Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+			"SizeRw":12288,
+			"SizeRootFs":0
+		},
+		{
+			"Id": "9cd87474be90",
+			"Image": "base:latest",
+			"Command": "echo 222222",
+			"Created": 1367854155,
+			"Status": "Exit 0",
+			"Ports":[],
+			"SizeRw":12288,
+			"SizeRootFs":0
+		},
+		{
+			"Id": "3176a2479c92",
+			"Image": "base:latest",
+			"Command": "echo 3333333333333333",
+			"Created": 1367854154,
+			"Status": "Exit 0",
+			"Ports":[],
+			"SizeRw":12288,
+			"SizeRootFs":0
+		},
+		{
+			"Id": "4cb07b47f9fb",
+			"Image": "base:latest",
+			"Command": "echo 444444444444444444444444444444444",
+			"Created": 1367854152,
+			"Status": "Exit 0",
+			"Ports":[],
+			"SizeRw":12288,
+			"SizeRootFs":0
+		}
+	   ]
+ 
+	:query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default
+	:query limit: Show ``limit`` last created containers, include non-running ones.
+	:query since: Show only containers created since Id, include non-running ones.
+	:query before: Show only containers created before Id, include non-running ones.
+	:query size: 1/True/true or 0/False/false, Show the containers sizes
+	:statuscode 200: no error
+	:statuscode 400: bad parameter
+	:statuscode 500: server error
+
+
+Create a container
+******************
+
+.. http:post:: /containers/create
+
+	Create a container
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/create HTTP/1.1
+	   Content-Type: application/json
+
+	   {
+		"Hostname":"",
+		"User":"",
+		"Memory":0,
+		"MemorySwap":0,
+		"AttachStdin":false,
+		"AttachStdout":true,
+		"AttachStderr":true,
+		"PortSpecs":null,
+		"Privileged": false,
+		"Tty":false,
+		"OpenStdin":false,
+		"StdinOnce":false,
+		"Env":null,
+		"Cmd":[
+			"date"
+		],
+		"Dns":null,
+		"Image":"base",
+		"Volumes":{},
+		"VolumesFrom":"",
+		"WorkingDir":""
+
+	   }
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 201 OK
+	   Content-Type: application/json
+
+	   {
+		"Id":"e90e34656806"
+		"Warnings":[]
+	   }
+	
+	:jsonparam config: the container's configuration
+	:statuscode 201: no error
+	:statuscode 404: no such container
+	:statuscode 406: impossible to attach (container not running)
+	:statuscode 500: server error
+
+
+Inspect a container
+*******************
+
+.. http:get:: /containers/(id)/json
+
+	Return low-level information on the container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /containers/4fa6e0f0c678/json HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {
+			"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+			"Created": "2013-05-07T14:51:42.041847+02:00",
+			"Path": "date",
+			"Args": [],
+			"Config": {
+				"Hostname": "4fa6e0f0c678",
+				"User": "",
+				"Memory": 0,
+				"MemorySwap": 0,
+				"AttachStdin": false,
+				"AttachStdout": true,
+				"AttachStderr": true,
+				"PortSpecs": null,
+				"Tty": false,
+				"OpenStdin": false,
+				"StdinOnce": false,
+				"Env": null,
+				"Cmd": [
+					"date"
+				],
+				"Dns": null,
+				"Image": "base",
+				"Volumes": {},
+				"VolumesFrom": "",
+				"WorkingDir":""
+
+			},
+			"State": {
+				"Running": false,
+				"Pid": 0,
+				"ExitCode": 0,
+				"StartedAt": "2013-05-07T14:51:42.087658+02:01360",
+				"Ghost": false
+			},
+			"Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+			"NetworkSettings": {
+				"IpAddress": "",
+				"IpPrefixLen": 0,
+				"Gateway": "",
+				"Bridge": "",
+				"PortMapping": null
+			},
+			"SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+			"ResolvConfPath": "/etc/resolv.conf",
+			"Volumes": {},
+                        "HostConfig": {
+                            "Binds": null,
+                            "ContainerIDFile": "",
+                            "LxcConf": [],
+                            "Privileged": false,
+                            "PortBindings": {
+                               "80/tcp": [
+                                   {
+                                       "HostIp": "0.0.0.0",
+                                       "HostPort": "49153"
+                                   }
+                               ]
+                            },
+                            "Links": null,
+                            "PublishAllPorts": false
+                        }
+	   }
+
+	:statuscode 200: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+List processes running inside a container
+*****************************************
+
+.. http:get:: /containers/(id)/top
+
+	List processes running inside the container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /containers/4fa6e0f0c678/top HTTP/1.1
+
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {
+		"Titles":[
+			"USER",
+			"PID",
+			"%CPU",
+			"%MEM",
+			"VSZ",
+			"RSS",
+			"TTY",
+			"STAT",
+			"START",
+			"TIME",
+			"COMMAND"
+			],
+		"Processes":[
+			["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
+			["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
+		]
+	   }
+
+	:query ps_args: ps arguments to use (eg. aux)
+	:statuscode 200: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Inspect changes on a container's filesystem
+*******************************************
+
+.. http:get:: /containers/(id)/changes
+
+	Inspect changes on container ``id`` 's filesystem
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /containers/4fa6e0f0c678/changes HTTP/1.1
+
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+	   
+	   [
+		{
+			"Path":"/dev",
+			"Kind":0
+		},
+		{
+			"Path":"/dev/kmsg",
+			"Kind":1
+		},
+		{
+			"Path":"/test",
+			"Kind":1
+		}
+	   ]
+
+	:statuscode 200: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Export a container
+******************
+
+.. http:get:: /containers/(id)/export
+
+	Export the contents of container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /containers/4fa6e0f0c678/export HTTP/1.1
+
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/octet-stream
+	   
+	   {{ STREAM }}
+
+	:statuscode 200: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Start a container
+*****************
+
+.. http:post:: /containers/(id)/start
+
+        Start the container ``id``
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           POST /containers/(id)/start HTTP/1.1
+           Content-Type: application/json
+
+           {
+                "Binds":["/tmp:/tmp"],
+                "LxcConf":{"lxc.utsname":"docker"}
+           }
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 204 No Content
+           Content-Type: text/plain
+
+        :jsonparam hostConfig: the container's host configuration (optional)
+        :statuscode 204: no error
+        :statuscode 404: no such container
+        :statuscode 500: server error
+
+
+Stop a container
+****************
+
+.. http:post:: /containers/(id)/stop
+
+	Stop the container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/e90e34656806/stop?t=5 HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 204 OK
+	   	
+	:query t: number of seconds to wait before killing the container
+	:statuscode 204: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Restart a container
+*******************
+
+.. http:post:: /containers/(id)/restart
+
+	Restart the container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/e90e34656806/restart?t=5 HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 204 OK
+	   	
+	:query t: number of seconds to wait before killing the container
+	:statuscode 204: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Kill a container
+****************
+
+.. http:post:: /containers/(id)/kill
+
+	Kill the container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/e90e34656806/kill HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 204 OK
+	   	
+	:statuscode 204: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Attach to a container
+*********************
+
+.. http:post:: /containers/(id)/attach
+
+	Attach to the container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/vnd.docker.raw-stream
+
+	   {{ STREAM }}
+	   	
+	:query logs: 1/True/true or 0/False/false, return logs. Default false
+	:query stream: 1/True/true or 0/False/false, return stream. Default false
+	:query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false
+	:query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false
+	:query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false
+	:statuscode 200: no error
+	:statuscode 400: bad parameter
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+	**Stream details**:
+
+	When the TTY setting is enabled in
+	:http:post:`/containers/create`, the stream is the raw data
+	from the process PTY and client's stdin.  When the TTY is
+	disabled, then the stream is multiplexed to separate stdout
+	and stderr.
+
+	The format is a **Header** and a **Payload** (frame).
+
+	**HEADER**
+
+	The header contains the information about which stream the
+	payload belongs to (stdout or stderr). It also contains the size of
+	the associated frame, encoded in the last 4 bytes (uint32).
+
+	It is encoded on the first 8 bytes like this::
+
+	    header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+	``STREAM_TYPE`` can be:
+
+	- 0: stdin (will be written on stdout)
+	- 1: stdout
+	- 2: stderr
+
+	``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian.
+
+	**PAYLOAD**
+
+	The payload is the raw stream.
+
+	**IMPLEMENTATION**
+
+	The simplest way to implement the Attach protocol is the following:
+
+	1) Read 8 bytes
+	2) choose stdout or stderr depending on the first byte
+	3) Extract the frame size from the last 4 bytes
+	4) Read the extracted size and output it on the correct output
+	5) Goto 1)
+
+
+
+Wait a container
+****************
+
+.. http:post:: /containers/(id)/wait
+
+	Block until container ``id`` stops, then returns the exit code
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/16253994b7c4/wait HTTP/1.1
+	   
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {"StatusCode":0}
+	   	
+	:statuscode 200: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+Remove a container
+*******************
+
+.. http:delete:: /containers/(id)
+
+	Remove the container ``id`` from the filesystem
+
+	**Example request**:
+
+        .. sourcecode:: http
+
+           DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+	   HTTP/1.1 204 OK
+
+	:query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false
+        :statuscode 204: no error
+	:statuscode 400: bad parameter
+        :statuscode 404: no such container
+        :statuscode 500: server error
+
+
+Copy files or folders from a container
+**************************************
+
+.. http:post:: /containers/(id)/copy
+
+	Copy files or folders of container ``id``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   POST /containers/4fa6e0f0c678/copy HTTP/1.1
+	   Content-Type: application/json
+
+	   {
+		"Resource":"test.txt"
+	   }
+
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/octet-stream
+	   
+	   {{ STREAM }}
+
+	:statuscode 200: no error
+	:statuscode 404: no such container
+	:statuscode 500: server error
+
+
+2.2 Images
+----------
+
+List Images
+***********
+
+.. http:get:: /images/json
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /images/json?all=0 HTTP/1.1
+
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+	   
+	   [
+	     {
+	   	"RepoTag": [
+	   	  "ubuntu:12.04",
+	   	  "ubuntu:precise",
+	   	  "ubuntu:latest"
+	   	],
+	   	"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+	   	"Created": 1365714795,
+	   	"Size": 131506275,
+	   	"VirtualSize": 131506275
+	     },
+	     {
+	   	"RepoTag": [
+	   	  "ubuntu:12.10",
+	   	  "ubuntu:quantal"
+	   	],
+	   	"ParentId": "27cf784147099545",
+	   	"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+	   	"Created": 1364102658,
+	   	"Size": 24653,
+	   	"VirtualSize": 180116135
+	     }
+	   ]
+
+
+Create an image
+***************
+
+.. http:post:: /images/create
+
+	Create an image, either by pulling it from the registry or by importing it
+
+	**Example request**:
+
+        .. sourcecode:: http
+
+           POST /images/create?fromImage=base HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {"status":"Pulling..."}
+	   {"status":"Pulling", "progress":"1/? (n/a)"}
+	   {"error":"Invalid..."}
+	   ...
+
+	When using this endpoint to pull an image from the registry,
+	the ``X-Registry-Auth`` header can be used to include a
+	base64-encoded AuthConfig object.
+
+        :query fromImage: name of the image to pull
+	:query fromSrc: source to import, - means stdin
+        :query repo: repository
+	:query tag: tag
+	:query registry: the registry to pull from
+	:reqheader X-Registry-Auth: base64-encoded AuthConfig object
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+
+
+Insert a file in an image
+*************************
+
+.. http:post:: /images/(name)/insert
+
+	Insert a file from ``url`` in the image ``name`` at ``path``
+
+	**Example request**:
+
+        .. sourcecode:: http
+
+           POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
+
+	**Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {"status":"Inserting..."}
+	   {"status":"Inserting", "progress":"1/? (n/a)"}
+	   {"error":"Invalid..."}
+	   ...
+
+	:statuscode 200: no error
+        :statuscode 500: server error
+
+
+Inspect an image
+****************
+
+.. http:get:: /images/(name)/json
+
+	Return low-level information on the image ``name``
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   GET /images/base/json HTTP/1.1
+
+	**Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {
+		"id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+		"parent":"27cf784147099545",
+		"created":"2013-03-23T22:24:18.818426-07:00",
+		"container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+		"container_config":
+			{
+				"Hostname":"",
+				"User":"",
+				"Memory":0,
+				"MemorySwap":0,
+				"AttachStdin":false,
+				"AttachStdout":false,
+				"AttachStderr":false,
+				"PortSpecs":null,
+				"Tty":true,
+				"OpenStdin":true,
+				"StdinOnce":false,
+				"Env":null,
+				"Cmd": ["/bin/bash"]
+				,"Dns":null,
+				"Image":"base",
+				"Volumes":null,
+				"VolumesFrom":"",
+				"WorkingDir":""
+			},
+		"Size": 6824592
+	   }
+
+	:statuscode 200: no error
+	:statuscode 404: no such image
+        :statuscode 500: server error
+
+
+Get the history of an image
+***************************
+
+.. http:get:: /images/(name)/history
+
+        Return the history of the image ``name``
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           GET /images/base/history HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   [
+		{
+			"Id":"b750fe79269d",
+			"Created":1364102658,
+			"CreatedBy":"/bin/bash"
+		},
+		{
+			"Id":"27cf78414709",
+			"Created":1364068391,
+			"CreatedBy":""
+		}
+	   ]
+
+        :statuscode 200: no error
+        :statuscode 404: no such image
+        :statuscode 500: server error
+
+
+Push an image on the registry
+*****************************
+
+.. http:post:: /images/(name)/push
+
+   Push the image ``name`` on the registry
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /images/test/push HTTP/1.1
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status":"Pushing..."}
+    {"status":"Pushing", "progress":"1/? (n/a)"}
+    {"error":"Invalid..."}
+    ...
+
+   :query registry: the registry you want to push to, optional
+   :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object.
+   :statuscode 200: no error
+   :statuscode 404: no such image
+   :statuscode 500: server error
+
+
+Tag an image into a repository
+******************************
+
+.. http:post:: /images/(name)/tag
+
+	Tag the image ``name`` into a repository
+
+        **Example request**:
+
+        .. sourcecode:: http
+			
+	   POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1
+
+	**Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+
+	:query repo: The repository to tag in
+	:query force: 1/True/true or 0/False/false, default false
+	:statuscode 200: no error
+	:statuscode 400: bad parameter
+	:statuscode 404: no such image
+	:statuscode 409: conflict
+        :statuscode 500: server error
+
+
+Remove an image
+***************
+
+.. http:delete:: /images/(name)
+
+	Remove the image ``name`` from the filesystem 
+	
+	**Example request**:
+
+	.. sourcecode:: http
+
+	   DELETE /images/test HTTP/1.1
+
+	**Example response**:
+
+        .. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-type: application/json
+
+	   [
+	    {"Untagged":"3e2f21a89f"},
+	    {"Deleted":"3e2f21a89f"},
+	    {"Deleted":"53b4f83ac9"}
+	   ]
+
+	:statuscode 200: no error
+        :statuscode 404: no such image
+	:statuscode 409: conflict
+        :statuscode 500: server error
+
+
+Search images
+*************
+
+.. http:get:: /images/search
+
+	Search for an image in the docker index.
+	
+	.. note::
+	
+	   The response keys have changed from API v1.6 to reflect the JSON 
+	   sent by the registry server to the docker daemon's request.
+	
+	**Example request**:
+
+        .. sourcecode:: http
+
+           GET /images/search?term=sshd HTTP/1.1
+
+	**Example response**:
+
+	.. sourcecode:: http
+
+	   HTTP/1.1 200 OK
+	   Content-Type: application/json
+	   
+	   [
+		   {
+		       "description": "",
+		       "is_official": false,
+		       "is_trusted": false,
+		       "name": "wma55/u1210sshd",
+		       "star_count": 0
+		   },
+		   {
+		       "description": "",
+		       "is_official": false,
+		       "is_trusted": false,
+		       "name": "jdswinbank/sshd",
+		       "star_count": 0
+		   },
+		   {
+		       "description": "",
+		       "is_official": false,
+		       "is_trusted": false,
+		       "name": "vgauthier/sshd",
+		       "star_count": 0
+		   }
+	   ...
+	   ]
+
+	:query term: term to search
+	:statuscode 200: no error
+	:statuscode 500: server error
+
+
+2.3 Misc
+--------
+
+Build an image from Dockerfile via stdin
+****************************************
+
+.. http:post:: /build
+
+   Build an image from Dockerfile via stdin
+
+   **Example request**:
+
+   .. sourcecode:: http
+
+      POST /build HTTP/1.1
+
+      {{ STREAM }}
+
+   **Example response**:
+
+   .. sourcecode:: http
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+
+      {"status":"Step 1..."}
+      {"status":"..."}
+      {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}}
+
+
+   The stream must be a tar archive compressed with one of the
+   following algorithms: identity (no compression), gzip, bzip2,
+   xz. 
+
+   The archive must include a file called ``Dockerfile`` at its
+   root. It may include any number of other files, which will be
+   accessible in the build context (See the :ref:`ADD build command
+   <dockerbuilder>`).
+
+   :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success
+   :query q: suppress verbose build output
+   :query nocache: do not use the cache when building the image
+   :reqheader Content-type: should be set to ``"application/tar"``.
+   :statuscode 200: no error
+   :statuscode 500: server error
+
+
+
+Check auth configuration
+************************
+
+.. http:post:: /auth
+
+        Get the default username and email
+
+        **Example request**:
+
+        .. sourcecode:: http
+
+           POST /auth HTTP/1.1
+	   Content-Type: application/json
+
+	   {
+		"username":"hannibal",
+		"password":"xxxx",
+		"email":"hannibal@a-team.com",
+		"serveraddress":"https://index.docker.io/v1/"
+	   }
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+
+        :statuscode 200: no error
+        :statuscode 204: no error
+        :statuscode 500: server error
+
+
+Display system-wide information
+*******************************
+
+.. http:get:: /info
+
+	Display system-wide information
+	
+	**Example request**:
+
+        .. sourcecode:: http
+
+           GET /info HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {
+		"Containers":11,
+		"Images":16,
+		"Debug":false,
+		"NFd": 11,
+		"NGoroutines":21,
+		"MemoryLimit":true,
+		"SwapLimit":false,
+		"IPv4Forwarding":true
+	   }
+
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+
+Show the docker version information
+***********************************
+
+.. http:get:: /version
+
+	Show the docker version information
+
+	**Example request**:
+
+        .. sourcecode:: http
+
+           GET /version HTTP/1.1
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {
+		"Version":"0.2.2",
+		"GitCommit":"5a2a5cc+CHANGES",
+		"GoVersion":"go1.0.3"
+	   }
+
+        :statuscode 200: no error
+	:statuscode 500: server error
+
+
+Create a new image from a container's changes
+*********************************************
+
+.. http:post:: /commit
+
+    Create a new image from a container's changes
+
+    **Example request**:
+
+    .. sourcecode:: http
+
+        POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+
+    **Example response**:
+
+    .. sourcecode:: http
+
+        HTTP/1.1 201 OK
+	    Content-Type: application/vnd.docker.raw-stream
+
+        {"Id":"596069db4bf5"}
+
+    :query container: source container
+    :query repo: repository
+    :query tag: tag
+    :query m: commit message
+    :query author: author (eg. "John Hannibal Smith <hannibal@a-team.com>")
+    :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
+    :statuscode 201: no error
+    :statuscode 404: no such container
+    :statuscode 500: server error
+
+
+Monitor Docker's events
+***********************
+
+.. http:get:: /events
+
+	Get events from docker, either in real time via streaming, or via polling (using `since`)
+
+	**Example request**:
+
+	.. sourcecode:: http
+
+           POST /events?since=1374067924
+
+        **Example response**:
+
+        .. sourcecode:: http
+
+           HTTP/1.1 200 OK
+	   Content-Type: application/json
+
+	   {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+	   {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+	   {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
+	   {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
+
+	:query since: timestamp used for polling
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+Get a tarball containing all images and tags in a repository
+************************************************************
+
+.. http:get:: /images/(name)/get
+
+  Get a tarball containing all images and metadata for the repository specified by ``name``.
+
+  **Example request**
+
+  .. sourcecode:: http
+  
+           GET /images/ubuntu/get
+
+       **Example response**:
+
+       .. sourcecode:: http
+
+          HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+
+    Binary data stream
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+Load a tarball with a set of images and tags into docker
+********************************************************
+
+.. http:post:: /images/load
+
+  Load a set of images and tags into the docker repository.
+
+  **Example request**
+
+  .. sourcecode:: http
+
+           POST /images/load
+
+         Tarball in body
+
+       **Example response**:
+
+       .. sourcecode:: http
+
+          HTTP/1.1 200 OK
+
+        :statuscode 200: no error
+        :statuscode 500: server error
+
+3. Going further
+================
+
+3.1 Inside 'docker run'
+-----------------------
+
+Here are the steps of 'docker run' :
+
+* Create the container
+* If the status code is 404, it means the image doesn't exist:
+        * Try to pull it
+        * Then retry to create the container
+* Start the container
+* If you are not in detached mode:
+        * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1
+* If in detached mode or only stdin is attached:
+	* Display the container's id
+
+
+3.2 Hijacking
+-------------
+
+In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future.
+
+3.3 CORS Requests
+-----------------
+
+To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+
+.. code-block:: bash
+
+   docker -d -H="192.168.1.9:4243" -api-enable-cors
+

+ 1 - 5
docs/sources/api/registry_index_spec.rst

@@ -37,7 +37,7 @@ We expect that there will be only one instance of the index, run and managed by
 - It delegates authentication and authorization to the Index Auth service using tokens
 - It delegates authentication and authorization to the Index Auth service using tokens
 - It supports different storage backends (S3, cloud files, local FS)
 - It supports different storage backends (S3, cloud files, local FS)
 - It doesn’t have a local database
 - It doesn’t have a local database
-- It will be open-sourced at some point
+- `Source Code <https://github.com/dotcloud/docker-registry>`_
 
 
 We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
 We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
 
 
@@ -46,10 +46,6 @@ We expect that there will be multiple registries out there. To help to grasp the
 - **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
 - **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
 - **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
 - **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
 
 
-.. note::
-
-    Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
-
 .. note::
 .. note::
 
 
     The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
     The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):

+ 118 - 52
docs/sources/commandline/cli.rst

@@ -66,7 +66,8 @@ To run the daemon with debug output, use ``docker -d -D``
 
 
 You can detach from the container again (and leave it running) with
 You can detach from the container again (and leave it running) with
 ``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
 ``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
-the Docker client when it quits.
+the Docker client when it quits.  When you detach from the container's 
+process the exit code will be returned to the client.
 
 
 To stop a container, use ``docker stop``
 To stop a container, use ``docker stop``
 
 
@@ -142,7 +143,7 @@ Examples:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    sudo docker build .
+    $ sudo docker build .
     Uploading context 10240 bytes
     Uploading context 10240 bytes
     Step 1 : FROM busybox
     Step 1 : FROM busybox
     Pulling repository busybox
     Pulling repository busybox
@@ -182,7 +183,7 @@ message.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   sudo docker build -t vieux/apache:2.0 .
+   $ sudo docker build -t vieux/apache:2.0 .
 
 
 This will build like the previous example, but it will then tag the
 This will build like the previous example, but it will then tag the
 resulting image. The repository name will be ``vieux/apache`` and the
 resulting image. The repository name will be ``vieux/apache`` and the
@@ -191,7 +192,7 @@ tag will be ``2.0``
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    sudo docker build - < Dockerfile
+    $ sudo docker build - < Dockerfile
 
 
 This will read a ``Dockerfile`` from *stdin* without context. Due to
 This will read a ``Dockerfile`` from *stdin* without context. Due to
 the lack of a context, no contents of any local directory will be sent
 the lack of a context, no contents of any local directory will be sent
@@ -200,7 +201,7 @@ to the ``docker`` daemon.  Since there is no context, a Dockerfile
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    sudo docker build github.com/creack/docker-firefox
+    $ sudo docker build github.com/creack/docker-firefox
 
 
 This will clone the Github repository and use the cloned repository as
 This will clone the Github repository and use the cloned repository as
 context. The ``Dockerfile`` at the root of the repository is used as
 context. The ``Dockerfile`` at the root of the repository is used as
@@ -229,15 +230,15 @@ Simple commit of an existing container
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-	$ docker ps
+	$ sudo docker ps
 	ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
 	ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
 	c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
 	c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
 	197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
 	197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours                             
 	$ docker commit c3f279d17e0a  SvenDowideit/testimage:version3
 	$ docker commit c3f279d17e0a  SvenDowideit/testimage:version3
 	f5283438590d
 	f5283438590d
 	$ docker images | head
 	$ docker images | head
-	REPOSITORY                        TAG                 ID                  CREATED             SIZE
-	SvenDowideit/testimage            version3            f5283438590d        16 seconds ago      204.2 MB (virtual 335.7 MB)
+	REPOSITORY                        TAG                 ID                  CREATED             VIRTUAL SIZE
+	SvenDowideit/testimage            version3            f5283438590d        16 seconds ago      335.7 MB
 	
 	
 
 
 Full -run example
 Full -run example
@@ -480,16 +481,16 @@ Listing the most recently created images
 .. code-block:: bash
 .. code-block:: bash
 
 
 	$ sudo docker images | head
 	$ sudo docker images | head
-	REPOSITORY                    TAG                 IMAGE ID            CREATED             SIZE
-	<none>                        <none>              77af4d6b9913        19 hours ago        30.53 MB (virtual 1.089 GB)
-	committest                    latest              b6fa739cedf5        19 hours ago        30.53 MB (virtual 1.089 GB)
-	<none>                        <none>              78a85c484f71        19 hours ago        30.53 MB (virtual 1.089 GB)
-	docker                        latest              30557a29d5ab        20 hours ago        30.53 MB (virtual 1.089 GB)
-	<none>                        <none>              0124422dd9f9        20 hours ago        30.53 MB (virtual 1.089 GB)
-	<none>                        <none>              18ad6fad3402        22 hours ago        23.68 MB (virtual 1.082 GB)
-	<none>                        <none>              f9f1e26352f0        23 hours ago        30.46 MB (virtual 1.089 GB)
-	tryout                        latest              2629d1fa0b81        23 hours ago        16.4 kB (virtual 131.5 MB)
-	<none>                        <none>              5ed6274db6ce        24 hours ago        30.44 MB (virtual 1.089 GB)
+	REPOSITORY                    TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+	<none>                        <none>              77af4d6b9913        19 hours ago        1.089 GB
+	committest                    latest              b6fa739cedf5        19 hours ago        1.089 GB
+	<none>                        <none>              78a85c484f71        19 hours ago        1.089 GB
+	docker                        latest              30557a29d5ab        20 hours ago        1.089 GB
+	<none>                        <none>              0124422dd9f9        20 hours ago        1.089 GB
+	<none>                        <none>              18ad6fad3402        22 hours ago        1.082 GB
+	<none>                        <none>              f9f1e26352f0        23 hours ago        1.089 GB
+	tryout                        latest              2629d1fa0b81        23 hours ago        131.5 MB
+	<none>                        <none>              5ed6274db6ce        24 hours ago        1.089 GB
 
 
 Listing the full length image IDs
 Listing the full length image IDs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -497,16 +498,16 @@ Listing the full length image IDs
 .. code-block:: bash
 .. code-block:: bash
 
 
 	$ sudo docker images -notrunc | head
 	$ sudo docker images -notrunc | head
-	REPOSITORY                    TAG                 IMAGE ID                                                           CREATED             SIZE
-	<none>                        <none>              77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        30.53 MB (virtual 1.089 GB)
-	committest                    latest              b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        30.53 MB (virtual 1.089 GB)
-	<none>                        <none>              78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921   19 hours ago        30.53 MB (virtual 1.089 GB)
-	docker                        latest              30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        30.53 MB (virtual 1.089 GB)
-	<none>                        <none>              0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5   20 hours ago        30.53 MB (virtual 1.089 GB)
-	<none>                        <none>              18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b   22 hours ago        23.68 MB (virtual 1.082 GB)
-	<none>                        <none>              f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a   23 hours ago        30.46 MB (virtual 1.089 GB)
-	tryout                        latest              2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074   23 hours ago        16.4 kB (virtual 131.5 MB)
-	<none>                        <none>              5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df   24 hours ago        30.44 MB (virtual 1.089 GB)
+	REPOSITORY                    TAG                 IMAGE ID                                                           CREATED             VIRTUAL SIZE
+	<none>                        <none>              77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        1.089 GB
+	committest                    latest              b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        1.089 GB
+	<none>                        <none>              78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921   19 hours ago        1.089 GB
+	docker                        latest              30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4   20 hours ago        1.089 GB
+	<none>                        <none>              0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5   20 hours ago        1.089 GB
+	<none>                        <none>              18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b   22 hours ago        1.082 GB
+	<none>                        <none>              f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a   23 hours ago        1.089 GB
+	tryout                        latest              2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074   23 hours ago        131.5 MB
+	<none>                        <none>              5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df   24 hours ago        1.089 GB
 
 
 Displaying images visually
 Displaying images visually
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -557,7 +558,8 @@ Displaying image hierarchy
 
 
     Usage: docker import URL|- [REPOSITORY[:TAG]]
     Usage: docker import URL|- [REPOSITORY[:TAG]]
 
 
-    Create a new filesystem image from the contents of a tarball
+    Create an empty filesystem image and import the contents of the tarball 
+    (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
 
 
 At this time, the URL must start with ``http`` and point to a single
 At this time, the URL must start with ``http`` and point to a single
 file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
 file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
@@ -657,6 +659,52 @@ Insert file from github
 
 
     Return low-level information on a container
     Return low-level information on a container
 
 
+      -format="": template to output results
+
+By default, this will render all results in a JSON array.  If a format
+is specified, the given template will be executed for each result.
+
+Go's `text/template <http://golang.org/pkg/text/template/>` package
+describes all the details of the format.
+
+Examples
+~~~~~~~~
+
+Get an instance's IP Address
+............................
+
+For the most part, you can pick out any field from the JSON in a
+fairly straightforward manner.
+
+.. code-block:: bash
+
+    $ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
+
+List All Port Bindings
+......................
+
+One can loop over arrays and maps in the results to produce simple
+text output:
+
+.. code-block:: bash
+
+    $ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+
+Find a Specific Port Mapping
+............................
+
+The ``.Field`` syntax doesn't work when the field name begins with a
+number, but the template language's ``index`` function does.  The
+``.NetworkSettings.Ports`` section contains a map of the internal port
+mappings to a list of external address/port objects, so to grab just
+the numeric public port, you use ``index`` to find the specific port
+map, and then ``index`` 0 contains first object inside of that.  Then
+we ask for the ``HostPort`` field to get the public address.
+
+.. code-block:: bash
+
+    $ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+
 .. _cli_kill:
 .. _cli_kill:
 
 
 ``kill``
 ``kill``
@@ -749,6 +797,15 @@ Known Issues (kill)
       -notrunc=false: Don't truncate output
       -notrunc=false: Don't truncate output
       -q=false: Only display numeric IDs
       -q=false: Only display numeric IDs
 
 
+Running ``docker ps`` showing 2 linked containers.
+
+.. code-block:: bash
+
+    $ docker ps
+    CONTAINER ID        IMAGE                        COMMAND                CREATED              STATUS              PORTS               NAMES
+    4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds                           webapp              
+    d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago       Up 33 minutes       6379/tcp            redis,webapp/db     
+
 .. _cli_pull:
 .. _cli_pull:
 
 
 ``pull``
 ``pull``
@@ -797,7 +854,7 @@ Known Issues (kill)
         -link="": Remove the link instead of the actual container
         -link="": Remove the link instead of the actual container
 
 
 Known Issues (rm)
 Known Issues (rm)
-~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~
 
 
 * :issue:`197` indicates that ``docker kill`` may leave directories
 * :issue:`197` indicates that ``docker kill`` may leave directories
   behind and make it difficult to remove the container.
   behind and make it difficult to remove the container.
@@ -808,7 +865,7 @@ Examples:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    $ docker rm /redis
+    $ sudo docker rm /redis
     /redis
     /redis
 
 
 
 
@@ -817,7 +874,7 @@ This will remove the container referenced under the link ``/redis``.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    $ docker rm -link /webapp/redis
+    $ sudo docker rm -link /webapp/redis
     /webapp/redis
     /webapp/redis
 
 
 
 
@@ -826,7 +883,7 @@ network communication.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    $ docker rm `docker ps -a -q`
+    $ sudo docker rm `docker ps -a -q`
 
 
 
 
 This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
 This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
@@ -881,12 +938,19 @@ containers will not be deleted.
       -name="": Assign the specified name to the container. If no name is specific docker will generate a random name
       -name="": Assign the specified name to the container. If no name is specific docker will generate a random name
       -P=false: Publish all exposed ports to the host interfaces
       -P=false: Publish all exposed ports to the host interfaces
 
 
-Examples
---------
+Known Issues (run -volumes-from)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
+  could indicate a permissions problem with AppArmor. Please see the
+  issue for a workaround.
+
+Examples:
+~~~~~~~~~
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
+    $ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
 
 
 This will create a container and print "test" to the console. The
 This will create a container and print "test" to the console. The
 ``cidfile`` flag makes docker attempt to create a new file and write the
 ``cidfile`` flag makes docker attempt to create a new file and write the
@@ -895,7 +959,10 @@ error. Docker will close this file when docker run exits.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   docker run mount -t tmpfs none /var/spool/squid
+   $ sudo docker run -t -i -rm ubuntu bash
+   root@bc338942ef20:/# mount -t tmpfs none /mnt
+   mount: permission denied
+
 
 
 This will *not* work, because by default, most potentially dangerous
 This will *not* work, because by default, most potentially dangerous
 kernel capabilities are dropped; including ``cap_sys_admin`` (which is
 kernel capabilities are dropped; including ``cap_sys_admin`` (which is
@@ -904,7 +971,12 @@ allow it to run:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   docker run -privileged mount -t tmpfs none /var/spool/squid
+   $ sudo docker run -privileged ubuntu bash
+   root@50e3f57e16e6:/# mount -t tmpfs none /mnt
+   root@50e3f57e16e6:/# df -h
+   Filesystem      Size  Used Avail Use% Mounted on
+   none            1.9G     0  1.9G   0% /mnt
+
 
 
 The ``-privileged`` flag gives *all* capabilities to the container,
 The ``-privileged`` flag gives *all* capabilities to the container,
 and it also lifts all the limitations enforced by the ``device``
 and it also lifts all the limitations enforced by the ``device``
@@ -914,7 +986,7 @@ use-cases, like running Docker within Docker.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   docker  run -w /path/to/dir/ -i -t  ubuntu pwd
+   $ sudo docker  run -w /path/to/dir/ -i -t  ubuntu pwd
 
 
 The ``-w`` flag lets the command be executed inside the given directory,
 The ``-w`` flag lets the command be executed inside the given directory,
 here /path/to/dir/. If the path does not exist it is created inside the
 here /path/to/dir/. If the path does not exist it is created inside the
@@ -922,7 +994,7 @@ container.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
+   $ sudo docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
 
 
 The ``-v`` flag mounts the current working directory into the container.
 The ``-v`` flag mounts the current working directory into the container.
 The ``-w`` lets the command being executed inside the current
 The ``-w`` lets the command being executed inside the current
@@ -932,7 +1004,7 @@ using the container, but inside the current working directory.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    docker run -p 127.0.0.1:80:8080 ubuntu bash
+   $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash
 
 
 This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
 This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
 host machine. :ref:`port_redirection` explains in detail how to manipulate ports
 host machine. :ref:`port_redirection` explains in detail how to manipulate ports
@@ -940,7 +1012,7 @@ in Docker.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    docker run -expose 80 ubuntu bash
+    $ sudo docker run -expose 80 ubuntu bash
 
 
 This exposes port ``80`` of the container for use within a link without
 This exposes port ``80`` of the container for use within a link without
 publishing the port to the host system's interfaces. :ref:`port_redirection`
 publishing the port to the host system's interfaces. :ref:`port_redirection`
@@ -948,14 +1020,14 @@ explains in detail how to manipulate ports in Docker.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    docker run -name console -t -i ubuntu bash
+    $ sudo docker run -name console -t -i ubuntu bash
 
 
 This will create and run a new container with the container name
 This will create and run a new container with the container name
 being ``console``.
 being ``console``.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    docker run -link /redis:redis -name console ubuntu bash
+    $ sudo docker run -link /redis:redis -name console ubuntu bash
 
 
 The ``-link`` flag will link the container named ``/redis`` into the
 The ``-link`` flag will link the container named ``/redis`` into the
 newly created container with the alias ``redis``.  The new container
 newly created container with the alias ``redis``.  The new container
@@ -965,7 +1037,7 @@ to the newly created container.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-   docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
+   $ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
 
 
 The ``-volumes-from`` flag mounts all the defined volumes from the
 The ``-volumes-from`` flag mounts all the defined volumes from the
 reference containers. Containers can be specified by a comma separated
 reference containers. Containers can be specified by a comma separated
@@ -974,16 +1046,10 @@ id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
 read-only or read-write mode, respectively. By default, the volumes are mounted
 read-only or read-write mode, respectively. By default, the volumes are mounted
 in the same mode (rw or ro) as the reference container.
 in the same mode (rw or ro) as the reference container.
 
 
-Known Issues (run -volumes-from)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
-  could indicate a permissions problem with AppArmor. Please see the
-  issue for a workaround.
-
 .. _cli_save:
 .. _cli_save:
 
 
 ``save``
 ``save``
+---------
 
 
 ::
 ::
 
 

+ 23 - 8
docs/sources/contributing/devenvironment.rst

@@ -42,9 +42,7 @@ This following command will build a development environment using the Dockerfile
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    sudo docker build -t docker .
-
-
+    sudo make build
 
 
 If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment. 
 If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment. 
 
 
@@ -56,7 +54,7 @@ To create the Docker binary, run this command:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-	sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
+	sudo make binary
 
 
 This will create the Docker binary in ``./bundles/<version>-dev/binary/``
 This will create the Docker binary in ``./bundles/<version>-dev/binary/``
 
 
@@ -68,10 +66,15 @@ To execute the test cases, run this command:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-	sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
+	sudo make test
+
 
 
+Note: if you're running the tests in vagrant, you need to specify a dns entry in 
+the command (either edit the Makefile, or run the step manually): 
+
+.. code-block:: bash
 
 
-Note: if you're running the tests in vagrant, you need to specify a dns entry in the command: `-dns 8.8.8.8`
+	sudo docker run -dns 8.8.8.8 -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
 
 
 If the test are successful then the tail of the output should look something like this
 If the test are successful then the tail of the output should look something like this
 
 
@@ -113,10 +116,22 @@ You can run an interactive session in the newly built container:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-	sudo docker run -privileged -i -t docker bash
+	sudo make shell
 
 
-	# type 'exit' to exit
+	# type 'exit' or Ctrl-D to exit
+
+
+Extra Step: Build and view the Documentation
+--------------------------------------------
+
+If you want to read the documentation from a local website, or are making changes
+to it, you can build the documentation and then serve it by:
+
+.. code-block:: bash
 
 
+	sudo make doc
+    # when it's done, you can point your browser to http://yourdockerhost:8000
+	# type Ctrl-C to exit
 
 
 
 
 .. note:: The binary is available outside the container in the directory  ``./bundles/<version>-dev/binary/``. You can swap your host docker executable with this binary for live testing - for example, on ubuntu: ``sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start``.
 .. note:: The binary is available outside the container in the directory  ``./bundles/<version>-dev/binary/``. You can swap your host docker executable with this binary for live testing - for example, on ubuntu: ``sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start``.

+ 0 - 2
docs/sources/examples/hello_world.rst

@@ -131,8 +131,6 @@ Attach to the container to see the results in real-time.
 
 
 - **"docker attach**" This will allow us to attach to a background
 - **"docker attach**" This will allow us to attach to a background
   process to see what is going on.
   process to see what is going on.
-- **"-sig-proxy=true"** Proxify all received signal to the process
-  (even in non-tty mode)
 - **$CONTAINER_ID** The Id of the container we want to attach too.
 - **$CONTAINER_ID** The Id of the container we want to attach too.
 
 
 Exit from the container attachment by pressing Control-C.
 Exit from the container attachment by pressing Control-C.

+ 30 - 39
docs/sources/examples/postgresql_service.rst

@@ -7,26 +7,18 @@
 PostgreSQL Service
 PostgreSQL Service
 ==================
 ==================
 
 
-.. note::
-
-    A shorter version of `this blog post`_.
+.. include:: example_header.inc
 
 
 .. note::
 .. note::
 
 
-    As of version 0.5.2, Docker requires root privileges to run.
-    You have to either manually adjust your system configuration (permissions on
-    /var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
-    `this thread`_ for details.
+    A shorter version of `this blog post`_.
 
 
 .. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
 .. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
-.. _this thread: https://groups.google.com/forum/?fromgroups#!topic/docker-club/P3xDLqmLp0E
 
 
 Installing PostgreSQL on Docker
 Installing PostgreSQL on Docker
 -------------------------------
 -------------------------------
 
 
-For clarity I won't be showing command output.
-
-Run an interactive shell in Docker container.
+Run an interactive shell in a Docker container.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
@@ -38,26 +30,26 @@ Update its dependencies.
 
 
     apt-get update
     apt-get update
 
 
-Install ``python-software-properties``.
+Install ``python-software-properties``, ``software-properties-common``, ``wget`` and ``vim``.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    apt-get -y install python-software-properties
-    apt-get -y install software-properties-common
+    apt-get -y install python-software-properties software-properties-common wget vim
 
 
-Add Pitti's PostgreSQL repository. It contains the most recent stable release
-of PostgreSQL i.e. ``9.2``.
+Add PostgreSQL's repository. It contains the most recent stable release
+of PostgreSQL, ``9.3``.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    add-apt-repository ppa:pitti/postgresql
+    wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
+    echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
     apt-get update
     apt-get update
 
 
-Finally, install PostgreSQL 9.2
+Finally, install PostgreSQL 9.3
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
+    apt-get -y install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
 
 
 Now, create a PostgreSQL superuser role that can create databases and
 Now, create a PostgreSQL superuser role that can create databases and
 other roles.  Following Vagrant's convention the role will be named
 other roles.  Following Vagrant's convention the role will be named
@@ -76,15 +68,14 @@ role.
 
 
 Adjust PostgreSQL configuration so that remote connections to the
 Adjust PostgreSQL configuration so that remote connections to the
 database are possible. Make sure that inside
 database are possible. Make sure that inside
-``/etc/postgresql/9.2/main/pg_hba.conf`` you have following line (you will need
-to install an editor, e.g. ``apt-get install vim``):
+``/etc/postgresql/9.3/main/pg_hba.conf`` you have following line:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
     host    all             all             0.0.0.0/0               md5
     host    all             all             0.0.0.0/0               md5
 
 
-Additionaly, inside ``/etc/postgresql/9.2/main/postgresql.conf``
-uncomment ``listen_addresses`` so it is as follows:
+Additionally, inside ``/etc/postgresql/9.3/main/postgresql.conf``
+uncomment ``listen_addresses`` like so:
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
@@ -94,7 +85,7 @@ uncomment ``listen_addresses`` so it is as follows:
 
 
     This PostgreSQL setup is for development only purposes. Refer
     This PostgreSQL setup is for development only purposes. Refer
     to PostgreSQL documentation how to fine-tune these settings so that it
     to PostgreSQL documentation how to fine-tune these settings so that it
-    is enough secure.
+    is secure enough.
 
 
 Exit.
 Exit.
 
 
@@ -102,43 +93,43 @@ Exit.
 
 
     exit
     exit
 
 
-Create an image and assign it a name. ``<container_id>`` is in the
-Bash prompt; you can also locate it using ``docker ps -a``.
+Create an image from our container and assign it a name. The ``<container_id>``
+is in the Bash prompt; you can also locate it using ``docker ps -a``.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
     sudo docker commit <container_id> <your username>/postgresql
     sudo docker commit <container_id> <your username>/postgresql
 
 
-Finally, run PostgreSQL server via ``docker``.
+Finally, run the PostgreSQL server via ``docker``.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
     CONTAINER=$(sudo docker run -d -p 5432 \
     CONTAINER=$(sudo docker run -d -p 5432 \
       -t <your username>/postgresql \
       -t <your username>/postgresql \
-      /bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
-        -D /var/lib/postgresql/9.2/main \
-        -c config_file=/etc/postgresql/9.2/main/postgresql.conf')
+      /bin/su postgres -c '/usr/lib/postgresql/9.3/bin/postgres \
+        -D /var/lib/postgresql/9.3/main \
+        -c config_file=/etc/postgresql/9.3/main/postgresql.conf')
 
 
-Connect the PostgreSQL server using ``psql`` (You will need postgres installed
-on the machine.  For ubuntu, use something like
-``sudo apt-get install postgresql``).
+Connect the PostgreSQL server using ``psql`` (You will need the
+postgresql client installed on the machine.  For ubuntu, use something
+like ``sudo apt-get install postgresql-client``).
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    CONTAINER_IP=$(sudo docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
+    CONTAINER_IP=$(sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER)
     psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
     psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
 
 
 As before, create roles or databases if needed.
 As before, create roles or databases if needed.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    psql (9.2.4)
+    psql (9.3.1)
     Type "help" for help.
     Type "help" for help.
 
 
     docker=# CREATE DATABASE foo OWNER=docker;
     docker=# CREATE DATABASE foo OWNER=docker;
     CREATE DATABASE
     CREATE DATABASE
 
 
-Additionally, publish your newly created image on Docker Index.
+Additionally, publish your newly created image on the Docker Index.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
@@ -160,9 +151,9 @@ container starts.
 .. code-block:: bash
 .. code-block:: bash
 
 
     sudo docker commit -run='{"Cmd": \
     sudo docker commit -run='{"Cmd": \
-      ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
-      /var/lib/postgresql/9.2/main -c \
-      config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
+      ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.3/bin/postgres -D \
+      /var/lib/postgresql/9.3/main -c \
+      config_file=/etc/postgresql/9.3/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
       <container_id> <your username>/postgresql
       <container_id> <your username>/postgresql
 
 
 From now on, just type ``docker run <your username>/postgresql`` and
 From now on, just type ``docker run <your username>/postgresql`` and

+ 28 - 11
docs/sources/installation/amazon.rst

@@ -22,20 +22,37 @@ Amazon QuickStart
 
 
 1. **Choose an image:**
 1. **Choose an image:**
 
 
-   * Launch the `Create Instance Wizard` <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:> menu on your AWS Console
-   * Select "Community AMIs" option and serch for ``amd64 precise`` (click enter to search)
-   * If you choose a EBS enabled AMI you will be able to launch a `t1.micro` instance (more info on `pricing` <http://aws.amazon.com/en/ec2/pricing/> )
-   * When you click select you'll be taken to the instance setup, and you're one click away from having your Ubuntu VM up and running.
+   * Launch the `Create Instance Wizard
+     <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
+     on your AWS Console.
+
+   * When picking the source AMI for your instance type, select "Community
+     AMIs".
+
+   * Search for ``amd64 precise``. Pick one of the amd64 Ubuntu images.
+
+   * If you choose a EBS enabled AMI, you'll also be able to launch a
+     ``t1.micro`` instance (more info on `pricing
+     <http://aws.amazon.com/en/ec2/pricing/>`_).  ``t1.micro`` instances are
+     eligible for Amazon's Free Usage Tier.
+
+   * When you click select you'll be taken to the instance setup, and you're one
+     click away from having your Ubuntu VM up and running.
 
 
 2. **Tell CloudInit to install Docker:**
 2. **Tell CloudInit to install Docker:**
 
 
-   * Enter ``#include https://get.docker.io`` into the instance *User
-     Data*. `CloudInit <https://help.ubuntu.com/community/CloudInit>`_
-     is part of the Ubuntu image you chose and it bootstraps from this
-     *User Data*.
+   * When you're on the "Configure Instance Details" step, expand the "Advanced
+     Details" section.
+
+   * Under "User data", select "As text".
+
+   * Enter ``#include https://get.docker.io`` into the instance *User Data*.
+     `CloudInit <https://help.ubuntu.com/community/CloudInit>`_ is part of the
+     Ubuntu image you chose; it will bootstrap Docker by running the shell
+     script located at this URL.
 
 
-3. After a few more standard choices where defaults are probably ok, your
-   AWS Ubuntu instance with Docker should be running!
+3. After a few more standard choices where defaults are probably ok, your AWS
+   Ubuntu instance with Docker should be running!
 
 
 **If this is your first AWS instance, you may need to set up your
 **If this is your first AWS instance, you may need to set up your
 Security Group to allow SSH.** By default all incoming ports to your
 Security Group to allow SSH.** By default all incoming ports to your
@@ -152,7 +169,7 @@ Docker that way too. Vagrant 1.1 or higher is required.
    includes rights to SSH (port 22) to your container.
    includes rights to SSH (port 22) to your container.
 
 
    If you have an advanced AWS setup, you might want to have a look at
    If you have an advanced AWS setup, you might want to have a look at
-   https://github.com/mitchellh/vagrant-aws
+   `vagrant-aws <https://github.com/mitchellh/vagrant-aws>`_.
 
 
 7. Connect to your machine
 7. Connect to your machine
 
 

+ 7 - 12
docs/sources/installation/archlinux.rst

@@ -12,27 +12,28 @@ Arch Linux
 .. include:: install_unofficial.inc
 .. include:: install_unofficial.inc
 
 
 Installing on Arch Linux is not officially supported but can be handled via 
 Installing on Arch Linux is not officially supported but can be handled via 
-either of the following AUR packages:
+one of the following AUR packages:
 
 
 * `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
 * `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
 * `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
 * `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
+* `lxc-docker-nightly <https://aur.archlinux.org/packages/lxc-docker-nightly/>`_
 
 
 The lxc-docker package will install the latest tagged version of docker. 
 The lxc-docker package will install the latest tagged version of docker. 
 The lxc-docker-git package will build from the current master branch.
 The lxc-docker-git package will build from the current master branch.
+The lxc-docker-nightly package will install the latest build.
 
 
 Dependencies
 Dependencies
 ------------
 ------------
 
 
 Docker depends on several packages which are specified as dependencies in
 Docker depends on several packages which are specified as dependencies in
-either AUR package.
+the AUR packages. The core dependencies are:
 
 
-* aufs3
 * bridge-utils
 * bridge-utils
-* go
+* device-mapper
 * iproute2
 * iproute2
-* linux-aufs_friendly
 * lxc
 * lxc
 
 
+
 Installation
 Installation
 ------------
 ------------
 
 
@@ -41,20 +42,14 @@ The instructions here assume **yaourt** is installed.  See
 for information on building and installing packages from the AUR if you have not
 for information on building and installing packages from the AUR if you have not
 done so before.
 done so before.
 
 
-Keep in mind that if **linux-aufs_friendly** is not already installed that a
-new kernel will be compiled and this can take quite a while.
-
 ::
 ::
 
 
-    yaourt -S lxc-docker-git
+    yaourt -S lxc-docker
 
 
 
 
 Starting Docker
 Starting Docker
 ---------------
 ---------------
 
 
-Prior to starting docker modify your bootloader to use the 
-**linux-aufs_friendly** kernel and reboot your system.
-
 There is a systemd service unit created for docker.  To start the docker service:
 There is a systemd service unit created for docker.  To start the docker service:
 
 
 ::
 ::

+ 37 - 4
docs/sources/installation/fedora.rst

@@ -11,9 +11,42 @@ Fedora
 
 
 .. include:: install_unofficial.inc
 .. include:: install_unofficial.inc
 
 
-.. warning::
+Docker is available in **Fedora 19 and later**. Please note that due to the
+current Docker limitations Docker is able to run only on the **64 bit**
+architecture.
 
 
-   This is a placeholder for the Fedora installation instructions. Currently there is not an available
-   Docker package in the Fedora distribution. These packages are being built and should be available soon.
-   These instructions will be updated when the package is available.
+Installation
+------------
+
+Firstly, let's make sure our Fedora host is up-to-date.
+
+.. code-block:: bash
+
+    sudo yum -y upgrade
+
+Next let's install the ``docker-io`` package which will install Docker on our host.
+
+.. code-block:: bash
+
+   sudo yum -y install docker-io
+
+Now it's installed, let's start the Docker daemon.
+
+.. code-block:: bash
+
+    sudo systemctl start docker
+
+If we want Docker to start at boot we should also:
+
+.. code-block:: bash
+
+   sudo systemctl enable docker
+
+Now let's verify that Docker is working.
+
+.. code-block:: bash
+
+   sudo docker run -i -t ubuntu /bin/bash
+
+**Done!**, now continue with the :ref:`hello_world` example.
 
 

+ 65 - 0
docs/sources/installation/google.rst

@@ -0,0 +1,65 @@
+:title: Installation on Google Cloud Platform
+:description: Please note this project is currently under heavy development. It should not be used in production.
+:keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform
+
+`Google Cloud Platform <https://cloud.google.com/>`_
+====================================================
+
+.. include:: install_header.inc
+
+.. _googlequickstart:
+
+`Compute Engine <https://developers.google.com/compute>`_ QuickStart for `Debian <https://www.debian.org>`_
+-----------------------------------------------------------------------------------------------------------
+
+1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with billing enabled.
+
+2. Download and configure the `Google Cloud SDK <https://developers.google.com/cloud/sdk/>`_ to use your project with the following commands:
+
+.. code-block:: bash
+
+    $ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
+    $ gcloud auth login
+    Enter a cloud project id (or leave blank to not set): <google-cloud-project-id>
+
+3. Start a new instance, select a zone close to you and the desired instance size:
+
+.. code-block:: bash
+
+    $ gcutil addinstance docker-playground --image=backports-debian-7
+    1: europe-west1-a
+    ...
+    4: us-central1-b
+    >>> <zone-index>
+    1: machineTypes/n1-standard-1
+    ...
+    12: machineTypes/g1-small
+    >>> <machine-type-index>
+
+4. Connect to the instance using SSH:
+
+.. code-block:: bash
+
+    $ gcutil ssh docker-playground
+    docker-playground:~$ 
+
+5. Enable IP forwarding:
+
+.. code-block:: bash
+
+    docker-playground:~$ echo net.ipv4.ip_forward=1 | sudo tee /etc/sysctl.d/99-docker.conf 
+    docker-playground:~$ sudo sysctl --system
+
+6. Install the latest Docker release and configure it to start when the instance boots:
+
+.. code-block:: bash
+
+    docker-playground:~$ curl get.docker.io | bash
+    docker-playground:~$ sudo update-rc.d docker defaults
+
+7. Start a new container:
+
+.. code-block:: bash
+
+    docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
+    docker on GCE \o/

+ 2 - 0
docs/sources/installation/index.rst

@@ -18,6 +18,7 @@ Contents:
    :maxdepth: 1
    :maxdepth: 1
 
 
    ubuntulinux
    ubuntulinux
+   rhel
    fedora
    fedora
    archlinux
    archlinux
    gentoolinux
    gentoolinux
@@ -25,6 +26,7 @@ Contents:
    windows
    windows
    amazon
    amazon
    rackspace
    rackspace
+   google
    kernel
    kernel
    binaries
    binaries
    security
    security

+ 65 - 0
docs/sources/installation/rhel.rst

@@ -0,0 +1,65 @@
+:title: Requirements and Installation on Red Hat Enterprise Linux / CentOS
+:description: Please note this project is currently under heavy development. It should not be used in production.
+:keywords: Docker, Docker documentation, requirements, linux, rhel, centos
+
+.. _rhel:
+
+Red Hat Enterprise Linux / CentOS
+=================================
+
+.. include:: install_header.inc
+
+.. include:: install_unofficial.inc
+
+Docker is available for **RHEL/CentOS 6**.
+
+Please note that this package is part of the `Extra Packages for Enterprise Linux (EPEL)`_ repository, a community effort to create and maintain additional packages for the RHEL distribution.
+
+Please note that due to the current Docker limitations Docker is able to run only on the **64 bit** architecture.
+
+Installation
+------------
+
+1. Firstly, let's make sure our RHEL host is up-to-date.
+
+.. code-block:: bash
+
+    sudo yum -y upgrade
+
+2. Next you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.
+
+3. Next let's install the ``docker-io`` package which will install Docker on our host.
+
+.. code-block:: bash
+
+   sudo yum -y install docker-io
+
+4. Now it's installed, let's start the Docker daemon.
+
+.. code-block:: bash
+
+    sudo service docker start
+
+If we want Docker to start at boot we should also:
+
+.. code-block:: bash
+
+   sudo chkconfig docker on
+
+5. Now let's verify that Docker is working.
+
+.. code-block:: bash
+
+   sudo docker run -i -t ubuntu /bin/bash
+
+**Done!**, now continue with the :ref:`hello_world` example.
+
+Issues?
+-------
+
+If you have any issues - please report them directly in the `Red Hat Bugzilla for docker-io component`_.
+
+.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL
+.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
+.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io
+

+ 8 - 0
docs/sources/installation/ubuntulinux.rst

@@ -86,6 +86,14 @@ continue installation.*
    sudo apt-get update
    sudo apt-get update
    sudo apt-get install lxc-docker
    sudo apt-get install lxc-docker
 
 
+.. note::
+
+    There is also a simple ``curl`` script available to help with this process.
+
+    .. code-block:: bash
+
+        curl -s http://get.docker.io/ubuntu/ | sudo sh
+
 Now verify that the installation has worked by downloading the ``ubuntu`` image
 Now verify that the installation has worked by downloading the ``ubuntu`` image
 and launching a container.
 and launching a container.
 
 

+ 182 - 0
docs/sources/use/ambassador_pattern_linking.rst

@@ -0,0 +1,182 @@
+:title: Ambassador pattern linking
+:description: Using the Ambassador pattern to abstract (network) services
+:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
+
+.. _ambassador_pattern_linking:
+
+Ambassador pattern linking
+==========================
+
+Rather than hardcoding network links between a service consumer and provider, Docker
+encourages service portability.
+
+eg, instead of
+
+.. code-block:: bash
+
+	(consumer) --> (redis)
+
+requiring you to restart the ``consumer`` to attach it to a different ``redis`` service, 
+you can add ambassadors
+
+.. code-block:: bash
+
+	(consumer) --> (redis-ambassador) --> (redis)
+
+	or
+
+	(consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)
+
+When you need to rewire your consumer to talk to a different redis server, you
+can just restart the ``redis-ambassador`` container that the consumer is connected to.
+
+This pattern also allows you to transparently move the redis server to a different
+docker host from the consumer.
+
+Using the ``svendowideit/ambassador`` container, the link wiring is controlled entirely 
+from the ``docker run`` parameters.
+
+Two host Example
+----------------
+
+Start actual redis server on one Docker host
+
+.. code-block:: bash
+
+	big-server $ docker run -d -name redis crosbymichael/redis
+
+Then add an ambassador linked to the redis server, mapping a port to the outside world
+
+.. code-block:: bash
+
+	big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
+
+On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the ``big-server``
+
+.. code-block:: bash
+
+	client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
+
+Then on the ``client-server`` host, you can use a redis client container to talk 
+to the remote redis server, just by linking to the local redis ambassador.
+
+.. code-block:: bash
+
+	client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+	redis 172.17.0.160:6379> ping
+	PONG
+
+
+
+How it works
+------------
+
+The following example shows what the ``svendowideit/ambassador`` container does 
+automatically (with a tiny amount of ``sed``)
+
+On the docker host (192.168.1.52) that redis will run on:
+
+.. code-block:: bash
+
+	# start actual redis server
+	$ docker run -d -name redis crosbymichael/redis
+
+	# get a redis-cli container for connection testing	
+	$ docker pull relateiq/redis-cli
+
+	# test the redis server by talking to it directly
+	$ docker run -t -i -rm -link redis:redis relateiq/redis-cli
+	redis 172.17.0.136:6379> ping
+	PONG
+	^D
+	
+	# add redis ambassador
+	$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
+	
+In the redis_ambassador container, you can see the linked redis container's environment
+
+.. code-block:: bash
+
+	$ env
+	REDIS_PORT=tcp://172.17.0.136:6379
+	REDIS_PORT_6379_TCP_ADDR=172.17.0.136
+	REDIS_NAME=/redis_ambassador/redis
+	HOSTNAME=19d7adf4705e
+	REDIS_PORT_6379_TCP_PORT=6379
+	HOME=/
+	REDIS_PORT_6379_TCP_PROTO=tcp
+	container=lxc
+	REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379
+	TERM=xterm
+	PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+	PWD=/
+	
+	
+This environment is used by the ambassador socat script to expose redis to the world 
+(via the -p 6379:6379 port mapping)
+
+.. code-block:: bash
+
+	$ docker rm redis_ambassador
+	$ sudo ./contrib/mkimage-unittest.sh
+	$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
+	
+	$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
+	
+then ping the redis server via the ambassador
+
+.. code-block:: bash
+
+	$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+	redis 172.17.0.160:6379> ping
+	PONG
+
+Now go to a different server
+
+.. code-block:: bash
+
+	$ sudo ./contrib/mkimage-unittest.sh
+	$ docker run -t -i  -expose 6379 -name redis_ambassador docker-ut sh
+	
+	$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
+
+and get the redis-cli image so we can talk over the ambassador bridge
+
+.. code-block:: bash
+
+	$ docker pull relateiq/redis-cli
+	$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+	redis 172.17.0.160:6379> ping
+	PONG
+
+The svendowideit/ambassador Dockerfile
+--------------------------------------
+
+The ``svendowideit/ambassador`` image is a small busybox image with ``socat`` built in.
+When you start the container, it uses a small ``sed`` script to parse out the (possibly multiple)
+link environment variables to set up the port forwarding. On the remote host, you need to set the 
+variable using the ``-e`` command line option.
+
+``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the 
+local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
+
+
+.. code-block:: Dockerfile
+
+	#
+	#
+	# first you need to build the docker-ut image using ./contrib/mkimage-unittest.sh
+	# then 
+	#   docker build -t SvenDowideit/ambassador .
+	#   docker tag SvenDowideit/ambassador ambassador
+	# then to run it (on the host that has the real backend on it)
+	#   docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
+	# on the remote host, you can set up another ambassador
+	#    docker run -t -i -name redis_ambassador -expose 6379 sh
+
+	FROM	docker-ut
+	MAINTAINER	SvenDowideit@home.org.au
+
+
+	CMD	env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/'  | sh && top
+

+ 3 - 3
docs/sources/use/baseimages.rst

@@ -37,7 +37,7 @@ There are more example scripts for creating base images in the
 Docker Github Repo:
 Docker Github Repo:
 
 
 * `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
 * `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
-* `CentOS
-  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-centos.sh>`_
-* `Debian/Ubuntu
+* `CentOS / Scientific Linux CERN (SLC)
+  <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
+* `Debian / Ubuntu
   <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_
   <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_

+ 1 - 0
docs/sources/use/index.rst

@@ -21,3 +21,4 @@ Contents:
    host_integration
    host_integration
    working_with_volumes
    working_with_volumes
    working_with_links_names
    working_with_links_names
+   ambassador_pattern_linking

+ 11 - 2
docs/sources/use/working_with_links_names.rst

@@ -54,9 +54,9 @@ inter-container communication is set to false.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-    # Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
+    # Example: there is an image called crosbymichael/redis that exposes the port 6379 and starts redis-server.
     # Let's name the container as "redis" based on that image and run it as daemon.
     # Let's name the container as "redis" based on that image and run it as daemon.
-    $ sudo docker run -d -name redis redis-2.6
+    $ sudo docker run -d -name redis crosbymichael/redis
 
 
 We can issue all the commands that you would expect using the name "redis"; start, stop,
 We can issue all the commands that you would expect using the name "redis"; start, stop,
 attach, using the name for our container. The name also allows us to link other containers
 attach, using the name for our container. The name also allows us to link other containers
@@ -102,3 +102,12 @@ about the child container.
 
 
 Accessing the network information along with the environment of the child container allows
 Accessing the network information along with the environment of the child container allows
 us to easily connect to the Redis service on the specific IP and port in the environment.
 us to easily connect to the Redis service on the specific IP and port in the environment.
+
+Running ``docker ps`` shows the 2 containers, and the webapp/db alias name for the redis container.
+
+.. code-block:: bash
+
+    $ docker ps
+    CONTAINER ID        IMAGE                        COMMAND                CREATED              STATUS              PORTS               NAMES
+    4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds                           webapp              
+    d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago       Up 33 minutes       6379/tcp            redis,webapp/db     

+ 9 - 5
engine/engine.go

@@ -9,7 +9,7 @@ import (
 	"strings"
 	"strings"
 )
 )
 
 
-type Handler func(*Job) string
+type Handler func(*Job) Status
 
 
 var globalHandlers map[string]Handler
 var globalHandlers map[string]Handler
 
 
@@ -70,7 +70,9 @@ func New(root string) (*Engine, error) {
 		log.Printf("WARNING: %s\n", err)
 		log.Printf("WARNING: %s\n", err)
 	} else {
 	} else {
 		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
 		if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
-			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
+				log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+			}
 		}
 		}
 	}
 	}
 	if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
 	if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
@@ -99,10 +101,12 @@ func (eng *Engine) Job(name string, args ...string) *Job {
 		Eng:    eng,
 		Eng:    eng,
 		Name:   name,
 		Name:   name,
 		Args:   args,
 		Args:   args,
-		Stdin:  os.Stdin,
-		Stdout: os.Stdout,
-		Stderr: os.Stderr,
+		Stdin:  NewInput(),
+		Stdout: NewOutput(),
+		Stderr: NewOutput(),
 	}
 	}
+	job.Stdout.Add(utils.NopWriteCloser(os.Stdout))
+	job.Stderr.Add(utils.NopWriteCloser(os.Stderr))
 	handler, exists := eng.handlers[name]
 	handler, exists := eng.handlers[name]
 	if exists {
 	if exists {
 		job.handler = handler
 		job.handler = handler

+ 51 - 3
engine/engine_test.go

@@ -1,6 +1,9 @@
 package engine
 package engine
 
 
 import (
 import (
+	"io/ioutil"
+	"os"
+	"path"
 	"testing"
 	"testing"
 )
 )
 
 
@@ -38,8 +41,9 @@ func TestJob(t *testing.T) {
 		t.Fatalf("job1.handler should be empty")
 		t.Fatalf("job1.handler should be empty")
 	}
 	}
 
 
-	h := func(j *Job) string {
-		return j.Name
+	h := func(j *Job) Status {
+		j.Printf("%s\n", j.Name)
+		return 42
 	}
 	}
 
 
 	eng.Register("dummy2", h)
 	eng.Register("dummy2", h)
@@ -49,7 +53,51 @@ func TestJob(t *testing.T) {
 		t.Fatalf("job2.handler shouldn't be nil")
 		t.Fatalf("job2.handler shouldn't be nil")
 	}
 	}
 
 
-	if job2.handler(job2) != job2.Name {
+	if job2.handler(job2) != 42 {
 		t.Fatalf("handler dummy2 was not found in job2")
 		t.Fatalf("handler dummy2 was not found in job2")
 	}
 	}
 }
 }
+
+func TestEngineRoot(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	dir := path.Join(tmp, "dir")
+	eng, err := New(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if st, err := os.Stat(dir); err != nil {
+		t.Fatal(err)
+	} else if !st.IsDir() {
+		t.Fatalf("engine.New() created something other than a directory at %s", dir)
+	}
+	if r := eng.Root(); r != dir {
+		t.Fatalf("Expected: %v\nReceived: %v", dir, r)
+	}
+}
+
+func TestEngineString(t *testing.T) {
+	eng1 := newTestEngine(t)
+	defer os.RemoveAll(eng1.Root())
+	eng2 := newTestEngine(t)
+	defer os.RemoveAll(eng2.Root())
+	s1 := eng1.String()
+	s2 := eng2.String()
+	if eng1 == eng2 {
+		t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
+	}
+}
+
+func TestEngineLogf(t *testing.T) {
+	eng := newTestEngine(t)
+	defer os.RemoveAll(eng.Root())
+	input := "Test log line"
+	if n, err := eng.Logf("%s\n", input); err != nil {
+		t.Fatal(err)
+	} else if n < len(input) {
+		t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n)
+	}
+}

+ 3 - 3
engine/env_test.go

@@ -37,16 +37,16 @@ func TestSetenvBool(t *testing.T) {
 	job := mkJob(t, "dummy")
 	job := mkJob(t, "dummy")
 	job.SetenvBool("foo", true)
 	job.SetenvBool("foo", true)
 	if val := job.GetenvBool("foo"); !val {
 	if val := job.GetenvBool("foo"); !val {
-		t.Fatalf("GetenvBool returns incorrect value: %b", val)
+		t.Fatalf("GetenvBool returns incorrect value: %t", val)
 	}
 	}
 
 
 	job.SetenvBool("bar", false)
 	job.SetenvBool("bar", false)
 	if val := job.GetenvBool("bar"); val {
 	if val := job.GetenvBool("bar"); val {
-		t.Fatalf("GetenvBool returns incorrect value: %b", val)
+		t.Fatalf("GetenvBool returns incorrect value: %t", val)
 	}
 	}
 
 
 	if val := job.GetenvBool("nonexistent"); val {
 	if val := job.GetenvBool("nonexistent"); val {
-		t.Fatalf("GetenvBool returns incorrect value: %b", val)
+		t.Fatalf("GetenvBool returns incorrect value: %t", val)
 	}
 	}
 }
 }
 
 

+ 2 - 16
engine/helpers_test.go

@@ -1,32 +1,18 @@
 package engine
 package engine
 
 
 import (
 import (
-	"fmt"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
-	"io/ioutil"
-	"runtime"
-	"strings"
 	"testing"
 	"testing"
 )
 )
 
 
 var globalTestID string
 var globalTestID string
 
 
 func newTestEngine(t *testing.T) *Engine {
 func newTestEngine(t *testing.T) *Engine {
-	// Use the caller function name as a prefix.
-	// This helps trace temp directories back to their test.
-	pc, _, _, _ := runtime.Caller(1)
-	callerLongName := runtime.FuncForPC(pc).Name()
-	parts := strings.Split(callerLongName, ".")
-	callerShortName := parts[len(parts)-1]
-	if globalTestID == "" {
-		globalTestID = utils.RandomString()[:4]
-	}
-	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
-	root, err := ioutil.TempDir("", prefix)
+	tmp, err := utils.TestDirectory("")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	eng, err := New(root)
+	eng, err := New(tmp)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}

+ 47 - 98
engine/job.go

@@ -1,16 +1,13 @@
 package engine
 package engine
 
 
 import (
 import (
-	"bufio"
 	"bytes"
 	"bytes"
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
-	"io/ioutil"
-	"os"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
-	"sync"
+	"time"
 )
 )
 
 
 // A job is the fundamental unit of work in the docker engine.
 // A job is the fundamental unit of work in the docker engine.
@@ -31,126 +28,75 @@ type Job struct {
 	Name    string
 	Name    string
 	Args    []string
 	Args    []string
 	env     []string
 	env     []string
-	Stdin   io.Reader
-	Stdout  io.Writer
-	Stderr  io.Writer
-	handler func(*Job) string
-	status  string
+	Stdout  *Output
+	Stderr  *Output
+	Stdin   *Input
+	handler Handler
+	status  Status
+	end     time.Time
 	onExit  []func()
 	onExit  []func()
 }
 }
 
 
+type Status int
+
+const (
+	StatusOK       Status = 0
+	StatusErr      Status = 1
+	StatusNotFound Status = 127
+)
+
 // Run executes the job and blocks until the job completes.
 // Run executes the job and blocks until the job completes.
 // If the job returns a failure status, an error is returned
 // If the job returns a failure status, an error is returned
 // which includes the status.
 // which includes the status.
 func (job *Job) Run() error {
 func (job *Job) Run() error {
-	defer func() {
-		var wg sync.WaitGroup
-		for _, f := range job.onExit {
-			wg.Add(1)
-			go func(f func()) {
-				f()
-				wg.Done()
-			}(f)
-		}
-		wg.Wait()
-	}()
-	if job.Stdout != nil && job.Stdout != os.Stdout {
-		job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
-	}
-	if job.Stderr != nil && job.Stderr != os.Stderr {
-		job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
+	// FIXME: make this thread-safe
+	// FIXME: implement wait
+	if !job.end.IsZero() {
+		return fmt.Errorf("%s: job has already completed", job.Name)
 	}
 	}
+	// Log beginning and end of the job
 	job.Eng.Logf("+job %s", job.CallString())
 	job.Eng.Logf("+job %s", job.CallString())
 	defer func() {
 	defer func() {
 		job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
 		job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
 	}()
 	}()
+	var errorMessage string
+	job.Stderr.AddString(&errorMessage)
 	if job.handler == nil {
 	if job.handler == nil {
-		job.status = "command not found"
+		job.Errorf("%s: command not found", job.Name)
+		job.status = 127
 	} else {
 	} else {
 		job.status = job.handler(job)
 		job.status = job.handler(job)
+		job.end = time.Now()
 	}
 	}
-	if job.status != "0" {
-		return fmt.Errorf("%s: %s", job.Name, job.status)
+	// Wait for all background tasks to complete
+	if err := job.Stdout.Close(); err != nil {
+		return err
+	}
+	if err := job.Stderr.Close(); err != nil {
+		return err
+	}
+	if job.status != 0 {
+		return fmt.Errorf("%s: %s", job.Name, errorMessage)
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func (job *Job) StdoutParseLines(dst *[]string, limit int) {
-	job.parseLines(job.StdoutPipe(), dst, limit)
-}
-
-func (job *Job) StderrParseLines(dst *[]string, limit int) {
-	job.parseLines(job.StderrPipe(), dst, limit)
-}
-
-func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
-	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		scanner := bufio.NewScanner(src)
-		for scanner.Scan() {
-			// If the limit is reached, flush the rest of the source and return
-			if limit > 0 && len(*dst) >= limit {
-				io.Copy(ioutil.Discard, src)
-				return
-			}
-			line := scanner.Text()
-			// Append the line (with delimitor removed)
-			*dst = append(*dst, line)
-		}
-	}()
-	job.onExit = append(job.onExit, wg.Wait)
-}
-
-func (job *Job) StdoutParseString(dst *string) {
-	lines := make([]string, 0, 1)
-	job.StdoutParseLines(&lines, 1)
-	job.onExit = append(job.onExit, func() {
-		if len(lines) >= 1 {
-			*dst = lines[0]
-		}
-	})
-}
-
-func (job *Job) StderrParseString(dst *string) {
-	lines := make([]string, 0, 1)
-	job.StderrParseLines(&lines, 1)
-	job.onExit = append(job.onExit, func() { *dst = lines[0] })
-}
-
-func (job *Job) StdoutPipe() io.ReadCloser {
-	r, w := io.Pipe()
-	job.Stdout = w
-	job.onExit = append(job.onExit, func() { w.Close() })
-	return r
-}
-
-func (job *Job) StderrPipe() io.ReadCloser {
-	r, w := io.Pipe()
-	job.Stderr = w
-	job.onExit = append(job.onExit, func() { w.Close() })
-	return r
-}
-
 func (job *Job) CallString() string {
 func (job *Job) CallString() string {
 	return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
 	return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
 }
 }
 
 
 func (job *Job) StatusString() string {
 func (job *Job) StatusString() string {
-	// FIXME: if a job returns the empty string, it will be printed
-	// as not having returned.
-	// (this only affects String which is a convenience function).
-	if job.status != "" {
-		var okerr string
-		if job.status == "0" {
-			okerr = "OK"
-		} else {
-			okerr = "ERR"
-		}
-		return fmt.Sprintf(" = %s (%s)", okerr, job.status)
+	// If the job hasn't completed, status string is empty
+	if job.end.IsZero() {
+		return ""
+	}
+	var okerr string
+	if job.status == StatusOK {
+		okerr = "OK"
+	} else {
+		okerr = "ERR"
 	}
 	}
-	return ""
+	return fmt.Sprintf(" = %s (%d)", okerr, job.status)
 }
 }
 
 
 // String returns a human-readable description of `job`
 // String returns a human-readable description of `job`
@@ -338,5 +284,8 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
 
 
 func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
 func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
 	return fmt.Fprintf(job.Stderr, format, args...)
 	return fmt.Fprintf(job.Stderr, format, args...)
+}
 
 
+func (job *Job) Error(err error) (int, error) {
+	return fmt.Fprintf(job.Stderr, "%s", err)
 }
 }

+ 80 - 0
engine/job_test.go

@@ -0,0 +1,80 @@
+package engine
+
+import (
+	"os"
+	"testing"
+)
+
+// TestJobStatusOK checks that a handler returning StatusOK makes Run() return nil.
+func TestJobStatusOK(t *testing.T) {
+	eng := newTestEngine(t)
+	defer os.RemoveAll(eng.Root())
+	eng.Register("return_ok", func(job *Job) Status { return StatusOK })
+	err := eng.Job("return_ok").Run()
+	if err != nil {
+		t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
+	}
+}
+
+// TestJobStatusErr checks that a handler returning StatusErr makes Run() return an error.
+func TestJobStatusErr(t *testing.T) {
+	eng := newTestEngine(t)
+	defer os.RemoveAll(eng.Root())
+	eng.Register("return_err", func(job *Job) Status { return StatusErr })
+	err := eng.Job("return_err").Run()
+	if err == nil {
+		t.Fatalf("When a job returns StatusErr, Run() should return an error")
+	}
+}
+
+// TestJobStatusNotFound checks that StatusNotFound (127) is also surfaced as an error by Run().
+func TestJobStatusNotFound(t *testing.T) {
+	eng := newTestEngine(t)
+	defer os.RemoveAll(eng.Root())
+	eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
+	err := eng.Job("return_not_found").Run()
+	if err == nil {
+		t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
+	}
+}
+
+// TestJobStdoutString checks that Stdout.AddString captures the last line a
+// handler prints to the job's stdout (available once Run has returned).
+func TestJobStdoutString(t *testing.T) {
+	eng := newTestEngine(t)
+	defer os.RemoveAll(eng.Root())
+	// FIXME: test multiple combinations of output and status
+	eng.Register("say_something_in_stdout", func(job *Job) Status {
+		job.Printf("Hello world\n")
+		return StatusOK
+	})
+
+	job := eng.Job("say_something_in_stdout")
+	var output string
+	if err := job.Stdout.AddString(&output); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	if expectedOutput := "Hello world"; output != expectedOutput {
+		t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
+	}
+}
+
+// TestJobStderrString checks that Stderr.AddString captures only the LAST of
+// several lines a handler writes to the job's stderr.
+func TestJobStderrString(t *testing.T) {
+	eng := newTestEngine(t)
+	defer os.RemoveAll(eng.Root())
+	// FIXME: test multiple combinations of output and status
+	eng.Register("say_something_in_stderr", func(job *Job) Status {
+		job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n")
+		return StatusOK
+	})
+
+	job := eng.Job("say_something_in_stderr")
+	var output string
+	if err := job.Stderr.AddString(&output); err != nil {
+		t.Fatal(err)
+	}
+	if err := job.Run(); err != nil {
+		t.Fatal(err)
+	}
+	if expectedOutput := "Something happened"; output != expectedOutput {
+		t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
+	}
+}

+ 166 - 0
engine/streams.go

@@ -0,0 +1,166 @@
+package engine
+
+import (
+	"bufio"
+	"container/ring"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// Output fans a single stream of writes out to a dynamic set of destinations.
+// The zero value is usable.
+type Output struct {
+	sync.Mutex
+	dests []io.Writer     // destinations that receive every Write
+	tasks sync.WaitGroup  // background AddTail/AddString readers, awaited in Close
+}
+
+// NewOutput returns a new Output object with no destinations attached.
+// Writing to an empty Output will cause the written data to be discarded.
+func NewOutput() *Output {
+	return &Output{}
+}
+
+// Add attaches a new destination to the Output. Any data subsequently written
+// to the output will be written to the new destination in addition to all the others.
+// This method is thread-safe.
+// FIXME: Add cannot fail
+func (o *Output) Add(dst io.Writer) error {
+	o.Mutex.Lock()
+	defer o.Mutex.Unlock()
+	o.dests = append(o.dests, dst)
+	return nil
+}
+
+// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
+// and returns its reading end for consumption by the caller.
+// This is a rough equivalent to Cmd.StdoutPipe() in the standard os/exec package.
+// This method is thread-safe.
+func (o *Output) AddPipe() (io.Reader, error) {
+	r, w := io.Pipe()
+	// Error deliberately ignored: Add cannot fail (see FIXME above).
+	o.Add(w)
+	return r, nil
+}
+
+// AddTail starts a new goroutine which will read all subsequent data written to the output,
+// line by line, and append the last `n` lines to `dst`.
+// Call Close to wait for the goroutine and guarantee `dst` is fully populated.
+func (o *Output) AddTail(dst *[]string, n int) error {
+	src, err := o.AddPipe()
+	if err != nil {
+		return err
+	}
+	o.tasks.Add(1)
+	go func() {
+		defer o.tasks.Done()
+		Tail(src, n, dst)
+	}()
+	return nil
+}
+
+// AddString starts a new goroutine which will read all subsequent data written to the output,
+// line by line, and store the last line into `dst`.
+// Call Close to wait for the goroutine and guarantee `dst` is set.
+func (o *Output) AddString(dst *string) error {
+	src, err := o.AddPipe()
+	if err != nil {
+		return err
+	}
+	o.tasks.Add(1)
+	go func() {
+		defer o.tasks.Done()
+		lines := make([]string, 0, 1)
+		Tail(src, 1, &lines)
+		if len(lines) == 0 {
+			// Nothing at all was written: report the empty string.
+			*dst = ""
+		} else {
+			*dst = lines[0]
+		}
+	}()
+	return nil
+}
+
+// Write writes the same data to all registered destinations.
+// This method is thread-safe.
+// It always reports len(p) bytes written, even if a destination failed;
+// only the first destination error encountered is returned.
+func (o *Output) Write(p []byte) (n int, err error) {
+	o.Mutex.Lock()
+	defer o.Mutex.Unlock()
+	var firstErr error
+	for _, dst := range o.dests {
+		_, err := dst.Write(p)
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return len(p), firstErr
+}
+
+// Close unregisters all destinations and waits for all background
+// AddTail and AddString tasks to complete.
+// The Close method of each destination is called if it exists.
+// NOTE(review): o.dests is not actually cleared here, so subsequent Writes
+// still reach the destinations — confirm whether "unregisters" is intended.
+func (o *Output) Close() error {
+	o.Mutex.Lock()
+	defer o.Mutex.Unlock()
+	var firstErr error
+	for _, dst := range o.dests {
+		if closer, ok := dst.(io.WriteCloser); ok {
+			err := closer.Close()
+			if err != nil && firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+	// Blocks until every AddTail/AddString goroutine has drained its pipe.
+	o.tasks.Wait()
+	return firstErr
+}
+
+// Input wraps at most one source reader behind a mutex.
+type Input struct {
+	src io.Reader // single source; nil until Add is called
+	sync.Mutex
+}
+
+// NewInput returns a new Input object with no source attached.
+// Reading to an empty Input will return io.EOF.
+func NewInput() *Input {
+	return &Input{}
+}
+
+// Read reads from the input in a thread-safe way.
+func (i *Input) Read(p []byte) (n int, err error) {
+	i.Mutex.Lock()
+	defer i.Mutex.Unlock()
+	if i.src == nil {
+		// No source attached: behave like an empty stream.
+		return 0, io.EOF
+	}
+	return i.src.Read(p)
+}
+
+// Add attaches a new source to the input.
+// Add can only be called once per input. Subsequent calls will
+// return an error.
+func (i *Input) Add(src io.Reader) error {
+	i.Mutex.Lock()
+	defer i.Mutex.Unlock()
+	if i.src != nil {
+		// A source is already attached; Input supports exactly one.
+		return fmt.Errorf("Maximum number of sources reached: 1")
+	}
+	i.src = src
+	return nil
+}
+
+// Tail reads from `src` line per line, and returns the last `n` lines as an array.
+// A ring buffer is used to only store `n` lines at any time.
+// When n == 0 the loop still scans `src` to EOF, recording nothing.
+func Tail(src io.Reader, n int, dst *[]string) {
+	scanner := bufio.NewScanner(src)
+	r := ring.New(n)
+	for scanner.Scan() {
+		if n == 0 {
+			// Keep consuming input even though nothing is recorded.
+			continue
+		}
+		r.Value = scanner.Text()
+		r = r.Next()
+	}
+	// Walk the ring in order; unfilled slots hold nil and are skipped.
+	r.Do(func(v interface{}) {
+		if v == nil {
+			return
+		}
+		*dst = append(*dst, v.(string))
+	})
+}

+ 274 - 0
engine/streams_test.go

@@ -0,0 +1,274 @@
+package engine
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+// TestOutputAddString verifies that AddString captures the last (possibly
+// unterminated) line written to an Output; the value is only guaranteed
+// after Close().
+func TestOutputAddString(t *testing.T) {
+	// Each entry is {input, expected last line}.
+	var testInputs = [][2]string{
+		{
+			"hello, world!",
+			"hello, world!",
+		},
+
+		{
+			"One\nTwo\nThree",
+			"Three",
+		},
+
+		{
+			"",
+			"",
+		},
+
+		{
+			"A line\nThen another nl-terminated line\n",
+			"Then another nl-terminated line",
+		},
+
+		{
+			"A line followed by an empty line\n\n",
+			"",
+		},
+	}
+	for _, testData := range testInputs {
+		input := testData[0]
+		expectedOutput := testData[1]
+		o := NewOutput()
+		var output string
+		if err := o.AddString(&output); err != nil {
+			t.Error(err)
+		}
+		if n, err := o.Write([]byte(input)); err != nil {
+			t.Error(err)
+		} else if n != len(input) {
+			t.Errorf("Expected %d, got %d", len(input), n)
+		}
+		o.Close()
+		if output != expectedOutput {
+			t.Errorf("Last line is not stored as return string.\nInput:   '%s'\nExpected: '%s'\nGot:       '%s'", input, expectedOutput, output)
+		}
+	}
+}
+
+// sentinelWriteCloser records whether Write and Close were ever invoked, for
+// asserting on how Output handles its destinations.
+type sentinelWriteCloser struct {
+	calledWrite bool
+	calledClose bool
+}
+
+func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
+	w.calledWrite = true
+	return len(p), nil
+}
+
+func (w *sentinelWriteCloser) Close() error {
+	w.calledClose = true
+	return nil
+}
+
+// TestOutputAddClose verifies that Output.Close calls Close on destinations
+// implementing io.WriteCloser.
+func TestOutputAddClose(t *testing.T) {
+	o := NewOutput()
+	var s sentinelWriteCloser
+	if err := o.Add(&s); err != nil {
+		t.Fatal(err)
+	}
+	if err := o.Close(); err != nil {
+		t.Fatal(err)
+	}
+	// Write data after the output is closed.
+	// Write should succeed, but no destination should receive it.
+	// NOTE(review): only calledClose is asserted below; whether the sentinel
+	// received the post-Close write is not checked — confirm intent.
+	if _, err := o.Write([]byte("foo bar")); err != nil {
+		t.Fatal(err)
+	}
+	if !s.calledClose {
+		t.Fatal("Output.Close() didn't close the destination")
+	}
+}
+
+// TestOutputAddPipe verifies that data written to an Output is readable
+// verbatim from the pipe reader returned by AddPipe.
+func TestOutputAddPipe(t *testing.T) {
+	var testInputs = []string{
+		"hello, world!",
+		"One\nTwo\nThree",
+		"",
+		"A line\nThen another nl-terminated line\n",
+		"A line followed by an empty line\n\n",
+	}
+	for _, input := range testInputs {
+		expectedOutput := input
+		o := NewOutput()
+		r, err := o.AddPipe()
+		if err != nil {
+			t.Fatal(err)
+		}
+		// The writer must run concurrently or the pipe would deadlock.
+		// Capturing the loop variable `input` is safe here: ReadAll below
+		// only returns after this goroutine's Close, i.e. before the next
+		// iteration starts.
+		go func(o *Output) {
+			if n, err := o.Write([]byte(input)); err != nil {
+				t.Error(err)
+			} else if n != len(input) {
+				t.Errorf("Expected %d, got %d", len(input), n)
+			}
+			if err := o.Close(); err != nil {
+				t.Error(err)
+			}
+		}(o)
+		output, err := ioutil.ReadAll(r)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if string(output) != expectedOutput {
+			t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot:       '%s'", expectedOutput, output)
+		}
+	}
+}
+
+// TestTail checks Tail against fixed inputs for n = 0..3: for each input the
+// expected result is its last n lines (all lines once n exceeds the count).
+func TestTail(t *testing.T) {
+	var tests = make(map[string][][]string)
+	tests["hello, world!"] = [][]string{
+		{},
+		{"hello, world!"},
+		{"hello, world!"},
+		{"hello, world!"},
+	}
+	tests["One\nTwo\nThree"] = [][]string{
+		{},
+		{"Three"},
+		{"Two", "Three"},
+		{"One", "Two", "Three"},
+	}
+	for input, outputs := range tests {
+		for n, expectedOutput := range outputs {
+			var output []string
+			Tail(strings.NewReader(input), n, &output)
+			if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
+				// BUG FIX: the format string has three verbs (%d, %s, %s) but
+				// only two arguments were passed; `n` was missing.
+				t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot     : '%s'", n, expectedOutput, output)
+			}
+		}
+	}
+}
+
+// TestOutputAddTail verifies that AddTail collects the last n lines written to
+// an Output, for n = 0..3; results are only guaranteed after Close().
+func TestOutputAddTail(t *testing.T) {
+	// Map of input -> expected tails, indexed by n.
+	var tests = make(map[string][][]string)
+	tests["hello, world!"] = [][]string{
+		{},
+		{"hello, world!"},
+		{"hello, world!"},
+		{"hello, world!"},
+	}
+	tests["One\nTwo\nThree"] = [][]string{
+		{},
+		{"Three"},
+		{"Two", "Three"},
+		{"One", "Two", "Three"},
+	}
+	for input, outputs := range tests {
+		for n, expectedOutput := range outputs {
+			o := NewOutput()
+			var output []string
+			if err := o.AddTail(&output, n); err != nil {
+				t.Error(err)
+			}
+			if n, err := o.Write([]byte(input)); err != nil {
+				t.Error(err)
+			} else if n != len(input) {
+				t.Errorf("Expected %d, got %d", len(input), n)
+			}
+			o.Close()
+			if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
+				t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot:      %v", n, expectedOutput, output)
+			}
+		}
+	}
+}
+
+// lastLine returns the final line of txt (without its newline), or "" for
+// empty input.
+// NOTE(review): appears unused by the tests visible in this file.
+func lastLine(txt string) string {
+	scanner := bufio.NewScanner(strings.NewReader(txt))
+	var lastLine string
+	for scanner.Scan() {
+		lastLine = scanner.Text()
+	}
+	return lastLine
+}
+
+// TestOutputAdd verifies that a plain writer attached with Add receives
+// exactly the bytes passed to Write.
+func TestOutputAdd(t *testing.T) {
+	o := NewOutput()
+	b := &bytes.Buffer{}
+	o.Add(b)
+	input := "hello, world!"
+	if n, err := o.Write([]byte(input)); err != nil {
+		t.Fatal(err)
+	} else if n != len(input) {
+		t.Fatalf("Expected %d, got %d", len(input), n)
+	}
+	if output := b.String(); output != input {
+		// BUG FIX: t.Fatal does not interpret format verbs; use t.Fatalf so
+		// the %s placeholders are actually substituted.
+		t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot:     '%s'", input, output)
+	}
+}
+
+// TestOutputWriteError verifies Write's error semantics with two destinations,
+// one healthy (buf) and one guaranteed to fail (a pipe whose read end is
+// closed with an error): the first error is returned, every destination is
+// still attempted, and n == len(input) regardless.
+func TestOutputWriteError(t *testing.T) {
+	o := NewOutput()
+	buf := &bytes.Buffer{}
+	o.Add(buf)
+	r, w := io.Pipe()
+	input := "Hello there"
+	expectedErr := fmt.Errorf("This is an error")
+	// Closing the read end with an error makes every write to w fail with it.
+	r.CloseWithError(expectedErr)
+	o.Add(w)
+	n, err := o.Write([]byte(input))
+	if err != expectedErr {
+		t.Fatalf("Output.Write() should return the first error encountered, if any")
+	}
+	if buf.String() != input {
+		t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
+	}
+	if n != len(input) {
+		t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
+	}
+}
+
+// TestInputAddEmpty verifies that reading an Input whose attached source is an
+// empty buffer yields no data.
+func TestInputAddEmpty(t *testing.T) {
+	i := NewInput()
+	var b bytes.Buffer
+	if err := i.Add(&b); err != nil {
+		t.Fatal(err)
+	}
+	data, err := ioutil.ReadAll(i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(data) > 0 {
+		// Typo fix in failure message: "shoul" -> "should".
+		t.Fatalf("Read from empty input should yield no data")
+	}
+}
+
+// TestInputAddTwo verifies that Input.Add accepts exactly one source: the
+// first Add succeeds, the second must fail.
+func TestInputAddTwo(t *testing.T) {
+	i := NewInput()
+	var b1 bytes.Buffer
+	// First add should succeed
+	if err := i.Add(&b1); err != nil {
+		t.Fatal(err)
+	}
+	var b2 bytes.Buffer
+	// Second add should fail
+	if err := i.Add(&b2); err == nil {
+		t.Fatalf("Adding a second source should return an error")
+	}
+}
+
+// TestInputAddNotEmpty verifies that a non-empty source attached to an Input
+// is read back verbatim.
+func TestInputAddNotEmpty(t *testing.T) {
+	i := NewInput()
+	b := bytes.NewBufferString("hello world\nabc")
+	expectedResult := b.String()
+	i.Add(b)
+	result, err := ioutil.ReadAll(i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(result) != expectedResult {
+		// FIX: print the []byte as text; %v on a []byte shows raw byte values.
+		t.Fatalf("Expected: %v\nReceived: %v", expectedResult, string(result))
+	}
+}

+ 19 - 5
graph.go

@@ -94,11 +94,25 @@ func (graph *Graph) Get(name string) (*Image, error) {
 		return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
 		return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
 	}
 	}
 	img.graph = graph
 	img.graph = graph
-	if img.Size == 0 {
-		size, err := utils.TreeSize(rootfs)
-		if err != nil {
-			return nil, fmt.Errorf("Error computing size of rootfs %s: %s", img.ID, err)
+
+	if img.Size < 0 {
+		var size int64
+		if img.Parent == "" {
+			if size, err = utils.TreeSize(rootfs); err != nil {
+				return nil, err
+			}
+		} else {
+			parentFs, err := graph.driver.Get(img.Parent)
+			if err != nil {
+				return nil, err
+			}
+			changes, err := archive.ChangesDirs(rootfs, parentFs)
+			if err != nil {
+				return nil, err
+			}
+			size = archive.ChangesSize(rootfs, changes)
 		}
 		}
+
 		img.Size = size
 		img.Size = size
 		if err := img.SaveSize(graph.imageRoot(id)); err != nil {
 		if err := img.SaveSize(graph.imageRoot(id)); err != nil {
 			return nil, err
 			return nil, err
@@ -205,7 +219,7 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression,
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf.FormatProgress("", "Buffering to disk", "%v/%v (%v)"), sf, true), tmp)
+	return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, true, "", "Buffering to disk"), tmp)
 }
 }
 
 
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.

+ 126 - 0
graphdriver/devmapper/attach_loopback.go

@@ -0,0 +1,126 @@
+// +build linux
+
+package devmapper
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/utils"
+)
+
+func stringToLoopName(src string) [LoNameSize]uint8 {
+	var dst [LoNameSize]uint8
+	copy(dst[:], src[:])
+	return dst
+}
+
+func getNextFreeLoopbackIndex() (int, error) {
+	f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644)
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	index, err := ioctlLoopCtlGetFree(f.Fd())
+	if index < 0 {
+		index = 0
+	}
+	return index, err
+}
+
+func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
+	// Start looking for a free /dev/loop
+	for {
+		target := fmt.Sprintf("/dev/loop%d", index)
+		index++
+
+		fi, err := osStat(target)
+		if err != nil {
+			if osIsNotExist(err) {
+				utils.Errorf("There are no more loopback device available.")
+			}
+			return nil, ErrAttachLoopbackDevice
+		}
+
+		if fi.Mode()&osModeDevice != osModeDevice {
+			utils.Errorf("Loopback device %s is not a block device.", target)
+			continue
+		}
+
+		// OpenFile adds O_CLOEXEC
+		loopFile, err = osOpenFile(target, osORdWr, 0644)
+		if err != nil {
+			utils.Errorf("Error openning loopback device: %s", err)
+			return nil, ErrAttachLoopbackDevice
+		}
+
+		// Try to attach to the loop file
+		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
+			loopFile.Close()
+
+			// If the error is EBUSY, then try the next loopback
+			if err != sysEBusy {
+				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
+				return nil, ErrAttachLoopbackDevice
+			}
+
+			// Otherwise, we keep going with the loop
+			continue
+		}
+		// In case of success, we finished. Break the loop.
+		break
+	}
+
+	// This can't happen, but let's be sure
+	if loopFile == nil {
+		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
+
+// attachLoopDevice attaches the given sparse file to the next
+// available loopback device. It returns an opened *osFile.
+func attachLoopDevice(sparseName string) (loop *osFile, err error) {
+
+	// Try to retrieve the next available loopback device via syscall.
+	// If it fails, we discard error and start loopking for a
+	// loopback from index 0.
+	startIndex, err := getNextFreeLoopbackIndex()
+	if err != nil {
+		utils.Debugf("Error retrieving the next available loopback: %s", err)
+	}
+
+	// OpenFile adds O_CLOEXEC
+	sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
+	if err != nil {
+		utils.Errorf("Error openning sparse file %s: %s", sparseName, err)
+		return nil, ErrAttachLoopbackDevice
+	}
+	defer sparseFile.Close()
+
+	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the status of the loopback device
+	loopInfo := &LoopInfo64{
+		loFileName: stringToLoopName(loopFile.Name()),
+		loOffset:   0,
+		loFlags:    LoFlagsAutoClear,
+	}
+
+	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+		utils.Errorf("Cannot set up loopback device info: %s", err)
+
+		// If the call failed, then free the loopback device
+		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+			utils.Errorf("Error while cleaning up the loopback device")
+		}
+		loopFile.Close()
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}

+ 11 - 8
graphdriver/devmapper/deviceset.go

@@ -1,7 +1,10 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (
 	"encoding/json"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
@@ -172,7 +175,7 @@ func (devices *DeviceSet) saveMetadata() error {
 		return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err)
 		return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err)
 	}
 	}
 	if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil {
 	if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil {
-		return fmt.Errorf("Error committing metadata file", err)
+		return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err)
 	}
 	}
 
 
 	if devices.NewTransactionId != devices.TransactionId {
 	if devices.NewTransactionId != devices.TransactionId {
@@ -383,7 +386,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
 		return fmt.Errorf("Can't shrink file")
 		return fmt.Errorf("Can't shrink file")
 	}
 	}
 
 
-	dataloopback := FindLoopDeviceFor(&osFile{File: datafile})
+	dataloopback := FindLoopDeviceFor(datafile)
 	if dataloopback == nil {
 	if dataloopback == nil {
 		return fmt.Errorf("Unable to find loopback mount for: %s", datafilename)
 		return fmt.Errorf("Unable to find loopback mount for: %s", datafilename)
 	}
 	}
@@ -395,7 +398,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
 	}
 	}
 	defer metadatafile.Close()
 	defer metadatafile.Close()
 
 
-	metadataloopback := FindLoopDeviceFor(&osFile{File: metadatafile})
+	metadataloopback := FindLoopDeviceFor(metadatafile)
 	if metadataloopback == nil {
 	if metadataloopback == nil {
 		return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename)
 		return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename)
 	}
 	}
@@ -439,11 +442,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	hasMetadata := devices.hasImage("metadata")
 	hasMetadata := devices.hasImage("metadata")
 
 
 	if !doInit && !hasData {
 	if !doInit && !hasData {
-		return fmt.Errorf("Looback data file not found %s")
+		return errors.New("Loopback data file not found")
 	}
 	}
 
 
 	if !doInit && !hasMetadata {
 	if !doInit && !hasMetadata {
-		return fmt.Errorf("Looback metadata file not found %s")
+		return errors.New("Loopback metadata file not found")
 	}
 	}
 
 
 	createdLoopback := !hasData || !hasMetadata
 	createdLoopback := !hasData || !hasMetadata
@@ -491,14 +494,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	if info.Exists == 0 {
 	if info.Exists == 0 {
 		utils.Debugf("Pool doesn't exist. Creating it.")
 		utils.Debugf("Pool doesn't exist. Creating it.")
 
 
-		dataFile, err := AttachLoopDevice(data)
+		dataFile, err := attachLoopDevice(data)
 		if err != nil {
 		if err != nil {
 			utils.Debugf("\n--->Err: %s\n", err)
 			utils.Debugf("\n--->Err: %s\n", err)
 			return err
 			return err
 		}
 		}
 		defer dataFile.Close()
 		defer dataFile.Close()
 
 
-		metadataFile, err := AttachLoopDevice(metadata)
+		metadataFile, err := attachLoopDevice(metadata)
 		if err != nil {
 		if err != nil {
 			utils.Debugf("\n--->Err: %s\n", err)
 			utils.Debugf("\n--->Err: %s\n", err)
 			return err
 			return err
@@ -637,7 +640,7 @@ func (devices *DeviceSet) deactivateDevice(hash string) error {
 // or b) the 1 second timeout expires.
 // or b) the 1 second timeout expires.
 func (devices *DeviceSet) waitRemove(hash string) error {
 func (devices *DeviceSet) waitRemove(hash string) error {
 	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, hash)
 	utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, hash)
-	defer utils.Debugf("[deviceset %s] waitRemove END", devices.devicePrefix, hash)
+	defer utils.Debugf("[deviceset %s] waitRemove(%) END", devices.devicePrefix, hash)
 	devname, err := devices.byHash(hash)
 	devname, err := devices.byHash(hash)
 	if err != nil {
 	if err != nil {
 		return err
 		return err

+ 15 - 20
graphdriver/devmapper/devmapper.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (
@@ -177,25 +179,18 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
 		start, length, targetType, params
 		start, length, targetType, params
 }
 }
 
 
-func AttachLoopDevice(filename string) (*osFile, error) {
-	var fd int
-	res := DmAttachLoopDevice(filename, &fd)
-	if res == "" {
-		return nil, ErrAttachLoopbackDevice
-	}
-	return &osFile{File: osNewFile(uintptr(fd), res)}, nil
-}
-
 func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
 func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
-	dev, inode, err := DmGetLoopbackBackingFile(file.Fd())
-	if err != 0 {
+	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
+	if err != nil {
+		utils.Errorf("Error get loopback backing file: %s\n", err)
 		return 0, 0, ErrGetLoopbackBackingFile
 		return 0, 0, ErrGetLoopbackBackingFile
 	}
 	}
-	return dev, inode, nil
+	return loopInfo.loDevice, loopInfo.loInode, nil
 }
 }
 
 
 func LoopbackSetCapacity(file *osFile) error {
 func LoopbackSetCapacity(file *osFile) error {
-	if err := DmLoopbackSetCapacity(file.Fd()); err != 0 {
+	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
+		utils.Errorf("Error loopbackSetCapacity: %s", err)
 		return ErrLoopbackSetCapacity
 		return ErrLoopbackSetCapacity
 	}
 	}
 	return nil
 	return nil
@@ -223,11 +218,10 @@ func FindLoopDeviceFor(file *osFile) *osFile {
 			continue
 			continue
 		}
 		}
 
 
-		dev, inode, err := getLoopbackBackingFile(&osFile{File: file})
+		dev, inode, err := getLoopbackBackingFile(file)
 		if err == nil && dev == targetDevice && inode == targetInode {
 		if err == nil && dev == targetDevice && inode == targetInode {
-			return &osFile{File: file}
+			return file
 		}
 		}
-
 		file.Close()
 		file.Close()
 	}
 	}
 
 
@@ -286,8 +280,9 @@ func RemoveDevice(name string) error {
 }
 }
 
 
 func GetBlockDeviceSize(file *osFile) (uint64, error) {
 func GetBlockDeviceSize(file *osFile) (uint64, error) {
-	size, errno := DmGetBlockSize(file.Fd())
-	if size == -1 || errno != 0 {
+	size, err := ioctlBlkGetSize64(file.Fd())
+	if err != nil {
+		utils.Errorf("Error getblockdevicesize: %s", err)
 		return 0, ErrGetBlockSize
 		return 0, ErrGetBlockSize
 	}
 	}
 	return uint64(size), nil
 	return uint64(size), nil
@@ -420,7 +415,7 @@ func suspendDevice(name string) error {
 		return err
 		return err
 	}
 	}
 	if err := task.Run(); err != nil {
 	if err := task.Run(); err != nil {
-		return fmt.Errorf("Error running DeviceSuspend")
+		return fmt.Errorf("Error running DeviceSuspend: %s", err)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -437,7 +432,7 @@ func resumeDevice(name string) error {
 	}
 	}
 
 
 	if err := task.Run(); err != nil {
 	if err := task.Run(); err != nil {
-		return fmt.Errorf("Error running DeviceSuspend")
+		return fmt.Errorf("Error running DeviceResume")
 	}
 	}
 
 
 	UdevWait(cookie)
 	UdevWait(cookie)

+ 2 - 0
graphdriver/devmapper/devmapper_log.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import "C"
 import "C"

+ 2 - 0
graphdriver/devmapper/devmapper_test.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (

+ 70 - 189
graphdriver/devmapper/devmapper_wrapper.go

@@ -1,125 +1,17 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 /*
 /*
 #cgo LDFLAGS: -L. -ldevmapper
 #cgo LDFLAGS: -L. -ldevmapper
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
 #include <libdevmapper.h>
 #include <libdevmapper.h>
-#include <linux/loop.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <linux/fs.h>
-#include <errno.h>
-
-#ifndef LOOP_CTL_GET_FREE
-#define LOOP_CTL_GET_FREE       0x4C82
-#endif
-
-// FIXME: this could easily be rewritten in go
-char*			attach_loop_device(const char *filename, int *loop_fd_out)
-{
-  struct loop_info64	loopinfo = {0};
-  struct stat		st;
-  char			buf[64];
-  int			i, loop_fd, fd, start_index;
-  char*			loopname;
-
-
-  *loop_fd_out = -1;
-
-  start_index = 0;
-  fd = open("/dev/loop-control", O_RDONLY);
-  if (fd >= 0) {
-    start_index = ioctl(fd, LOOP_CTL_GET_FREE);
-    close(fd);
-
-    if (start_index < 0)
-      start_index = 0;
-  }
-
-  fd = open(filename, O_RDWR);
-  if (fd < 0) {
-    perror("open");
-    return NULL;
-  }
-
-  loop_fd = -1;
-  for (i = start_index ; loop_fd < 0 ; i++ ) {
-    if (sprintf(buf, "/dev/loop%d", i) < 0) {
-	close(fd);
-	return NULL;
-    }
-
-    if (stat(buf, &st)) {
-      if (!S_ISBLK(st.st_mode)) {
-	 fprintf(stderr, "[error] Loopback device %s is not a block device.\n", buf);
-      } else if (errno == ENOENT) {
-	fprintf(stderr, "[error] There are no more loopback device available.\n");
-      } else {
-	fprintf(stderr, "[error] Unkown error trying to stat the loopback device %s (errno: %d).\n", buf, errno);
-      }
-      close(fd);
-      return NULL;
-    }
-
-    loop_fd = open(buf, O_RDWR);
-    if (loop_fd < 0 && errno == ENOENT) {
-      fprintf(stderr, "[error] The loopback device %s does not exists.\n", buf);
-      close(fd);
-      return NULL;
-    } else if (loop_fd < 0) {
-	fprintf(stderr, "[error] Unkown error openning the loopback device %s. (errno: %d)\n", buf, errno);
-	continue;
-    }
-
-    if (ioctl(loop_fd, LOOP_SET_FD, (void *)(size_t)fd) < 0) {
-      int errsv = errno;
-      close(loop_fd);
-      loop_fd = -1;
-      if (errsv != EBUSY) {
-        close(fd);
-        fprintf(stderr, "cannot set up loopback device %s: %s", buf, strerror(errsv));
-        return NULL;
-      }
-      continue;
-    }
-
-    close(fd);
-
-    strncpy((char*)loopinfo.lo_file_name, buf, LO_NAME_SIZE);
-    loopinfo.lo_offset = 0;
-    loopinfo.lo_flags = LO_FLAGS_AUTOCLEAR;
-
-    if (ioctl(loop_fd, LOOP_SET_STATUS64, &loopinfo) < 0) {
-      perror("ioctl LOOP_SET_STATUS64");
-      if (ioctl(loop_fd, LOOP_CLR_FD, 0) < 0) {
-        perror("ioctl LOOP_CLR_FD");
-      }
-      close(loop_fd);
-      fprintf (stderr, "cannot set up loopback device info");
-      return (NULL);
-    }
-
-    loopname = strdup(buf);
-    if (loopname == NULL) {
-      close(loop_fd);
-      return (NULL);
-    }
-
-    *loop_fd_out = loop_fd;
-    return (loopname);
-  }
-
-  return (NULL);
-}
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
 
 
+// FIXME: Can't we find a way to do the logging in pure Go?
 extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
 extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
 
 
-static void	log_cb(int level, const char *file, int line,
-		       int dm_errno_or_class, const char *f, ...)
+static void	log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
 {
 {
   char buffer[256];
   char buffer[256];
   va_list ap;
   va_list ap;
@@ -135,7 +27,6 @@ static void	log_with_errno_init()
 {
 {
   dm_log_with_errno_init(log_cb);
   dm_log_with_errno_init(log_cb);
 }
 }
-
 */
 */
 import "C"
 import "C"
 
 
@@ -145,31 +36,64 @@ import (
 
 
 type (
 type (
 	CDmTask C.struct_dm_task
 	CDmTask C.struct_dm_task
+
+	CLoopInfo64 C.struct_loop_info64
+	LoopInfo64  struct {
+		loDevice           uint64 /* ioctl r/o */
+		loInode            uint64 /* ioctl r/o */
+		loRdevice          uint64 /* ioctl r/o */
+		loOffset           uint64
+		loSizelimit        uint64 /* bytes, 0 == max available */
+		loNumber           uint32 /* ioctl r/o */
+		loEncrypt_type     uint32
+		loEncrypt_key_size uint32 /* ioctl w/o */
+		loFlags            uint32 /* ioctl r/o */
+		loFileName         [LoNameSize]uint8
+		loCryptName        [LoNameSize]uint8
+		loEncryptKey       [LoKeySize]uint8 /* ioctl w/o */
+		loInit             [2]uint64
+	}
+)
+
+// FIXME: Make sure the values are defined in C
+// IOCTL consts
+const (
+	BlkGetSize64 = C.BLKGETSIZE64
+
+	LoopSetFd       = C.LOOP_SET_FD
+	LoopCtlGetFree  = C.LOOP_CTL_GET_FREE
+	LoopGetStatus64 = C.LOOP_GET_STATUS64
+	LoopSetStatus64 = C.LOOP_SET_STATUS64
+	LoopClrFd       = C.LOOP_CLR_FD
+	LoopSetCapacity = C.LOOP_SET_CAPACITY
+)
+
+const (
+	LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
+	LoFlagsReadOnly  = C.LO_FLAGS_READ_ONLY
+	LoFlagsPartScan  = C.LO_FLAGS_PARTSCAN
+	LoKeySize        = C.LO_KEY_SIZE
+	LoNameSize       = C.LO_NAME_SIZE
 )
 )
 
 
 var (
 var (
-	DmAttachLoopDevice       = dmAttachLoopDeviceFct
-	DmGetBlockSize           = dmGetBlockSizeFct
-	DmGetLibraryVersion      = dmGetLibraryVersionFct
-	DmGetNextTarget          = dmGetNextTargetFct
-	DmLogInitVerbose         = dmLogInitVerboseFct
-	DmSetDevDir              = dmSetDevDirFct
-	DmTaskAddTarget          = dmTaskAddTargetFct
-	DmTaskCreate             = dmTaskCreateFct
-	DmTaskDestroy            = dmTaskDestroyFct
-	DmTaskGetInfo            = dmTaskGetInfoFct
-	DmTaskRun                = dmTaskRunFct
-	DmTaskSetAddNode         = dmTaskSetAddNodeFct
-	DmTaskSetCookie          = dmTaskSetCookieFct
-	DmTaskSetMessage         = dmTaskSetMessageFct
-	DmTaskSetName            = dmTaskSetNameFct
-	DmTaskSetRo              = dmTaskSetRoFct
-	DmTaskSetSector          = dmTaskSetSectorFct
-	DmUdevWait               = dmUdevWaitFct
-	GetBlockSize             = getBlockSizeFct
-	LogWithErrnoInit         = logWithErrnoInitFct
-	DmGetLoopbackBackingFile = dmGetLoopbackBackingFileFct
-	DmLoopbackSetCapacity    = dmLoopbackSetCapacityFct
+	DmGetLibraryVersion = dmGetLibraryVersionFct
+	DmGetNextTarget     = dmGetNextTargetFct
+	DmLogInitVerbose    = dmLogInitVerboseFct
+	DmSetDevDir         = dmSetDevDirFct
+	DmTaskAddTarget     = dmTaskAddTargetFct
+	DmTaskCreate        = dmTaskCreateFct
+	DmTaskDestroy       = dmTaskDestroyFct
+	DmTaskGetInfo       = dmTaskGetInfoFct
+	DmTaskRun           = dmTaskRunFct
+	DmTaskSetAddNode    = dmTaskSetAddNodeFct
+	DmTaskSetCookie     = dmTaskSetCookieFct
+	DmTaskSetMessage    = dmTaskSetMessageFct
+	DmTaskSetName       = dmTaskSetNameFct
+	DmTaskSetRo         = dmTaskSetRoFct
+	DmTaskSetSector     = dmTaskSetSectorFct
+	DmUdevWait          = dmUdevWaitFct
+	LogWithErrnoInit    = logWithErrnoInitFct
 )
 )
 
 
 func free(p *C.char) {
 func free(p *C.char) {
@@ -185,28 +109,26 @@ func dmTaskCreateFct(taskType int) *CDmTask {
 }
 }
 
 
 func dmTaskRunFct(task *CDmTask) int {
 func dmTaskRunFct(task *CDmTask) int {
-	return int(C.dm_task_run((*C.struct_dm_task)(task)))
+	ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
+	return int(ret)
 }
 }
 
 
 func dmTaskSetNameFct(task *CDmTask, name string) int {
 func dmTaskSetNameFct(task *CDmTask, name string) int {
 	Cname := C.CString(name)
 	Cname := C.CString(name)
 	defer free(Cname)
 	defer free(Cname)
 
 
-	return int(C.dm_task_set_name((*C.struct_dm_task)(task),
-		Cname))
+	return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
 }
 }
 
 
 func dmTaskSetMessageFct(task *CDmTask, message string) int {
 func dmTaskSetMessageFct(task *CDmTask, message string) int {
 	Cmessage := C.CString(message)
 	Cmessage := C.CString(message)
 	defer free(Cmessage)
 	defer free(Cmessage)
 
 
-	return int(C.dm_task_set_message((*C.struct_dm_task)(task),
-		Cmessage))
+	return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
 }
 }
 
 
 func dmTaskSetSectorFct(task *CDmTask, sector uint64) int {
 func dmTaskSetSectorFct(task *CDmTask, sector uint64) int {
-	return int(C.dm_task_set_sector((*C.struct_dm_task)(task),
-		C.uint64_t(sector)))
+	return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
 }
 }
 
 
 func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
 func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
@@ -214,13 +136,11 @@ func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
 	defer func() {
 	defer func() {
 		*cookie = uint(cCookie)
 		*cookie = uint(cCookie)
 	}()
 	}()
-	return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie,
-		C.uint16_t(flags)))
+	return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
 }
 }
 
 
 func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int {
 func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int {
-	return int(C.dm_task_set_add_node((*C.struct_dm_task)(task),
-		C.dm_add_node_t(addNode)))
+	return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
 }
 }
 
 
 func dmTaskSetRoFct(task *CDmTask) int {
 func dmTaskSetRoFct(task *CDmTask) int {
@@ -236,26 +156,7 @@ func dmTaskAddTargetFct(task *CDmTask,
 	Cparams := C.CString(params)
 	Cparams := C.CString(params)
 	defer free(Cparams)
 	defer free(Cparams)
 
 
-	return int(C.dm_task_add_target((*C.struct_dm_task)(task),
-		C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
-}
-
-func dmGetLoopbackBackingFileFct(fd uintptr) (uint64, uint64, sysErrno) {
-	var lo64 C.struct_loop_info64
-	_, _, err := sysSyscall(sysSysIoctl, fd, C.LOOP_GET_STATUS64,
-		uintptr(unsafe.Pointer(&lo64)))
-	return uint64(lo64.lo_device), uint64(lo64.lo_inode), sysErrno(err)
-}
-
-func dmLoopbackSetCapacityFct(fd uintptr) sysErrno {
-	_, _, err := sysSyscall(sysSysIoctl, fd, C.LOOP_SET_CAPACITY, 0)
-	return sysErrno(err)
-}
-
-func dmGetBlockSizeFct(fd uintptr) (int64, sysErrno) {
-	var size int64
-	_, _, err := sysSyscall(sysSysIoctl, fd, C.BLKGETSIZE64, uintptr(unsafe.Pointer(&size)))
-	return size, sysErrno(err)
+	return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
 }
 }
 
 
 func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
 func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
@@ -287,30 +188,10 @@ func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, targ
 		*params = C.GoString(Cparams)
 		*params = C.GoString(Cparams)
 	}()
 	}()
 
 
-	nextp := C.dm_get_next_target((*C.struct_dm_task)(task),
-		unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)
+	nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)
 	return uintptr(nextp)
 	return uintptr(nextp)
 }
 }
 
 
-func dmAttachLoopDeviceFct(filename string, fd *int) string {
-	cFilename := C.CString(filename)
-	defer free(cFilename)
-
-	var cFd C.int
-	defer func() {
-		*fd = int(cFd)
-	}()
-
-	ret := C.attach_loop_device(cFilename, &cFd)
-	defer free(ret)
-	return C.GoString(ret)
-}
-
-func getBlockSizeFct(fd uintptr, size *uint64) sysErrno {
-	_, _, err := sysSyscall(sysSysIoctl, fd, C.BLKGETSIZE64, uintptr(unsafe.Pointer(&size)))
-	return sysErrno(err)
-}
-
 func dmUdevWaitFct(cookie uint) int {
 func dmUdevWaitFct(cookie uint) int {
 	return int(C.dm_udev_wait(C.uint32_t(cookie)))
 	return int(C.dm_udev_wait(C.uint32_t(cookie)))
 }
 }

+ 2 - 0
graphdriver/devmapper/driver.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (

+ 66 - 60
graphdriver/devmapper/driver_test.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (
@@ -55,12 +57,6 @@ func denyAllDevmapper() {
 	DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
 	DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
 		panic("DmGetNextTarget: this method should not be called here")
 		panic("DmGetNextTarget: this method should not be called here")
 	}
 	}
-	DmAttachLoopDevice = func(filename string, fd *int) string {
-		panic("DmAttachLoopDevice: this method should not be called here")
-	}
-	DmGetBlockSize = func(fd uintptr) (int64, sysErrno) {
-		panic("DmGetBlockSize: this method should not be called here")
-	}
 	DmUdevWait = func(cookie uint) int {
 	DmUdevWait = func(cookie uint) int {
 		panic("DmUdevWait: this method should not be called here")
 		panic("DmUdevWait: this method should not be called here")
 	}
 	}
@@ -76,9 +72,6 @@ func denyAllDevmapper() {
 	DmTaskDestroy = func(task *CDmTask) {
 	DmTaskDestroy = func(task *CDmTask) {
 		panic("DmTaskDestroy: this method should not be called here")
 		panic("DmTaskDestroy: this method should not be called here")
 	}
 	}
-	GetBlockSize = func(fd uintptr, size *uint64) sysErrno {
-		panic("GetBlockSize: this method should not be called here")
-	}
 	LogWithErrnoInit = func() {
 	LogWithErrnoInit = func() {
 		panic("LogWithErrnoInit: this method should not be called here")
 		panic("LogWithErrnoInit: this method should not be called here")
 	}
 	}
@@ -155,11 +148,10 @@ func (r Set) Assert(t *testing.T, names ...string) {
 
 
 func TestInit(t *testing.T) {
 func TestInit(t *testing.T) {
 	var (
 	var (
-		calls           = make(Set)
-		devicesAttached = make(Set)
-		taskMessages    = make(Set)
-		taskTypes       = make(Set)
-		home            = mkTestDirectory(t)
+		calls        = make(Set)
+		taskMessages = make(Set)
+		taskTypes    = make(Set)
+		home         = mkTestDirectory(t)
 	)
 	)
 	defer osRemoveAll(home)
 	defer osRemoveAll(home)
 
 
@@ -233,29 +225,6 @@ func TestInit(t *testing.T) {
 			taskMessages[message] = true
 			taskMessages[message] = true
 			return 1
 			return 1
 		}
 		}
-		var (
-			fakeDataLoop       = "/dev/loop42"
-			fakeMetadataLoop   = "/dev/loop43"
-			fakeDataLoopFd     = 42
-			fakeMetadataLoopFd = 43
-		)
-		var attachCount int
-		DmAttachLoopDevice = func(filename string, fd *int) string {
-			calls["DmAttachLoopDevice"] = true
-			if _, exists := devicesAttached[filename]; exists {
-				t.Fatalf("Already attached %s", filename)
-			}
-			devicesAttached[filename] = true
-			// This will crash if fd is not dereferenceable
-			if attachCount == 0 {
-				attachCount++
-				*fd = fakeDataLoopFd
-				return fakeDataLoop
-			} else {
-				*fd = fakeMetadataLoopFd
-				return fakeMetadataLoop
-			}
-		}
 		DmTaskDestroy = func(task *CDmTask) {
 		DmTaskDestroy = func(task *CDmTask) {
 			calls["DmTaskDestroy"] = true
 			calls["DmTaskDestroy"] = true
 			expectedTask := &task1
 			expectedTask := &task1
@@ -263,14 +232,6 @@ func TestInit(t *testing.T) {
 				t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
 				t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
 			}
 			}
 		}
 		}
-		fakeBlockSize := int64(4242 * 512)
-		DmGetBlockSize = func(fd uintptr) (int64, sysErrno) {
-			calls["DmGetBlockSize"] = true
-			if expectedFd := uintptr(42); fd != expectedFd {
-				t.Fatalf("Wrong libdevmapper call\nExpected: DmGetBlockSize(%v)\nReceived: DmGetBlockSize(%v)\n", expectedFd, fd)
-			}
-			return fakeBlockSize, 0
-		}
 		DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
 		DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
 			calls["DmTaskSetTarget"] = true
 			calls["DmTaskSetTarget"] = true
 			expectedTask := &task1
 			expectedTask := &task1
@@ -345,11 +306,9 @@ func TestInit(t *testing.T) {
 		"DmTaskSetName",
 		"DmTaskSetName",
 		"DmTaskRun",
 		"DmTaskRun",
 		"DmTaskGetInfo",
 		"DmTaskGetInfo",
-		"DmAttachLoopDevice",
 		"DmTaskDestroy",
 		"DmTaskDestroy",
 		"execRun",
 		"execRun",
 		"DmTaskCreate",
 		"DmTaskCreate",
-		"DmGetBlockSize",
 		"DmTaskSetTarget",
 		"DmTaskSetTarget",
 		"DmTaskSetCookie",
 		"DmTaskSetCookie",
 		"DmUdevWait",
 		"DmUdevWait",
@@ -357,7 +316,6 @@ func TestInit(t *testing.T) {
 		"DmTaskSetMessage",
 		"DmTaskSetMessage",
 		"DmTaskSetAddNode",
 		"DmTaskSetAddNode",
 	)
 	)
-	devicesAttached.Assert(t, path.Join(home, "devicemapper", "data"), path.Join(home, "devicemapper", "metadata"))
 	taskTypes.Assert(t, "0", "6", "17")
 	taskTypes.Assert(t, "0", "6", "17")
 	taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1")
 	taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1")
 }
 }
@@ -408,17 +366,9 @@ func mockAllDevmapper(calls Set) {
 		calls["DmTaskSetMessage"] = true
 		calls["DmTaskSetMessage"] = true
 		return 1
 		return 1
 	}
 	}
-	DmAttachLoopDevice = func(filename string, fd *int) string {
-		calls["DmAttachLoopDevice"] = true
-		return "/dev/loop42"
-	}
 	DmTaskDestroy = func(task *CDmTask) {
 	DmTaskDestroy = func(task *CDmTask) {
 		calls["DmTaskDestroy"] = true
 		calls["DmTaskDestroy"] = true
 	}
 	}
-	DmGetBlockSize = func(fd uintptr) (int64, sysErrno) {
-		calls["DmGetBlockSize"] = true
-		return int64(4242 * 512), 0
-	}
 	DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
 	DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
 		calls["DmTaskSetTarget"] = true
 		calls["DmTaskSetTarget"] = true
 		return 1
 		return 1
@@ -489,6 +439,32 @@ func TestDriverCreate(t *testing.T) {
 		return false, nil
 		return false, nil
 	}
 	}
 
 
+	sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+		calls["sysSyscall"] = true
+		if trap != sysSysIoctl {
+			t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
+		}
+		switch a2 {
+		case LoopSetFd:
+			calls["ioctl.loopsetfd"] = true
+		case LoopCtlGetFree:
+			calls["ioctl.loopctlgetfree"] = true
+		case LoopGetStatus64:
+			calls["ioctl.loopgetstatus"] = true
+		case LoopSetStatus64:
+			calls["ioctl.loopsetstatus"] = true
+		case LoopClrFd:
+			calls["ioctl.loopclrfd"] = true
+		case LoopSetCapacity:
+			calls["ioctl.loopsetcapacity"] = true
+		case BlkGetSize64:
+			calls["ioctl.blkgetsize"] = true
+		default:
+			t.Fatalf("Unexpected IOCTL. Received %d", a2)
+		}
+		return 0, 0, 0
+	}
+
 	func() {
 	func() {
 		d := newDriver(t)
 		d := newDriver(t)
 
 
@@ -498,16 +474,18 @@ func TestDriverCreate(t *testing.T) {
 			"DmTaskSetName",
 			"DmTaskSetName",
 			"DmTaskRun",
 			"DmTaskRun",
 			"DmTaskGetInfo",
 			"DmTaskGetInfo",
-			"DmAttachLoopDevice",
 			"execRun",
 			"execRun",
 			"DmTaskCreate",
 			"DmTaskCreate",
-			"DmGetBlockSize",
 			"DmTaskSetTarget",
 			"DmTaskSetTarget",
 			"DmTaskSetCookie",
 			"DmTaskSetCookie",
 			"DmUdevWait",
 			"DmUdevWait",
 			"DmTaskSetSector",
 			"DmTaskSetSector",
 			"DmTaskSetMessage",
 			"DmTaskSetMessage",
 			"DmTaskSetAddNode",
 			"DmTaskSetAddNode",
+			"sysSyscall",
+			"ioctl.blkgetsize",
+			"ioctl.loopsetfd",
+			"ioctl.loopsetstatus",
 		)
 		)
 
 
 		if err := d.Create("1", ""); err != nil {
 		if err := d.Create("1", ""); err != nil {
@@ -579,6 +557,32 @@ func TestDriverRemove(t *testing.T) {
 		return false, nil
 		return false, nil
 	}
 	}
 
 
+	sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+		calls["sysSyscall"] = true
+		if trap != sysSysIoctl {
+			t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
+		}
+		switch a2 {
+		case LoopSetFd:
+			calls["ioctl.loopsetfd"] = true
+		case LoopCtlGetFree:
+			calls["ioctl.loopctlgetfree"] = true
+		case LoopGetStatus64:
+			calls["ioctl.loopgetstatus"] = true
+		case LoopSetStatus64:
+			calls["ioctl.loopsetstatus"] = true
+		case LoopClrFd:
+			calls["ioctl.loopclrfd"] = true
+		case LoopSetCapacity:
+			calls["ioctl.loopsetcapacity"] = true
+		case BlkGetSize64:
+			calls["ioctl.blkgetsize"] = true
+		default:
+			t.Fatalf("Unexpected IOCTL. Received %d", a2)
+		}
+		return 0, 0, 0
+	}
+
 	func() {
 	func() {
 		d := newDriver(t)
 		d := newDriver(t)
 
 
@@ -588,16 +592,18 @@ func TestDriverRemove(t *testing.T) {
 			"DmTaskSetName",
 			"DmTaskSetName",
 			"DmTaskRun",
 			"DmTaskRun",
 			"DmTaskGetInfo",
 			"DmTaskGetInfo",
-			"DmAttachLoopDevice",
 			"execRun",
 			"execRun",
 			"DmTaskCreate",
 			"DmTaskCreate",
-			"DmGetBlockSize",
 			"DmTaskSetTarget",
 			"DmTaskSetTarget",
 			"DmTaskSetCookie",
 			"DmTaskSetCookie",
 			"DmUdevWait",
 			"DmUdevWait",
 			"DmTaskSetSector",
 			"DmTaskSetSector",
 			"DmTaskSetMessage",
 			"DmTaskSetMessage",
 			"DmTaskSetAddNode",
 			"DmTaskSetAddNode",
+			"sysSyscall",
+			"ioctl.blkgetsize",
+			"ioctl.loopsetfd",
+			"ioctl.loopsetstatus",
 		)
 		)
 
 
 		if err := d.Create("1", ""); err != nil {
 		if err := d.Create("1", ""); err != nil {

+ 60 - 0
graphdriver/devmapper/ioctl.go

@@ -0,0 +1,60 @@
+// +build linux
+
+package devmapper
+
+import (
+	"unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+	index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0)
+	if err != 0 {
+		return 0, err
+	}
+	return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+	if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error {
+	if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+	if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) {
+	loopInfo := &LoopInfo64{}
+
+	if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return nil, err
+	}
+	return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+	if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlBlkGetSize64(fd uintptr) (int64, error) {
+	var size int64
+	if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
+		return 0, err
+	}
+	return size, nil
+}

+ 2 - 0
graphdriver/devmapper/mount.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (

+ 13 - 6
graphdriver/devmapper/sys.go

@@ -1,3 +1,5 @@
+// +build linux
+
 package devmapper
 package devmapper
 
 
 import (
 import (
@@ -19,7 +21,11 @@ var (
 	sysCloseOnExec = syscall.CloseOnExec
 	sysCloseOnExec = syscall.CloseOnExec
 	sysSyscall     = syscall.Syscall
 	sysSyscall     = syscall.Syscall
 
 
-	osOpenFile   = os.OpenFile
+	osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) {
+		f, err := os.OpenFile(name, flag, perm)
+		return &osFile{File: f}, err
+	}
+	osOpen       = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err }
 	osNewFile    = os.NewFile
 	osNewFile    = os.NewFile
 	osCreate     = os.Create
 	osCreate     = os.Create
 	osStat       = os.Stat
 	osStat       = os.Stat
@@ -30,9 +36,7 @@ var (
 	osRename     = os.Rename
 	osRename     = os.Rename
 	osReadlink   = os.Readlink
 	osReadlink   = os.Readlink
 
 
-	execRun = func(name string, args ...string) error {
-		return exec.Command(name, args...).Run()
-	}
+	execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() }
 )
 )
 
 
 const (
 const (
@@ -40,9 +44,12 @@ const (
 	sysMsRdOnly = syscall.MS_RDONLY
 	sysMsRdOnly = syscall.MS_RDONLY
 	sysEInval   = syscall.EINVAL
 	sysEInval   = syscall.EINVAL
 	sysSysIoctl = syscall.SYS_IOCTL
 	sysSysIoctl = syscall.SYS_IOCTL
+	sysEBusy    = syscall.EBUSY
 
 
-	osORdWr   = os.O_RDWR
-	osOCreate = os.O_CREATE
+	osORdOnly    = os.O_RDONLY
+	osORdWr      = os.O_RDWR
+	osOCreate    = os.O_CREATE
+	osModeDevice = os.ModeDevice
 )
 )
 
 
 func toSysStatT(i interface{}) *sysStatT {
 func toSysStatT(i interface{}) *sysStatT {

+ 3 - 3
hack/PACKAGERS.md

@@ -36,7 +36,7 @@ To build docker, you will need the following system dependencies
 
 
 * An amd64 machine
 * An amd64 machine
 * A recent version of git and mercurial
 * A recent version of git and mercurial
-* Go version 1.2rc1 or later (see notes below regarding using Go 1.1.2 and dynbinary)
+* Go version 1.2 or later (see notes below regarding using Go 1.1.2 and dynbinary)
 * SQLite version 3.7.9 or later
 * SQLite version 3.7.9 or later
 * A clean checkout of the source must be added to a valid Go [workspace](http://golang.org/doc/code.html#Workspaces)
 * A clean checkout of the source must be added to a valid Go [workspace](http://golang.org/doc/code.html#Workspaces)
 under the path *src/github.com/dotcloud/docker*.
 under the path *src/github.com/dotcloud/docker*.
@@ -91,8 +91,8 @@ You would do the users of your distro a disservice and "void the docker warranty
 A good comparison is Busybox: all distros package it as a statically linked binary, because it just
 A good comparison is Busybox: all distros package it as a statically linked binary, because it just
 makes sense. Docker is the same way.
 makes sense. Docker is the same way.
 
 
-If you *must* have a non-static Docker binary, or require Go 1.1.2 (since Go 1.2 is not yet officially
-released at the time of this writing), please use:
+If you *must* have a non-static Docker binary, or require Go 1.1.2 (since Go 1.2 is still freshly released
+at the time of this writing), please use:
 
 
 ```bash
 ```bash
 ./hack/make.sh dynbinary
 ./hack/make.sh dynbinary

+ 20 - 11
hack/RELEASE-CHECKLIST.md

@@ -5,7 +5,6 @@ So you're in charge of a Docker release? Cool. Here's what to do.
 If your experience deviates from this document, please document the changes
 If your experience deviates from this document, please document the changes
 to keep it up-to-date.
 to keep it up-to-date.
 
 
-
 ### 1. Pull from master and create a release branch
 ### 1. Pull from master and create a release branch
 
 
 ```bash
 ```bash
@@ -13,6 +12,7 @@ export VERSION=vXXX
 git checkout release
 git checkout release
 git pull
 git pull
 git checkout -b bump_$VERSION
 git checkout -b bump_$VERSION
+git merge origin/master
 ```
 ```
 
 
 ### 2. Update CHANGELOG.md
 ### 2. Update CHANGELOG.md
@@ -54,10 +54,14 @@ EXAMPLES:
 
 
 ### 3. Change the contents of the VERSION file
 ### 3. Change the contents of the VERSION file
 
 
+```bash
+echo ${VERSION#v} > VERSION
+```
+
 ### 4. Run all tests
 ### 4. Run all tests
 
 
 ```bash
 ```bash
-docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test
+docker run -privileged docker hack/make.sh test
 ```
 ```
 
 
 ### 5. Test the docs
 ### 5. Test the docs
@@ -79,8 +83,8 @@ git push origin bump_$VERSION
 ### 8. Apply tag
 ### 8. Apply tag
 
 
 ```bash
 ```bash
-git tag -a v$VERSION # Don't forget the v!
-git push --tags
+git tag -a $VERSION -m $VERSION bump_$VERSION
+git push origin $VERSION
 ```
 ```
 
 
 Merging the pull request to the release branch will automatically
 Merging the pull request to the release branch will automatically
@@ -91,6 +95,9 @@ documentation releases, see ``docs/README.md``
 
 
 ### 9. Go to github to merge the bump_$VERSION into release
 ### 9. Go to github to merge the bump_$VERSION into release
 
 
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
+
 ### 10. Publish binaries
 ### 10. Publish binaries
 
 
 To run this you will need access to the release credentials.
 To run this you will need access to the release credentials.
@@ -107,17 +114,19 @@ docker run  \
        -e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
        -e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
        -e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
        -e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
        -e GPG_PASSPHRASE=supersecretsesame \
        -e GPG_PASSPHRASE=supersecretsesame \
-       -privileged -lxc-conf=lxc.aa_profile=unconfined \
-       -t -i \
+       -i -t -privileged \
        docker \
        docker \
        hack/release.sh
        hack/release.sh
 ```
 ```
 
 
-It will build and upload the binaries on the specified bucket (you should
-use test.docker.io for general testing, and once everything is fine,
-switch to get.docker.io).
-
+It will run the test suite one more time, build the binaries and packages,
+and upload to the specified bucket (you should use test.docker.io for
+general testing, and once everything is fine, switch to get.docker.io).
 
 
-### 11. Rejoice!
+### 11. Rejoice and Evangelize!
 
 
 Congratulations! You're done.
 Congratulations! You're done.
+
+Go forth and announce the glad tidings of the new release in `#docker`,
+`#docker-dev`, on the [mailing list](https://groups.google.com/forum/#!forum/docker-dev),
+and on Twitter!

+ 1 - 1
hack/infrastructure/docker-ci/docker-coverage/coverage-docker.sh

@@ -23,7 +23,7 @@ cd $BASE_PATH/go
 GOPATH=$BASE_PATH/go go get github.com/axw/gocov/gocov
 GOPATH=$BASE_PATH/go go get github.com/axw/gocov/gocov
 sudo -E GOPATH=$GOPATH ./bin/gocov test -deps -exclude-goroot -v\
 sudo -E GOPATH=$GOPATH ./bin/gocov test -deps -exclude-goroot -v\
  -exclude github.com/gorilla/context,github.com/gorilla/mux,github.com/kr/pty,\
  -exclude github.com/gorilla/context,github.com/gorilla/mux,github.com/kr/pty,\
-code.google.com/p/go.net/websocket,github.com/dotcloud/tar\
+code.google.com/p/go.net/websocket\
  github.com/dotcloud/docker | ./bin/gocov report; exit_status=$?
  github.com/dotcloud/docker | ./bin/gocov report; exit_status=$?
 
 
 # Cleanup testing directory
 # Cleanup testing directory

+ 1 - 1
hack/make/binary

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 
 DEST=$1
 DEST=$1
 
 

+ 1 - 1
hack/make/dynbinary

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 
 DEST=$1
 DEST=$1
 
 

+ 29 - 12
hack/make/dyntest

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 
 DEST=$1
 DEST=$1
 INIT=$DEST/../dynbinary/dockerinit-$VERSION
 INIT=$DEST/../dynbinary/dockerinit-$VERSION
@@ -19,18 +19,35 @@ fi
 bundle_test() {
 bundle_test() {
 	{
 	{
 		date
 		date
-		for test_dir in $(find_test_dirs); do (
-			set -x
-			cd $test_dir
+		
+		TESTS_FAILED=()
+		for test_dir in $(find_test_dirs); do
+			echo
 			
 			
-			# Install packages that are dependencies of the tests.
-			#   Note: Does not run the tests.
-			go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
-			
-			# Run the tests with the optional $TESTFLAGS.
-			export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION
-			go test -v -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS
-		)  done
+			if ! (
+				set -x
+				cd $test_dir
+				
+				# Install packages that are dependencies of the tests.
+				#   Note: Does not run the tests.
+				go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
+				
+				# Run the tests with the optional $TESTFLAGS.
+				export TEST_DOCKERINIT_PATH=$DEST/../dynbinary/dockerinit-$VERSION
+				go test -ldflags "$LDFLAGS -X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\"" $BUILDFLAGS $TESTFLAGS
+			); then
+				TESTS_FAILED+=("$test_dir")
+				sleep 1 # give it a second, so observers watching can take note
+			fi
+		done
+		
+		# if some tests fail, we want the bundlescript to fail, but we want to
+		# try running ALL the tests first, hence TESTS_FAILED
+		if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
+			echo
+			echo "Test failures in: ${TESTS_FAILED[@]}"
+			false
+		fi
 	} 2>&1 | tee $DEST/test.log
 	} 2>&1 | tee $DEST/test.log
 }
 }
 
 

+ 41 - 13
hack/make/test

@@ -1,29 +1,57 @@
-#!/bin/sh
+#!/bin/bash
 
 
 DEST=$1
 DEST=$1
 
 
 set -e
 set -e
 
 
+TEXTRESET=$'\033[0m' # reset the foreground colour
+RED=$'\033[31m'
+GREEN=$'\033[32m'
+
 # Run Docker's test suite, including sub-packages, and store their output as a bundle
 # Run Docker's test suite, including sub-packages, and store their output as a bundle
 # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
 # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
 # You can use this to select certain tests to run, eg.
 # You can use this to select certain tests to run, eg.
-# 
+#
 # 	TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
 # 	TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
 #
 #
 bundle_test() {
 bundle_test() {
 	{
 	{
 		date
 		date
-		for test_dir in $(find_test_dirs); do (
-			set -x
-			cd $test_dir
-			
-			# Install packages that are dependencies of the tests.
-			#   Note: Does not run the tests.
-			go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS
-			
-			# Run the tests with the optional $TESTFLAGS.
-			go test -v -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS
-		)  done
+
+		TESTS_FAILED=()
+		for test_dir in $(find_test_dirs); do
+			echo
+
+			if ! (
+				set -x
+				cd $test_dir
+
+				# Install packages that are dependencies of the tests.
+				#   Note: Does not run the tests.
+				go test -i -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS
+
+				# Run the tests with the optional $TESTFLAGS.
+				go test -ldflags "$LDFLAGS $LDFLAGS_STATIC" $BUILDFLAGS $TESTFLAGS
+			); then
+				TESTS_FAILED+=("$test_dir")
+				echo
+				echo "${RED}Test Failed: $test_dir${TEXTRESET}"
+				echo
+				sleep 1 # give it a second, so observers watching can take note
+			fi
+		done
+
+		# if some tests fail, we want the bundlescript to fail, but we want to
+		# try running ALL the tests first, hence TESTS_FAILED
+		if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
+			echo
+			echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
+			false
+		else
+			echo
+			echo "${GREEN}Test success${TEXTRESET}"
+			true
+		fi
 	} 2>&1 | tee $DEST/test.log
 	} 2>&1 | tee $DEST/test.log
 }
 }
 
 

+ 1 - 1
hack/make/tgz

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 
 DEST="$1"
 DEST="$1"
 BINARY="$DEST/../binary/docker-$VERSION"
 BINARY="$DEST/../binary/docker-$VERSION"

+ 2 - 1
hack/make/ubuntu

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 
 DEST=$1
 DEST=$1
 
 
@@ -96,6 +96,7 @@ EOF
 		    --depends lxc \
 		    --depends lxc \
 		    --depends aufs-tools \
 		    --depends aufs-tools \
 		    --depends iptables \
 		    --depends iptables \
+		    --depends cgroup-bin \
 		    --description "$PACKAGE_DESCRIPTION" \
 		    --description "$PACKAGE_DESCRIPTION" \
 		    --maintainer "$PACKAGE_MAINTAINER" \
 		    --maintainer "$PACKAGE_MAINTAINER" \
 		    --conflicts lxc-docker-virtual-package \
 		    --conflicts lxc-docker-virtual-package \

+ 0 - 2
hack/vendor.sh

@@ -27,8 +27,6 @@ git_clone github.com/gorilla/context/ 708054d61e5
 
 
 git_clone github.com/gorilla/mux/ 9b36453141c
 git_clone github.com/gorilla/mux/ 9b36453141c
 
 
-git_clone github.com/dotcloud/tar/ e5ea6bb21a
-
 # Docker requires code.google.com/p/go.net/websocket
 # Docker requires code.google.com/p/go.net/websocket
 PKG=code.google.com/p/go.net REV=84a4013f96e0
 PKG=code.google.com/p/go.net REV=84a4013f96e0
 (
 (

+ 17 - 15
image.go

@@ -51,6 +51,9 @@ func LoadImage(root string) (*Image, error) {
 		if !os.IsNotExist(err) {
 		if !os.IsNotExist(err) {
 			return nil, err
 			return nil, err
 		}
 		}
+		// If the layersize file does not exist then set the size to a negative number
+		// because a layer size of 0 (zero) is valid
+		img.Size = -1
 	} else {
 	} else {
 		size, err := strconv.Atoi(string(buf))
 		size, err := strconv.Atoi(string(buf))
 		if err != nil {
 		if err != nil {
@@ -104,30 +107,29 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, la
 				if err != nil {
 				if err != nil {
 					return err
 					return err
 				}
 				}
-				if size = archive.ChangesSize(layer, changes); err != nil {
-					return err
-				}
+				size = archive.ChangesSize(layer, changes)
 			}
 			}
 		}
 		}
 	}
 	}
 
 
-	// If raw json is provided, then use it
-	if jsonData != nil {
-		return ioutil.WriteFile(jsonPath(root), jsonData, 0600)
-	}
-	// Otherwise, unmarshal the image
-	if jsonData, err = json.Marshal(img); err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
-		return err
-	}
-
 	img.Size = size
 	img.Size = size
 	if err := img.SaveSize(root); err != nil {
 	if err := img.SaveSize(root); err != nil {
 		return err
 		return err
 	}
 	}
 
 
+	// If raw json is provided, then use it
+	if jsonData != nil {
+		if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
+			return err
+		}
+	} else {
+		if jsonData, err = json.Marshal(img); err != nil {
+			return err
+		}
+		if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
+			return err
+		}
+	}
 	return nil
 	return nil
 }
 }
 
 

+ 44 - 3
integration/api_test.go

@@ -304,6 +304,10 @@ func TestGetContainersJSON(t *testing.T) {
 		Cmd:   []string{"echo", "test"},
 		Cmd:   []string{"echo", "test"},
 	}, t)
 	}, t)
 
 
+	if containerID == "" {
+		t.Fatalf("Received empty container ID")
+	}
+
 	req, err := http.NewRequest("GET", "/containers/json?all=1", nil)
 	req, err := http.NewRequest("GET", "/containers/json?all=1", nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -454,7 +458,7 @@ func TestGetContainersTop(t *testing.T) {
 	// Make sure sh spawn up cat
 	// Make sure sh spawn up cat
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
 		in, out := containerAttach(eng, containerID, t)
 		in, out := containerAttach(eng, containerID, t)
-		if err := assertPipe("hello\n", "hello", out, in, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", out, in, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -743,6 +747,43 @@ func TestPostContainersStart(t *testing.T) {
 	containerKill(eng, containerID, t)
 	containerKill(eng, containerID, t)
 }
 }
 
 
+// Expected behaviour: using / as a bind mount source should throw an error
+func TestRunErrorBindMountRootSource(t *testing.T) {
+	eng := NewTestEngine(t)
+	defer mkRuntimeFromEngine(eng, t).Nuke()
+	srv := mkServerFromEngine(eng, t)
+
+	containerID := createTestContainer(
+		eng,
+		&docker.Config{
+			Image:     unitTestImageID,
+			Cmd:       []string{"/bin/cat"},
+			OpenStdin: true,
+		},
+		t,
+	)
+
+	hostConfigJSON, err := json.Marshal(&docker.HostConfig{
+		Binds: []string{"/:/tmp"},
+	})
+
+	req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+
+	r := httptest.NewRecorder()
+	if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil {
+		t.Fatal(err)
+	}
+	if r.Code != http.StatusInternalServerError {
+		containerKill(eng, containerID, t)
+		t.Fatal("should have failed to run when using / as a source for the bind mount")
+	}
+}
+
 func TestPostContainersStop(t *testing.T) {
 func TestPostContainersStop(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
 	defer mkRuntimeFromEngine(eng, t).Nuke()
 	defer mkRuntimeFromEngine(eng, t).Nuke()
@@ -877,7 +918,7 @@ func TestPostContainersAttach(t *testing.T) {
 	})
 	})
 
 
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -956,7 +997,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
 	})
 	})
 
 
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})

+ 96 - 27
integration/buildfile_test.go

@@ -5,6 +5,7 @@ import (
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/archive"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/utils"
 	"io/ioutil"
 	"io/ioutil"
 	"net"
 	"net"
 	"net/http"
 	"net/http"
@@ -226,11 +227,14 @@ func mkTestingFileServer(files [][2]string) (*httptest.Server, error) {
 
 
 func TestBuild(t *testing.T) {
 func TestBuild(t *testing.T) {
 	for _, ctx := range testContexts {
 	for _, ctx := range testContexts {
-		buildImage(ctx, t, nil, true)
+		_, err := buildImage(ctx, t, nil, true)
+		if err != nil {
+			t.Fatal(err)
+		}
 	}
 	}
 }
 }
 
 
-func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image {
+func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*docker.Image, error) {
 	if eng == nil {
 	if eng == nil {
 		eng = NewTestEngine(t)
 		eng = NewTestEngine(t)
 		runtime := mkRuntimeFromEngine(eng, t)
 		runtime := mkRuntimeFromEngine(eng, t)
@@ -262,25 +266,24 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u
 	}
 	}
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)
 
 
-	buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false)
+	buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false))
 	id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
 	id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t))
 	if err != nil {
 	if err != nil {
-		t.Fatal(err)
+		return nil, err
 	}
 	}
 
 
-	img, err := srv.ImageInspect(id)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return img
+	return srv.ImageInspect(id)
 }
 }
 
 
 func TestVolume(t *testing.T) {
 func TestVolume(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         volume /test
         volume /test
         cmd Hello world
         cmd Hello world
     `, nil, nil}, t, nil, true)
     `, nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if len(img.Config.Volumes) == 0 {
 	if len(img.Config.Volumes) == 0 {
 		t.Fail()
 		t.Fail()
@@ -293,10 +296,13 @@ func TestVolume(t *testing.T) {
 }
 }
 
 
 func TestBuildMaintainer(t *testing.T) {
 func TestBuildMaintainer(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         maintainer dockerio
         maintainer dockerio
     `, nil, nil}, t, nil, true)
     `, nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if img.Author != "dockerio" {
 	if img.Author != "dockerio" {
 		t.Fail()
 		t.Fail()
@@ -304,10 +310,13 @@ func TestBuildMaintainer(t *testing.T) {
 }
 }
 
 
 func TestBuildUser(t *testing.T) {
 func TestBuildUser(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         user dockerio
         user dockerio
     `, nil, nil}, t, nil, true)
     `, nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if img.Config.User != "dockerio" {
 	if img.Config.User != "dockerio" {
 		t.Fail()
 		t.Fail()
@@ -315,11 +324,15 @@ func TestBuildUser(t *testing.T) {
 }
 }
 
 
 func TestBuildEnv(t *testing.T) {
 func TestBuildEnv(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         env port 4243
         env port 4243
         `,
         `,
 		nil, nil}, t, nil, true)
 		nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	hasEnv := false
 	hasEnv := false
 	for _, envVar := range img.Config.Env {
 	for _, envVar := range img.Config.Env {
 		if envVar == "port=4243" {
 		if envVar == "port=4243" {
@@ -333,11 +346,14 @@ func TestBuildEnv(t *testing.T) {
 }
 }
 
 
 func TestBuildCmd(t *testing.T) {
 func TestBuildCmd(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         cmd ["/bin/echo", "Hello World"]
         cmd ["/bin/echo", "Hello World"]
         `,
         `,
 		nil, nil}, t, nil, true)
 		nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if img.Config.Cmd[0] != "/bin/echo" {
 	if img.Config.Cmd[0] != "/bin/echo" {
 		t.Log(img.Config.Cmd[0])
 		t.Log(img.Config.Cmd[0])
@@ -350,11 +366,14 @@ func TestBuildCmd(t *testing.T) {
 }
 }
 
 
 func TestBuildExpose(t *testing.T) {
 func TestBuildExpose(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         expose 4243
         expose 4243
         `,
         `,
 		nil, nil}, t, nil, true)
 		nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if img.Config.PortSpecs[0] != "4243" {
 	if img.Config.PortSpecs[0] != "4243" {
 		t.Fail()
 		t.Fail()
@@ -362,11 +381,14 @@ func TestBuildExpose(t *testing.T) {
 }
 }
 
 
 func TestBuildEntrypoint(t *testing.T) {
 func TestBuildEntrypoint(t *testing.T) {
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         entrypoint ["/bin/echo"]
         entrypoint ["/bin/echo"]
         `,
         `,
 		nil, nil}, t, nil, true)
 		nil, nil}, t, nil, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if img.Config.Entrypoint[0] != "/bin/echo" {
 	if img.Config.Entrypoint[0] != "/bin/echo" {
 	}
 	}
@@ -378,19 +400,25 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
 	defer nuke(mkRuntimeFromEngine(eng, t))
 	defer nuke(mkRuntimeFromEngine(eng, t))
 
 
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         run echo "hello"
         run echo "hello"
         `,
         `,
 		nil, nil}, t, eng, true)
 		nil, nil}, t, eng, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
-	img = buildImage(testContextTemplate{`
+	img, err = buildImage(testContextTemplate{`
         from {IMAGE}
         from {IMAGE}
         run echo "hello"
         run echo "hello"
         add foo /foo
         add foo /foo
         entrypoint ["/bin/echo"]
         entrypoint ["/bin/echo"]
         `,
         `,
 		[][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
 		[][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if len(img.Config.Cmd) != 0 {
 	if len(img.Config.Cmd) != 0 {
 		t.Fail()
 		t.Fail()
@@ -407,11 +435,18 @@ func TestBuildImageWithCache(t *testing.T) {
         `,
         `,
 		nil, nil}
 		nil, nil}
 
 
-	img := buildImage(template, t, eng, true)
+	img, err := buildImage(template, t, eng, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	imageId := img.ID
 	imageId := img.ID
 
 
 	img = nil
 	img = nil
-	img = buildImage(template, t, eng, true)
+	img, err = buildImage(template, t, eng, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if imageId != img.ID {
 	if imageId != img.ID {
 		t.Logf("Image ids should match: %s != %s", imageId, img.ID)
 		t.Logf("Image ids should match: %s != %s", imageId, img.ID)
@@ -429,11 +464,17 @@ func TestBuildImageWithoutCache(t *testing.T) {
         `,
         `,
 		nil, nil}
 		nil, nil}
 
 
-	img := buildImage(template, t, eng, true)
+	img, err := buildImage(template, t, eng, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 	imageId := img.ID
 	imageId := img.ID
 
 
 	img = nil
 	img = nil
-	img = buildImage(template, t, eng, false)
+	img, err = buildImage(template, t, eng, false)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
 	if imageId == img.ID {
 	if imageId == img.ID {
 		t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
 		t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
@@ -475,7 +516,7 @@ func TestForbiddenContextPath(t *testing.T) {
 	}
 	}
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)
 
 
-	buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false)
+	buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false))
 	_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
 	_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
 
 
 	if err == nil {
 	if err == nil {
@@ -483,7 +524,7 @@ func TestForbiddenContextPath(t *testing.T) {
 		t.Fail()
 		t.Fail()
 	}
 	}
 
 
-	if err.Error() != "Forbidden path: /" {
+	if err.Error() != "Forbidden path outside the build context: ../../ (/)" {
 		t.Logf("Error message is not expected: %s", err.Error())
 		t.Logf("Error message is not expected: %s", err.Error())
 		t.Fail()
 		t.Fail()
 	}
 	}
@@ -521,7 +562,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
 	}
 	}
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)
 	dockerfile := constructDockerfile(context.dockerfile, ip, port)
 
 
-	buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false)
+	buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false))
 	_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
 	_, err = buildfile.Build(mkTestContext(dockerfile, context.files, t))
 
 
 	if err == nil {
 	if err == nil {
@@ -539,18 +580,26 @@ func TestBuildInheritance(t *testing.T) {
 	eng := NewTestEngine(t)
 	eng := NewTestEngine(t)
 	defer nuke(mkRuntimeFromEngine(eng, t))
 	defer nuke(mkRuntimeFromEngine(eng, t))
 
 
-	img := buildImage(testContextTemplate{`
+	img, err := buildImage(testContextTemplate{`
             from {IMAGE}
             from {IMAGE}
             expose 4243
             expose 4243
             `,
             `,
 		nil, nil}, t, eng, true)
 		nil, nil}, t, eng, true)
 
 
-	img2 := buildImage(testContextTemplate{fmt.Sprintf(`
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	img2, _ := buildImage(testContextTemplate{fmt.Sprintf(`
             from %s
             from %s
             entrypoint ["/bin/echo"]
             entrypoint ["/bin/echo"]
             `, img.ID),
             `, img.ID),
 		nil, nil}, t, eng, true)
 		nil, nil}, t, eng, true)
 
 
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	// from child
 	// from child
 	if img2.Config.Entrypoint[0] != "/bin/echo" {
 	if img2.Config.Entrypoint[0] != "/bin/echo" {
 		t.Fail()
 		t.Fail()
@@ -561,3 +610,23 @@ func TestBuildInheritance(t *testing.T) {
 		t.Fail()
 		t.Fail()
 	}
 	}
 }
 }
+
+func TestBuildFails(t *testing.T) {
+	_, err := buildImage(testContextTemplate{`
+        from {IMAGE}
+        run sh -c "exit 23"
+        `,
+		nil, nil}, t, nil, true)
+
+	if err == nil {
+		t.Fatal("Error should not be nil")
+	}
+
+	sterr, ok := err.(*utils.JSONError)
+	if !ok {
+		t.Fatalf("Error should be utils.JSONError")
+	}
+	if sterr.Code != 23 {
+		t.Fatalf("StatusCode %d unexpected, should be 23", sterr.Code)
+	}
+}

+ 96 - 67
integration/commands_test.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"fmt"
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/engine"
+	"github.com/dotcloud/docker/term"
 	"github.com/dotcloud/docker/utils"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
@@ -31,6 +32,47 @@ func closeWrap(args ...io.Closer) error {
 	return nil
 	return nil
 }
 }
 
 
+func setRaw(t *testing.T, c *docker.Container) *term.State {
+	pty, err := c.GetPtyMaster()
+	if err != nil {
+		t.Fatal(err)
+	}
+	state, err := term.MakeRaw(pty.Fd())
+	if err != nil {
+		t.Fatal(err)
+	}
+	return state
+}
+
+func unsetRaw(t *testing.T, c *docker.Container, state *term.State) {
+	pty, err := c.GetPtyMaster()
+	if err != nil {
+		t.Fatal(err)
+	}
+	term.RestoreTerminal(pty.Fd(), state)
+}
+
+func waitContainerStart(t *testing.T, timeout time.Duration) *docker.Container {
+	var container *docker.Container
+
+	setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
+		for {
+			l := globalRuntime.List()
+			if len(l) == 1 && l[0].State.IsRunning() {
+				container = l[0]
+				break
+			}
+			time.Sleep(10 * time.Millisecond)
+		}
+	})
+
+	if container == nil {
+		t.Fatal("An error occurred while waiting for the container to start")
+	}
+
+	return container
+}
+
 func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
 func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
 	c := make(chan bool)
 	c := make(chan bool)
 
 
@@ -213,7 +255,7 @@ func TestRunExit(t *testing.T) {
 	}()
 	}()
 
 
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -268,7 +310,7 @@ func TestRunDisconnect(t *testing.T) {
 	}()
 	}()
 
 
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -295,7 +337,8 @@ func TestRunDisconnect(t *testing.T) {
 	})
 	})
 }
 }
 
 
-// Expected behaviour: the process dies when the client disconnects
+// Expected behaviour: the process stay alive when the client disconnects
+// but the client detaches.
 func TestRunDisconnectTty(t *testing.T) {
 func TestRunDisconnectTty(t *testing.T) {
 
 
 	stdin, stdinPipe := io.Pipe()
 	stdin, stdinPipe := io.Pipe()
@@ -306,31 +349,22 @@ func TestRunDisconnectTty(t *testing.T) {
 
 
 	c1 := make(chan struct{})
 	c1 := make(chan struct{})
 	go func() {
 	go func() {
+		defer close(c1)
 		// We're simulating a disconnect so the return value doesn't matter. What matters is the
 		// We're simulating a disconnect so the return value doesn't matter. What matters is the
 		// fact that CmdRun returns.
 		// fact that CmdRun returns.
 		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
 		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
 			utils.Debugf("Error CmdRun: %s", err)
 			utils.Debugf("Error CmdRun: %s", err)
 		}
 		}
-
-		close(c1)
 	}()
 	}()
 
 
-	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
-		for {
-			// Client disconnect after run -i should keep stdin out in TTY mode
-			l := globalRuntime.List()
-			if len(l) == 1 && l[0].State.IsRunning() {
-				break
-			}
-			time.Sleep(10 * time.Millisecond)
-		}
-	})
+	container := waitContainerStart(t, 10*time.Second)
 
 
-	// Client disconnect after run -i should keep stdin out in TTY mode
-	container := globalRuntime.List()[0]
+	state := setRaw(t, container)
+	defer unsetRaw(t, container, state)
 
 
+	// Client disconnect after run -i should keep stdin out in TTY mode
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -340,8 +374,12 @@ func TestRunDisconnectTty(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
+	// wait for CmdRun to return
+	setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() {
+		<-c1
+	})
+
 	// In tty mode, we expect the process to stay alive even after client's stdin closes.
 	// In tty mode, we expect the process to stay alive even after client's stdin closes.
-	// Do not wait for run to finish
 
 
 	// Give some time to monitor to do his thing
 	// Give some time to monitor to do his thing
 	container.WaitTimeout(500 * time.Millisecond)
 	container.WaitTimeout(500 * time.Millisecond)
@@ -431,27 +469,28 @@ func TestRunDetach(t *testing.T) {
 		cli.CmdRun("-i", "-t", unitTestImageID, "cat")
 		cli.CmdRun("-i", "-t", unitTestImageID, "cat")
 	}()
 	}()
 
 
+	container := waitContainerStart(t, 10*time.Second)
+
+	state := setRaw(t, container)
+	defer unsetRaw(t, container, state)
+
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
-
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		stdinPipe.Write([]byte{16, 17})
-		if err := stdinPipe.Close(); err != nil {
-			t.Fatal(err)
-		}
+		stdinPipe.Write([]byte{16})
+		time.Sleep(100 * time.Millisecond)
+		stdinPipe.Write([]byte{17})
 	})
 	})
 
 
-	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
-
 	// wait for CmdRun to return
 	// wait for CmdRun to return
 	setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() {
 	setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() {
 		<-ch
 		<-ch
 	})
 	})
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 
 	time.Sleep(500 * time.Millisecond)
 	time.Sleep(500 * time.Millisecond)
 	if !container.State.IsRunning() {
 	if !container.State.IsRunning() {
@@ -479,7 +518,7 @@ func TestAttachDetach(t *testing.T) {
 		}
 		}
 	}()
 	}()
 
 
-	var container *docker.Container
+	container := waitContainerStart(t, 10*time.Second)
 
 
 	setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
 	setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
 		buf := make([]byte, 1024)
 		buf := make([]byte, 1024)
@@ -488,8 +527,6 @@ func TestAttachDetach(t *testing.T) {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 
 
-		container = globalRuntime.List()[0]
-
 		if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
 		if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
 			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
 			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
 		}
 		}
@@ -498,6 +535,9 @@ func TestAttachDetach(t *testing.T) {
 		<-ch
 		<-ch
 	})
 	})
 
 
+	state := setRaw(t, container)
+	defer unsetRaw(t, container, state)
+
 	stdin, stdinPipe = io.Pipe()
 	stdin, stdinPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 	cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
@@ -513,7 +553,7 @@ func TestAttachDetach(t *testing.T) {
 	}()
 	}()
 
 
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			if err != io.ErrClosedPipe {
 			if err != io.ErrClosedPipe {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}
@@ -521,18 +561,18 @@ func TestAttachDetach(t *testing.T) {
 	})
 	})
 
 
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		stdinPipe.Write([]byte{16, 17})
-		if err := stdinPipe.Close(); err != nil {
-			t.Fatal(err)
-		}
+		stdinPipe.Write([]byte{16})
+		time.Sleep(100 * time.Millisecond)
+		stdinPipe.Write([]byte{17})
 	})
 	})
-	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 
 	// wait for CmdRun to return
 	// wait for CmdRun to return
 	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
 	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
 		<-ch
 		<-ch
 	})
 	})
 
 
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
+
 	time.Sleep(500 * time.Millisecond)
 	time.Sleep(500 * time.Millisecond)
 	if !container.State.IsRunning() {
 	if !container.State.IsRunning() {
 		t.Fatal("The detached container should be still running")
 		t.Fatal("The detached container should be still running")
@@ -551,6 +591,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
 	cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	defer cleanup(globalEngine, t)
 	defer cleanup(globalEngine, t)
 
 
+	// Discard the CmdRun output
 	go stdout.Read(make([]byte, 1024))
 	go stdout.Read(make([]byte, 1024))
 	setTimeout(t, "Starting container timed out", 2*time.Second, func() {
 	setTimeout(t, "Starting container timed out", 2*time.Second, func() {
 		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
 		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
@@ -558,7 +599,10 @@ func TestAttachDetachTruncatedID(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	container := globalRuntime.List()[0]
+	container := waitContainerStart(t, 10*time.Second)
+
+	state := setRaw(t, container)
+	defer unsetRaw(t, container, state)
 
 
 	stdin, stdinPipe = io.Pipe()
 	stdin, stdinPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
 	stdout, stdoutPipe = io.Pipe()
@@ -575,7 +619,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
 	}()
 	}()
 
 
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			if err != io.ErrClosedPipe {
 			if err != io.ErrClosedPipe {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}
@@ -583,17 +627,16 @@ func TestAttachDetachTruncatedID(t *testing.T) {
 	})
 	})
 
 
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
 	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		stdinPipe.Write([]byte{16, 17})
-		if err := stdinPipe.Close(); err != nil {
-			t.Fatal(err)
-		}
+		stdinPipe.Write([]byte{16})
+		time.Sleep(100 * time.Millisecond)
+		stdinPipe.Write([]byte{17})
 	})
 	})
-	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 
 	// wait for CmdRun to return
 	// wait for CmdRun to return
 	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
 	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
 		<-ch
 		<-ch
 	})
 	})
+	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 
 	time.Sleep(500 * time.Millisecond)
 	time.Sleep(500 * time.Millisecond)
 	if !container.State.IsRunning() {
 	if !container.State.IsRunning() {
@@ -648,7 +691,7 @@ func TestAttachDisconnect(t *testing.T) {
 	}()
 	}()
 
 
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -714,6 +757,7 @@ func TestRunAutoRemove(t *testing.T) {
 }
 }
 
 
 func TestCmdLogs(t *testing.T) {
 func TestCmdLogs(t *testing.T) {
+	t.Skip("Test not implemented")
 	cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	defer cleanup(globalEngine, t)
 	defer cleanup(globalEngine, t)
 
 
@@ -729,25 +773,6 @@ func TestCmdLogs(t *testing.T) {
 	}
 	}
 }
 }
 
 
-// Expected behaviour: using / as a bind mount source should throw an error
-func TestRunErrorBindMountRootSource(t *testing.T) {
-
-	cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
-	defer cleanup(globalEngine, t)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdRun("-v", "/:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
-			t.Fatal("should have failed to run when using / as a source for the bind mount")
-		}
-	}()
-
-	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
-		<-c
-	})
-}
-
 // Expected behaviour: error out when attempting to bind mount non-existing source paths
 // Expected behaviour: error out when attempting to bind mount non-existing source paths
 func TestRunErrorBindNonExistingSource(t *testing.T) {
 func TestRunErrorBindNonExistingSource(t *testing.T) {
 
 
@@ -757,6 +782,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
 	c := make(chan struct{})
 	c := make(chan struct{})
 	go func() {
 	go func() {
 		defer close(c)
 		defer close(c)
+		// This check is made at runtime, can't be "unit tested"
 		if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
 		if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
 			t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount")
 			t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount")
 		}
 		}
@@ -845,7 +871,7 @@ func TestImagesTree(t *testing.T) {
 			"(?m)   └─[0-9a-f]+.*",
 			"(?m)   └─[0-9a-f]+.*",
 			"(?m)    └─[0-9a-f]+.*",
 			"(?m)    └─[0-9a-f]+.*",
 			"(?m)      └─[0-9a-f]+.*",
 			"(?m)      └─[0-9a-f]+.*",
-			fmt.Sprintf("(?m)^        └─%s Size: \\d+.\\d+ MB \\(virtual \\d+.\\d+ MB\\) Tags: test:latest", utils.TruncateID(image.ID)),
+			fmt.Sprintf("(?m)^        └─%s Size: \\d+ B \\(virtual \\d+.\\d+ MB\\) Tags: test:latest", utils.TruncateID(image.ID)),
 		}
 		}
 
 
 		compiledRegexps := []*regexp.Regexp{}
 		compiledRegexps := []*regexp.Regexp{}
@@ -879,9 +905,12 @@ run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
 		nil,
 		nil,
 		nil,
 		nil,
 	}
 	}
-	image := buildImage(testBuilder, t, eng, true)
+	image, err := buildImage(testBuilder, t, eng, true)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 
-	err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
+	err = mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}

+ 40 - 3
integration/container_test.go

@@ -330,6 +330,36 @@ func TestCommitRun(t *testing.T) {
 }
 }
 
 
 func TestStart(t *testing.T) {
 func TestStart(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+	container, _, _ := mkContainer(runtime, []string{"-i", "_", "/bin/cat"}, t)
+	defer runtime.Destroy(container)
+
+	cStdin, err := container.StdinPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := container.Start(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Give some time to the process to start
+	container.WaitTimeout(500 * time.Millisecond)
+
+	if !container.State.IsRunning() {
+		t.Errorf("Container should be running")
+	}
+	if err := container.Start(); err == nil {
+		t.Fatalf("A running container should not be able to be started")
+	}
+
+	// Try to avoid the timeout in destroy. Best effort, don't check error
+	cStdin.Close()
+	container.WaitTimeout(2 * time.Second)
+}
+
+func TestCpuShares(t *testing.T) {
 	_, err1 := os.Stat("/sys/fs/cgroup/cpuacct,cpu")
 	_, err1 := os.Stat("/sys/fs/cgroup/cpuacct,cpu")
 	_, err2 := os.Stat("/sys/fs/cgroup/cpu,cpuacct")
 	_, err2 := os.Stat("/sys/fs/cgroup/cpu,cpuacct")
 	if err1 == nil || err2 == nil {
 	if err1 == nil || err2 == nil {
@@ -462,7 +492,7 @@ func TestKillDifferentUser(t *testing.T) {
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
 	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
 		out, _ := container.StdoutPipe()
 		out, _ := container.StdoutPipe()
 		in, _ := container.StdinPipe()
 		in, _ := container.StdinPipe()
-		if err := assertPipe("hello\n", "hello", out, in, 15); err != nil {
+		if err := assertPipe("hello\n", "hello", out, in, 150); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
 	})
 	})
@@ -499,7 +529,7 @@ func TestCreateVolume(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	var id string
 	var id string
-	jobCreate.StdoutParseString(&id)
+	jobCreate.Stdout.AddString(&id)
 	if err := jobCreate.Run(); err != nil {
 	if err := jobCreate.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -1257,6 +1287,13 @@ func TestBindMounts(t *testing.T) {
 	if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil {
 	if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil {
 		t.Fatal("Container bind mounted illegal directory")
 		t.Fatal("Container bind mounted illegal directory")
 	}
 	}
+
+	// test mount a file
+	runContainer(eng, r, []string{"-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "_", "sh", "-c", "echo -n 'yotta' > /tmp/holla"}, t)
+	content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
+	if content != "yotta" {
+		t.Fatal("Container failed to write to bind mount file")
+	}
 }
 }
 
 
 // Test that -volumes-from supports both read-only mounts
 // Test that -volumes-from supports both read-only mounts
@@ -1502,7 +1539,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	var id string
 	var id string
-	jobCreate.StdoutParseString(&id)
+	jobCreate.Stdout.AddString(&id)
 	if err := jobCreate.Run(); err != nil {
 	if err := jobCreate.Run(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}

+ 13 - 13
integration/graph_test.go

@@ -287,19 +287,19 @@ func assertNImages(graph *docker.Graph, t *testing.T, n int) {
 }
 }
 
 
 func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) {
 func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) {
-        tmp, err := ioutil.TempDir("", "docker-graph-")
-        if err != nil {
-                t.Fatal(err)
-        }
-        driver, err := graphdriver.New(tmp)
-        if err != nil {
-                t.Fatal(err)
-        }
-        graph, err := docker.NewGraph(tmp, driver)
-        if err != nil {
-                t.Fatal(err)
-        }
-        return graph, driver
+	tmp, err := ioutil.TempDir("", "docker-graph-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	driver, err := graphdriver.New(tmp)
+	if err != nil {
+		t.Fatal(err)
+	}
+	graph, err := docker.NewGraph(tmp, driver)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return graph, driver
 }
 }
 
 
 func nukeGraph(graph *docker.Graph) {
 func nukeGraph(graph *docker.Graph) {

+ 43 - 3
integration/runtime_test.go

@@ -127,7 +127,7 @@ func setupBaseImage() {
 	job.SetenvBool("Autorestart", false)
 	job.SetenvBool("Autorestart", false)
 	job.Setenv("BridgeIface", unitTestNetworkBridge)
 	job.Setenv("BridgeIface", unitTestNetworkBridge)
 	if err := job.Run(); err != nil {
 	if err := job.Run(); err != nil {
-		log.Fatalf("Unable to create a runtime for tests:", err)
+		log.Fatalf("Unable to create a runtime for tests: %s", err)
 	}
 	}
 	srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
 	srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0))
 
 
@@ -173,7 +173,7 @@ func spawnGlobalDaemon() {
 func GetTestImage(runtime *docker.Runtime) *docker.Image {
 func GetTestImage(runtime *docker.Runtime) *docker.Image {
 	imgs, err := runtime.Graph().Map()
 	imgs, err := runtime.Graph().Map()
 	if err != nil {
 	if err != nil {
-		log.Fatalf("Unable to get the test image:", err)
+		log.Fatalf("Unable to get the test image: %s", err)
 	}
 	}
 	for _, image := range imgs {
 	for _, image := range imgs {
 		if image.ID == unitTestImageID {
 		if image.ID == unitTestImageID {
@@ -390,7 +390,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *doc
 		jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
 		jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
 		jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
 		jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
 		jobCreate.SetenvJson("ExposedPorts", ep)
 		jobCreate.SetenvJson("ExposedPorts", ep)
-		jobCreate.StdoutParseString(&id)
+		jobCreate.Stdout.AddString(&id)
 		if err := jobCreate.Run(); err != nil {
 		if err := jobCreate.Run(); err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		}
 		}
@@ -843,3 +843,43 @@ func TestGetAllChildren(t *testing.T) {
 		}
 		}
 	}
 	}
 }
 }
+
+func TestDestroyWithInitLayer(t *testing.T) {
+	runtime := mkRuntime(t)
+	defer nuke(runtime)
+
+	container, _, err := runtime.Create(&docker.Config{
+		Image: GetTestImage(runtime).ID,
+		Cmd:   []string{"ls", "-al"},
+	}, "")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Destroy
+	if err := runtime.Destroy(container); err != nil {
+		t.Fatal(err)
+	}
+
+	// Make sure runtime.Exists() behaves correctly
+	if runtime.Exists("test_destroy") {
+		t.Fatalf("Exists() returned true")
+	}
+
+	// Make sure runtime.List() doesn't list the destroyed container
+	if len(runtime.List()) != 0 {
+		t.Fatalf("Expected 0 container, %v found", len(runtime.List()))
+	}
+
+	driver := runtime.Graph().Driver()
+
+	// Make sure that the container does not exist in the driver
+	if _, err := driver.Get(container.ID); err == nil {
+		t.Fatal("Container should not exist in the driver")
+	}
+
+	// Make sure that the init layer is removed from the driver
+	if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID)); err == nil {
+		t.Fatal("Container's init layer should not exist in the driver")
+	}
+}

+ 1 - 1
integration/server_test.go

@@ -224,7 +224,7 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) {
 	job.Setenv("CpuShares", "1000")
 	job.Setenv("CpuShares", "1000")
 	job.SetenvList("Cmd", []string{"/bin/cat"})
 	job.SetenvList("Cmd", []string{"/bin/cat"})
 	var id string
 	var id string
-	job.StdoutParseString(&id)
+	job.Stdout.AddString(&id)
 	if err := job.Run(); err == nil {
 	if err := job.Run(); err == nil {
 		t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
 		t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
 	}
 	}

+ 1 - 1
integration/utils_test.go

@@ -46,7 +46,7 @@ func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils
 	if err := job.ImportEnv(config); err != nil {
 	if err := job.ImportEnv(config); err != nil {
 		f.Fatal(err)
 		f.Fatal(err)
 	}
 	}
-	job.StdoutParseString(&shortId)
+	job.Stdout.AddString(&shortId)
 	if err := job.Run(); err != nil {
 	if err := job.Run(); err != nil {
 		f.Fatal(err)
 		f.Fatal(err)
 	}
 	}

+ 23 - 15
lxc_template.go

@@ -1,6 +1,7 @@
 package docker
 package docker
 
 
 import (
 import (
+	"strings"
 	"text/template"
 	"text/template"
 )
 )
 
 
@@ -31,8 +32,8 @@ lxc.rootfs = {{$ROOTFS}}
 
 
 {{if and .HostnamePath .HostsPath}}
 {{if and .HostnamePath .HostsPath}}
 # enable domain name support
 # enable domain name support
-lxc.mount.entry = {{.HostnamePath}} {{$ROOTFS}}/etc/hostname none bind,ro 0 0
-lxc.mount.entry = {{.HostsPath}} {{$ROOTFS}}/etc/hosts none bind,ro 0 0
+lxc.mount.entry = {{escapeFstabSpaces .HostnamePath}} {{escapeFstabSpaces $ROOTFS}}/etc/hostname none bind,ro 0 0
+lxc.mount.entry = {{escapeFstabSpaces .HostsPath}} {{escapeFstabSpaces $ROOTFS}}/etc/hosts none bind,ro 0 0
 {{end}}
 {{end}}
 
 
 # use a dedicated pts for the container (and limit the number of pseudo terminal
 # use a dedicated pts for the container (and limit the number of pseudo terminal
@@ -84,27 +85,27 @@ lxc.cgroup.devices.allow = c 10:200 rwm
 lxc.pivotdir = lxc_putold
 lxc.pivotdir = lxc_putold
 #  WARNING: procfs is a known attack vector and should probably be disabled
 #  WARNING: procfs is a known attack vector and should probably be disabled
 #           if your userspace allows it. eg. see http://blog.zx2c4.com/749
 #           if your userspace allows it. eg. see http://blog.zx2c4.com/749
-lxc.mount.entry = proc {{$ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
+lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
 #  WARNING: sysfs is a known attack vector and should probably be disabled
 #  WARNING: sysfs is a known attack vector and should probably be disabled
 #           if your userspace allows it. eg. see http://bit.ly/T9CkqJ
 #           if your userspace allows it. eg. see http://bit.ly/T9CkqJ
-lxc.mount.entry = sysfs {{$ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
-lxc.mount.entry = devpts {{$ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
-#lxc.mount.entry = varrun {{$ROOTFS}}/var/run tmpfs mode=755,size=4096k,nosuid,nodev,noexec 0 0
-#lxc.mount.entry = varlock {{$ROOTFS}}/var/lock tmpfs size=1024k,nosuid,nodev,noexec 0 0
-lxc.mount.entry = shm {{$ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0
+lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
+lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
+#lxc.mount.entry = varrun {{escapeFstabSpaces $ROOTFS}}/var/run tmpfs mode=755,size=4096k,nosuid,nodev,noexec 0 0
+#lxc.mount.entry = varlock {{escapeFstabSpaces $ROOTFS}}/var/lock tmpfs size=1024k,nosuid,nodev,noexec 0 0
+lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0
 
 
 # Inject dockerinit
 # Inject dockerinit
-lxc.mount.entry = {{.SysInitPath}} {{$ROOTFS}}/.dockerinit none bind,ro 0 0
+lxc.mount.entry = {{escapeFstabSpaces .SysInitPath}} {{escapeFstabSpaces $ROOTFS}}/.dockerinit none bind,ro 0 0
 
 
 # Inject env
 # Inject env
-lxc.mount.entry = {{.EnvConfigPath}} {{$ROOTFS}}/.dockerenv none bind,ro 0 0
+lxc.mount.entry = {{escapeFstabSpaces .EnvConfigPath}} {{escapeFstabSpaces $ROOTFS}}/.dockerenv none bind,ro 0 0
 
 
 # In order to get a working DNS environment, mount bind (ro) the host's /etc/resolv.conf into the container
 # In order to get a working DNS environment, mount bind (ro) the host's /etc/resolv.conf into the container
-lxc.mount.entry = {{.ResolvConfPath}} {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0
+lxc.mount.entry = {{escapeFstabSpaces .ResolvConfPath}} {{escapeFstabSpaces $ROOTFS}}/etc/resolv.conf none bind,ro 0 0
 {{if .Volumes}}
 {{if .Volumes}}
 {{ $rw := .VolumesRW }}
 {{ $rw := .VolumesRW }}
 {{range $virtualPath, $realPath := .Volumes}}
 {{range $virtualPath, $realPath := .Volumes}}
-lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,{{ if index $rw $virtualPath }}rw{{else}}ro{{end}} 0 0
+lxc.mount.entry = {{escapeFstabSpaces $realPath}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $virtualPath}} none bind,{{ if index $rw $virtualPath }}rw{{else}}ro{{end}} 0 0
 {{end}}
 {{end}}
 {{end}}
 {{end}}
 
 
@@ -144,6 +145,12 @@ lxc.cgroup.cpu.shares = {{.Config.CpuShares}}
 
 
 var LxcTemplateCompiled *template.Template
 var LxcTemplateCompiled *template.Template
 
 
+// Escape spaces in strings according to the fstab documentation, which is the
+// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab".
+func escapeFstabSpaces(field string) string {
+	return strings.Replace(field, " ", "\\040", -1)
+}
+
 func getMemorySwap(config *Config) int64 {
 func getMemorySwap(config *Config) int64 {
 	// By default, MemorySwap is set to twice the size of RAM.
 	// By default, MemorySwap is set to twice the size of RAM.
 	// If you want to omit MemorySwap, set it to `-1'.
 	// If you want to omit MemorySwap, set it to `-1'.
@@ -164,9 +171,10 @@ func getCapabilities(container *Container) *Capabilities {
 func init() {
 func init() {
 	var err error
 	var err error
 	funcMap := template.FuncMap{
 	funcMap := template.FuncMap{
-		"getMemorySwap":   getMemorySwap,
-		"getHostConfig":   getHostConfig,
-		"getCapabilities": getCapabilities,
+		"getMemorySwap":     getMemorySwap,
+		"getHostConfig":     getHostConfig,
+		"getCapabilities":   getCapabilities,
+		"escapeFstabSpaces": escapeFstabSpaces,
 	}
 	}
 	LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
 	LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
 	if err != nil {
 	if err != nil {

+ 18 - 0
lxc_template_unit_test.go

@@ -100,3 +100,21 @@ func grepFile(t *testing.T, path string, pattern string) {
 	}
 	}
 	t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
 	t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path)
 }
 }
+
+func TestEscapeFstabSpaces(t *testing.T) {
+	var testInputs = map[string]string{
+		" ":                      "\\040",
+		"":                       "",
+		"/double  space":         "/double\\040\\040space",
+		"/some long test string": "/some\\040long\\040test\\040string",
+		"/var/lib/docker":        "/var/lib/docker",
+		" leading":               "\\040leading",
+		"trailing ":              "trailing\\040",
+	}
+	for in, exp := range testInputs {
+		if out := escapeFstabSpaces(in); exp != out {
+			t.Logf("Expected %s got %s", exp, out)
+			t.Fail()
+		}
+	}
+}

+ 115 - 68
network.go

@@ -12,6 +12,8 @@ import (
 	"net"
 	"net"
 	"strconv"
 	"strconv"
 	"sync"
 	"sync"
+	"syscall"
+	"unsafe"
 )
 )
 
 
 const (
 const (
@@ -19,6 +21,7 @@ const (
 	DisableNetworkBridge = "none"
 	DisableNetworkBridge = "none"
 	portRangeStart       = 49153
 	portRangeStart       = 49153
 	portRangeEnd         = 65535
 	portRangeEnd         = 65535
+	siocBRADDBR          = 0x89a0
 )
 )
 
 
 // Calculates the first and last IP addresses in an IPNet
 // Calculates the first and last IP addresses in an IPNet
@@ -149,8 +152,8 @@ func CreateBridgeIface(config *DaemonConfig) error {
 	}
 	}
 	utils.Debugf("Creating bridge %s with network %s", config.BridgeIface, ifaceAddr)
 	utils.Debugf("Creating bridge %s with network %s", config.BridgeIface, ifaceAddr)
 
 
-	if err := netlink.NetworkLinkAdd(config.BridgeIface, "bridge"); err != nil {
-		return fmt.Errorf("Error creating bridge: %s", err)
+	if err := createBridgeIface(config.BridgeIface); err != nil {
+		return err
 	}
 	}
 	iface, err := net.InterfaceByName(config.BridgeIface)
 	iface, err := net.InterfaceByName(config.BridgeIface)
 	if err != nil {
 	if err != nil {
@@ -167,29 +170,25 @@ func CreateBridgeIface(config *DaemonConfig) error {
 		return fmt.Errorf("Unable to start network bridge: %s", err)
 		return fmt.Errorf("Unable to start network bridge: %s", err)
 	}
 	}
 
 
-	if config.EnableIptables {
-		// Enable NAT
-		if output, err := iptables.Raw("-t", "nat", "-A", "POSTROUTING", "-s", ifaceAddr,
-			"!", "-d", ifaceAddr, "-j", "MASQUERADE"); err != nil {
-			return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
-		} else if len(output) != 0 {
-			return fmt.Errorf("Error iptables postrouting: %s", output)
-		}
+	return nil
+}
 
 
-		// Accept incoming packets for existing connections
-		if output, err := iptables.Raw("-I", "FORWARD", "-o", config.BridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"); err != nil {
-			return fmt.Errorf("Unable to allow incoming packets: %s", err)
-		} else if len(output) != 0 {
-			return fmt.Errorf("Error iptables allow incoming: %s", output)
-		}
+// Create the actual bridge device.  This is more backward-compatible than
+// netlink.NetworkLinkAdd and works on RHEL 6.
+func createBridgeIface(name string) error {
+	s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
+	if err != nil {
+		return fmt.Errorf("Error creating bridge creation socket: %s", err)
+	}
+	defer syscall.Close(s)
 
 
-		// Accept all non-intercontainer outgoing packets
-		if output, err := iptables.Raw("-I", "FORWARD", "-i", config.BridgeIface, "!", "-o", config.BridgeIface, "-j", "ACCEPT"); err != nil {
-			return fmt.Errorf("Unable to allow outgoing packets: %s", err)
-		} else if len(output) != 0 {
-			return fmt.Errorf("Error iptables allow outgoing: %s", output)
-		}
+	nameBytePtr, err := syscall.BytePtrFromString(name)
+	if err != nil {
+		return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err)
+	}
 
 
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
+		return fmt.Errorf("Error creating bridge: %s", err)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -225,16 +224,22 @@ func getIfaceAddr(name string) (net.Addr, error) {
 // up iptables rules.
 // up iptables rules.
 // It keeps track of all mappings and is able to unmap at will
 // It keeps track of all mappings and is able to unmap at will
 type PortMapper struct {
 type PortMapper struct {
-	tcpMapping map[int]*net.TCPAddr
-	tcpProxies map[int]proxy.Proxy
-	udpMapping map[int]*net.UDPAddr
-	udpProxies map[int]proxy.Proxy
+	tcpMapping map[string]*net.TCPAddr
+	tcpProxies map[string]proxy.Proxy
+	udpMapping map[string]*net.UDPAddr
+	udpProxies map[string]proxy.Proxy
 
 
-	iptables  *iptables.Chain
-	defaultIp net.IP
+	iptables         *iptables.Chain
+	defaultIp        net.IP
+	proxyFactoryFunc func(net.Addr, net.Addr) (proxy.Proxy, error)
 }
 }
 
 
 func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
 func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
+	mapKey := (&net.TCPAddr{Port: port, IP: ip}).String()
+	if _, exists := mapper.tcpProxies[mapKey]; exists {
+		return fmt.Errorf("Port %s is already in use", mapKey)
+	}
+
 	if _, isTCP := backendAddr.(*net.TCPAddr); isTCP {
 	if _, isTCP := backendAddr.(*net.TCPAddr); isTCP {
 		backendPort := backendAddr.(*net.TCPAddr).Port
 		backendPort := backendAddr.(*net.TCPAddr).Port
 		backendIP := backendAddr.(*net.TCPAddr).IP
 		backendIP := backendAddr.(*net.TCPAddr).IP
@@ -243,13 +248,13 @@ func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
 				return err
 				return err
 			}
 			}
 		}
 		}
-		mapper.tcpMapping[port] = backendAddr.(*net.TCPAddr)
-		proxy, err := proxy.NewProxy(&net.TCPAddr{IP: ip, Port: port}, backendAddr)
+		mapper.tcpMapping[mapKey] = backendAddr.(*net.TCPAddr)
+		proxy, err := mapper.proxyFactoryFunc(&net.TCPAddr{IP: ip, Port: port}, backendAddr)
 		if err != nil {
 		if err != nil {
 			mapper.Unmap(ip, port, "tcp")
 			mapper.Unmap(ip, port, "tcp")
 			return err
 			return err
 		}
 		}
-		mapper.tcpProxies[port] = proxy
+		mapper.tcpProxies[mapKey] = proxy
 		go proxy.Run()
 		go proxy.Run()
 	} else {
 	} else {
 		backendPort := backendAddr.(*net.UDPAddr).Port
 		backendPort := backendAddr.(*net.UDPAddr).Port
@@ -259,49 +264,50 @@ func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
 				return err
 				return err
 			}
 			}
 		}
 		}
-		mapper.udpMapping[port] = backendAddr.(*net.UDPAddr)
-		proxy, err := proxy.NewProxy(&net.UDPAddr{IP: ip, Port: port}, backendAddr)
+		mapper.udpMapping[mapKey] = backendAddr.(*net.UDPAddr)
+		proxy, err := mapper.proxyFactoryFunc(&net.UDPAddr{IP: ip, Port: port}, backendAddr)
 		if err != nil {
 		if err != nil {
 			mapper.Unmap(ip, port, "udp")
 			mapper.Unmap(ip, port, "udp")
 			return err
 			return err
 		}
 		}
-		mapper.udpProxies[port] = proxy
+		mapper.udpProxies[mapKey] = proxy
 		go proxy.Run()
 		go proxy.Run()
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
 func (mapper *PortMapper) Unmap(ip net.IP, port int, proto string) error {
 func (mapper *PortMapper) Unmap(ip net.IP, port int, proto string) error {
+	mapKey := (&net.TCPAddr{Port: port, IP: ip}).String()
 	if proto == "tcp" {
 	if proto == "tcp" {
-		backendAddr, ok := mapper.tcpMapping[port]
+		backendAddr, ok := mapper.tcpMapping[mapKey]
 		if !ok {
 		if !ok {
-			return fmt.Errorf("Port tcp/%v is not mapped", port)
+			return fmt.Errorf("Port tcp/%s is not mapped", mapKey)
 		}
 		}
-		if proxy, exists := mapper.tcpProxies[port]; exists {
+		if proxy, exists := mapper.tcpProxies[mapKey]; exists {
 			proxy.Close()
 			proxy.Close()
-			delete(mapper.tcpProxies, port)
+			delete(mapper.tcpProxies, mapKey)
 		}
 		}
 		if mapper.iptables != nil {
 		if mapper.iptables != nil {
 			if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil {
 			if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil {
 				return err
 				return err
 			}
 			}
 		}
 		}
-		delete(mapper.tcpMapping, port)
+		delete(mapper.tcpMapping, mapKey)
 	} else {
 	} else {
-		backendAddr, ok := mapper.udpMapping[port]
+		backendAddr, ok := mapper.udpMapping[mapKey]
 		if !ok {
 		if !ok {
-			return fmt.Errorf("Port udp/%v is not mapped", port)
+			return fmt.Errorf("Port udp/%s is not mapped", mapKey)
 		}
 		}
-		if proxy, exists := mapper.udpProxies[port]; exists {
+		if proxy, exists := mapper.udpProxies[mapKey]; exists {
 			proxy.Close()
 			proxy.Close()
-			delete(mapper.udpProxies, port)
+			delete(mapper.udpProxies, mapKey)
 		}
 		}
 		if mapper.iptables != nil {
 		if mapper.iptables != nil {
 			if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil {
 			if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil {
 				return err
 				return err
 			}
 			}
 		}
 		}
-		delete(mapper.udpMapping, port)
+		delete(mapper.udpMapping, mapKey)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -321,12 +327,13 @@ func newPortMapper(config *DaemonConfig) (*PortMapper, error) {
 	}
 	}
 
 
 	mapper := &PortMapper{
 	mapper := &PortMapper{
-		tcpMapping: make(map[int]*net.TCPAddr),
-		tcpProxies: make(map[int]proxy.Proxy),
-		udpMapping: make(map[int]*net.UDPAddr),
-		udpProxies: make(map[int]proxy.Proxy),
-		iptables:   chain,
-		defaultIp:  config.DefaultIp,
+		tcpMapping:       make(map[string]*net.TCPAddr),
+		tcpProxies:       make(map[string]proxy.Proxy),
+		udpMapping:       make(map[string]*net.UDPAddr),
+		udpProxies:       make(map[string]proxy.Proxy),
+		iptables:         chain,
+		defaultIp:        config.DefaultIp,
+		proxyFactoryFunc: proxy.NewProxy,
 	}
 	}
 	return mapper, nil
 	return mapper, nil
 }
 }
@@ -334,7 +341,7 @@ func newPortMapper(config *DaemonConfig) (*PortMapper, error) {
 // Port allocator: Automatically allocate and release networking ports
 // Port allocator: Automatically allocate and release networking ports
 type PortAllocator struct {
 type PortAllocator struct {
 	sync.Mutex
 	sync.Mutex
-	inUse    map[int]struct{}
+	inUse    map[string]struct{}
 	fountain chan int
 	fountain chan int
 	quit     chan bool
 	quit     chan bool
 }
 }
@@ -354,20 +361,22 @@ func (alloc *PortAllocator) runFountain() {
 }
 }
 
 
 // FIXME: Release can no longer fail, change its prototype to reflect that.
 // FIXME: Release can no longer fail, change its prototype to reflect that.
-func (alloc *PortAllocator) Release(port int) error {
+func (alloc *PortAllocator) Release(addr net.IP, port int) error {
+	mapKey := (&net.TCPAddr{Port: port, IP: addr}).String()
 	utils.Debugf("Releasing %d", port)
 	utils.Debugf("Releasing %d", port)
 	alloc.Lock()
 	alloc.Lock()
-	delete(alloc.inUse, port)
+	delete(alloc.inUse, mapKey)
 	alloc.Unlock()
 	alloc.Unlock()
 	return nil
 	return nil
 }
 }
 
 
-func (alloc *PortAllocator) Acquire(port int) (int, error) {
-	utils.Debugf("Acquiring %d", port)
+func (alloc *PortAllocator) Acquire(addr net.IP, port int) (int, error) {
+	mapKey := (&net.TCPAddr{Port: port, IP: addr}).String()
+	utils.Debugf("Acquiring %s", mapKey)
 	if port == 0 {
 	if port == 0 {
 		// Allocate a port from the fountain
 		// Allocate a port from the fountain
 		for port := range alloc.fountain {
 		for port := range alloc.fountain {
-			if _, err := alloc.Acquire(port); err == nil {
+			if _, err := alloc.Acquire(addr, port); err == nil {
 				return port, nil
 				return port, nil
 			}
 			}
 		}
 		}
@@ -375,10 +384,10 @@ func (alloc *PortAllocator) Acquire(port int) (int, error) {
 	}
 	}
 	alloc.Lock()
 	alloc.Lock()
 	defer alloc.Unlock()
 	defer alloc.Unlock()
-	if _, inUse := alloc.inUse[port]; inUse {
+	if _, inUse := alloc.inUse[mapKey]; inUse {
 		return -1, fmt.Errorf("Port already in use: %d", port)
 		return -1, fmt.Errorf("Port already in use: %d", port)
 	}
 	}
-	alloc.inUse[port] = struct{}{}
+	alloc.inUse[mapKey] = struct{}{}
 	return port, nil
 	return port, nil
 }
 }
 
 
@@ -391,7 +400,7 @@ func (alloc *PortAllocator) Close() error {
 
 
 func newPortAllocator() (*PortAllocator, error) {
 func newPortAllocator() (*PortAllocator, error) {
 	allocator := &PortAllocator{
 	allocator := &PortAllocator{
-		inUse:    make(map[int]struct{}),
+		inUse:    make(map[string]struct{}),
 		fountain: make(chan int),
 		fountain: make(chan int),
 		quit:     make(chan bool),
 		quit:     make(chan bool),
 	}
 	}
@@ -546,25 +555,25 @@ func (iface *NetworkInterface) AllocatePort(port Port, binding PortBinding) (*Na
 	hostPort, _ := parsePort(nat.Binding.HostPort)
 	hostPort, _ := parsePort(nat.Binding.HostPort)
 
 
 	if nat.Port.Proto() == "tcp" {
 	if nat.Port.Proto() == "tcp" {
-		extPort, err := iface.manager.tcpPortAllocator.Acquire(hostPort)
+		extPort, err := iface.manager.tcpPortAllocator.Acquire(ip, hostPort)
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 
 
 		backend := &net.TCPAddr{IP: iface.IPNet.IP, Port: containerPort}
 		backend := &net.TCPAddr{IP: iface.IPNet.IP, Port: containerPort}
 		if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
 		if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
-			iface.manager.tcpPortAllocator.Release(extPort)
+			iface.manager.tcpPortAllocator.Release(ip, extPort)
 			return nil, err
 			return nil, err
 		}
 		}
 		nat.Binding.HostPort = strconv.Itoa(extPort)
 		nat.Binding.HostPort = strconv.Itoa(extPort)
 	} else {
 	} else {
-		extPort, err := iface.manager.udpPortAllocator.Acquire(hostPort)
+		extPort, err := iface.manager.udpPortAllocator.Acquire(ip, hostPort)
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
 		backend := &net.UDPAddr{IP: iface.IPNet.IP, Port: containerPort}
 		backend := &net.UDPAddr{IP: iface.IPNet.IP, Port: containerPort}
 		if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
 		if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
-			iface.manager.udpPortAllocator.Release(extPort)
+			iface.manager.udpPortAllocator.Release(ip, extPort)
 			return nil, err
 			return nil, err
 		}
 		}
 		nat.Binding.HostPort = strconv.Itoa(extPort)
 		nat.Binding.HostPort = strconv.Itoa(extPort)
@@ -580,7 +589,7 @@ type Nat struct {
 }
 }
 
 
 func (n *Nat) String() string {
 func (n *Nat) String() string {
-	return fmt.Sprintf("%s:%d:%d/%s", n.Binding.HostIp, n.Binding.HostPort, n.Port.Port(), n.Port.Proto())
+	return fmt.Sprintf("%s:%s:%s/%s", n.Binding.HostIp, n.Binding.HostPort, n.Port.Port(), n.Port.Proto())
 }
 }
 
 
 // Release: Network cleanup - release all resources
 // Release: Network cleanup - release all resources
@@ -596,16 +605,19 @@ func (iface *NetworkInterface) Release() {
 			continue
 			continue
 		}
 		}
 		ip := net.ParseIP(nat.Binding.HostIp)
 		ip := net.ParseIP(nat.Binding.HostIp)
-		utils.Debugf("Unmaping %s/%s", nat.Port.Proto, nat.Binding.HostPort)
+		utils.Debugf("Unmaping %s/%s:%s", nat.Port.Proto, ip.String(), nat.Binding.HostPort)
 		if err := iface.manager.portMapper.Unmap(ip, hostPort, nat.Port.Proto()); err != nil {
 		if err := iface.manager.portMapper.Unmap(ip, hostPort, nat.Port.Proto()); err != nil {
 			log.Printf("Unable to unmap port %s: %s", nat, err)
 			log.Printf("Unable to unmap port %s: %s", nat, err)
 		}
 		}
+
 		if nat.Port.Proto() == "tcp" {
 		if nat.Port.Proto() == "tcp" {
-			if err := iface.manager.tcpPortAllocator.Release(hostPort); err != nil {
+			if err := iface.manager.tcpPortAllocator.Release(ip, hostPort); err != nil {
 				log.Printf("Unable to release port %s", nat)
 				log.Printf("Unable to release port %s", nat)
 			}
 			}
-		} else if err := iface.manager.udpPortAllocator.Release(hostPort); err != nil {
-			log.Printf("Unable to release port %s: %s", nat, err)
+		} else if nat.Port.Proto() == "udp" {
+			if err := iface.manager.tcpPortAllocator.Release(ip, hostPort); err != nil {
+				log.Printf("Unable to release port %s: %s", nat, err)
+			}
 		}
 		}
 	}
 	}
 
 
@@ -699,6 +711,40 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) {
 
 
 	// Configure iptables for link support
 	// Configure iptables for link support
 	if config.EnableIptables {
 	if config.EnableIptables {
+
+		// Enable NAT
+		natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"}
+
+		if !iptables.Exists(natArgs...) {
+			if output, err := iptables.Raw(append([]string{"-A"}, natArgs...)...); err != nil {
+				return nil, fmt.Errorf("Unable to enable network bridge NAT: %s", err)
+			} else if len(output) != 0 {
+				return nil, fmt.Errorf("Error iptables postrouting: %s", output)
+			}
+		}
+
+		// Accept incoming packets for existing connections
+		existingArgs := []string{"FORWARD", "-o", config.BridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}
+
+		if !iptables.Exists(existingArgs...) {
+			if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil {
+				return nil, fmt.Errorf("Unable to allow incoming packets: %s", err)
+			} else if len(output) != 0 {
+				return nil, fmt.Errorf("Error iptables allow incoming: %s", output)
+			}
+		}
+
+		// Accept all non-intercontainer outgoing packets
+		outgoingArgs := []string{"FORWARD", "-i", config.BridgeIface, "!", "-o", config.BridgeIface, "-j", "ACCEPT"}
+
+		if !iptables.Exists(outgoingArgs...) {
+			if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil {
+				return nil, fmt.Errorf("Unable to allow outgoing packets: %s", err)
+			} else if len(output) != 0 {
+				return nil, fmt.Errorf("Error iptables allow outgoing: %s", output)
+			}
+		}
+
 		args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j"}
 		args := []string{"FORWARD", "-i", config.BridgeIface, "-o", config.BridgeIface, "-j"}
 		acceptArgs := append(args, "ACCEPT")
 		acceptArgs := append(args, "ACCEPT")
 		dropArgs := append(args, "DROP")
 		dropArgs := append(args, "DROP")
@@ -732,6 +778,7 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) {
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
+
 	udpPortAllocator, err := newPortAllocator()
 	udpPortAllocator, err := newPortAllocator()
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err

+ 80 - 7
network_test.go

@@ -1,42 +1,52 @@
 package docker
 package docker
 
 
 import (
 import (
+	"github.com/dotcloud/docker/iptables"
+	"github.com/dotcloud/docker/proxy"
 	"net"
 	"net"
 	"testing"
 	"testing"
 )
 )
 
 
 func TestPortAllocation(t *testing.T) {
 func TestPortAllocation(t *testing.T) {
+	ip := net.ParseIP("192.168.0.1")
+	ip2 := net.ParseIP("192.168.0.2")
 	allocator, err := newPortAllocator()
 	allocator, err := newPortAllocator()
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	if port, err := allocator.Acquire(80); err != nil {
+	if port, err := allocator.Acquire(ip, 80); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	} else if port != 80 {
 	} else if port != 80 {
 		t.Fatalf("Acquire(80) should return 80, not %d", port)
 		t.Fatalf("Acquire(80) should return 80, not %d", port)
 	}
 	}
-	port, err := allocator.Acquire(0)
+	port, err := allocator.Acquire(ip, 0)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if port <= 0 {
 	if port <= 0 {
 		t.Fatalf("Acquire(0) should return a non-zero port")
 		t.Fatalf("Acquire(0) should return a non-zero port")
 	}
 	}
-	if _, err := allocator.Acquire(port); err == nil {
+	if _, err := allocator.Acquire(ip, port); err == nil {
 		t.Fatalf("Acquiring a port already in use should return an error")
 		t.Fatalf("Acquiring a port already in use should return an error")
 	}
 	}
-	if newPort, err := allocator.Acquire(0); err != nil {
+	if newPort, err := allocator.Acquire(ip, 0); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	} else if newPort == port {
 	} else if newPort == port {
 		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
 		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
 	}
 	}
-	if _, err := allocator.Acquire(80); err == nil {
+	if _, err := allocator.Acquire(ip, 80); err == nil {
 		t.Fatalf("Acquiring a port already in use should return an error")
 		t.Fatalf("Acquiring a port already in use should return an error")
 	}
 	}
-	if err := allocator.Release(80); err != nil {
+	if _, err := allocator.Acquire(ip2, 80); err != nil {
+		t.Fatalf("It should be possible to allocate the same port on a different interface")
+	}
+	if _, err := allocator.Acquire(ip2, 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if err := allocator.Release(ip, 80); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
-	if _, err := allocator.Acquire(80); err != nil {
+	if _, err := allocator.Acquire(ip, 80); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 }
 }
@@ -311,3 +321,66 @@ func TestCheckNameserverOverlaps(t *testing.T) {
 		t.Fatalf("%s should not overlap %v but it does", netX, nameservers)
 		t.Fatalf("%s should not overlap %v but it does", netX, nameservers)
 	}
 	}
 }
 }
+
+type StubProxy struct {
+	frontendAddr *net.Addr
+	backendAddr  *net.Addr
+}
+
+func (proxy *StubProxy) Run()                   {}
+func (proxy *StubProxy) Close()                 {}
+func (proxy *StubProxy) FrontendAddr() net.Addr { return *proxy.frontendAddr }
+func (proxy *StubProxy) BackendAddr() net.Addr  { return *proxy.backendAddr }
+
+func NewStubProxy(frontendAddr, backendAddr net.Addr) (proxy.Proxy, error) {
+	return &StubProxy{
+		frontendAddr: &frontendAddr,
+		backendAddr:  &backendAddr,
+	}, nil
+}
+
+func TestPortMapper(t *testing.T) {
+	var chain *iptables.Chain
+	mapper := &PortMapper{
+		tcpMapping:       make(map[string]*net.TCPAddr),
+		tcpProxies:       make(map[string]proxy.Proxy),
+		udpMapping:       make(map[string]*net.UDPAddr),
+		udpProxies:       make(map[string]proxy.Proxy),
+		iptables:         chain,
+		defaultIp:        net.IP("0.0.0.0"),
+		proxyFactoryFunc: NewStubProxy,
+	}
+
+	dstIp1 := net.ParseIP("192.168.0.1")
+	dstIp2 := net.ParseIP("192.168.0.2")
+	srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+	srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
+
+	if err := mapper.Map(dstIp1, 80, srcAddr1); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	}
+
+	if mapper.Map(dstIp1, 80, srcAddr1) == nil {
+		t.Fatalf("Port is in use - mapping should have failed")
+	}
+
+	if mapper.Map(dstIp1, 80, srcAddr2) == nil {
+		t.Fatalf("Port is in use - mapping should have failed")
+	}
+
+	if err := mapper.Map(dstIp2, 80, srcAddr2); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	}
+
+	if mapper.Unmap(dstIp1, 80, "tcp") != nil {
+		t.Fatalf("Failed to release port")
+	}
+
+	if mapper.Unmap(dstIp2, 80, "tcp") != nil {
+		t.Fatalf("Failed to release port")
+	}
+
+	if mapper.Unmap(dstIp2, 80, "tcp") == nil {
+		t.Fatalf("Port already released, but no error reported")
+	}
+}

+ 146 - 0
opts.go

@@ -0,0 +1,146 @@
+package docker
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/utils"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+// ListOpts type
+type ListOpts struct {
+	values    []string
+	validator ValidatorFctType
+}
+
+func NewListOpts(validator ValidatorFctType) ListOpts {
+	return ListOpts{
+		validator: validator,
+	}
+}
+
+func (opts *ListOpts) String() string {
+	return fmt.Sprintf("%v", []string(opts.values))
+}
+
+// Set validates if needed the input value and add it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	opts.values = append(opts.values, value)
+	return nil
+}
+
+// Delete remove the given element from the slice.
+func (opts *ListOpts) Delete(key string) {
+	for i, k := range opts.values {
+		if k == key {
+			opts.values = append(opts.values[:i], opts.values[i+1:]...)
+			return
+		}
+	}
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+// FIXME: can we remove this?
+func (opts *ListOpts) GetMap() map[string]struct{} {
+	ret := make(map[string]struct{})
+	for _, k := range opts.values {
+		ret[k] = struct{}{}
+	}
+	return ret
+}
+
+// GetAll returns the values' slice.
+// FIXME: Can we remove this?
+func (opts *ListOpts) GetAll() []string {
+	return opts.values
+}
+
+// Get checks the existence of the given key.
+func (opts *ListOpts) Get(key string) bool {
+	for _, k := range opts.values {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// Len returns the amount of element in the slice.
+func (opts *ListOpts) Len() int {
+	return len(opts.values)
+}
+
+// Validators
+type ValidatorFctType func(val string) (string, error)
+
+func ValidateAttach(val string) (string, error) {
+	if val != "stdin" && val != "stdout" && val != "stderr" {
+		return val, fmt.Errorf("Unsupported stream name: %s", val)
+	}
+	return val, nil
+}
+
+func ValidateLink(val string) (string, error) {
+	if _, err := parseLink(val); err != nil {
+		return val, err
+	}
+	return val, nil
+}
+
+func ValidatePath(val string) (string, error) {
+	var containerPath string
+
+	if strings.Count(val, ":") > 2 {
+		return val, fmt.Errorf("bad format for volumes: %s", val)
+	}
+
+	splited := strings.SplitN(val, ":", 2)
+	if len(splited) == 1 {
+		containerPath = splited[0]
+		val = filepath.Clean(splited[0])
+	} else {
+		containerPath = splited[1]
+		val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1]))
+	}
+
+	if !filepath.IsAbs(containerPath) {
+		return val, fmt.Errorf("%s is not an absolute path", containerPath)
+	}
+	return val, nil
+}
+
+func ValidateEnv(val string) (string, error) {
+	arr := strings.Split(val, "=")
+	if len(arr) > 1 {
+		return val, nil
+	}
+	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+func ValidateHost(val string) (string, error) {
+	host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTHTTPPORT, val)
+	if err != nil {
+		return val, err
+	}
+	return host, nil
+}
+
+func ValidateIp4Address(val string) (string, error) {
+	re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`)
+	var ns = re.FindSubmatch([]byte(val))
+	if len(ns) > 0 {
+		return string(ns[1]), nil
+	}
+	return "", fmt.Errorf("%s is not an ip4 address", val)
+}

+ 24 - 0
opts_unit_test.go

@@ -0,0 +1,24 @@
+package docker
+
+import (
+	"testing"
+)
+
+func TestValidateIP4(t *testing.T) {
+	if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" {
+		t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err)
+	}
+
+	if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" {
+		t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err)
+	}
+
+	if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" {
+		t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err)
+	}
+
+	if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" {
+		t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err)
+	}
+
+}

+ 14 - 0
reflink_copy_darwin.go

@@ -0,0 +1,14 @@
+package docker
+
+import (
+	"os"
+	"io"
+)
+
+func CopyFile(dstFile, srcFile *os.File) error {
+	// No BTRFS reflink suppport, Fall back to normal copy
+
+	// FIXME: Check the return of Copy and compare with dstFile.Stat().Size
+	_, err := io.Copy(dstFile, srcFile)
+	return err
+}

+ 53 - 0
reflink_copy_linux.go

@@ -0,0 +1,53 @@
+package docker
+
+// FIXME: This could be easily rewritten in pure Go
+
+/*
+#include <sys/ioctl.h>
+#include <linux/fs.h>
+#include <errno.h>
+
+// See linux.git/fs/btrfs/ioctl.h
+#define BTRFS_IOCTL_MAGIC 0x94
+#define BTRFS_IOC_CLONE _IOW(BTRFS_IOCTL_MAGIC, 9, int)
+
+int
+btrfs_reflink(int fd_out, int fd_in)
+{
+  int res;
+  res = ioctl(fd_out, BTRFS_IOC_CLONE, fd_in);
+  if (res < 0)
+    return errno;
+  return 0;
+}
+
+*/
+import "C"
+
+import (
+	"os"
+	"io"
+	"syscall"
+)
+
+// FIXME: Move this to btrfs package?
+
+func BtrfsReflink(fd_out, fd_in uintptr) error {
+	res := C.btrfs_reflink(C.int(fd_out), C.int(fd_in))
+	if res != 0 {
+		return syscall.Errno(res)
+	}
+	return nil
+}
+
+func CopyFile(dstFile, srcFile *os.File) error {
+	err := BtrfsReflink(dstFile.Fd(), srcFile.Fd())
+	if err == nil {
+		return nil
+	}
+
+	// Fall back to normal copy
+	// FIXME: Check the return of Copy and compare with dstFile.Stat().Size
+	_, err = io.Copy(dstFile, srcFile)
+	return err
+}

+ 36 - 15
registry/registry.go

@@ -47,6 +47,8 @@ func pingRegistryEndpoint(endpoint string) error {
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
+	defer resp.Body.Close()
+
 	if resp.Header.Get("X-Docker-Registry-Version") == "" {
 	if resp.Header.Get("X-Docker-Registry-Version") == "" {
 		return errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)")
 		return errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)")
 	}
 	}
@@ -151,6 +153,12 @@ func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
 	return res, err
 	return res, err
 }
 }
 
 
+func setTokenAuth(req *http.Request, token []string) {
+	if req.Header.Get("Authorization") == "" { // Don't override
+		req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	}
+}
+
 // Retrieve the history of a given image from the Registry.
 // Retrieve the history of a given image from the Registry.
 // Return a list of the parent's json (requested image included)
 // Return a list of the parent's json (requested image included)
 func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
 func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
@@ -158,7 +166,7 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	setTokenAuth(req, token)
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
@@ -191,7 +199,7 @@ func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) boo
 	if err != nil {
 	if err != nil {
 		return false
 		return false
 	}
 	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	setTokenAuth(req, token)
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
 		return false
 		return false
@@ -207,7 +215,7 @@ func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([
 	if err != nil {
 	if err != nil {
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
 	}
 	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	setTokenAuth(req, token)
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
@@ -234,7 +242,7 @@ func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (
 	if err != nil {
 	if err != nil {
 		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
 		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
 	}
 	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+	setTokenAuth(req, token)
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
@@ -260,7 +268,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
-		req.Header.Set("Authorization", "Token "+strings.Join(token, ", "))
+		setTokenAuth(req, token)
 		res, err := doWithCookies(r.client, req)
 		res, err := doWithCookies(r.client, req)
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
@@ -288,7 +296,8 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
 	return nil, fmt.Errorf("Could not reach any registry endpoint")
 	return nil, fmt.Errorf("Could not reach any registry endpoint")
 }
 }
 
 
-func (r *Registry) GetRepositoryData(indexEp, remote string) (*RepositoryData, error) {
+func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
+	indexEp := r.indexEndpoint
 	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
 	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
 
 
 	utils.Debugf("[registry] Calling GET %s", repositoryTarget)
 	utils.Debugf("[registry] Calling GET %s", repositoryTarget)
@@ -362,7 +371,7 @@ func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string,
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	setTokenAuth(req, token)
 	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
 	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
 
 
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
@@ -399,7 +408,7 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis
 		return err
 		return err
 	}
 	}
 	req.Header.Add("Content-type", "application/json")
 	req.Header.Add("Content-type", "application/json")
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	setTokenAuth(req, token)
 
 
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
@@ -434,7 +443,7 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr
 	}
 	}
 	req.ContentLength = -1
 	req.ContentLength = -1
 	req.TransferEncoding = []string{"chunked"}
 	req.TransferEncoding = []string{"chunked"}
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	setTokenAuth(req, token)
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
 		return "", fmt.Errorf("Failed to upload layer: %s", err)
 		return "", fmt.Errorf("Failed to upload layer: %s", err)
@@ -463,7 +472,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
 		return err
 		return err
 	}
 	}
 	req.Header.Add("Content-type", "application/json")
 	req.Header.Add("Content-type", "application/json")
-	req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
+	setTokenAuth(req, token)
 	req.ContentLength = int64(len(revision))
 	req.ContentLength = int64(len(revision))
 	res, err := doWithCookies(r.client, req)
 	res, err := doWithCookies(r.client, req)
 	if err != nil {
 	if err != nil {
@@ -476,8 +485,9 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
 	return nil
 	return nil
 }
 }
 
 
-func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
 	cleanImgList := []*ImgData{}
 	cleanImgList := []*ImgData{}
+	indexEp := r.indexEndpoint
 
 
 	if validate {
 	if validate {
 		for _, elem := range imgList {
 		for _, elem := range imgList {
@@ -581,6 +591,7 @@ func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData
 }
 }
 
 
 func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
 func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
+	utils.Debugf("Index server: %s", r.indexEndpoint)
 	u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term)
 	u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term)
 	req, err := r.reqFactory.NewRequest("GET", u, nil)
 	req, err := r.reqFactory.NewRequest("GET", u, nil)
 	if err != nil {
 	if err != nil {
@@ -642,12 +653,13 @@ type ImgData struct {
 }
 }
 
 
 type Registry struct {
 type Registry struct {
-	client     *http.Client
-	authConfig *auth.AuthConfig
-	reqFactory *utils.HTTPRequestFactory
+	client        *http.Client
+	authConfig    *auth.AuthConfig
+	reqFactory    *utils.HTTPRequestFactory
+	indexEndpoint string
 }
 }
 
 
-func NewRegistry(root string, authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory) (r *Registry, err error) {
+func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) {
 	httpTransport := &http.Transport{
 	httpTransport := &http.Transport{
 		DisableKeepAlives: true,
 		DisableKeepAlives: true,
 		Proxy:             http.ProxyFromEnvironment,
 		Proxy:             http.ProxyFromEnvironment,
@@ -658,12 +670,21 @@ func NewRegistry(root string, authConfig *auth.AuthConfig, factory *utils.HTTPRe
 		client: &http.Client{
 		client: &http.Client{
 			Transport: httpTransport,
 			Transport: httpTransport,
 		},
 		},
+		indexEndpoint: indexEndpoint,
 	}
 	}
 	r.client.Jar, err = cookiejar.New(nil)
 	r.client.Jar, err = cookiejar.New(nil)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
+	// If we're working with a private registry over HTTPS, send Basic Auth headers
+	// alongside our requests.
+	if indexEndpoint != auth.IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") {
+		utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint)
+		dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
+		factory.AddDecorator(dec)
+	}
+
 	r.reqFactory = factory
 	r.reqFactory = factory
 	return r, nil
 	return r, nil
 }
 }

+ 4 - 5
registry/registry_test.go

@@ -15,7 +15,7 @@ var (
 
 
 func spawnTestRegistry(t *testing.T) *Registry {
 func spawnTestRegistry(t *testing.T) *Registry {
 	authConfig := &auth.AuthConfig{}
 	authConfig := &auth.AuthConfig{}
-	r, err := NewRegistry("", authConfig, utils.NewHTTPRequestFactory())
+	r, err := NewRegistry(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/"))
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -99,7 +99,7 @@ func TestGetRemoteTags(t *testing.T) {
 
 
 func TestGetRepositoryData(t *testing.T) {
 func TestGetRepositoryData(t *testing.T) {
 	r := spawnTestRegistry(t)
 	r := spawnTestRegistry(t)
-	data, err := r.GetRepositoryData(makeURL("/v1/"), "foo42/bar")
+	data, err := r.GetRepositoryData("foo42/bar")
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -168,15 +168,14 @@ func TestPushImageJSONIndex(t *testing.T) {
 			Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2",
 			Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2",
 		},
 		},
 	}
 	}
-	ep := makeURL("/v1/")
-	repoData, err := r.PushImageJSONIndex(ep, "foo42/bar", imgData, false, nil)
+	repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if repoData == nil {
 	if repoData == nil {
 		t.Fatal("Expected RepositoryData object")
 		t.Fatal("Expected RepositoryData object")
 	}
 	}
-	repoData, err = r.PushImageJSONIndex(ep, "foo42/bar", imgData, true, []string{ep})
+	repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint})
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}

+ 25 - 5
runtime.go

@@ -159,7 +159,7 @@ func (runtime *Runtime) Register(container *Container) error {
 			return err
 			return err
 		}
 		}
 		if !strings.Contains(string(output), "RUNNING") {
 		if !strings.Contains(string(output), "RUNNING") {
-			utils.Debugf("Container %s was supposed to be running be is not.", container.ID)
+			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
 			if runtime.config.AutoRestart {
 			if runtime.config.AutoRestart {
 				utils.Debugf("Restarting")
 				utils.Debugf("Restarting")
 				container.State.SetGhost(false)
 				container.State.SetGhost(false)
@@ -237,6 +237,11 @@ func (runtime *Runtime) Destroy(container *Container) error {
 		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
 		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
 	}
 	}
 
 
+	initID := fmt.Sprintf("%s-init", container.ID)
+	if err := runtime.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err)
+	}
+
 	if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
 	if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
 		utils.Debugf("Unable to remove container from link graph: %s", err)
 		utils.Debugf("Unable to remove container from link graph: %s", err)
 	}
 	}
@@ -420,11 +425,26 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
 
 
 	// Set the enitity in the graph using the default name specified
 	// Set the enitity in the graph using the default name specified
 	if _, err := runtime.containerGraph.Set(name, id); err != nil {
 	if _, err := runtime.containerGraph.Set(name, id); err != nil {
-		if strings.HasSuffix(err.Error(), "name are not unique") {
-			conflictingContainer, _ := runtime.GetByName(name)
-			return nil, nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, utils.TruncateID(conflictingContainer.ID), name)
+		if !strings.HasSuffix(err.Error(), "name are not unique") {
+			return nil, nil, err
+		}
+
+		conflictingContainer, err := runtime.GetByName(name)
+		if err != nil {
+			if strings.Contains(err.Error(), "Could not find entity") {
+				return nil, nil, err
+			}
+
+			// Remove name and continue starting the container
+			if err := runtime.containerGraph.Delete(name); err != nil {
+				return nil, nil, err
+			}
+		} else {
+			nameAsKnownByUser := strings.TrimPrefix(name, "/")
+			return nil, nil, fmt.Errorf(
+				"Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser,
+				utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser)
 		}
 		}
-		return nil, nil, err
 	}
 	}
 
 
 	// Generate default hostname
 	// Generate default hostname

+ 122 - 91
server.go

@@ -38,22 +38,25 @@ func init() {
 
 
 // jobInitApi runs the remote api server `srv` as a daemon,
 // jobInitApi runs the remote api server `srv` as a daemon,
 // Only one api server can run at the same time - this is enforced by a pidfile.
 // Only one api server can run at the same time - this is enforced by a pidfile.
-// The signals SIGINT, SIGKILL and SIGTERM are intercepted for cleanup.
-func jobInitApi(job *engine.Job) string {
+// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
+func jobInitApi(job *engine.Job) engine.Status {
 	job.Logf("Creating server")
 	job.Logf("Creating server")
+	// FIXME: ImportEnv deprecates ConfigFromJob
 	srv, err := NewServer(job.Eng, ConfigFromJob(job))
 	srv, err := NewServer(job.Eng, ConfigFromJob(job))
 	if err != nil {
 	if err != nil {
-		return err.Error()
+		job.Error(err)
+		return engine.StatusErr
 	}
 	}
 	if srv.runtime.config.Pidfile != "" {
 	if srv.runtime.config.Pidfile != "" {
 		job.Logf("Creating pidfile")
 		job.Logf("Creating pidfile")
 		if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
 		if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
+			// FIXME: do we need fatal here instead of returning a job error?
 			log.Fatal(err)
 			log.Fatal(err)
 		}
 		}
 	}
 	}
 	job.Logf("Setting up signal traps")
 	job.Logf("Setting up signal traps")
 	c := make(chan os.Signal, 1)
 	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt, os.Kill, os.Signal(syscall.SIGTERM))
+	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
 	go func() {
 	go func() {
 		sig := <-c
 		sig := <-c
 		log.Printf("Received signal '%v', exiting\n", sig)
 		log.Printf("Received signal '%v', exiting\n", sig)
@@ -68,18 +71,21 @@ func jobInitApi(job *engine.Job) string {
 		job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
 		job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP)
 	}
 	}
 	if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
 	if err := job.Eng.Register("create", srv.ContainerCreate); err != nil {
-		return err.Error()
+		job.Error(err)
+		return engine.StatusErr
 	}
 	}
 	if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
 	if err := job.Eng.Register("start", srv.ContainerStart); err != nil {
-		return err.Error()
+		job.Error(err)
+		return engine.StatusErr
 	}
 	}
 	if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
 	if err := job.Eng.Register("serveapi", srv.ListenAndServe); err != nil {
-		return err.Error()
+		job.Error(err)
+		return engine.StatusErr
 	}
 	}
-	return "0"
+	return engine.StatusOK
 }
 }
 
 
-func (srv *Server) ListenAndServe(job *engine.Job) string {
+func (srv *Server) ListenAndServe(job *engine.Job) engine.Status {
 	protoAddrs := job.Args
 	protoAddrs := job.Args
 	chErrors := make(chan error, len(protoAddrs))
 	chErrors := make(chan error, len(protoAddrs))
 	for _, protoAddr := range protoAddrs {
 	for _, protoAddr := range protoAddrs {
@@ -94,7 +100,8 @@ func (srv *Server) ListenAndServe(job *engine.Job) string {
 				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 				log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
 			}
 			}
 		default:
 		default:
-			return "Invalid protocol format."
+			job.Errorf("Invalid protocol format.")
+			return engine.StatusErr
 		}
 		}
 		go func() {
 		go func() {
 			// FIXME: merge Server.ListenAndServe with ListenAndServe
 			// FIXME: merge Server.ListenAndServe with ListenAndServe
@@ -104,10 +111,11 @@ func (srv *Server) ListenAndServe(job *engine.Job) string {
 	for i := 0; i < len(protoAddrs); i += 1 {
 	for i := 0; i < len(protoAddrs); i += 1 {
 		err := <-chErrors
 		err := <-chErrors
 		if err != nil {
 		if err != nil {
-			return err.Error()
+			job.Error(err)
+			return engine.StatusErr
 		}
 		}
 	}
 	}
-	return "0"
+	return engine.StatusOK
 }
 }
 
 
 func (srv *Server) DockerVersion() APIVersion {
 func (srv *Server) DockerVersion() APIVersion {
@@ -265,6 +273,9 @@ func (srv *Server) exportImage(image *Image, tempdir string) error {
 		// temporary directory
 		// temporary directory
 		tmpImageDir := path.Join(tempdir, i.ID)
 		tmpImageDir := path.Join(tempdir, i.ID)
 		if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
 		if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
+			if os.IsExist(err) {
+				return nil
+			}
 			return err
 			return err
 		}
 		}
 
 
@@ -386,8 +397,8 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
 
 
 		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
 		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
 		if err != nil {
 		if err != nil {
-			return err
 			utils.Debugf("Error reading json", err)
 			utils.Debugf("Error reading json", err)
+			return err
 		}
 		}
 
 
 		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
 		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
@@ -417,7 +428,7 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
 }
 }
 
 
 func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
 func (srv *Server) ImagesSearch(term string) ([]registry.SearchResult, error) {
-	r, err := registry.NewRegistry(srv.runtime.config.Root, nil, srv.HTTPRequestFactory(nil))
+	r, err := registry.NewRegistry(nil, srv.HTTPRequestFactory(nil), auth.IndexServerAddress())
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
@@ -435,7 +446,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
 		return err
 		return err
 	}
 	}
 
 
-	file, err := utils.Download(url, out)
+	file, err := utils.Download(url)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -451,7 +462,7 @@ func (srv *Server) ImageInsert(name, url, path string, out io.Writer, sf *utils.
 		return err
 		return err
 	}
 	}
 
 
-	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf.FormatProgress("", "Downloading", "%8v/%v (%v)"), sf, false), path); err != nil {
+	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, "", "Downloading"), path); err != nil {
 		return err
 		return err
 	}
 	}
 	// FIXME: Handle custom repo, tag comment, author
 	// FIXME: Handle custom repo, tag comment, author
@@ -761,7 +772,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling", "dependent layers"))
+	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
 	// FIXME: Try to stream the images?
 	// FIXME: Try to stream the images?
 	// FIXME: Launch the getRemoteImage() in goroutines
 	// FIXME: Launch the getRemoteImage() in goroutines
 
 
@@ -776,42 +787,42 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 		defer srv.poolRemove("pull", "layer:"+id)
 		defer srv.poolRemove("pull", "layer:"+id)
 
 
 		if !srv.runtime.graph.Exists(id) {
 		if !srv.runtime.graph.Exists(id) {
-			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
+			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
 			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 			if err != nil {
 			if err != nil {
-				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependent layers"))
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
 				// FIXME: Keep going in case of error?
 				// FIXME: Keep going in case of error?
 				return err
 				return err
 			}
 			}
 			img, err := NewImgJSON(imgJSON)
 			img, err := NewImgJSON(imgJSON)
 			if err != nil {
 			if err != nil {
-				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependent layers"))
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
 				return fmt.Errorf("Failed to parse json: %s", err)
 				return fmt.Errorf("Failed to parse json: %s", err)
 			}
 			}
 
 
 			// Get the layer
 			// Get the layer
-			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "fs layer"))
+			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil))
 			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
 			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
 			if err != nil {
 			if err != nil {
-				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "pulling dependent layers"))
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
 				return err
 				return err
 			}
 			}
 			defer layer.Close()
 			defer layer.Close()
-			if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf.FormatProgress(utils.TruncateID(id), "Downloading", "%8v/%v (%v)"), sf, false), img); err != nil {
-				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error", "downloading dependent layers"))
+			if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
+				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
 				return err
 				return err
 			}
 			}
 		}
 		}
-		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download", "complete"))
+		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
 
 
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag, indexEp string, sf *utils.StreamFormatter, parallel bool) error {
+func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
 	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
 	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
 
 
-	repoData, err := r.GetRepositoryData(indexEp, remoteName)
+	repoData, err := r.GetRepositoryData(remoteName)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -866,8 +877,14 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 			}
 			}
 
 
 			// ensure no two downloads of the same image happen at the same time
 			// ensure no two downloads of the same image happen at the same time
-			if _, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
-				utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+			if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
+				if c != nil {
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
+					<-c
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
+				} else {
+					utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+				}
 				if parallel {
 				if parallel {
 					errors <- nil
 					errors <- nil
 				}
 				}
@@ -875,29 +892,29 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
 			}
 			}
 			defer srv.poolRemove("pull", "img:"+img.ID)
 			defer srv.poolRemove("pull", "img:"+img.ID)
 
 
-			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s", img.Tag, localName)))
+			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
 			success := false
 			success := false
 			var lastErr error
 			var lastErr error
 			for _, ep := range repoData.Endpoints {
 			for _, ep := range repoData.Endpoints {
-				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s", img.Tag, localName, ep)))
+				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
 				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 					// Its not ideal that only the last error  is returned, it would be better to concatenate the errors.
 					// Its not ideal that only the last error  is returned, it would be better to concatenate the errors.
 					// As the error is also given to the output stream the user will see the error.
 					// As the error is also given to the output stream the user will see the error.
 					lastErr = err
 					lastErr = err
-					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err)))
+					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
 					continue
 					continue
 				}
 				}
 				success = true
 				success = true
 				break
 				break
 			}
 			}
 			if !success {
 			if !success {
-				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Error pulling", fmt.Sprintf("image (%s) from %s, %s", img.Tag, localName, lastErr)))
+				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
 				if parallel {
 				if parallel {
 					errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
 					errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
 					return
 					return
 				}
 				}
 			}
 			}
-			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download", "complete"))
+			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
 
 
 			if parallel {
 			if parallel {
 				errors <- nil
 				errors <- nil
@@ -981,11 +998,16 @@ func (srv *Server) poolRemove(kind, key string) error {
 }
 }
 
 
 func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error {
 func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig, metaHeaders map[string][]string, parallel bool) error {
-	r, err := registry.NewRegistry(srv.runtime.config.Root, authConfig, srv.HTTPRequestFactory(metaHeaders))
+	out = utils.NewWriteFlusher(out)
+
+	c, err := srv.poolAdd("pull", localName+":"+tag)
 	if err != nil {
 	if err != nil {
-		return err
-	}
-	if _, err := srv.poolAdd("pull", localName+":"+tag); err != nil {
+		if c != nil {
+			// Another pull of the same repository is already taking place; just wait for it to finish
+			out.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
+			<-c
+			return nil
+		}
 		return err
 		return err
 	}
 	}
 	defer srv.poolRemove("pull", localName+":"+tag)
 	defer srv.poolRemove("pull", localName+":"+tag)
@@ -996,22 +1018,19 @@ func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *ut
 		return err
 		return err
 	}
 	}
 
 
+	r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
+	if err != nil {
+		return err
+	}
+
 	if endpoint == auth.IndexServerAddress() {
 	if endpoint == auth.IndexServerAddress() {
 		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
 		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
 		localName = remoteName
 		localName = remoteName
 	}
 	}
 
 
-	out = utils.NewWriteFlusher(out)
-	err = srv.pullRepository(r, out, localName, remoteName, tag, endpoint, sf, parallel)
-	if err == registry.ErrLoginRequired {
+	if err = srv.pullRepository(r, out, localName, remoteName, tag, sf, parallel); err != nil {
 		return err
 		return err
 	}
 	}
-	if err != nil {
-		if err := srv.pullImage(r, out, remoteName, endpoint, nil, sf); err != nil {
-			return err
-		}
-		return nil
-	}
 
 
 	return nil
 	return nil
 }
 }
@@ -1071,7 +1090,7 @@ func flatten(slc [][]*registry.ImgData) []*registry.ImgData {
 	return result
 	return result
 }
 }
 
 
-func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, indexEp string, sf *utils.StreamFormatter) error {
+func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error {
 	out = utils.NewWriteFlusher(out)
 	out = utils.NewWriteFlusher(out)
 	imgList, err := srv.getImageList(localRepo)
 	imgList, err := srv.getImageList(localRepo)
 	if err != nil {
 	if err != nil {
@@ -1081,7 +1100,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 	out.Write(sf.FormatStatus("", "Sending image list"))
 	out.Write(sf.FormatStatus("", "Sending image list"))
 
 
 	var repoData *registry.RepositoryData
 	var repoData *registry.RepositoryData
-	repoData, err = r.PushImageJSONIndex(indexEp, remoteName, flattenedImgList, false, nil)
+	repoData, err = r.PushImageJSONIndex(remoteName, flattenedImgList, false, nil)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -1089,11 +1108,16 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 	for _, ep := range repoData.Endpoints {
 	for _, ep := range repoData.Endpoints {
 		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
 		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
 		// This section can not be parallelized (each round depends on the previous one)
 		// This section can not be parallelized (each round depends on the previous one)
-		for _, round := range imgList {
+		for i, round := range imgList {
 			// FIXME: This section can be parallelized
 			// FIXME: This section can be parallelized
 			for _, elem := range round {
 			for _, elem := range round {
 				var pushTags func() error
 				var pushTags func() error
 				pushTags = func() error {
 				pushTags = func() error {
+					if i < (len(imgList) - 1) {
+						// Only tag the top layer in the repository
+						return nil
+					}
+
 					out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
 					out.Write(sf.FormatStatus("", "Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
 					if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
 					if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
 						return err
 						return err
@@ -1127,7 +1151,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
 		}
 		}
 	}
 	}
 
 
-	if _, err := r.PushImageJSONIndex(indexEp, remoteName, flattenedImgList, true, repoData.Endpoints); err != nil {
+	if _, err := r.PushImageJSONIndex(remoteName, flattenedImgList, true, repoData.Endpoints); err != nil {
 		return err
 		return err
 	}
 	}
 
 
@@ -1162,7 +1186,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 	defer os.RemoveAll(layerData.Name())
 	defer os.RemoveAll(layerData.Name())
 
 
 	// Send the layer
 	// Send the layer
-	checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("", "Pushing", "%8v/%v (%v)"), sf, false), ep, token, jsonRaw)
+	checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, "", "Pushing"), ep, token, jsonRaw)
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err
 	}
 	}
@@ -1193,7 +1217,7 @@ func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFo
 
 
 	out = utils.NewWriteFlusher(out)
 	out = utils.NewWriteFlusher(out)
 	img, err := srv.runtime.graph.Get(localName)
 	img, err := srv.runtime.graph.Get(localName)
-	r, err2 := registry.NewRegistry(srv.runtime.config.Root, authConfig, srv.HTTPRequestFactory(metaHeaders))
+	r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
 	if err2 != nil {
 	if err2 != nil {
 		return err2
 		return err2
 	}
 	}
@@ -1203,7 +1227,7 @@ func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFo
 		out.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
 		out.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
 		// If it fails, try to get the repository
 		// If it fails, try to get the repository
 		if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
 		if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
-			if err := srv.pushRepository(r, out, localName, remoteName, localRepo, endpoint, sf); err != nil {
+			if err := srv.pushRepository(r, out, localName, remoteName, localRepo, sf); err != nil {
 				return err
 				return err
 			}
 			}
 			return nil
 			return nil
@@ -1238,11 +1262,11 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
 		out.Write(sf.FormatStatus("", "Downloading from %s", u))
 		out.Write(sf.FormatStatus("", "Downloading from %s", u))
 		// Download with curl (pretty progress bar)
 		// Download with curl (pretty progress bar)
 		// If curl is not available, fallback to http.Get()
 		// If curl is not available, fallback to http.Get()
-		resp, err = utils.Download(u.String(), out)
+		resp, err = utils.Download(u.String())
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf.FormatProgress("", "Importing", "%8v/%v (%v)"), sf, true)
+		archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), out, sf, true, "", "Importing")
 	}
 	}
 	img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
 	img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
 	if err != nil {
 	if err != nil {
@@ -1258,19 +1282,22 @@ func (srv *Server) ImageImport(src, repo, tag string, in io.Reader, out io.Write
 	return nil
 	return nil
 }
 }
 
 
-func (srv *Server) ContainerCreate(job *engine.Job) string {
+func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
 	var name string
 	var name string
 	if len(job.Args) == 1 {
 	if len(job.Args) == 1 {
 		name = job.Args[0]
 		name = job.Args[0]
 	} else if len(job.Args) > 1 {
 	} else if len(job.Args) > 1 {
-		return fmt.Sprintf("Usage: %s ", job.Name)
+		job.Printf("Usage: %s", job.Name)
+		return engine.StatusErr
 	}
 	}
 	var config Config
 	var config Config
 	if err := job.ExportEnv(&config); err != nil {
 	if err := job.ExportEnv(&config); err != nil {
-		return err.Error()
+		job.Error(err)
+		return engine.StatusErr
 	}
 	}
 	if config.Memory != 0 && config.Memory < 524288 {
 	if config.Memory != 0 && config.Memory < 524288 {
-		return "Minimum memory limit allowed is 512k"
+		job.Errorf("Minimum memory limit allowed is 512k")
+		return engine.StatusErr
 	}
 	}
 	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
 	if config.Memory > 0 && !srv.runtime.capabilities.MemoryLimit {
 		config.Memory = 0
 		config.Memory = 0
@@ -1285,9 +1312,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) string {
 			if tag == "" {
 			if tag == "" {
 				tag = DEFAULTTAG
 				tag = DEFAULTTAG
 			}
 			}
-			return fmt.Sprintf("No such image: %s (tag: %s)", config.Image, tag)
+			job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+			return engine.StatusErr
 		}
 		}
-		return err.Error()
+		job.Error(err)
+		return engine.StatusErr
 	}
 	}
 	srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
 	srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
 	// FIXME: this is necessary because runtime.Create might return a nil container
 	// FIXME: this is necessary because runtime.Create might return a nil container
@@ -1299,7 +1328,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) string {
 	for _, warning := range buildWarnings {
 	for _, warning := range buildWarnings {
 		job.Errorf("%s\n", warning)
 		job.Errorf("%s\n", warning)
 	}
 	}
-	return "0"
+	return engine.StatusOK
 }
 }
 
 
 func (srv *Server) ContainerRestart(name string, t int) error {
 func (srv *Server) ContainerRestart(name string, t int) error {
@@ -1408,19 +1437,15 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
 
 
 var ErrImageReferenced = errors.New("Image referenced by a repository")
 var ErrImageReferenced = errors.New("Image referenced by a repository")
 
 
-func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
+func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi, byParents map[string][]*Image) error {
 	// If the image is referenced by a repo, do not delete
 	// If the image is referenced by a repo, do not delete
 	if len(srv.runtime.repositories.ByID()[id]) != 0 {
 	if len(srv.runtime.repositories.ByID()[id]) != 0 {
 		return ErrImageReferenced
 		return ErrImageReferenced
 	}
 	}
 	// If the image is not referenced but has children, go recursive
 	// If the image is not referenced but has children, go recursive
 	referenced := false
 	referenced := false
-	byParents, err := srv.runtime.graph.ByParent()
-	if err != nil {
-		return err
-	}
 	for _, img := range byParents[id] {
 	for _, img := range byParents[id] {
-		if err := srv.deleteImageAndChildren(img.ID, imgs); err != nil {
+		if err := srv.deleteImageAndChildren(img.ID, imgs, byParents); err != nil {
 			if err != ErrImageReferenced {
 			if err != ErrImageReferenced {
 				return err
 				return err
 			}
 			}
@@ -1432,7 +1457,7 @@ func (srv *Server) deleteImageAndChildren(id string, imgs *[]APIRmi) error {
 	}
 	}
 
 
 	// If the image is not referenced and has no children, remove it
 	// If the image is not referenced and has no children, remove it
-	byParents, err = srv.runtime.graph.ByParent()
+	byParents, err := srv.runtime.graph.ByParent()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -1457,8 +1482,12 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
+		byParents, err := srv.runtime.graph.ByParent()
+		if err != nil {
+			return err
+		}
 		// Remove all children images
 		// Remove all children images
-		if err := srv.deleteImageAndChildren(img.Parent, imgs); err != nil {
+		if err := srv.deleteImageAndChildren(img.Parent, imgs, byParents); err != nil {
 			return err
 			return err
 		}
 		}
 		return srv.deleteImageParents(parent, imgs)
 		return srv.deleteImageParents(parent, imgs)
@@ -1500,7 +1529,7 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, erro
 		}
 		}
 	}
 	}
 	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
 	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
-		if err := srv.deleteImageAndChildren(img.ID, &imgs); err != nil {
+		if err := srv.deleteImageAndChildren(img.ID, &imgs, nil); err != nil {
 			if err != ErrImageReferenced {
 			if err != ErrImageReferenced {
 				return imgs, err
 				return imgs, err
 			}
 			}
@@ -1617,22 +1646,25 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
 	return nil
 	return nil
 }
 }
 
 
-func (srv *Server) ContainerStart(job *engine.Job) string {
+func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
 	if len(job.Args) < 1 {
 	if len(job.Args) < 1 {
-		return fmt.Sprintf("Usage: %s container_id", job.Name)
+		job.Errorf("Usage: %s container_id", job.Name)
+		return engine.StatusErr
 	}
 	}
 	name := job.Args[0]
 	name := job.Args[0]
 	runtime := srv.runtime
 	runtime := srv.runtime
 	container := runtime.Get(name)
 	container := runtime.Get(name)
 
 
 	if container == nil {
 	if container == nil {
-		return fmt.Sprintf("No such container: %s", name)
+		job.Errorf("No such container: %s", name)
+		return engine.StatusErr
 	}
 	}
 	// If no environment was set, then no hostconfig was passed.
 	// If no environment was set, then no hostconfig was passed.
 	if len(job.Environ()) > 0 {
 	if len(job.Environ()) > 0 {
 		var hostConfig HostConfig
 		var hostConfig HostConfig
 		if err := job.ExportEnv(&hostConfig); err != nil {
 		if err := job.ExportEnv(&hostConfig); err != nil {
-			return err.Error()
+			job.Error(err)
+			return engine.StatusErr
 		}
 		}
 		// Validate the HostConfig binds. Make sure that:
 		// Validate the HostConfig binds. Make sure that:
 		// 1) the source of a bind mount isn't /
 		// 1) the source of a bind mount isn't /
@@ -1645,29 +1677,33 @@ func (srv *Server) ContainerStart(job *engine.Job) string {
 
 
 			// refuse to bind mount "/" to the container
 			// refuse to bind mount "/" to the container
 			if source == "/" {
 			if source == "/" {
-				return fmt.Sprintf("Invalid bind mount '%s' : source can't be '/'", bind)
+				job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
+				return engine.StatusErr
 			}
 			}
 
 
 			// ensure the source exists on the host
 			// ensure the source exists on the host
 			_, err := os.Stat(source)
 			_, err := os.Stat(source)
 			if err != nil && os.IsNotExist(err) {
 			if err != nil && os.IsNotExist(err) {
-				return fmt.Sprintf("Invalid bind mount '%s' : source doesn't exist", bind)
+				job.Errorf("Invalid bind mount '%s' : source doesn't exist", bind)
+				return engine.StatusErr
 			}
 			}
 		}
 		}
 		// Register any links from the host config before starting the container
 		// Register any links from the host config before starting the container
 		// FIXME: we could just pass the container here, no need to lookup by name again.
 		// FIXME: we could just pass the container here, no need to lookup by name again.
 		if err := srv.RegisterLinks(name, &hostConfig); err != nil {
 		if err := srv.RegisterLinks(name, &hostConfig); err != nil {
-			return err.Error()
+			job.Error(err)
+			return engine.StatusErr
 		}
 		}
 		container.hostConfig = &hostConfig
 		container.hostConfig = &hostConfig
 		container.ToDisk()
 		container.ToDisk()
 	}
 	}
 	if err := container.Start(); err != nil {
 	if err := container.Start(); err != nil {
-		return fmt.Sprintf("Cannot start container %s: %s", name, err)
+		job.Errorf("Cannot start container %s: %s", name, err)
+		return engine.StatusErr
 	}
 	}
 	srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
 	srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
 
 
-	return "0"
+	return engine.StatusOK
 }
 }
 
 
 func (srv *Server) ContainerStop(name string, t int) error {
 func (srv *Server) ContainerStop(name string, t int) error {
@@ -1830,7 +1866,6 @@ func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
 		pushingPool: make(map[string]chan struct{}),
 		pushingPool: make(map[string]chan struct{}),
 		events:      make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
 		events:      make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
 		listeners:   make(map[string]chan utils.JSONMessage),
 		listeners:   make(map[string]chan utils.JSONMessage),
-		reqFactory:  nil,
 	}
 	}
 	runtime.srv = srv
 	runtime.srv = srv
 	return srv, nil
 	return srv, nil
@@ -1839,15 +1874,12 @@ func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
 func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
 func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
 	srv.Lock()
 	srv.Lock()
 	defer srv.Unlock()
 	defer srv.Unlock()
-	if srv.reqFactory == nil {
-		ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...)
-		md := &utils.HTTPMetaHeadersDecorator{
-			Headers: metaHeaders,
-		}
-		factory := utils.NewHTTPRequestFactory(ud, md)
-		srv.reqFactory = factory
+	ud := utils.NewHTTPUserAgentDecorator(srv.versionInfos()...)
+	md := &utils.HTTPMetaHeadersDecorator{
+		Headers: metaHeaders,
 	}
 	}
-	return srv.reqFactory
+	factory := utils.NewHTTPRequestFactory(ud, md)
+	return factory
 }
 }
 
 
 func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
 func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
@@ -1882,6 +1914,5 @@ type Server struct {
 	pushingPool map[string]chan struct{}
 	pushingPool map[string]chan struct{}
 	events      []utils.JSONMessage
 	events      []utils.JSONMessage
 	listeners   map[string]chan utils.JSONMessage
 	listeners   map[string]chan utils.JSONMessage
-	reqFactory  *utils.HTTPRequestFactory
 	Eng         *engine.Engine
 	Eng         *engine.Engine
 }
 }

Vissa filer visades inte eftersom för många filer har ändrats