
Merge branch 'master' into list_container_processes-feature

Victor Vieux committed 12 years ago
commit 9232d1ef62
70 changed files with 2222 additions and 901 deletions
  1. .gitignore (+1 -0)
  2. CHANGELOG.md (+17 -0)
  3. Dockerfile (+30 -0)
  4. README.md (+9 -9)
  5. api.go (+10 -10)
  6. api_test.go (+71 -95)
  7. archive.go (+15 -12)
  8. archive_test.go (+4 -4)
  9. auth/auth.go (+4 -8)
  10. auth/auth_test.go (+1 -1)
  11. builder.go (+13 -2)
  12. buildfile.go (+73 -13)
  13. buildfile_test.go (+66 -7)
  14. commands.go (+52 -36)
  15. commands_test.go (+5 -4)
  16. container.go (+29 -12)
  17. container_test.go (+34 -6)
  18. contrib/mkimage-unittest.sh (+3 -0)
  19. docker/docker.go (+1 -1)
  20. docs/sources/api/docker_remote_api.rst (+3 -0)
  21. docs/sources/api/docker_remote_api_v1.3.rst (+1 -1)
  22. docs/sources/commandline/command/run.rst (+1 -0)
  23. docs/sources/concepts/images/dockerlogo-h.png (BIN)
  24. docs/sources/concepts/images/dockerlogo-v.png (BIN)
  25. docs/sources/concepts/index.rst (+4 -4)
  26. docs/sources/concepts/manifesto.rst (+190 -0)
  27. docs/sources/contributing/devenvironment.rst (+29 -30)
  28. docs/sources/index.rst (+27 -116)
  29. docs/sources/installation/index.rst (+9 -4)
  30. docs/sources/terms/container.rst (+40 -0)
  31. docs/sources/terms/filesystem.rst (+38 -0)
  32. docs/sources/terms/fundamentals.rst (+0 -97)
  33. docs/sources/terms/image.rst (+38 -0)
  34. docs/sources/terms/images/docker-filesystems-busyboxrw.png (BIN)
  35. docs/sources/terms/images/docker-filesystems-debian.png (BIN)
  36. docs/sources/terms/images/docker-filesystems-debianrw.png (BIN)
  37. docs/sources/terms/images/docker-filesystems-generic.png (BIN)
  38. docs/sources/terms/images/docker-filesystems-multilayer.png (BIN)
  39. docs/sources/terms/images/docker-filesystems-multiroot.png (BIN)
  40. docs/sources/terms/images/docker-filesystems.svg (+37 -23)
  41. docs/sources/terms/index.rst (+8 -4)
  42. docs/sources/terms/layer.rst (+40 -0)
  43. docs/sources/toctree.rst (+3 -2)
  44. docs/sources/use/builder.rst (+14 -0)
  45. docs/sources/use/workingwithrepository.rst (+7 -7)
  46. graph.go (+1 -2)
  47. hack/RELEASE.md (+119 -0)
  48. hack/dockerbuilder/Dockerfile (+4 -1)
  49. image.go (+3 -0)
  50. network.go (+135 -96)
  51. network_proxy.go (+257 -0)
  52. network_proxy_test.go (+221 -0)
  53. network_test.go (+75 -6)
  54. packaging/ubuntu/lxc-docker.prerm (+1 -1)
  55. registry/registry.go (+113 -61)
  56. runtime.go (+0 -3)
  57. runtime_test.go (+133 -65)
  58. server.go (+107 -79)
  59. server_test.go (+11 -6)
  60. state.go (+2 -13)
  61. sysinit.go (+0 -3)
  62. tags.go (+7 -10)
  63. tags_test.go (+2 -2)
  64. term/term.go (+1 -1)
  65. testing/README.rst (+10 -0)
  66. testing/Vagrantfile (+7 -3)
  67. testing/buildbot/master.cfg (+30 -16)
  68. testing/buildbot/setup.sh (+12 -2)
  69. utils.go (+13 -5)
  70. utils/utils.go (+31 -18)

+ 1 - 0
.gitignore

@@ -15,3 +15,4 @@ docs/_build
 docs/_static
 docs/_templates
 .gopath/
+.dotcloud

+ 17 - 0
CHANGELOG.md

@@ -1,5 +1,22 @@
 # Changelog
 
+## 0.4.8 (2013-07-01)
+ + Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+ - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+ - Tests: Fix issues in the test suite
+
+## 0.4.7 (2013-06-28)
+ * Registry: easier push/pull to a custom registry
+ * Remote API: the progress bar updates faster when downloading and uploading large files
+ - Remote API: fix a bug in the optional unix socket transport
+ * Runtime: improve detection of kernel version
+ + Runtime: host directories can be mounted as volumes with 'docker run -b'
+ - Runtime: fix an issue when only attaching to stdin
+ * Runtime: use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
+ * Hack: improve test suite and dev environment
+ * Hack: remove dependency on unit tests on 'os/user'
+ + Documentation: add terminology section
+
 ## 0.4.6 (2013-06-22)
  - Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
 

+ 30 - 0
Dockerfile

@@ -0,0 +1,30 @@
+# This file describes the standard way to build Docker, using docker
+docker-version 0.4.2
+from	ubuntu:12.04
+maintainer	Solomon Hykes <solomon@dotcloud.com>
+# Build dependencies
+run	apt-get install -y -q curl
+run	apt-get install -y -q git
+# Install Go
+run	curl -s https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz | tar -v -C /usr/local -xz
+env	PATH	/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+env	GOPATH	/go
+env	CGO_ENABLED 0
+run	cd /tmp && echo 'package main' > t.go && go test -a -i -v
+# Download dependencies
+run	PKG=github.com/kr/pty REV=27435c699;		 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
+run	PKG=github.com/gorilla/context/ REV=708054d61e5; git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
+run	PKG=github.com/gorilla/mux/ REV=9b36453141c;	 git clone http://$PKG /go/src/$PKG && cd /go/src/$PKG && git checkout -f $REV
+# Run dependencies
+run	apt-get install -y iptables
+# lxc requires updating ubuntu sources
+run	echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
+run	apt-get update
+run	apt-get install -y lxc
+run	apt-get install -y aufs-tools
+# Upload docker source
+add	.       /go/src/github.com/dotcloud/docker
+# Build the binary
+run	cd /go/src/github.com/dotcloud/docker/docker && go install -ldflags "-X main.GITCOMMIT '??' -d -w"
+env	PATH	/usr/local/go/bin:/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+cmd	["docker"]

+ 9 - 9
README.md

@@ -23,15 +23,15 @@ happens, for a few reasons:
 
   * *Size*: VMs are very large which makes them impractical to store and transfer.
   * *Performance*: running VMs consumes significant CPU and memory, which makes them impractical in many scenarios, for example local development of multi-tier applications, and
-  	large-scale deployment of cpu and memory-intensive applications on large numbers of machines.
+    large-scale deployment of cpu and memory-intensive applications on large numbers of machines.
   * *Portability*: competing VM environments don't play well with each other. Although conversion tools do exist, they are limited and add even more overhead.
   * *Hardware-centric*: VMs were designed with machine operators in mind, not software developers. As a result, they offer very limited tooling for what developers need most:
-  	building, testing and running their software. For example, VMs offer no facilities for application versioning, monitoring, configuration, logging or service discovery.
+    building, testing and running their software. For example, VMs offer no facilities for application versioning, monitoring, configuration, logging or service discovery.
 
 By contrast, Docker relies on a different sandboxing method known as *containerization*. Unlike traditional virtualization,
 containerization takes place at the kernel level. Most modern operating system kernels now support the primitives necessary
 for containerization, including Linux with [openvz](http://openvz.org), [vserver](http://linux-vserver.org) and more recently [lxc](http://lxc.sourceforge.net),
-	Solaris with [zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc) and FreeBSD with [Jails](http://www.freebsd.org/doc/handbook/jails.html).
+    Solaris with [zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc) and FreeBSD with [Jails](http://www.freebsd.org/doc/handbook/jails.html).
 
 Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves
 all 4 problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead,
@@ -56,17 +56,17 @@ A common problem for developers is the difficulty of managing all their applicat
 This is usually difficult for several reasons:
 
   * *Cross-platform dependencies*. Modern applications often depend on a combination of system libraries and binaries, language-specific packages, framework-specific modules,
-  	internal components developed for another project, etc. These dependencies live in different "worlds" and require different tools - these tools typically don't work
-	well with each other, requiring awkward custom integrations.
+    internal components developed for another project, etc. These dependencies live in different "worlds" and require different tools - these tools typically don't work
+    well with each other, requiring awkward custom integrations.
 
   * Conflicting dependencies. Different applications may depend on different versions of the same dependency. Packaging tools handle these situations with various degrees of ease -
-  	but they all handle them in different and incompatible ways, which again forces the developer to do extra work.
+    but they all handle them in different and incompatible ways, which again forces the developer to do extra work.
   
-  * Custom dependencies. A developer may need to prepare a custom version of his application's dependency. Some packaging systems can handle custom versions of a dependency,
-  	others can't - and all of them handle it differently.
+  * Custom dependencies. A developer may need to prepare a custom version of their application's dependency. Some packaging systems can handle custom versions of a dependency,
+    others can't - and all of them handle it differently.
 
 
-Docker solves dependency hell by giving the developer a simple way to express *all* his application's dependencies in one place,
+Docker solves dependency hell by giving the developer a simple way to express *all* their application's dependencies in one place,
 and streamline the process of assembling them. If this makes you think of [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
 *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers.
 

+ 10 - 10
api.go

@@ -170,7 +170,7 @@ func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r
 	name := vars["name"]
 
 	if err := srv.ContainerExport(name, w); err != nil {
-		utils.Debugf("%s", err.Error())
+		utils.Debugf("%s", err)
 		return err
 	}
 	return nil
@@ -323,7 +323,7 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req
 	}
 	config := &Config{}
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
-		utils.Debugf("%s", err.Error())
+		utils.Debugf("%s", err)
 	}
 	repo := r.Form.Get("repo")
 	tag := r.Form.Get("tag")
@@ -359,8 +359,7 @@ func postImagesCreate(srv *Server, version float64, w http.ResponseWriter, r *ht
 	}
 	sf := utils.NewStreamFormatter(version > 1.0)
 	if image != "" { //pull
-		registry := r.Form.Get("registry")
-		if err := srv.ImagePull(image, tag, registry, w, sf, &auth.AuthConfig{}); err != nil {
+		if err := srv.ImagePull(image, tag, w, sf, &auth.AuthConfig{}); err != nil {
 			if sf.Used() {
 				w.Write(sf.FormatError(err))
 				return nil
@@ -443,7 +442,6 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	registry := r.Form.Get("registry")
 
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
@@ -453,7 +451,7 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
 		w.Header().Set("Content-Type", "application/json")
 	}
 	sf := utils.NewStreamFormatter(version > 1.0)
-	if err := srv.ImagePush(name, registry, w, sf, authConfig); err != nil {
+	if err := srv.ImagePush(name, w, sf, authConfig); err != nil {
 		if sf.Used() {
 			w.Write(sf.FormatError(err))
 			return nil
@@ -552,7 +550,7 @@ func deleteImages(srv *Server, version float64, w http.ResponseWriter, r *http.R
 		return err
 	}
 	if imgs != nil {
-		if len(*imgs) != 0 {
+		if len(imgs) != 0 {
 			b, err := json.Marshal(imgs)
 			if err != nil {
 				return err
@@ -572,8 +570,10 @@ func postContainersStart(srv *Server, version float64, w http.ResponseWriter, r
 
 	// allow a nil body for backwards compatibility
 	if r.Body != nil {
-		if err := json.NewDecoder(r.Body).Decode(hostConfig); err != nil {
-			return err
+		if r.Header.Get("Content-Type") == "application/json" {
+			if err := json.NewDecoder(r.Body).Decode(hostConfig); err != nil {
+				return err
+			}
 		}
 	}
 
@@ -896,7 +896,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
 			localMethod := method
 			localFct := fct
 			f := func(w http.ResponseWriter, r *http.Request) {
-				utils.Debugf("Calling %s %s", localMethod, localRoute)
+				utils.Debugf("Calling %s %s from %s", localMethod, localRoute, r.RemoteAddr)
 
 				if logging {
 					log.Println(r.Method, r.RequestURI)

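Editor's note, for illustration only: the postContainersStart hunk above makes the daemon decode the request body as a HostConfig only when the client declares Content-Type: application/json, keeping backwards compatibility with older clients that POST an empty or non-JSON body. Below is a minimal, self-contained Go sketch of the client side; the daemon address and the trimmed-down hostConfig struct are stand-ins, not the commit's code.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// hostConfig is a stand-in for the daemon's HostConfig type.
type hostConfig struct {
	Binds []string
}

func main() {
	body, err := json.Marshal(hostConfig{Binds: []string{"/host:/container"}})
	if err != nil {
		panic(err)
	}
	// Hypothetical daemon address and container ID, for illustration.
	req, err := http.NewRequest("POST", "http://localhost:4243/containers/abc123/start", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// Without this header, the daemon now ignores the body instead of
	// failing to decode it.
	req.Header.Set("Content-Type", "application/json")
	fmt.Println(req.Method, req.URL, req.Header.Get("Content-Type"))
}
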
+ 71 - 95
api_test.go

@@ -5,7 +5,6 @@ import (
 	"bufio"
 	"bytes"
 	"encoding/json"
-	"github.com/dotcloud/docker/auth"
 	"github.com/dotcloud/docker/utils"
 	"io"
 	"net"
@@ -41,44 +40,6 @@ func TestGetBoolParam(t *testing.T) {
 	}
 }
 
-func TestPostAuth(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer nuke(runtime)
-
-	srv := &Server{
-		runtime: runtime,
-	}
-
-	r := httptest.NewRecorder()
-
-	authConfig := &auth.AuthConfig{
-		Username: "utest",
-		Password: "utest",
-		Email:    "utest@yopmail.com",
-	}
-
-	authConfigJSON, err := json.Marshal(authConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/auth", bytes.NewReader(authConfigJSON))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := postAuth(srv, APIVERSION, r, req, nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if r.Code != http.StatusOK && r.Code != 0 {
-		t.Fatalf("%d OK or 0 expected, received %d\n", http.StatusOK, r.Code)
-	}
-}
-
 func TestGetVersion(t *testing.T) {
 	runtime, err := newTestRuntime()
 	if err != nil {
@@ -99,7 +60,7 @@ func TestGetVersion(t *testing.T) {
 		t.Fatal(err)
 	}
 	if v.Version != VERSION {
-		t.Errorf("Excepted version %s, %s found", VERSION, v.Version)
+		t.Errorf("Expected version %s, %s found", VERSION, v.Version)
 	}
 }
 
@@ -112,6 +73,11 @@ func TestGetInfo(t *testing.T) {
 
 	srv := &Server{runtime: runtime}
 
+	initialImages, err := srv.runtime.graph.All()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	r := httptest.NewRecorder()
 
 	if err := getInfo(srv, APIVERSION, r, nil, nil); err != nil {
@@ -123,8 +89,8 @@ func TestGetInfo(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if infos.Images != 1 {
-		t.Errorf("Excepted images: %d, %d found", 1, infos.Images)
+	if infos.Images != len(initialImages) {
+		t.Errorf("Expected images: %d, %d found", len(initialImages), infos.Images)
 	}
 }
 
@@ -138,6 +104,12 @@ func TestGetImagesJSON(t *testing.T) {
 	srv := &Server{runtime: runtime}
 
 	// all=0
+
+	initialImages, err := srv.Images(false, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	req, err := http.NewRequest("GET", "/images/json?all=0", nil)
 	if err != nil {
 		t.Fatal(err)
@@ -154,17 +126,30 @@ func TestGetImagesJSON(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images) != 1 {
-		t.Errorf("Excepted 1 image, %d found", len(images))
+	if len(images) != len(initialImages) {
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
 	}
 
-	if images[0].Repository != unitTestImageName {
-		t.Errorf("Excepted image %s, %s found", unitTestImageName, images[0].Repository)
+	found := false
+	for _, img := range images {
+		if img.Repository == unitTestImageName {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected image %s, %+v found", unitTestImageName, images)
 	}
 
 	r2 := httptest.NewRecorder()
 
 	// all=1
+
+	initialImages, err = srv.Images(true, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	req2, err := http.NewRequest("GET", "/images/json?all=true", nil)
 	if err != nil {
 		t.Fatal(err)
@@ -179,18 +164,25 @@ func TestGetImagesJSON(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images2) != 1 {
-		t.Errorf("Excepted 1 image, %d found", len(images2))
+	if len(images2) != len(initialImages) {
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images2))
 	}
 
-	if images2[0].ID != GetTestImage(runtime).ID {
-		t.Errorf("Retrieved image Id differs, expected %s, received %s", GetTestImage(runtime).ID, images2[0].ID)
+	found = false
+	for _, img := range images2 {
+		if img.ID == GetTestImage(runtime).ID {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Retrieved image Id differs, expected %s, received %+v", GetTestImage(runtime).ID, images2)
 	}
 
 	r3 := httptest.NewRecorder()
 
 	// filter=a
-	req3, err := http.NewRequest("GET", "/images/json?filter=a", nil)
+	req3, err := http.NewRequest("GET", "/images/json?filter=aaaaaaaaaa", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -205,7 +197,7 @@ func TestGetImagesJSON(t *testing.T) {
 	}
 
 	if len(images3) != 0 {
-		t.Errorf("Excepted 1 image, %d found", len(images3))
+		t.Errorf("Expected 0 image, %d found", len(images3))
 	}
 
 	r4 := httptest.NewRecorder()
@@ -251,38 +243,7 @@ func TestGetImagesViz(t *testing.T) {
 		t.Fatal(err)
 	}
 	if line != "digraph docker {\n" {
-		t.Errorf("Excepted digraph docker {\n, %s found", line)
-	}
-}
-
-func TestGetImagesSearch(t *testing.T) {
-	runtime, err := newTestRuntime()
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer nuke(runtime)
-
-	srv := &Server{
-		runtime: runtime,
-	}
-
-	r := httptest.NewRecorder()
-
-	req, err := http.NewRequest("GET", "/images/search?term=redis", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := getImagesSearch(srv, APIVERSION, r, req, nil); err != nil {
-		t.Fatal(err)
-	}
-
-	results := []APISearch{}
-	if err := json.Unmarshal(r.Body.Bytes(), &results); err != nil {
-		t.Fatal(err)
-	}
-	if len(results) < 2 {
-		t.Errorf("Excepted at least 2 lines, %d found", len(results))
+		t.Errorf("Expected digraph docker {\n, %s found", line)
 	}
 }
 
@@ -306,7 +267,7 @@ func TestGetImagesHistory(t *testing.T) {
 		t.Fatal(err)
 	}
 	if len(history) != 1 {
-		t.Errorf("Excepted 1 line, %d found", len(history))
+		t.Errorf("Expected 1 line, %d found", len(history))
 	}
 }
 
@@ -328,7 +289,7 @@ func TestGetImagesByName(t *testing.T) {
 	if err := json.Unmarshal(r.Body.Bytes(), img); err != nil {
 		t.Fatal(err)
 	}
-	if img.ID != GetTestImage(runtime).ID || img.Comment != "Imported from http://get.docker.io/images/busybox" {
+	if img.ID != unitTestImageID {
 		t.Errorf("Error inspecting image")
 	}
 }
@@ -365,7 +326,7 @@ func TestGetContainersJSON(t *testing.T) {
 		t.Fatal(err)
 	}
 	if len(containers) != 1 {
-		t.Fatalf("Excepted %d container, %d found", 1, len(containers))
+		t.Fatalf("Expected %d container, %d found", 1, len(containers))
 	}
 	if containers[0].ID != container.ID {
 		t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ID, containers[0].ID)
@@ -1365,6 +1326,11 @@ func TestDeleteImages(t *testing.T) {
 
 	srv := &Server{runtime: runtime}
 
+	initialImages, err := srv.Images(false, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	if err := srv.runtime.repositories.Set("test", "test", unitTestImageName, true); err != nil {
 		t.Fatal(err)
 	}
@@ -1374,25 +1340,35 @@ func TestDeleteImages(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images) != 2 {
-		t.Errorf("Excepted 2 images, %d found", len(images))
+	if len(images) != len(initialImages)+1 {
+		t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images))
 	}
 
-	req, err := http.NewRequest("DELETE", "/images/test:test", nil)
+	req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	r := httptest.NewRecorder()
-	if err := deleteImages(srv, APIVERSION, r, req, map[string]string{"name": "test:test"}); err != nil {
+	if err := deleteImages(srv, APIVERSION, r, req, map[string]string{"name": unitTestImageID}); err == nil {
+		t.Fatalf("Expected conflict error, got none")
+	}
+
+	req2, err := http.NewRequest("DELETE", "/images/test:test", nil)
+	if err != nil {
 		t.Fatal(err)
 	}
-	if r.Code != http.StatusOK {
+
+	r2 := httptest.NewRecorder()
+	if err := deleteImages(srv, APIVERSION, r2, req2, map[string]string{"name": "test:test"}); err != nil {
+		t.Fatal(err)
+	}
+	if r2.Code != http.StatusOK {
 		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
 	}
 
 	var outs []APIRmi
-	if err := json.Unmarshal(r.Body.Bytes(), &outs); err != nil {
+	if err := json.Unmarshal(r2.Body.Bytes(), &outs); err != nil {
 		t.Fatal(err)
 	}
 	if len(outs) != 1 {
@@ -1403,8 +1379,8 @@ func TestDeleteImages(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images) != 1 {
-		t.Errorf("Excepted 1 image, %d found", len(images))
+	if len(images) != len(initialImages) {
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
 	}
 
 	/*	if c := runtime.Get(container.Id); c != nil {

+ 15 - 12
archive.go

@@ -2,9 +2,7 @@ package docker
 
 import (
 	"archive/tar"
-	"bufio"
 	"bytes"
-	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/utils"
 	"io"
@@ -27,10 +25,6 @@ const (
 )
 
 func DetectCompression(source []byte) Compression {
-	for _, c := range source[:10] {
-		utils.Debugf("%x", c)
-	}
-
 	sourceLen := len(source)
 	for compression, m := range map[Compression][]byte{
 		Bzip2: {0x42, 0x5A, 0x68},
@@ -111,17 +105,26 @@ func Untar(archive io.Reader, path string) error {
 	if archive == nil {
 		return fmt.Errorf("Empty archive")
 	}
-	bufferedArchive := bufio.NewReaderSize(archive, 10)
-	buf, err := bufferedArchive.Peek(10)
-	if err != nil {
-		return err
+
+	buf := make([]byte, 10)
+	totalN := 0
+	for totalN < 10 {
+		if n, err := archive.Read(buf[totalN:]); err != nil {
+			if err == io.EOF {
+				return fmt.Errorf("Tarball too short")
+			}
+			return err
+		} else {
+			totalN += n
+			utils.Debugf("[tar autodetect] n: %d", n)
+		}
 	}
 	compression := DetectCompression(buf)
 
 	utils.Debugf("Archive compression detected: %s", compression.Extension())
 
 	cmd := exec.Command("tar", "--numeric-owner", "-f", "-", "-C", path, "-x"+compression.Flag())
-	cmd.Stdin = bufferedArchive
+	cmd.Stdin = io.MultiReader(bytes.NewReader(buf), archive)
 	// Hardcode locale environment for predictable outcome regardless of host configuration.
 	//   (see https://github.com/dotcloud/docker/issues/355)
 	cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"}
@@ -251,7 +254,7 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
 		}
 		errText := <-errChan
 		if err := cmd.Wait(); err != nil {
-			pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
 		} else {
 			pipeW.Close()
 		}

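Editor's note: the new Untar above replaces bufio.Peek with a manual read of the first 10 bytes, then glues the sniffed header back in front of the remaining stream with io.MultiReader before piping it to tar. A minimal sketch of that pattern follows; the helper name peek is ours, and io.ReadFull's io.ErrUnexpectedEOF roughly corresponds to the commit's "Tarball too short" error.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// peek reads the first n bytes of r for format sniffing, then returns a
// reader that replays those bytes followed by the rest of the stream.
func peek(r io.Reader, n int) ([]byte, io.Reader, error) {
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, nil, err // short input: analogous to "Tarball too short"
	}
	return buf, io.MultiReader(bytes.NewReader(buf), r), nil
}

func main() {
	header, full, err := peek(strings.NewReader("BZh91AY...rest of archive"), 10)
	if err != nil {
		panic(err)
	}
	rest, err := io.ReadAll(full)
	if err != nil {
		panic(err)
	}
	fmt.Printf("sniffed %q, reconstructed %d bytes\n", header, len(rest))
}
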
+ 4 - 4
archive_test.go

@@ -16,7 +16,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {
 	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
 	out, err := CmdStream(cmd)
 	if err != nil {
-		t.Fatalf("Failed to start command: " + err.Error())
+		t.Fatalf("Failed to start command: %s", err)
 	}
 	errCh := make(chan error)
 	go func() {
@@ -26,7 +26,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {
 	select {
 	case err := <-errCh:
 		if err != nil {
-			t.Fatalf("Command should not have failed (err=%s...)", err.Error()[:100])
+			t.Fatalf("Command should not have failed (err=%.100s...)", err)
 		}
 	case <-time.After(5 * time.Second):
 		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
@@ -37,12 +37,12 @@ func TestCmdStreamBad(t *testing.T) {
 	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
 	out, err := CmdStream(badCmd)
 	if err != nil {
-		t.Fatalf("Failed to start command: " + err.Error())
+		t.Fatalf("Failed to start command: %s", err)
 	}
 	if output, err := ioutil.ReadAll(out); err == nil {
 		t.Fatalf("Command should have failed")
 	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
-		t.Fatalf("Wrong error value (%s)", err.Error())
+		t.Fatalf("Wrong error value (%s)", err)
 	} else if s := string(output); s != "hello\n" {
 		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
 	}

+ 4 - 8
auth/auth.go

@@ -15,8 +15,8 @@ import (
 // Where we store the config file
 const CONFIGFILE = ".dockercfg"
 
-// the registry server we want to login against
-const INDEXSERVER = "https://index.docker.io/v1"
+// Only used for user auth + account creation
+const INDEXSERVER = "https://index.docker.io/v1/"
 
 //const INDEXSERVER = "http://indexstaging-docker.dotcloud.com/"
 
@@ -41,9 +41,6 @@ func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
 }
 
 func IndexServerAddress() string {
-	if os.Getenv("DOCKER_INDEX_URL") != "" {
-		return os.Getenv("DOCKER_INDEX_URL") + "/v1"
-	}
 	return INDEXSERVER
 }
 
@@ -74,7 +71,6 @@ func decodeAuth(authStr string) (*AuthConfig, error) {
 	}
 	password := strings.Trim(arr[1], "\x00")
 	return &AuthConfig{Username: arr[0], Password: password}, nil
-
 }
 
 // load up the auth config information and return values
@@ -133,7 +129,7 @@ func Login(authConfig *AuthConfig, store bool) (string, error) {
 
 	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
 	b := strings.NewReader(string(jsonBody))
-	req1, err := http.Post(IndexServerAddress()+"/users/", "application/json; charset=utf-8", b)
+	req1, err := http.Post(IndexServerAddress()+"users/", "application/json; charset=utf-8", b)
 	if err != nil {
 		return "", fmt.Errorf("Server Error: %s", err)
 	}
@@ -153,7 +149,7 @@ func Login(authConfig *AuthConfig, store bool) (string, error) {
 			"Please check your e-mail for a confirmation link.")
 	} else if reqStatusCode == 400 {
 		if string(reqBody) == "\"Username or email already exists\"" {
-			req, err := http.NewRequest("GET", IndexServerAddress()+"/users/", nil)
+			req, err := http.NewRequest("GET", IndexServerAddress()+"users/", nil)
 			req.SetBasicAuth(authConfig.Username, authConfig.Password)
 			resp, err := client.Do(req)
 			if err != nil {

+ 1 - 1
auth/auth_test.go

@@ -68,6 +68,6 @@ func TestCreateAccount(t *testing.T) {
 	expectedError := "Login: Account is not Active"
 
 	if !strings.Contains(err.Error(), expectedError) {
-		t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err.Error())
+		t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
 	}
 }

+ 13 - 2
builder.go

@@ -50,12 +50,23 @@ func (builder *Builder) Create(config *Config) (*Container, error) {
 		config.Hostname = id[:12]
 	}
 
+	var args []string
+	var entrypoint string
+
+	if len(config.Entrypoint) != 0 {
+		entrypoint = config.Entrypoint[0]
+		args = append(config.Entrypoint[1:], config.Cmd...)
+	} else {
+		entrypoint = config.Cmd[0]
+		args = config.Cmd[1:]
+	}
+
 	container := &Container{
 		// FIXME: we should generate the ID here instead of receiving it as an argument
 		ID:              id,
 		Created:         time.Now(),
-		Path:            config.Cmd[0],
-		Args:            config.Cmd[1:], //FIXME: de-duplicate from config
+		Path:            entrypoint,
+		Args:            args, //FIXME: de-duplicate from config
 		Config:          config,
 		Image:           img.ID, // Always use the resolved image id
 		NetworkSettings: &NetworkSettings{},

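Editor's note: the Create hunk above introduces the rule that an Entrypoint, when set, supplies the executable and its leading arguments, with Cmd appended; otherwise Cmd alone is split into path and args as before. A standalone Go sketch of just that merge rule (the function name resolve is ours):

package main

import "fmt"

// resolve mirrors the entrypoint/cmd merge performed in Builder.Create.
func resolve(entrypoint, cmd []string) (path string, args []string) {
	if len(entrypoint) != 0 {
		return entrypoint[0], append(entrypoint[1:], cmd...)
	}
	return cmd[0], cmd[1:]
}

func main() {
	path, args := resolve([]string{"/bin/echo"}, []string{"-n", "foobar"})
	fmt.Println(path, args) // /bin/echo [-n foobar]

	path, args = resolve(nil, []string{"sh", "-c", "env"})
	fmt.Println(path, args) // sh [-c env]
}
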
+ 73 - 13
buildfile.go

@@ -29,6 +29,7 @@ type buildFile struct {
 	config     *Config
 	context    string
 
+	lastContainer *Container
 	tmpContainers map[string]struct{}
 	tmpImages     map[string]struct{}
 
@@ -51,20 +52,10 @@ func (b *buildFile) CmdFrom(name string) error {
 	image, err := b.runtime.repositories.LookupImage(name)
 	if err != nil {
 		if b.runtime.graph.IsNotExist(err) {
-
-			var tag, remote string
-			if strings.Contains(name, ":") {
-				remoteParts := strings.Split(name, ":")
-				tag = remoteParts[1]
-				remote = remoteParts[0]
-			} else {
-				remote = name
-			}
-
-			if err := b.srv.ImagePull(remote, tag, "", b.out, utils.NewStreamFormatter(false), nil); err != nil {
+			remote, tag := utils.ParseRepositoryTag(name)
+			if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil); err != nil {
 				return err
 			}
-
 			image, err = b.runtime.repositories.LookupImage(name)
 			if err != nil {
 				return err
@@ -141,7 +132,7 @@ func (b *buildFile) CmdEnv(args string) error {
 func (b *buildFile) CmdCmd(args string) error {
 	var cmd []string
 	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
-		utils.Debugf("Error unmarshalling: %s, using /bin/sh -c", err)
+		utils.Debugf("Error unmarshalling: %s, setting cmd to /bin/sh -c", err)
 		cmd = []string{"/bin/sh", "-c", args}
 	}
 	if err := b.commit("", cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
@@ -165,6 +156,44 @@ func (b *buildFile) CmdCopy(args string) error {
 	return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
 }
 
+func (b *buildFile) CmdEntrypoint(args string) error {
+	if args == "" {
+		return fmt.Errorf("Entrypoint cannot be empty")
+	}
+
+	var entrypoint []string
+	if err := json.Unmarshal([]byte(args), &entrypoint); err != nil {
+		b.config.Entrypoint = []string{"/bin/sh", "-c", args}
+	} else {
+		b.config.Entrypoint = entrypoint
+	}
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %s", args)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (b *buildFile) CmdVolume(args string) error {
+	if args == "" {
+		return fmt.Errorf("Volume cannot be empty")
+	}
+
+	var volume []string
+	if err := json.Unmarshal([]byte(args), &volume); err != nil {
+		volume = []string{args}
+	}
+	if b.config.Volumes == nil {
+		b.config.Volumes = NewPathOpts()
+	}
+	for _, v := range volume {
+		b.config.Volumes[v] = struct{}{}
+	}
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (b *buildFile) addRemote(container *Container, orig, dest string) error {
 	file, err := utils.Download(orig, ioutil.Discard)
 	if err != nil {
@@ -225,6 +254,7 @@ func (b *buildFile) CmdAdd(args string) error {
 		return err
 	}
 	b.tmpContainers[container.ID] = struct{}{}
+	b.lastContainer = container
 
 	if err := container.EnsureMounted(); err != nil {
 		return err
@@ -260,8 +290,13 @@ func (b *buildFile) run() (string, error) {
 		return "", err
 	}
 	b.tmpContainers[c.ID] = struct{}{}
+	b.lastContainer = c
 	fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
 
+	// override the entry point that may have been picked up from the base image
+	c.Path = b.config.Cmd[0]
+	c.Args = b.config.Cmd[1:]
+
 	//start the container
 	hostConfig := &HostConfig{}
 	if err := c.Start(hostConfig); err != nil {
@@ -302,6 +337,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			return err
 		}
 		b.tmpContainers[container.ID] = struct{}{}
+		b.lastContainer = container
 		fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
 		id = container.ID
 		if err := container.EnsureMounted(); err != nil {
@@ -329,6 +365,29 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 }
 
 func (b *buildFile) Build(context io.Reader) (string, error) {
+	defer func() {
+		// If we have an error and a container, the display the logs
+		if b.lastContainer != nil {
+			fmt.Fprintf(b.out, "******** Logs from last container (%s) *******\n", b.lastContainer.ShortID())
+
+			cLog, err := b.lastContainer.ReadLog("stdout")
+			if err != nil {
+				utils.Debugf("Error reading logs (stdout): %s", err)
+			}
+			if _, err := io.Copy(b.out, cLog); err != nil {
+				utils.Debugf("Error streaming logs (stdout): %s", err)
+			}
+			cLog, err = b.lastContainer.ReadLog("stderr")
+			if err != nil {
+				utils.Debugf("Error reading logs (stderr): %s", err)
+			}
+			if _, err := io.Copy(b.out, cLog); err != nil {
+				utils.Debugf("Error streaming logs (stderr): %s", err)
+			}
+			fmt.Fprintf(b.out, "************* End of logs for %s *************\n", b.lastContainer.ShortID())
+		}
+	}()
+
 	// FIXME: @creack any reason for using /tmp instead of ""?
 	// FIXME: @creack "name" is a terrible variable name
 	name, err := ioutil.TempDir("/tmp", "docker-build")
@@ -381,6 +440,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 			return "", ret.(error)
 		}
 
+		b.lastContainer = nil
 		fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
 	}
 	if b.image != "" {

+ 66 - 7
buildfile_test.go

@@ -1,6 +1,7 @@
 package docker
 
 import (
+	"fmt"
 	"io/ioutil"
 	"testing"
 )
@@ -8,7 +9,7 @@ import (
 // mkTestContext generates a build context from the contents of the provided dockerfile.
 // This context is suitable for use as an argument to BuildFile.Build()
 func mkTestContext(dockerfile string, files [][2]string, t *testing.T) Archive {
-	context, err := mkBuildContext(dockerfile, files)
+	context, err := mkBuildContext(fmt.Sprintf(dockerfile, unitTestImageID), files)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -25,10 +26,10 @@ type testContextTemplate struct {
 
 // A table of all the contexts to build and test.
 // A new docker runtime will be created and torn down for each context.
-var testContexts []testContextTemplate = []testContextTemplate{
+var testContexts = []testContextTemplate{
 	{
 		`
-from   docker-ut
+from   %s
 run    sh -c 'echo root:testpass > /tmp/passwd'
 run    mkdir -p /var/run/sshd
 run    [ "$(cat /tmp/passwd)" = "root:testpass" ]
@@ -39,7 +40,7 @@ run    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
 
 	{
 		`
-from docker-ut
+from %s
 add foo /usr/lib/bla/bar
 run [ "$(cat /usr/lib/bla/bar)" = 'hello world!' ]
 `,
@@ -48,7 +49,7 @@ run [ "$(cat /usr/lib/bla/bar)" = 'hello world!' ]
 
 	{
 		`
-from docker-ut
+from %s
 add f /
 run [ "$(cat /f)" = "hello" ]
 add f /abc
@@ -74,9 +75,27 @@ run [ "$(cat /somewheeeere/over/the/rainbooow/ga)" = "bu" ]
 
 	{
 		`
-from docker-ut
+from %s
 env    FOO BAR
 run    [ "$FOO" = "BAR" ]
+`,
+		nil,
+	},
+
+	{
+		`
+from %s
+ENTRYPOINT /bin/echo
+CMD Hello world
+`,
+		nil,
+	},
+
+	{
+		`
+from %s
+VOLUME /test
+CMD Hello world
 `,
 		nil,
 	},
@@ -92,7 +111,11 @@ func TestBuild(t *testing.T) {
 		}
 		defer nuke(runtime)
 
-		srv := &Server{runtime: runtime}
+		srv := &Server{
+			runtime:     runtime,
+			pullingPool: make(map[string]struct{}),
+			pushingPool: make(map[string]struct{}),
+		}
 
 		buildfile := NewBuildFile(srv, ioutil.Discard)
 		if _, err := buildfile.Build(mkTestContext(ctx.dockerfile, ctx.files, t)); err != nil {
@@ -100,3 +123,39 @@ func TestBuild(t *testing.T) {
 		}
 	}
 }
+
+func TestVolume(t *testing.T) {
+	runtime, err := newTestRuntime()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer nuke(runtime)
+
+	srv := &Server{
+		runtime:     runtime,
+		pullingPool: make(map[string]struct{}),
+		pushingPool: make(map[string]struct{}),
+	}
+
+	buildfile := NewBuildFile(srv, ioutil.Discard)
+	imgId, err := buildfile.Build(mkTestContext(`
+from %s
+VOLUME /test
+CMD Hello world
+`, nil, t))
+	if err != nil {
+		t.Fatal(err)
+	}
+	img, err := srv.ImageInspect(imgId)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(img.Config.Volumes) == 0 {
+		t.Fail()
+	}
+	for key, _ := range img.Config.Volumes {
+		if key != "/test" {
+			t.Fail()
+		}
+	}
+}

+ 52 - 36
commands.go

@@ -19,7 +19,6 @@ import (
 	"os/signal"
 	"path/filepath"
 	"reflect"
-	"regexp"
 	"strconv"
 	"strings"
 	"syscall"
@@ -28,7 +27,7 @@ import (
 	"unicode"
 )
 
-const VERSION = "0.4.6"
+const VERSION = "0.4.8"
 
 var (
 	GITCOMMIT string
@@ -73,7 +72,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
 			return nil
 		}
 	}
-	help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n  -H=[tcp://%s:%d]: tcp://host:port to bind/connect to or unix://path/to/socker to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTHTTPHOST, DEFAULTHTTPPORT)
+	help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n  -H=[tcp://%s:%d]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTHTTPHOST, DEFAULTHTTPPORT)
 	for _, command := range [][]string{
 		{"attach", "Attach to a running container"},
 		{"build", "Build a container from a Dockerfile"},
@@ -749,7 +748,6 @@ func (cli *DockerCli) CmdImport(args ...string) error {
 
 func (cli *DockerCli) CmdPush(args ...string) error {
 	cmd := Subcmd("push", "[OPTION] NAME", "Push an image or a repository to the registry")
-	registry := cmd.String("registry", "", "Registry host to push the image to")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -760,10 +758,14 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 		return nil
 	}
 
-	if err := cli.checkIfLogged(*registry == "", "push"); err != nil {
+	if err := cli.checkIfLogged("push"); err != nil {
 		return err
 	}
 
+	// If we're not using a custom registry, we know the restrictions
+	// applied to repository names and can warn the user in advance.
+	// Custom repositories can have different rules, and we must also
+	// allow pushing by image ID.
 	if len(strings.SplitN(name, "/", 2)) == 1 {
 		return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", cli.authConfig.Username, name)
 	}
@@ -772,18 +774,8 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 	if err != nil {
 		return err
 	}
-	nameParts := strings.SplitN(name, "/", 2)
-	validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
-	if !validNamespace.MatchString(nameParts[0]) {
-		return fmt.Errorf("Invalid namespace name (%s), only [a-z0-9_] are allowed, size between 4 and 30", nameParts[0])
-	}
-	validRepo := regexp.MustCompile(`^([a-zA-Z0-9-_.]+)$`)
-	if !validRepo.MatchString(nameParts[1]) {
-		return fmt.Errorf("Invalid repository name (%s), only [a-zA-Z0-9-_.] are allowed", nameParts[1])
-	}
 
 	v := url.Values{}
-	v.Set("registry", *registry)
 	if err := cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), bytes.NewBuffer(buf), cli.out); err != nil {
 		return err
 	}
@@ -793,7 +785,6 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 func (cli *DockerCli) CmdPull(args ...string) error {
 	cmd := Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
 	tag := cmd.String("t", "", "Download tagged image in repository")
-	registry := cmd.String("registry", "", "Registry to download from. Necessary if image is pulled by ID")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -803,17 +794,12 @@ func (cli *DockerCli) CmdPull(args ...string) error {
 		return nil
 	}
 
-	remote := cmd.Arg(0)
-	if strings.Contains(remote, ":") {
-		remoteParts := strings.Split(remote, ":")
-		tag = &remoteParts[1]
-		remote = remoteParts[0]
-	}
+	remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
+	*tag = parsedTag
 
 	v := url.Values{}
 	v.Set("fromImage", remote)
 	v.Set("tag", *tag)
-	v.Set("registry", *registry)
 
 	if err := cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out); err != nil {
 		return err
@@ -1139,6 +1125,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
 
 func (cli *DockerCli) CmdSearch(args ...string) error {
 	cmd := Subcmd("search", "NAME", "Search the docker index for images")
+	noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
 	if err := cmd.Parse(args); err != nil {
 		return nil
 	}
@@ -1160,13 +1147,19 @@ func (cli *DockerCli) CmdSearch(args ...string) error {
 		return err
 	}
 	fmt.Fprintf(cli.out, "Found %d results matching your query (\"%s\")\n", len(outs), cmd.Arg(0))
-	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	w := tabwriter.NewWriter(cli.out, 33, 1, 3, ' ', 0)
 	fmt.Fprintf(w, "NAME\tDESCRIPTION\n")
+	_, width := cli.getTtySize()
+	if width == 0 {
+		width = 45
+	} else {
+		width = width - 33 //remote the first column
+	}
 	for _, out := range outs {
 		desc := strings.Replace(out.Description, "\n", " ", -1)
 		desc = strings.Replace(desc, "\r", " ", -1)
-		if len(desc) > 45 {
-			desc = utils.Trunc(desc, 42) + "..."
+		if !*noTrunc && len(desc) > width {
+			desc = utils.Trunc(desc, width-3) + "..."
 		}
 		fmt.Fprintf(w, "%s\t%s\n", out.Name, desc)
 	}
@@ -1277,7 +1270,9 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	//if image not found try to pull it
 	if statusCode == 404 {
 		v := url.Values{}
-		v.Set("fromImage", config.Image)
+		repos, tag := utils.ParseRepositoryTag(config.Image)
+		v.Set("fromImage", repos)
+		v.Set("tag", tag)
 		err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err)
 		if err != nil {
 			return err
@@ -1298,7 +1293,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 	}
 
 	for _, warning := range runResult.Warnings {
-		fmt.Fprintln(cli.err, "WARNING: ", warning)
+		fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
 	}
 
 	//start the container
@@ -1306,9 +1301,15 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 		return err
 	}
 
+	var wait chan struct{}
+
 	if !config.AttachStdout && !config.AttachStderr {
 		// Make this asynchrone in order to let the client write to stdin before having to read the ID
-		go fmt.Fprintf(cli.out, "%s\n", runResult.ID)
+		wait = make(chan struct{})
+		go func() {
+			defer close(wait)
+			fmt.Fprintf(cli.out, "%s\n", runResult.ID)
+		}()
 	}
 
 	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
@@ -1337,12 +1338,16 @@ func (cli *DockerCli) CmdRun(args ...string) error {
 			return err
 		}
 	}
+
+	if !config.AttachStdout && !config.AttachStderr {
+		<-wait
+	}
 	return nil
 }
 
-func (cli *DockerCli) checkIfLogged(condition bool, action string) error {
+func (cli *DockerCli) checkIfLogged(action string) error {
 	// If condition AND the login failed
-	if condition && cli.authConfig.Username == "" {
+	if cli.authConfig.Username == "" {
 		if err := cli.CmdLogin(""); err != nil {
 			return err
 		}
@@ -1530,17 +1535,28 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
 
 }
 
-func (cli *DockerCli) resizeTty(id string) {
+func (cli *DockerCli) getTtySize() (int, int) {
 	if !cli.isTerminal {
-		return
+		return 0, 0
 	}
 	ws, err := term.GetWinsize(cli.terminalFd)
 	if err != nil {
 		utils.Debugf("Error getting size: %s", err)
+		if ws == nil {
+			return 0, 0
+		}
+	}
+	return int(ws.Height), int(ws.Width)
+}
+
+func (cli *DockerCli) resizeTty(id string) {
+	height, width := cli.getTtySize()
+	if height == 0 && width == 0 {
+		return
 	}
 	v := url.Values{}
-	v.Set("h", strconv.Itoa(int(ws.Height)))
-	v.Set("w", strconv.Itoa(int(ws.Width)))
+	v.Set("h", strconv.Itoa(height))
+	v.Set("w", strconv.Itoa(width))
 	if _, _, err := cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil); err != nil {
 		utils.Debugf("Error resize: %s", err)
 	}
@@ -1576,7 +1592,7 @@ func Subcmd(name, signature, description string) *flag.FlagSet {
 
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
 	var (
-		isTerminal bool = false
+		isTerminal = false
 		terminalFd uintptr
 	)
 

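Editor's note: the getTtySize and search hunks above combine into width-aware truncation: descriptions are cut to the width remaining after the 33-column NAME column, falling back to 45 columns when no tty size is available, unless -notrunc is given. A simplified sketch (byte-based slicing instead of utils.Trunc; the function name is ours):

package main

import "fmt"

// truncDesc trims a description to the available column width, mirroring
// the behavior added to `docker search`.
func truncDesc(desc string, width int, noTrunc bool) string {
	if width == 0 {
		width = 45 // fallback when getTtySize reports no terminal
	}
	if !noTrunc && len(desc) > width {
		return desc[:width-3] + "..."
	}
	return desc
}

func main() {
	long := "An image description that is far too long to fit in a narrow terminal column"
	fmt.Println(truncDesc(long, 40, false))
	fmt.Println(truncDesc(long, 40, true))
}
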
+ 5 - 4
commands_test.go

@@ -132,17 +132,18 @@ func TestImages(t *testing.T) {
 }
 
 */
+
 // TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
 func TestRunHostname(t *testing.T) {
 	stdout, stdoutPipe := io.Pipe()
 
-	cli := NewDockerCli(nil, stdoutPipe, nil, testDaemonProto, testDaemonAddr)
+	cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	defer cleanup(globalRuntime)
 
 	c := make(chan struct{})
 	go func() {
 		defer close(c)
-		if err := cli.CmdRun("-h", "foobar", unitTestImageId, "hostname"); err != nil {
+		if err := cli.CmdRun("-h", "foobar", unitTestImageID, "hostname"); err != nil {
 			t.Fatal(err)
 		}
 	}()
@@ -329,13 +330,13 @@ func TestRunAttachStdin(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
 
-	cli := NewDockerCli(stdin, stdoutPipe, nil, testDaemonProto, testDaemonAddr)
+	cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
 	defer cleanup(globalRuntime)
 
 	ch := make(chan struct{})
 	go func() {
 		defer close(ch)
-		cli.CmdRun("-i", "-a", "stdin", unitTestImageId, "sh", "-c", "echo hello && cat")
+		cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat")
 	}()
 
 	// Send input to the command, close stdin

+ 29 - 12
container.go

@@ -76,6 +76,7 @@ type Config struct {
 	Image        string // Name of the image as it was passed by the operator (eg. could be symbolic)
 	Volumes      map[string]struct{}
 	VolumesFrom  string
+	Entrypoint   []string
 }
 
 type HostConfig struct {
@@ -123,6 +124,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	cmd.Var(flVolumes, "v", "Attach a data volume")
 
 	flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
+	flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
 
 	var flBinds ListOpts
 	cmd.Var(&flBinds, "b", "Bind mount a volume from the host (e.g. -b /host:/container)")
@@ -153,6 +155,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 
 	parsedArgs := cmd.Args()
 	runCmd := []string{}
+	entrypoint := []string{}
 	image := ""
 	if len(parsedArgs) >= 1 {
 		image = cmd.Arg(0)
@@ -160,6 +163,10 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	if len(parsedArgs) > 1 {
 		runCmd = parsedArgs[1:]
 	}
+	if *flEntrypoint != "" {
+		entrypoint = []string{*flEntrypoint}
+	}
+
 	config := &Config{
 		Hostname:     *flHostname,
 		PortSpecs:    flPorts,
@@ -177,6 +184,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 		Image:        image,
 		Volumes:      flVolumes,
 		VolumesFrom:  *flVolumesFrom,
+		Entrypoint:   entrypoint,
 	}
 	hostConfig := &HostConfig{
 		Binds: flBinds,
@@ -194,20 +202,25 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig,
 	return config, hostConfig, cmd, nil
 }
 
+type PortMapping map[string]string
+
 type NetworkSettings struct {
 	IPAddress   string
 	IPPrefixLen int
 	Gateway     string
 	Bridge      string
-	PortMapping map[string]string
+	PortMapping map[string]PortMapping
 }
 
 // String returns a human-readable description of the port mapping defined in the settings
 func (settings *NetworkSettings) PortMappingHuman() string {
 	var mapping []string
-	for private, public := range settings.PortMapping {
+	for private, public := range settings.PortMapping["Tcp"] {
 		mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
 	}
+	for private, public := range settings.PortMapping["Udp"] {
+		mapping = append(mapping, fmt.Sprintf("%s->%s/udp", public, private))
+	}
 	sort.Strings(mapping)
 	return strings.Join(mapping, ", ")
 }
@@ -458,8 +471,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 }
 
 func (container *Container) Start(hostConfig *HostConfig) error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 
 	if container.State.Running {
 		return fmt.Errorf("The container %s is already running.", container.ID)
@@ -486,7 +499,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 	// Create the requested bind mounts
 	binds := make(map[string]BindMap)
 	// Define illegal container destinations
-	illegal_dsts := []string{"/", "."}
+	illegalDsts := []string{"/", "."}
 
 	for _, bind := range hostConfig.Binds {
 		// FIXME: factorize bind parsing in parseBind
@@ -505,7 +518,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		}
 
 		// Bail if trying to mount to an illegal destination
-		for _, illegal := range illegal_dsts {
+		for _, illegal := range illegalDsts {
 			if dst == illegal {
 				return fmt.Errorf("Illegal bind destination: %s", dst)
 			}
@@ -680,14 +693,18 @@ func (container *Container) allocateNetwork() error {
 	if err != nil {
 		return err
 	}
-	container.NetworkSettings.PortMapping = make(map[string]string)
+	container.NetworkSettings.PortMapping = make(map[string]PortMapping)
+	container.NetworkSettings.PortMapping["Tcp"] = make(PortMapping)
+	container.NetworkSettings.PortMapping["Udp"] = make(PortMapping)
 	for _, spec := range container.Config.PortSpecs {
 		nat, err := iface.AllocatePort(spec)
 		if err != nil {
 			iface.Release()
 			return err
 		}
-		container.NetworkSettings.PortMapping[strconv.Itoa(nat.Backend)] = strconv.Itoa(nat.Frontend)
+		proto := strings.Title(nat.Proto)
+		backend, frontend := strconv.Itoa(nat.Backend), strconv.Itoa(nat.Frontend)
+		container.NetworkSettings.PortMapping[proto][backend] = frontend
 	}
 	container.network = iface
 	container.NetworkSettings.Bridge = container.runtime.networkManager.bridgeIface
@@ -813,8 +830,8 @@ func (container *Container) kill() error {
 }
 
 func (container *Container) Kill() error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}
@@ -822,8 +839,8 @@ func (container *Container) Kill() error {
 }
 
 func (container *Container) Stop(seconds int) error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}

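Editor's note: NetworkSettings.PortMapping above changes from a flat map to one nested by protocol ("Tcp"/"Udp"), and the human-readable form suffixes UDP entries with "/udp". A small runnable Go sketch of the new shape and PortMappingHuman-style rendering (the port numbers are made up):

package main

import (
	"fmt"
	"sort"
	"strings"
)

type PortMapping map[string]string

// human renders nested protocol->private->public mappings the way
// PortMappingHuman does after this commit.
func human(m map[string]PortMapping) string {
	var out []string
	for private, public := range m["Tcp"] {
		out = append(out, fmt.Sprintf("%s->%s", public, private))
	}
	for private, public := range m["Udp"] {
		out = append(out, fmt.Sprintf("%s->%s/udp", public, private))
	}
	sort.Strings(out)
	return strings.Join(out, ", ")
}

func main() {
	m := map[string]PortMapping{
		"Tcp": {"80": "49153"},
		"Udp": {"53": "49154"},
	}
	fmt.Println(human(m)) // 49153->80, 49154->53/udp
}
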
+ 34 - 6
container_test.go

@@ -511,12 +511,14 @@ func TestKillDifferentUser(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// Give some time to lxc to spawn the process (setuid might take some time)
-	container.WaitTimeout(500 * time.Millisecond)
+	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
+		for !container.State.Running {
+			time.Sleep(10 * time.Millisecond)
+		}
+	})
 
-	if !container.State.Running {
-		t.Errorf("Container should be running")
-	}
+	// Even if the state is running, lets give some time to lxc to spawn the process
+	container.WaitTimeout(500 * time.Millisecond)
 
 	if err := container.Kill(); err != nil {
 		t.Fatal(err)
@@ -1001,7 +1003,7 @@ func TestEnv(t *testing.T) {
 	defer nuke(runtime)
 	container, err := NewBuilder(runtime).Create(&Config{
 		Image: GetTestImage(runtime).ID,
-		Cmd:   []string{"/usr/bin/env"},
+		Cmd:   []string{"env"},
 	},
 	)
 	if err != nil {
@@ -1043,6 +1045,32 @@ func TestEnv(t *testing.T) {
 	}
 }
 
+func TestEntrypoint(t *testing.T) {
+	runtime, err := newTestRuntime()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer nuke(runtime)
+	container, err := NewBuilder(runtime).Create(
+		&Config{
+			Image:      GetTestImage(runtime).ID,
+			Entrypoint: []string{"/bin/echo"},
+			Cmd:        []string{"-n", "foobar"},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer runtime.Destroy(container)
+	output, err := container.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(output) != "foobar" {
+		t.Error(string(output))
+	}
+}
+
 func grepFile(t *testing.T, path string, pattern string) {
 	f, err := os.Open(path)
 	if err != nil {

+ 3 - 0
contrib/mkimage-unittest.sh

@@ -23,7 +23,9 @@ mkdir bin etc dev dev/pts lib proc sys tmp
 touch etc/resolv.conf
 cp /etc/nsswitch.conf etc/nsswitch.conf
 echo root:x:0:0:root:/:/bin/sh > etc/passwd
+echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd
 echo root:x:0: > etc/group
+echo daemon:x:1: >> etc/group
 ln -s lib lib64
 ln -s bin sbin
 cp $BUSYBOX $SOCAT bin
@@ -41,6 +43,7 @@ do
     cp -a /dev/$X dev
 done
 
+chmod 0755 $ROOTFS # See #486
 tar -cf- . | docker import - docker-ut
 docker run -i -u root docker-ut /bin/echo Success.
 rm -rf $ROOTFS

+ 1 - 1
docker/docker.go

@@ -37,7 +37,7 @@ func main() {
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Parse()
 	if len(flHosts) > 1 {
-		flHosts = flHosts[1:len(flHosts)] //trick to display a nice defaul value in the usage
+		flHosts = flHosts[1:] //trick to display a nice defaul value in the usage
 	}
 	for i, flHost := range flHosts {
 		flHosts[i] = utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)

+ 3 - 0
docs/sources/api/docker_remote_api.rst

@@ -135,3 +135,6 @@ and we will add the libraries here.
 | Javascript (Angular) | dockerui       | https://github.com/crosbymichael/dockerui  |
 | **WebUI**            |                |                                            |
 +----------------------+----------------+--------------------------------------------+
+| Java                 | docker-java    | https://github.com/kpelykh/docker-java     |
++----------------------+----------------+--------------------------------------------+
+

+ 1 - 1
docs/sources/api/docker_remote_api_v1.3.rst

@@ -840,7 +840,7 @@ Remove an image
 	    {"Deleted":"53b4f83ac9"}
 	   ]
 
-	:statuscode 204: no error
+	:statuscode 200: no error
         :statuscode 404: no such image
 	:statuscode 409: conflict
         :statuscode 500: server error

+ 1 - 0
docs/sources/commandline/command/run.rst

@@ -26,3 +26,4 @@
       -v=[]: Creates a new volume and mounts it at the specified path.
       -volumes-from="": Mount all volumes from the given container.
       -b=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]
+      -entrypoint="": Overwrite the default entrypoint set by the image.

BIN
docs/sources/concepts/images/dockerlogo-h.png


BIN
docs/sources/concepts/images/dockerlogo-v.png


+ 4 - 4
docs/sources/concepts/index.rst

@@ -1,10 +1,10 @@
-:title: Concepts
-:description: -- todo: change me
+:title: Overview
+:description: Docker documentation summary
 :keywords: concepts, documentation, docker, containers
 
 
 
-Concepts
+Overview
 ========
 
 Contents:
@@ -13,4 +13,4 @@ Contents:
    :maxdepth: 1
 
    ../index
-
+   manifesto

+ 190 - 0
docs/sources/concepts/manifesto.rst

@@ -0,0 +1,190 @@
+:title: Manifesto
+:description: An overview of Docker and standard containers
+:keywords: containers, lxc, concepts, explanation
+
+.. _dockermanifesto:
+
+*(This was our original Welcome page, but it is a bit forward-looking
+for docs, and maybe not enough vision for a true manifesto. We'll
+reveal more vision in the future to make it more Manifesto-y.)*
+
+Docker Manifesto
+----------------
+
+Docker complements LXC with a high-level API which operates at the
+process level. It runs unix processes with strong guarantees of
+isolation and repeatability across servers.
+
+Docker is a great building block for automating distributed systems:
+large-scale web deployments, database clusters, continuous deployment
+systems, private PaaS, service-oriented architectures, etc.
+
+- **Heterogeneous payloads** Any combination of binaries, libraries,
+  configuration files, scripts, virtualenvs, jars, gems, tarballs, you
+  name it. No more juggling between domain-specific tools. Docker can
+  deploy and run them all.
+- **Any server** Docker can run on any x64 machine with a modern linux
+  kernel - whether it's a laptop, a bare metal server or a VM. This
+  makes it perfect for multi-cloud deployments.
+- **Isolation** docker isolates processes from each other and from the
+  underlying host, using lightweight containers.
+- **Repeatability** Because containers are isolated in their own
+  filesystem, they behave the same regardless of where, when, and
+  alongside what they run.
+
+.. image:: images/lego_docker.jpg
+   :target: http://bricks.argz.com/ins/7823-1/12
+
+What is a Standard Container?
+.............................
+
+Docker defines a unit of software delivery called a Standard
+Container. The goal of a Standard Container is to encapsulate a
+software component and all its dependencies in a format that is
+self-describing and portable, so that any compliant runtime can run it
+without extra dependency, regardless of the underlying machine and the
+contents of the container.
+
+The spec for Standard Containers is currently work in progress, but it
+is very straightforward. It mostly defines 1) an image format, 2) a
+set of standard operations, and 3) an execution environment.
+
+A great analogy for this is the shipping container. Just like Standard
+Containers are a fundamental unit of software delivery, shipping
+containers are a fundamental unit of physical delivery.
+
+Standard operations
+~~~~~~~~~~~~~~~~~~~
+
+Just like shipping containers, Standard Containers define a set of
+STANDARD OPERATIONS. Shipping containers can be lifted, stacked,
+locked, loaded, unloaded and labelled. Similarly, standard containers
+can be started, stopped, copied, snapshotted, downloaded, uploaded and
+tagged.
+
+
+Content-agnostic
+~~~~~~~~~~~~~~~~~~~
+
+Just like shipping containers, Standard Containers are
+CONTENT-AGNOSTIC: all standard operations have the same effect
+regardless of the contents. A shipping container will be stacked in
+exactly the same way whether it contains Vietnamese powder coffee or
+spare Maserati parts. Similarly, Standard Containers are started or
+uploaded in the same way whether they contain a postgres database, a
+php application with its dependencies and application server, or Java
+build artifacts.
+
+Infrastructure-agnostic
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be
+transported to thousands of facilities around the world, and
+manipulated by a wide variety of equipment. A shipping container can
+be packed in a factory in Ukraine, transported by truck to the nearest
+routing center, stacked onto a train, loaded into a German boat by an
+Australian-built crane, stored in a warehouse at a US facility,
+etc. Similarly, a standard container can be bundled on my laptop,
+uploaded to S3, downloaded, run and snapshotted by a build server at
+Equinix in Virginia, uploaded to 10 staging servers in a home-made
+Openstack cluster, then sent to 30 production instances across 3 EC2
+regions.
+
+
+Designed for automation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Because they offer the same standard operations regardless of content
+and infrastructure, Standard Containers, just like their physical
+counterpart, are extremely well-suited for automation. In fact, you
+could say automation is their secret weapon.
+
+Many things that once required time-consuming and error-prone human
+effort can now be programmed. Before shipping containers, a bag of
+powder coffee was hauled, dragged, dropped, rolled and stacked by 10
+different people in 10 different locations by the time it reached its
+destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The
+process was slow, inefficient and cost a fortune - and was entirely
+different depending on the facility and the type of goods.
+
+Similarly, before Standard Containers, by the time a software
+component ran in production, it had been individually built,
+configured, bundled, documented, patched, vendored, templated, tweaked
+and instrumented by 10 different people on 10 different
+computers. Builds failed, libraries conflicted, mirrors crashed,
+post-it notes were lost, logs were misplaced, cluster updates were
+half-broken. The process was slow, inefficient and cost a fortune -
+and was entirely different depending on the language and
+infrastructure provider.
+
+Industrial-grade delivery
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are 17 million shipping containers in existence, packed with
+every physical good imaginable. Every single one of them can be loaded
+on the same boats, by the same cranes, in the same facilities, and
+sent anywhere in the World with incredible efficiency. It is
+embarrassing to think that a 30 ton shipment of coffee can safely
+travel half-way across the World in *less time* than it takes a
+software team to deliver its code from one datacenter to another
+sitting 10 miles away.
+
+With Standard Containers we can put an end to that embarrassment, by
+making INDUSTRIAL-GRADE DELIVERY of software a reality.
+
+Standard Container Specification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+(TODO)
+
+Image format
+~~~~~~~~~~~~
+
+Standard operations
+~~~~~~~~~~~~~~~~~~~
+
+-  Copy
+-  Run
+-  Stop
+-  Wait
+-  Commit
+-  Attach standard streams
+-  List filesystem changes
+-  ...
+
+Execution environment
+~~~~~~~~~~~~~~~~~~~~~
+
+Root filesystem
+^^^^^^^^^^^^^^^
+
+Environment variables
+^^^^^^^^^^^^^^^^^^^^^
+
+Process arguments
+^^^^^^^^^^^^^^^^^
+
+Networking
+^^^^^^^^^^
+
+Process namespacing
+^^^^^^^^^^^^^^^^^^^
+
+Resource limits
+^^^^^^^^^^^^^^^
+
+Process monitoring
+^^^^^^^^^^^^^^^^^^
+
+Logging
+^^^^^^^
+
+Signals
+^^^^^^^
+
+Pseudo-terminal allocation
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Security
+^^^^^^^^
+

+ 29 - 30
docs/sources/contributing/devenvironment.rst

@@ -5,53 +5,52 @@
 Setting Up a Dev Environment
 ============================
 
-Instructions that have been verified to work on Ubuntu Precise 12.04 (LTS) (64-bit),
+To make it easier to contribute to Docker, we provide a standard development environment. It is important that
+the same environment be used for all tests, builds and releases. The standard development environment defines
+all build dependencies: system libraries and binaries, go environment, go dependencies, etc.
 
 
-Dependencies
-------------
+Step 1: install docker
+----------------------
 
-**Linux kernel 3.8**
+Docker's build environment itself is a docker container, so the first step is to install docker on your system.
 
-Due to a bug in LXC docker works best on the 3.8 kernel. Precise comes with a 3.2 kernel, so we need to upgrade it. The kernel we install comes with AUFS built in.
+You can follow the `install instructions most relevant to your system <https://docs.docker.io/en/latest/installation/>`_.
+Make sure you have a working, up-to-date docker installation, then continue to the next step.
 
 
-.. code-block:: bash
+Step 2: check out the source
+----------------------------
 
-   # install the backported kernel
-   sudo apt-get update && sudo apt-get install linux-image-generic-lts-raring
+::
 
-   # reboot
-   sudo reboot
+    git clone https://github.com/dotcloud/docker
+    cd docker
 
 
-Installation
-------------
+Step 3: build
+-------------
 
-.. code-block:: bash
-		
-    sudo apt-get install python-software-properties
-    sudo add-apt-repository ppa:gophers/go
-    sudo apt-get update
-    sudo apt-get -y install lxc xz-utils curl golang-stable git aufs-tools
+When you are ready to build docker, run this command:
 
-    export GOPATH=~/go/
-    export PATH=$GOPATH/bin:$PATH
+::
 
-    mkdir -p $GOPATH/src/github.com/dotcloud
-    cd $GOPATH/src/github.com/dotcloud
-    git clone git://github.com/dotcloud/docker.git
-    cd docker
+    docker build -t docker .
+
+This will build the revision currently checked out in the repository. Feel free to check out the version
+of your choice.
 
-    go get -v github.com/dotcloud/docker/...
-    go install -v github.com/dotcloud/docker/...
+If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated
+in a standard build environment.
 
+You can run an interactive session in the newly built container:
 
-Then run the docker daemon,
+::
+
+    docker run -i -t docker bash
 
-.. code-block:: bash
 
-    sudo $GOPATH/bin/docker -d
+To extract the binaries from the container:
 
+::
+
+    docker run docker sh -c 'cat $(which docker)' > docker-build && chmod +x docker-build
 
-Run the ``go install`` command (above) to recompile docker.

+ 27 - 116
docs/sources/index.rst

@@ -1,127 +1,38 @@
-:title: Introduction
-:description: An introduction to docker and standard containers?
+:title: Welcome to the Docker Documentation
+:description: An overview of the Docker Documentation
 :keywords: containers, lxc, concepts, explanation
 
 .. _introduction:
 
-Introduction
-============
+Welcome
+=======
 
-Docker -- The Linux container runtime
--------------------------------------
+.. image:: concepts/images/dockerlogo-h.png
 
-Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
+``docker``, the Linux Container Runtime, runs Unix processes with
+strong guarantees of isolation across servers. Your software runs
+repeatably everywhere because its :ref:`container_def` includes all
+dependencies.
 
-Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
+``docker`` runs three ways:
 
+* as a daemon to manage LXC containers on your :ref:`Linux host
+  <kernel>` (``sudo docker -d``)
+* as a :ref:`CLI <cli>` which talks to the daemon's `REST API
+  <api/docker_remote_api>`_ (``docker run ...``)
+* as a client of :ref:`Repositories <working_with_the_repository>`
+  that let you share what you've built (``docker pull, docker
+  commit``).
 
-- **Heterogeneous payloads** Any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
-- **Any server** Docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
-- **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
-- **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
+Each use of ``docker`` is documented here. The features of Docker are
+currently in active development, so this documentation will change
+frequently.
 
-.. image:: concepts/images/lego_docker.jpg
-
-
-What is a Standard Container?
------------------------------
-
-Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
-a format that is self-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.
-
-The spec for Standard Containers is currently work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
-
-A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
-
-Standard operations
-~~~~~~~~~~~~~~~~~~~
-
-Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
-
-
-Content-agnostic
-~~~~~~~~~~~~~~~~~~~
-
-Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
-
-
-Infrastructure-agnostic
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
-
-
-Designed for automation
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
-
-Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
-
-Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
-
-
-Industrial-grade delivery
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
-
-With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
-
-
-Standard Container Specification
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-(TODO)
-
-Image format
-~~~~~~~~~~~~
-
-Standard operations
-~~~~~~~~~~~~~~~~~~~
-
--  Copy
--  Run
--  Stop
--  Wait
--  Commit
--  Attach standard streams
--  List filesystem changes
--  ...
-
-Execution environment
-~~~~~~~~~~~~~~~~~~~~~
-
-Root filesystem
-^^^^^^^^^^^^^^^
-
-Environment variables
-^^^^^^^^^^^^^^^^^^^^^
-
-Process arguments
-^^^^^^^^^^^^^^^^^
-
-Networking
-^^^^^^^^^^
-
-Process namespacing
-^^^^^^^^^^^^^^^^^^^
-
-Resource limits
-^^^^^^^^^^^^^^^
-
-Process monitoring
-^^^^^^^^^^^^^^^^^^
-
-Logging
-^^^^^^^
-
-Signals
-^^^^^^^
-
-Pseudo-terminal allocation
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Security
-^^^^^^^^
+For an overview of Docker, please see the `Introduction
+<http://www.docker.io>`_. When you're ready to start working with
+Docker, we have a `quick start <http://www.docker.io/gettingstarted>`_
+and a more in-depth guide to :ref:`ubuntu_linux` and other
+:ref:`installation_list` paths including prebuilt binaries,
+Vagrant-created VMs, Rackspace and Amazon instances.
 
+Enough reading! :ref:`Try it out! <running_examples>`

+ 9 - 4
docs/sources/installation/index.rst

@@ -1,12 +1,17 @@
-:title: Documentation
-:description: -- todo: change me
-:keywords: todo, docker, documentation, installation, OS support
-
+:title: Docker Installation
+:description: many ways to install Docker
+:keywords: docker, installation
 
+.. _installation_list:
 
 Installation
 ============
 
+There are a number of ways to install Docker, depending on where you
+want to run the daemon. The :ref:`ubuntu_linux` installation is the
+officially-tested version, and the community adds more techniques for
+installing Docker all the time.
+
 Contents:
 
 .. toctree::

+ 40 - 0
docs/sources/terms/container.rst

@@ -0,0 +1,40 @@
+:title: Container
+:description: Definitions of a container
+:keywords: containers, lxc, concepts, explanation, image, container
+
+.. _container_def:
+
+Container
+=========
+
+.. image:: images/docker-filesystems-busyboxrw.png
+
+Once you start a process in Docker from an :ref:`image_def`, Docker
+fetches the image and its :ref:`parent_image_def`, and repeats the
+process until it reaches the :ref:`base_image_def`. Then the
+:ref:`ufs_def` adds a read-write layer on top. That read-write layer,
+plus the information about its :ref:`parent_image_def` and some
+additional information like its unique id, networking configuration,
+and resource limits is called a **container**.
+
+.. _container_state_def:
+
+Container State
+...............
+
+Containers can change, and so they have state. A container may be
+**running** or **exited**. 
+
+When a container is running, the idea of a "container" also includes a
+tree of processes running on the CPU, isolated from the other
+processes running on the host.
+
+When the container is exited, the state of the file system and
+its exit value is preserved. You can start, stop, and restart a
+container. The processes restart from scratch (their memory state is
+**not** preserved in a container), but the file system is just as it
+was when the container was stopped.
+
+You can promote a container to an :ref:`image_def` with ``docker
+commit``. Once a container is an image, you can use it as a parent for
+new containers.

+ 38 - 0
docs/sources/terms/filesystem.rst

@@ -0,0 +1,38 @@
+:title: File Systems
+:description: How Linux organizes its persistent storage
+:keywords: containers, files, linux
+
+.. _filesystem_def:
+
+File System
+===========
+
+.. image:: images/docker-filesystems-generic.png
+
+In order for a Linux system to run, it typically needs two `file
+systems <http://en.wikipedia.org/wiki/Filesystem>`_:
+
+1. boot file system (bootfs)
+2. root file system (rootfs)
+
+The **boot file system** contains the bootloader and the kernel. The
+user never makes any changes to the boot file system. In fact, soon
+after the boot process is complete, the entire kernel is in memory,
+and the boot file system is unmounted to free up the RAM associated
+with the initrd disk image.
+
+
+The **root file system** includes the typical directory structure we
+associate with Unix-like operating systems: ``/dev, /proc, /bin, /etc,
+/lib, /usr,`` and ``/tmp`` plus all the configuration files, binaries
+and libraries required to run user applications (like bash, ls, and so
+forth). 
+
+While there can be important kernel differences between different
+Linux distributions, the contents and organization of the root file
+system are usually what make your software packages dependent on one
+distribution versus another. Docker can help solve this problem by
+running multiple distributions at the same time.
+
+.. image:: images/docker-filesystems-multiroot.png
+

+ 0 - 97
docs/sources/terms/fundamentals.rst

@@ -1,97 +0,0 @@
-:title: Image & Container
-:description: Definitions of an image and container
-:keywords: containers, lxc, concepts, explanation, image, container
-
-File Systems
-============
-
-.. image:: images/docker-filesystems-generic.png
-
-In order for a Linux system to run, it typically needs two `file
-systems <http://en.wikipedia.org/wiki/Filesystem>`_:
-
-1. boot file system (bootfs)
-2. root file system (rootfs)
-
-The **boot file system** contains the bootloader and the kernel. The
-user never makes any changes to the boot file system. In fact, soon
-after the boot process is complete, the entire kernel is in memory,
-and the boot file system is unmounted to free up the RAM associated
-with the initrd disk image.
-
-The **root file system** includes the typical directory structure we
-associate with Unix-like operating systems: ``/dev, /proc, /bin, /etc,
-/lib, /usr,`` and ``/tmp`` plus all the configuration files, binaries
-and libraries required to run user applications (like bash, ls, and so
-forth). 
-
-While there can be important kernal differences between different
-Linux distributions, the contents and organization of the root file
-system are usually what make your software packages dependent on one
-distribution versus another. Docker can help solve this problem by
-running multiple distributions at the same time.
-
-.. image:: images/docker-filesystems-multiroot.png
-
-Layers and Union Mounts
-=======================
-
-In a traditional Linux boot, the kernel first mounts the root file
-system as read-only, checks its integrity, and then switches the whole
-rootfs volume to read-write mode. Docker does something similar,
-*except* that instead of changing the file system to read-write mode,
-it takes advantage of a `union mount
-<http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file
-system *over* the read-only file system. In fact there may be multiple
-read-only file systems stacked on top of each other.
-
-.. image:: images/docker-filesystems-multilayer.png
-
-At first, the top layer has nothing in it, but any time a process
-creates a file, this happens in the top layer. And if something needs
-to update an existing file in a lower layer, then the file gets copied
-to the upper layer and changes go into the copy. The version of the
-file on the lower layer cannot be seen by the applications anymore,
-but it is there, unchanged.
-
-We call the union of the read-write layer and all the read-only layers
-a **union file system**.
-
-Image
-=====
-
-In Docker terminology, a read-only layer is called an **image**. An
-image never changes. Because Docker uses a union file system, the
-applications think the whole file system is mounted read-write,
-because any file can be changed. But all the changes go to the
-top-most layer, and underneath, the image is unchanged. Since they
-don't change, images do not have state.
-
-Each image may depend on one more image which forms the layer beneath
-it. We sometimes say that the lower image is the **parent** of the
-upper image.
-
-Base Image
-==========
-
-An image that has no parent is a **base image**.
-
-Container
-=========
-
-Once you start a process in Docker from an image, Docker fetches the
-image and its parent, and repeats the process until it reaches the
-base image. Then the union file system adds a read-write layer on
-top. That read-write layer, plus the information about its parent and
-some additional information like its unique id, is called a
-**container**. 
-
-Containers can change, and so they have state. A container may be
-running or exited. In either case, the state of the file system and
-its exit value is preserved. You can start, stop, and restart a
-container. The processes restart from scratch (their memory state is
-**not** preserved in a container), but the file system is just as it
-was when the container was stopped.
-
-You can promote a container to an image with ``docker commit``. Once a
-container is an image, you can use it as a parent for new containers.

+ 38 - 0
docs/sources/terms/image.rst

@@ -0,0 +1,38 @@
+:title: Images
+:description: Definition of an image
+:keywords: containers, lxc, concepts, explanation, image, container
+
+.. _image_def:
+
+Image
+=====
+
+.. image:: images/docker-filesystems-debian.png
+
+In Docker terminology, a read-only :ref:`layer_def` is called an
+**image**. An image never changes. 
+
+Since Docker uses a :ref:`ufs_def`, the processes think the whole file
+system is mounted read-write. But all the changes go to the top-most
+writeable layer, and underneath, the original file in the read-only
+image is unchanged. Since images don't change, images do not have state.
+
+.. image:: images/docker-filesystems-debianrw.png
+
+.. _parent_image_def:
+
+Parent Image
+............
+
+.. image:: images/docker-filesystems-multilayer.png
+
+Each image may depend on one other image, which forms the layer beneath
+it. We sometimes say that the lower image is the **parent** of the
+upper image.
+
+.. _base_image_def:
+
+Base Image
+..........
+
+An image that has no parent is a **base image**.

BIN
docs/sources/terms/images/docker-filesystems-busyboxrw.png


BIN
docs/sources/terms/images/docker-filesystems-debian.png


BIN
docs/sources/terms/images/docker-filesystems-debianrw.png


BIN
docs/sources/terms/images/docker-filesystems-generic.png


BIN
docs/sources/terms/images/docker-filesystems-multilayer.png


BIN
docs/sources/terms/images/docker-filesystems-multiroot.png


Some changes were omitted because the diff is too large
+ 37 - 23
docs/sources/terms/images/docker-filesystems.svg


+ 8 - 4
docs/sources/terms/index.rst

@@ -1,11 +1,11 @@
-:title: Terms
+:title: Glossary
 :description: Definitions of terms used in Docker documentation
 :keywords: concepts, documentation, docker, containers
 
 
 
-Terms
-=====
+Glossary
+========
 
 Definitions of terms used in Docker documentation.
 
@@ -14,5 +14,9 @@ Contents:
 .. toctree::
    :maxdepth: 1
 
-   fundamentals
+   filesystem
+   layer
+   image
+   container
+
 

+ 40 - 0
docs/sources/terms/layer.rst

@@ -0,0 +1,40 @@
+:title: Layers
+:description: Organizing the Docker Root File System
+:keywords: containers, lxc, concepts, explanation, image, container
+
+Layers
+======
+
+In a traditional Linux boot, the kernel first mounts the root
+:ref:`filesystem_def` as read-only, checks its integrity, and then
+switches the whole rootfs volume to read-write mode.
+
+.. _layer_def:
+
+Layer
+.....
+
+When Docker mounts the rootfs, it starts read-only, as in a traditional
+Linux boot, but then, instead of changing the file system to
+read-write mode, it takes advantage of a `union mount
+<http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file
+system *over* the read-only file system. In fact there may be multiple
+read-only file systems stacked on top of each other. We think of each
+one of these file systems as a **layer**.
+
+.. image:: images/docker-filesystems-multilayer.png
+
+At first, the top read-write layer has nothing in it, but any time a
+process creates a file, this happens in the top layer. And if
+something needs to update an existing file in a lower layer, then the
+file gets copied to the upper layer and changes go into the copy. The
+version of the file on the lower layer cannot be seen by the
+applications anymore, but it is there, unchanged.
+
+.. _ufs_def:
+
+Union File System
+.................
+
+We call the union of the read-write layer and all the read-only layers
+a **union file system**.

+ 3 - 2
docs/sources/toctree.rst

@@ -17,7 +17,8 @@ This documentation has the following resources:
    commandline/index
    contributing/index
    api/index
-   faq
    terms/index
+   faq
+
+
 
-.. image:: concepts/images/lego_docker.jpg

+ 14 - 0
docs/sources/use/builder.rst

@@ -153,6 +153,20 @@ of `<src>` will be written at `<dst>`.
 If `<dest>` doesn't exist, it is created along with all missing directories in its path. All new
 files and directories are created with mode 0700, uid and gid 0.
 
+2.8 ENTRYPOINT
+--------------
+
+    ``ENTRYPOINT /bin/echo``
+
+The `ENTRYPOINT` instruction adds an entry command that, unlike `CMD`, is not overwritten when arguments are passed to `docker run`. Instead, the arguments are passed on to the entrypoint; for example, `docker run <image> -d` will pass the "-d" argument to the entrypoint.
+
+2.9 VOLUME
+----------
+
+    ``VOLUME ["/data"]``
+
+The `VOLUME` instruction will add one or more new volumes to any container created from the image.
+
 3. Dockerfile Examples
 ======================
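
The `ENTRYPOINT` semantics documented above boil down to concatenating the image's entrypoint with the `docker run` arguments. Here is a minimal sketch of that merge rule in Go; it is purely illustrative and not code from this commit:

```go
// Hypothetical sketch: with ENTRYPOINT /bin/echo, `docker run <image> -d`
// executes `/bin/echo -d`, because run arguments are appended rather than
// substituted (as they would be for CMD).
entrypoint := []string{"/bin/echo"} // from the image's ENTRYPOINT
args := []string{"-d"}              // from the docker run command line
cmd := append(entrypoint, args...)  // effective process: ["/bin/echo", "-d"]
_ = cmd
```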
 

+ 7 - 7
docs/sources/use/workingwithrepository.rst

@@ -1,21 +1,21 @@
 :title: Working With Repositories
-:description: Generally, there are two types of repositories: Top-level repositories which are controlled by the people behind Docker, and user repositories.
+:description: Repositories allow users to share images.
 :keywords: repo, repositories, usage, pull image, push image, image, documentation
 
 .. _working_with_the_repository:
 
-Working with the Repository
-===========================
+Working with Repositories
+=========================
 
 
 Top-level repositories and user repositories
 --------------------------------------------
 
-Generally, there are two types of repositories: Top-level repositories which are controlled by the people behind
-Docker, and user repositories.
+Generally, there are two types of repositories: Top-level repositories
+which are controlled by the people behind Docker, and user
+repositories.
 
-* Top-level repositories can easily be recognized by not having a ``/`` (slash) in their name. These repositories can
-  generally be trusted.
+* Top-level repositories can easily be recognized by not having a ``/`` (slash) in their name. These repositories can generally be trusted.
 * User repositories always come in the form of ``<username>/<repo_name>``. This is what your published images will look like.
 * User images are not checked, it is therefore up to you whether or not you trust the creator of this image.
 

+ 1 - 2
graph.go

@@ -162,7 +162,7 @@ func (graph *Graph) Register(layerData Archive, store bool, img *Image) error {
 //   The archive is stored on disk and will be automatically deleted as soon as it has been read.
 //   If output is not nil, a human-readable progress bar will be written to it.
 //   FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
-func (graph *Graph) TempLayerArchive(id string, compression Compression, output io.Writer) (*TempArchive, error) {
+func (graph *Graph) TempLayerArchive(id string, compression Compression, sf *utils.StreamFormatter, output io.Writer) (*TempArchive, error) {
 	image, err := graph.Get(id)
 	if err != nil {
 		return nil, err
@@ -175,7 +175,6 @@ func (graph *Graph) TempLayerArchive(id string, compression Compression, output
 	if err != nil {
 		return nil, err
 	}
-	sf := utils.NewStreamFormatter(false)
 	return NewTempArchive(utils.ProgressReader(ioutil.NopCloser(archive), 0, output, sf.FormatProgress("Buffering to disk", "%v/%v (%v)"), sf), tmp.Root)
 }
 

+ 119 - 0
hack/RELEASE.md

@@ -0,0 +1,119 @@
+## A maintainer's guide to releasing Docker
+
+So you're in charge of a docker release? Cool. Here's what to do.
+
+If your experience deviates from this document, please document the changes to keep it
+up-to-date.
+
+
+### 1. Pull from master and create a release branch
+
+	```bash
+	$ git checkout master
+	$ git pull
+	$ git checkout -b bump_$VERSION
+	```
+
+### 2. Update CHANGELOG.md
+
+	You can run this command for reference:
+
+	```bash
+	LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
+	git log $LAST_VERSION..HEAD
+	```
+
+	Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
+
+	* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
+	new feature or upgrade, respectively.
+
+	* CATEGORY should describe which part of the project is affected.
+	Valid categories are:
+		* Runtime
+		* Remote API
+		* Builder
+		* Documentation
+		* Hack
+
+	* DESCRIPTION: a concise description of the change that is relevant to the end-user,
+	using the present tense.
+	Changes should be described in terms of how they affect the user, for example "new feature
+	X which allows Y", "fixed bug which caused X", "increased performance of Y".
+
+	EXAMPLES:
+
+		```
+		 + Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
+		 * Runtime: improve detection of kernel version
+		 - Remote API: fix a bug in the optional unix socket transport
+		 ```
+
+### 3. Change VERSION in commands.go
+
+### 4. Run all tests
+
+### 5. Commit and create a pull request
+
+	```bash
+	$ git add commands.go CHANGELOG.md
+	$ git commit -m "Bump version to $VERSION"
+	$ git push origin bump_$VERSION
+	```
+
+### 6. Get 2 other maintainers to validate the pull request
+
+### 7. Merge the pull request and apply tags
+
+	```bash
+	$ git checkout master
+	$ git merge bump_$VERSION
+	$ git tag -a v$VERSION # Don't forget the v!
+	$ git tag -f -a latest
+	$ git push
+	$ git push --tags
+	```
+
+### 8. Publish binaries
+
+	To run this you will need access to the release credentials.
+	Get them from [the infrastructure maintainers](https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).
+
+	```bash
+	$ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
+	$ BUILD=$(docker run -d -e RELEASE_PPA=0 $RELEASE_IMAGE)
+	```
+
+	This will do 2 things:
+	
+	* It will build and upload the binaries to http://get.docker.io
+	* It will *test* the release on our Ubuntu PPA (a PPA is a community repository for ubuntu packages)
+
+	Wait for the build to complete.
+
+	```bash
+	$ docker wait $BUILD # This should print 0. If it doesn't, your build failed.
+	```
+
+	Check that the output looks OK. Here's an example of a correct output:
+
+	```bash
+	$ docker logs 2>&1 b4e7c8299d73 | grep -e 'Public URL' -e 'Successfully uploaded'
+	Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-v0.4.7.tgz
+	Public URL of the object is: http://get.docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-latest.tgz
+	Successfully uploaded packages.
+	```
+
+	If you don't see 3 lines similar to this, something might be wrong. Check the full logs and try again.
+	
+
+### 9. Publish Ubuntu packages
+
+	If everything went well in the previous step, you can finalize the release by submitting the Ubuntu packages.
+
+	```bash
+	$ RELEASE_IMAGE=image_provided_by_infrastructure_maintainers
+	$ docker run -e RELEASE_PPA=1 $RELEASE_IMAGE
+	```
+
+	If that goes well, congratulations! You're done.

+ 4 - 1
hack/dockerbuilder/Dockerfile

@@ -5,8 +5,11 @@
 # AUTHOR          Solomon Hykes <solomon@dotcloud.com>
 #                 Daniel Mizyrycki <daniel@dotcloud.net>
 # BUILD_CMD       docker build -t dockerbuilder .
-# RUN_CMD         docker run -e AWS_ID="$AWS_ID" -e AWS_KEY="$AWS_KEY" -e GPG_KEY="$GPG_KEY" dockerbuilder
+# RUN_CMD         docker run -e AWS_ID="$AWS_ID" -e AWS_KEY="$AWS_KEY" -e GPG_KEY="$GPG_KEY" -e PUBLISH_PPA="$PUBLISH_PPA" dockerbuilder
 #
+# ENV_VARIABLES   AWS_ID, AWS_KEY: S3 credentials for uploading Docker binary and tarball
+#                 GPG_KEY: Signing key for docker package
+#                 PUBLISH_PPA: 0 for staging release, 1 for production release
 #
 from	ubuntu:12.04
 maintainer	Solomon Hykes <solomon@dotcloud.com>

+ 3 - 0
image.go

@@ -94,9 +94,12 @@ func StoreImage(img *Image, layerData Archive, root string, store bool) error {
 	}
 	// If layerData is not nil, unpack it into the new layer
 	if layerData != nil {
+		start := time.Now()
+		utils.Debugf("Start untar layer")
 		if err := Untar(layerData, layer); err != nil {
 			return err
 		}
+		utils.Debugf("Untar time: %vs\n", time.Now().Sub(start).Seconds())
 	}
 
 	return StoreSize(img, root)
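
The instrumentation added around `Untar` follows a simple start/`Debugf` pattern. As a hypothetical generalization (not part of this commit), the same pattern could be factored into a helper:

```go
// timed runs fn and logs its wall-clock duration via utils.Debugf.
func timed(label string, fn func() error) error {
	start := time.Now()
	err := fn()
	utils.Debugf("%s time: %vs\n", label, time.Now().Sub(start).Seconds())
	return err
}

// Usage: err := timed("Untar", func() error { return Untar(layerData, layer) })
```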

+ 135 - 96
network.go

@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/utils"
-	"io"
 	"log"
 	"net"
 	"os/exec"
@@ -183,8 +182,10 @@ func getIfaceAddr(name string) (net.Addr, error) {
 // up iptables rules.
 // It keeps track of all mappings and is able to unmap at will
 type PortMapper struct {
-	mapping map[int]net.TCPAddr
-	proxies map[int]net.Listener
+	tcpMapping map[int]*net.TCPAddr
+	tcpProxies map[int]Proxy
+	udpMapping map[int]*net.UDPAddr
+	udpProxies map[int]Proxy
 }
 
 func (mapper *PortMapper) cleanup() error {
@@ -197,8 +198,10 @@ func (mapper *PortMapper) cleanup() error {
 	iptables("-t", "nat", "-D", "OUTPUT", "-j", "DOCKER")
 	iptables("-t", "nat", "-F", "DOCKER")
 	iptables("-t", "nat", "-X", "DOCKER")
-	mapper.mapping = make(map[int]net.TCPAddr)
-	mapper.proxies = make(map[int]net.Listener)
+	mapper.tcpMapping = make(map[int]*net.TCPAddr)
+	mapper.tcpProxies = make(map[int]Proxy)
+	mapper.udpMapping = make(map[int]*net.UDPAddr)
+	mapper.udpProxies = make(map[int]Proxy)
 	return nil
 }
 
@@ -215,76 +218,72 @@ func (mapper *PortMapper) setup() error {
 	return nil
 }
 
-func (mapper *PortMapper) iptablesForward(rule string, port int, dest net.TCPAddr) error {
-	return iptables("-t", "nat", rule, "DOCKER", "-p", "tcp", "--dport", strconv.Itoa(port),
-		"-j", "DNAT", "--to-destination", net.JoinHostPort(dest.IP.String(), strconv.Itoa(dest.Port)))
+func (mapper *PortMapper) iptablesForward(rule string, port int, proto string, dest_addr string, dest_port int) error {
+	return iptables("-t", "nat", rule, "DOCKER", "-p", proto, "--dport", strconv.Itoa(port),
+		"-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port)))
 }
 
-func (mapper *PortMapper) Map(port int, dest net.TCPAddr) error {
-	if err := mapper.iptablesForward("-A", port, dest); err != nil {
-		return err
-	}
-
-	mapper.mapping[port] = dest
-	listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
-	if err != nil {
-		mapper.Unmap(port)
-		return err
-	}
-	mapper.proxies[port] = listener
-	go proxy(listener, "tcp", dest.String())
-	return nil
-}
-
-// proxy listens for socket connections on `listener`, and forwards them unmodified
-// to `proto:address`
-func proxy(listener net.Listener, proto, address string) error {
-	utils.Debugf("proxying to %s:%s", proto, address)
-	defer utils.Debugf("Done proxying to %s:%s", proto, address)
-	for {
-		utils.Debugf("Listening on %s", listener)
-		src, err := listener.Accept()
+func (mapper *PortMapper) Map(port int, backendAddr net.Addr) error {
+	if _, isTCP := backendAddr.(*net.TCPAddr); isTCP {
+		backendPort := backendAddr.(*net.TCPAddr).Port
+		backendIP := backendAddr.(*net.TCPAddr).IP
+		if err := mapper.iptablesForward("-A", port, "tcp", backendIP.String(), backendPort); err != nil {
+			return err
+		}
+		mapper.tcpMapping[port] = backendAddr.(*net.TCPAddr)
+		proxy, err := NewProxy(&net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port}, backendAddr)
 		if err != nil {
+			mapper.Unmap(port, "tcp")
 			return err
 		}
-		utils.Debugf("Connecting to %s:%s", proto, address)
-		dst, err := net.Dial(proto, address)
+		mapper.tcpProxies[port] = proxy
+		go proxy.Run()
+	} else {
+		backendPort := backendAddr.(*net.UDPAddr).Port
+		backendIP := backendAddr.(*net.UDPAddr).IP
+		if err := mapper.iptablesForward("-A", port, "udp", backendIP.String(), backendPort); err != nil {
+			return err
+		}
+		mapper.udpMapping[port] = backendAddr.(*net.UDPAddr)
+		proxy, err := NewProxy(&net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port}, backendAddr)
 		if err != nil {
-			log.Printf("Error connecting to %s:%s: %s", proto, address, err)
-			src.Close()
-			continue
+			mapper.Unmap(port, "udp")
+			return err
 		}
-		utils.Debugf("Connected to backend, splicing")
-		splice(src, dst)
+		mapper.udpProxies[port] = proxy
+		go proxy.Run()
 	}
+	return nil
 }
 
-func halfSplice(dst, src net.Conn) error {
-	_, err := io.Copy(dst, src)
-	// FIXME: on EOF from a tcp connection, pass WriteClose()
-	dst.Close()
-	src.Close()
-	return err
-}
-
-func splice(a, b net.Conn) {
-	go halfSplice(a, b)
-	go halfSplice(b, a)
-}
-
-func (mapper *PortMapper) Unmap(port int) error {
-	dest, ok := mapper.mapping[port]
-	if !ok {
-		return errors.New("Port is not mapped")
-	}
-	if proxy, exists := mapper.proxies[port]; exists {
-		proxy.Close()
-		delete(mapper.proxies, port)
-	}
-	if err := mapper.iptablesForward("-D", port, dest); err != nil {
-		return err
+func (mapper *PortMapper) Unmap(port int, proto string) error {
+	if proto == "tcp" {
+		backendAddr, ok := mapper.tcpMapping[port]
+		if !ok {
+			return fmt.Errorf("Port tcp/%v is not mapped", port)
+		}
+		if proxy, exists := mapper.tcpProxies[port]; exists {
+			proxy.Close()
+			delete(mapper.tcpProxies, port)
+		}
+		if err := mapper.iptablesForward("-D", port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil {
+			return err
+		}
+		delete(mapper.tcpMapping, port)
+	} else {
+		backendAddr, ok := mapper.udpMapping[port]
+		if !ok {
+			return fmt.Errorf("Port udp/%v is not mapped", port)
+		}
+		if proxy, exists := mapper.udpProxies[port]; exists {
+			proxy.Close()
+			delete(mapper.udpProxies, port)
+		}
+		if err := mapper.iptablesForward("-D", port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil {
+			return err
+		}
+		delete(mapper.udpMapping, port)
 	}
-	delete(mapper.mapping, port)
 	return nil
 }
 
@@ -301,9 +300,9 @@ func newPortMapper() (*PortMapper, error) {
 
 // Port allocator: Automatically allocate and release networking ports
 type PortAllocator struct {
+	sync.Mutex
 	inUse    map[int]struct{}
 	fountain chan (int)
-	lock     sync.Mutex
 }
 
 func (alloc *PortAllocator) runFountain() {
@@ -317,9 +316,9 @@ func (alloc *PortAllocator) runFountain() {
 // FIXME: Release can no longer fail, change its prototype to reflect that.
 func (alloc *PortAllocator) Release(port int) error {
 	utils.Debugf("Releasing %d", port)
-	alloc.lock.Lock()
+	alloc.Lock()
 	delete(alloc.inUse, port)
-	alloc.lock.Unlock()
+	alloc.Unlock()
 	return nil
 }
 
@@ -334,8 +333,8 @@ func (alloc *PortAllocator) Acquire(port int) (int, error) {
 		}
 		return -1, fmt.Errorf("Port generator ended unexpectedly")
 	}
-	alloc.lock.Lock()
-	defer alloc.lock.Unlock()
+	alloc.Lock()
+	defer alloc.Unlock()
 	if _, inUse := alloc.inUse[port]; inUse {
 		return -1, fmt.Errorf("Port already in use: %d", port)
 	}
@@ -453,7 +452,7 @@ type NetworkInterface struct {
 	Gateway net.IP
 
 	manager  *NetworkManager
-	extPorts []int
+	extPorts []*Nat
 }
 
 // Allocate an external TCP port and map it to the interface
@@ -462,17 +461,32 @@ func (iface *NetworkInterface) AllocatePort(spec string) (*Nat, error) {
 	if err != nil {
 		return nil, err
 	}
-	// Allocate a random port if Frontend==0
-	extPort, err := iface.manager.portAllocator.Acquire(nat.Frontend)
-	if err != nil {
-		return nil, err
-	}
-	nat.Frontend = extPort
-	if err := iface.manager.portMapper.Map(nat.Frontend, net.TCPAddr{IP: iface.IPNet.IP, Port: nat.Backend}); err != nil {
-		iface.manager.portAllocator.Release(nat.Frontend)
-		return nil, err
+
+	if nat.Proto == "tcp" {
+		extPort, err := iface.manager.tcpPortAllocator.Acquire(nat.Frontend)
+		if err != nil {
+			return nil, err
+		}
+		backend := &net.TCPAddr{IP: iface.IPNet.IP, Port: nat.Backend}
+		if err := iface.manager.portMapper.Map(extPort, backend); err != nil {
+			iface.manager.tcpPortAllocator.Release(extPort)
+			return nil, err
+		}
+		nat.Frontend = extPort
+	} else {
+		extPort, err := iface.manager.udpPortAllocator.Acquire(nat.Frontend)
+		if err != nil {
+			return nil, err
+		}
+		backend := &net.UDPAddr{IP: iface.IPNet.IP, Port: nat.Backend}
+		if err := iface.manager.portMapper.Map(extPort, backend); err != nil {
+			iface.manager.udpPortAllocator.Release(extPort)
+			return nil, err
+		}
+		nat.Frontend = extPort
 	}
-	iface.extPorts = append(iface.extPorts, nat.Frontend)
+	iface.extPorts = append(iface.extPorts, nat)
+
 	return nat, nil
 }
 
@@ -485,6 +499,21 @@ type Nat struct {
 func parseNat(spec string) (*Nat, error) {
 	var nat Nat
 
+	if strings.Contains(spec, "/") {
+		specParts := strings.Split(spec, "/")
+		if len(specParts) != 2 {
+			return nil, fmt.Errorf("Invalid port format.")
+		}
+		proto := specParts[1]
+		spec = specParts[0]
+		if proto != "tcp" && proto != "udp" {
+			return nil, fmt.Errorf("Invalid port format: unknown protocol %v.", proto)
+		}
+		nat.Proto = proto
+	} else {
+		nat.Proto = "tcp"
+	}
+
 	if strings.Contains(spec, ":") {
 		specParts := strings.Split(spec, ":")
 		if len(specParts) != 2 {
@@ -517,20 +546,24 @@ func parseNat(spec string) (*Nat, error) {
 		}
 		nat.Backend = int(port)
 	}
-	nat.Proto = "tcp"
+
 	return &nat, nil
 }
 
 // Release: Network cleanup - release all resources
 func (iface *NetworkInterface) Release() {
-	for _, port := range iface.extPorts {
-		if err := iface.manager.portMapper.Unmap(port); err != nil {
-			log.Printf("Unable to unmap port %v: %v", port, err)
+	for _, nat := range iface.extPorts {
+		utils.Debugf("Unmapping %v/%v", nat.Proto, nat.Frontend)
+		if err := iface.manager.portMapper.Unmap(nat.Frontend, nat.Proto); err != nil {
+			log.Printf("Unable to unmap port %v/%v: %v", nat.Proto, nat.Frontend, err)
 		}
-		if err := iface.manager.portAllocator.Release(port); err != nil {
-			log.Printf("Unable to release port %v: %v", port, err)
+		if nat.Proto == "tcp" {
+			if err := iface.manager.tcpPortAllocator.Release(nat.Frontend); err != nil {
+				log.Printf("Unable to release port tcp/%v: %v", nat.Frontend, err)
+			}
+		} else if err := iface.manager.udpPortAllocator.Release(nat.Frontend); err != nil {
+			log.Printf("Unable to release port udp/%v: %v", nat.Frontend, err)
 		}
-
 	}
 
 	iface.manager.ipAllocator.Release(iface.IPNet.IP)
@@ -542,9 +575,10 @@ type NetworkManager struct {
 	bridgeIface   string
 	bridgeNetwork *net.IPNet
 
-	ipAllocator   *IPAllocator
-	portAllocator *PortAllocator
-	portMapper    *PortMapper
+	ipAllocator      *IPAllocator
+	tcpPortAllocator *PortAllocator
+	udpPortAllocator *PortAllocator
+	portMapper       *PortMapper
 }
 
 // Allocate a network interface
@@ -577,7 +611,11 @@ func newNetworkManager(bridgeIface string) (*NetworkManager, error) {
 
 	ipAllocator := newIPAllocator(network)
 
-	portAllocator, err := newPortAllocator()
+	tcpPortAllocator, err := newPortAllocator()
+	if err != nil {
+		return nil, err
+	}
+	udpPortAllocator, err := newPortAllocator()
 	if err != nil {
 		return nil, err
 	}
@@ -588,11 +626,12 @@ func newNetworkManager(bridgeIface string) (*NetworkManager, error) {
 	}
 
 	manager := &NetworkManager{
-		bridgeIface:   bridgeIface,
-		bridgeNetwork: network,
-		ipAllocator:   ipAllocator,
-		portAllocator: portAllocator,
-		portMapper:    portMapper,
+		bridgeIface:      bridgeIface,
+		bridgeNetwork:    network,
+		ipAllocator:      ipAllocator,
+		tcpPortAllocator: tcpPortAllocator,
+		udpPortAllocator: udpPortAllocator,
+		portMapper:       portMapper,
 	}
 	return manager, nil
 }
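
With the mapper now protocol-aware, both `Map` and `Unmap` distinguish TCP from UDP. An illustrative sketch of the new API (the addresses and ports are made up; `mapper` is assumed to come from `newPortMapper()`):

```go
// Map host port 5353/udp to a container backend, then tear it down.
backend := &net.UDPAddr{IP: net.IPv4(10, 0, 0, 2), Port: 53}
if err := mapper.Map(5353, backend); err != nil {
	log.Fatal(err)
}
// Datagrams to host port 5353 now reach the backend through the iptables
// DNAT rule and the userland UDP proxy.
if err := mapper.Unmap(5353, "udp"); err != nil {
	log.Fatal(err)
}
```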

+ 257 - 0
network_proxy.go

@@ -0,0 +1,257 @@
+package docker
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/dotcloud/docker/utils"
+	"io"
+	"log"
+	"net"
+	"sync"
+	"syscall"
+	"time"
+)
+
+const (
+	UDPConnTrackTimeout = 90 * time.Second
+	UDPBufSize          = 2048
+)
+
+type Proxy interface {
+	// Start forwarding traffic back and forth the front and back-end
+	// addresses.
+	Run()
+	// Stop forwarding traffic and close both ends of the Proxy.
+	Close()
+	// Return the address on which the proxy is listening.
+	FrontendAddr() net.Addr
+	// Return the proxied address.
+	BackendAddr() net.Addr
+}
+
+type TCPProxy struct {
+	listener     *net.TCPListener
+	frontendAddr *net.TCPAddr
+	backendAddr  *net.TCPAddr
+}
+
+func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) {
+	listener, err := net.ListenTCP("tcp", frontendAddr)
+	if err != nil {
+		return nil, err
+	}
+	// If the port in frontendAddr was 0 then ListenTCP will have picked
+	// a port to listen on, hence the call to Addr to get the actual port:
+	return &TCPProxy{
+		listener:     listener,
+		frontendAddr: listener.Addr().(*net.TCPAddr),
+		backendAddr:  backendAddr,
+	}, nil
+}
+
+func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
+	backend, err := net.DialTCP("tcp", nil, proxy.backendAddr)
+	if err != nil {
+		log.Printf("Can't forward traffic to backend tcp/%v: %v\n", proxy.backendAddr, err.Error())
+		client.Close()
+		return
+	}
+
+	event := make(chan int64)
+	var broker = func(to, from *net.TCPConn) {
+		written, err := io.Copy(to, from)
+		if err != nil {
+			err, ok := err.(*net.OpError)
+			// If the socket we are writing to is shutdown with
+			// SHUT_WR, forward it to the other end of the pipe:
+			if ok && err.Err == syscall.EPIPE {
+				from.CloseWrite()
+			}
+		}
+		event <- written
+	}
+	utils.Debugf("Forwarding traffic between tcp/%v and tcp/%v", client.RemoteAddr(), backend.RemoteAddr())
+	go broker(client, backend)
+	go broker(backend, client)
+
+	var transferred int64 = 0
+	for i := 0; i < 2; i++ {
+		select {
+		case written := <-event:
+			transferred += written
+		case <-quit:
+			// Interrupt the two brokers and "join" them.
+			client.Close()
+			backend.Close()
+			for ; i < 2; i++ {
+				transferred += <-event
+			}
+			goto done
+		}
+	}
+	client.Close()
+	backend.Close()
+done:
+	utils.Debugf("%v bytes transferred between tcp/%v and tcp/%v", transferred, client.RemoteAddr(), backend.RemoteAddr())
+}
+
+func (proxy *TCPProxy) Run() {
+	quit := make(chan bool)
+	defer close(quit)
+	utils.Debugf("Starting proxy on tcp/%v for tcp/%v", proxy.frontendAddr, proxy.backendAddr)
+	for {
+		client, err := proxy.listener.Accept()
+		if err != nil {
+			utils.Debugf("Stopping proxy on tcp/%v for tcp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error())
+			return
+		}
+		go proxy.clientLoop(client.(*net.TCPConn), quit)
+	}
+}
+
+func (proxy *TCPProxy) Close()                 { proxy.listener.Close() }
+func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr }
+func (proxy *TCPProxy) BackendAddr() net.Addr  { return proxy.backendAddr }
+
+// A net.Addr where the IP is split into two fields so you can use it as a key
+// in a map:
+type connTrackKey struct {
+	IPHigh uint64
+	IPLow  uint64
+	Port   int
+}
+
+func newConnTrackKey(addr *net.UDPAddr) *connTrackKey {
+	if len(addr.IP) == net.IPv4len {
+		return &connTrackKey{
+			IPHigh: 0,
+			IPLow:  uint64(binary.BigEndian.Uint32(addr.IP)),
+			Port:   addr.Port,
+		}
+	}
+	return &connTrackKey{
+		IPHigh: binary.BigEndian.Uint64(addr.IP[:8]),
+		IPLow:  binary.BigEndian.Uint64(addr.IP[8:]),
+		Port:   addr.Port,
+	}
+}
+
+type connTrackMap map[connTrackKey]*net.UDPConn
+
+type UDPProxy struct {
+	listener       *net.UDPConn
+	frontendAddr   *net.UDPAddr
+	backendAddr    *net.UDPAddr
+	connTrackTable connTrackMap
+	connTrackLock  sync.Mutex
+}
+
+func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr) (*UDPProxy, error) {
+	listener, err := net.ListenUDP("udp", frontendAddr)
+	if err != nil {
+		return nil, err
+	}
+	return &UDPProxy{
+		listener:       listener,
+		frontendAddr:   listener.LocalAddr().(*net.UDPAddr),
+		backendAddr:    backendAddr,
+		connTrackTable: make(connTrackMap),
+	}, nil
+}
+
+func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) {
+	defer func() {
+		proxy.connTrackLock.Lock()
+		delete(proxy.connTrackTable, *clientKey)
+		proxy.connTrackLock.Unlock()
+		utils.Debugf("Done proxying between udp/%v and udp/%v", clientAddr.String(), proxy.backendAddr.String())
+		proxyConn.Close()
+	}()
+
+	readBuf := make([]byte, UDPBufSize)
+	for {
+		proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout))
+	again:
+		read, err := proxyConn.Read(readBuf)
+		if err != nil {
+			if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED {
+				// This will happen if the last write failed
+				// (e.g: nothing is actually listening on the
+				// proxied port on the container), ignore it
+				// and continue until UDPConnTrackTimeout
+				// expires:
+				goto again
+			}
+			return
+		}
+		for i := 0; i != read; {
+			written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr)
+			if err != nil {
+				return
+			}
+			i += written
+			utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, clientAddr.String())
+		}
+	}
+}
+
+func (proxy *UDPProxy) Run() {
+	readBuf := make([]byte, UDPBufSize)
+	utils.Debugf("Starting proxy on udp/%v for udp/%v", proxy.frontendAddr, proxy.backendAddr)
+	for {
+		read, from, err := proxy.listener.ReadFromUDP(readBuf)
+		if err != nil {
+			// NOTE: Apparently ReadFrom doesn't return
+			// ECONNREFUSED like Read does (see comment in
+			// UDPProxy.replyLoop)
+			utils.Debugf("Stopping proxy on udp/%v for udp/%v (%v)", proxy.frontendAddr, proxy.backendAddr, err.Error())
+			break
+		}
+
+		fromKey := newConnTrackKey(from)
+		proxy.connTrackLock.Lock()
+		proxyConn, hit := proxy.connTrackTable[*fromKey]
+		if !hit {
+			proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
+			if err != nil {
+				log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err)
+				continue
+			}
+			proxy.connTrackTable[*fromKey] = proxyConn
+			go proxy.replyLoop(proxyConn, from, fromKey)
+		}
+		proxy.connTrackLock.Unlock()
+		for i := 0; i != read; {
+			written, err := proxyConn.Write(readBuf[i:read])
+			if err != nil {
+				log.Printf("Can't proxy a datagram to udp/%s: %v\n", proxy.backendAddr.String(), err)
+				break
+			}
+			i += written
+			utils.Debugf("Forwarded %v/%v bytes to udp/%v", i, read, proxy.backendAddr.String())
+		}
+	}
+}
+
+func (proxy *UDPProxy) Close() {
+	proxy.listener.Close()
+	proxy.connTrackLock.Lock()
+	defer proxy.connTrackLock.Unlock()
+	for _, conn := range proxy.connTrackTable {
+		conn.Close()
+	}
+}
+
+func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr }
+func (proxy *UDPProxy) BackendAddr() net.Addr  { return proxy.backendAddr }
+
+func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) {
+	switch frontendAddr.(type) {
+	case *net.UDPAddr:
+		return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr))
+	case *net.TCPAddr:
+		return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr))
+	default:
+		panic(fmt.Errorf("Unsupported protocol"))
+	}
+}
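
`NewProxy` dispatches on the frontend address type, so TCP and UDP proxies are used the same way. A minimal usage sketch (loopback addresses made up for illustration):

```go
front := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 8080}
back := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 80}
proxy, err := NewProxy(front, back)
if err != nil {
	log.Fatal(err)
}
go proxy.Run() // accept and forward until Close is called
// ...
proxy.Close() // closes the listener, unblocking Run
```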

+ 221 - 0
network_proxy_test.go

@@ -0,0 +1,221 @@
+package docker
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"testing"
+	"time"
+)
+
+var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo")
+var testBufSize = len(testBuf)
+
+type EchoServer interface {
+	Run()
+	Close()
+	LocalAddr() net.Addr
+}
+
+type TCPEchoServer struct {
+	listener net.Listener
+	testCtx  *testing.T
+}
+
+type UDPEchoServer struct {
+	conn    net.PacketConn
+	testCtx *testing.T
+}
+
+func NewEchoServer(t *testing.T, proto, address string) EchoServer {
+	var server EchoServer
+	if strings.HasPrefix(proto, "tcp") {
+		listener, err := net.Listen(proto, address)
+		if err != nil {
+			t.Fatal(err)
+		}
+		server = &TCPEchoServer{listener: listener, testCtx: t}
+	} else {
+		socket, err := net.ListenPacket(proto, address)
+		if err != nil {
+			t.Fatal(err)
+		}
+		server = &UDPEchoServer{conn: socket, testCtx: t}
+	}
+	t.Logf("EchoServer listening on %v/%v\n", proto, server.LocalAddr().String())
+	return server
+}
+
+func (server *TCPEchoServer) Run() {
+	go func() {
+		for {
+			client, err := server.listener.Accept()
+			if err != nil {
+				return
+			}
+			go func(client net.Conn) {
+				server.testCtx.Logf("TCP client accepted on the EchoServer\n")
+				written, err := io.Copy(client, client)
+				server.testCtx.Logf("%v bytes echoed back to the client\n", written)
+				if err != nil {
+					server.testCtx.Logf("can't echo to the client: %v\n", err.Error())
+				}
+				client.Close()
+			}(client)
+		}
+	}()
+}
+
+func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() }
+func (server *TCPEchoServer) Close()              { server.listener.Close() }
+
+func (server *UDPEchoServer) Run() {
+	go func() {
+		readBuf := make([]byte, 1024)
+		for {
+			read, from, err := server.conn.ReadFrom(readBuf)
+			if err != nil {
+				return
+			}
+			server.testCtx.Logf("Writing UDP datagram back")
+			for i := 0; i != read; {
+				written, err := server.conn.WriteTo(readBuf[i:read], from)
+				if err != nil {
+					break
+				}
+				i += written
+			}
+		}
+	}()
+}
+
+func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() }
+func (server *UDPEchoServer) Close()              { server.conn.Close() }
+
+func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) {
+	defer proxy.Close()
+	go proxy.Run()
+	client, err := net.Dial(proto, addr)
+	if err != nil {
+		t.Fatalf("Can't connect to the proxy: %v", err)
+	}
+	defer client.Close()
+	client.SetDeadline(time.Now().Add(10 * time.Second))
+	if _, err = client.Write(testBuf); err != nil {
+		t.Fatal(err)
+	}
+	recvBuf := make([]byte, testBufSize)
+	if _, err = client.Read(recvBuf); err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(testBuf, recvBuf) {
+		t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf))
+	}
+}
+
+func testProxy(t *testing.T, proto string, proxy Proxy) {
+	testProxyAt(t, proto, proxy, proxy.FrontendAddr().String())
+}
+
+func TestTCP4Proxy(t *testing.T) {
+	backend := NewEchoServer(t, "tcp", "127.0.0.1:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	testProxy(t, "tcp", proxy)
+}
+
+func TestTCP6Proxy(t *testing.T) {
+	backend := NewEchoServer(t, "tcp", "[::1]:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	testProxy(t, "tcp", proxy)
+}
+
+func TestTCPDualStackProxy(t *testing.T) {
+	// If I understand `godoc -src net favoriteAddrFamily` (used by the
+	// net.Listen* functions) correctly this should work, but it doesn't.
+	t.Skip("No support for dual stack yet")
+	backend := NewEchoServer(t, "tcp", "[::1]:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	ipv4ProxyAddr := &net.TCPAddr{
+		IP:   net.IPv4(127, 0, 0, 1),
+		Port: proxy.FrontendAddr().(*net.TCPAddr).Port,
+	}
+	testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String())
+}
+
+func TestUDP4Proxy(t *testing.T) {
+	backend := NewEchoServer(t, "udp", "127.0.0.1:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	testProxy(t, "udp", proxy)
+}
+
+func TestUDP6Proxy(t *testing.T) {
+	backend := NewEchoServer(t, "udp", "[::1]:0")
+	defer backend.Close()
+	backend.Run()
+	frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0}
+	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	testProxy(t, "udp", proxy)
+}
+
+func TestUDPWriteError(t *testing.T) {
+	frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
+	// Hopefully, this port will be free.
+	backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587}
+	proxy, err := NewProxy(frontendAddr, backendAddr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer proxy.Close()
+	go proxy.Run()
+	client, err := net.Dial("udp", "127.0.0.1:25587")
+	if err != nil {
+		t.Fatalf("Can't connect to the proxy: %v", err)
+	}
+	defer client.Close()
+	// Make sure the proxy doesn't stop when there is no actual backend:
+	client.Write(testBuf)
+	client.Write(testBuf)
+	backend := NewEchoServer(t, "udp", "127.0.0.1:25587")
+	defer backend.Close()
+	backend.Run()
+	client.SetDeadline(time.Now().Add(10 * time.Second))
+	if _, err = client.Write(testBuf); err != nil {
+		t.Fatal(err)
+	}
+	recvBuf := make([]byte, testBufSize)
+	if _, err = client.Read(recvBuf); err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(testBuf, recvBuf) {
+		t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf))
+	}
+}
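
Note: these tests exercise the Proxy type added in network_proxy.go, whose hunks are not shown in this section. Inferred from the calls above, the contract amounts to the following sketch — not the actual declaration:

    type Proxy interface {
            Run()                   // starts forwarding; the tests invoke it in a goroutine
            Close()                 // stops the proxy and releases the frontend socket
            FrontendAddr() net.Addr // address clients connect to (cast to *net.TCPAddr above)
    }

NewProxy(frontendAddr, backendAddr) presumably selects a TCP or UDP implementation from the concrete net.Addr types it is given, which is why the TCP and UDP tests differ only in the address values they pass.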

+ 75 - 6
network_test.go

@@ -20,28 +20,97 @@ func TestIptables(t *testing.T) {
 
 func TestParseNat(t *testing.T) {
 	if nat, err := parseNat("4500"); err == nil {
-		if nat.Frontend != 0 || nat.Backend != 4500 {
-			t.Errorf("-p 4500 should produce 0->4500, got %d->%d", nat.Frontend, nat.Backend)
+		if nat.Frontend != 0 || nat.Backend != 4500 || nat.Proto != "tcp" {
+			t.Errorf("-p 4500 should produce 0->4500/tcp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
 		}
 	} else {
 		t.Fatal(err)
 	}
 
 	if nat, err := parseNat(":4501"); err == nil {
-		if nat.Frontend != 4501 || nat.Backend != 4501 {
-			t.Errorf("-p :4501 should produce 4501->4501, got %d->%d", nat.Frontend, nat.Backend)
+		if nat.Frontend != 4501 || nat.Backend != 4501 || nat.Proto != "tcp" {
+			t.Errorf("-p :4501 should produce 4501->4501/tcp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
 		}
 	} else {
 		t.Fatal(err)
 	}
 
 	if nat, err := parseNat("4502:4503"); err == nil {
-		if nat.Frontend != 4502 || nat.Backend != 4503 {
-			t.Errorf("-p 4502:4503 should produce 4502->4503, got %d->%d", nat.Frontend, nat.Backend)
+		if nat.Frontend != 4502 || nat.Backend != 4503 || nat.Proto != "tcp" {
+			t.Errorf("-p 4502:4503 should produce 4502->4503/tcp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
 		}
 	} else {
 		t.Fatal(err)
 	}
+
+	if nat, err := parseNat("4502:4503/tcp"); err == nil {
+		if nat.Frontend != 4502 || nat.Backend != 4503 || nat.Proto != "tcp" {
+			t.Errorf("-p 4502:4503/tcp should produce 4502->4503/tcp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
+		}
+	} else {
+		t.Fatal(err)
+	}
+
+	if nat, err := parseNat("4502:4503/udp"); err == nil {
+		if nat.Frontend != 4502 || nat.Backend != 4503 || nat.Proto != "udp" {
+			t.Errorf("-p 4502:4503/udp should produce 4502->4503/udp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
+		}
+	} else {
+		t.Fatal(err)
+	}
+
+	if nat, err := parseNat(":4503/udp"); err == nil {
+		if nat.Frontend != 4503 || nat.Backend != 4503 || nat.Proto != "udp" {
+			t.Errorf("-p :4503/udp should produce 4503->4503/udp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
+		}
+	} else {
+		t.Fatal(err)
+	}
+
+	if nat, err := parseNat(":4503/tcp"); err == nil {
+		if nat.Frontend != 4503 || nat.Backend != 4503 || nat.Proto != "tcp" {
+			t.Errorf("-p :4503/tcp should produce 4503->4503/tcp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
+		}
+	} else {
+		t.Fatal(err)
+	}
+
+	if nat, err := parseNat("4503/tcp"); err == nil {
+		if nat.Frontend != 0 || nat.Backend != 4503 || nat.Proto != "tcp" {
+			t.Errorf("-p 4503/tcp should produce 0->4503/tcp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
+		}
+	} else {
+		t.Fatal(err)
+	}
+
+	if nat, err := parseNat("4503/udp"); err == nil {
+		if nat.Frontend != 0 || nat.Backend != 4503 || nat.Proto != "udp" {
+			t.Errorf("-p 4503/udp should produce 0->4503/udp, got %d->%d/%s",
+				nat.Frontend, nat.Backend, nat.Proto)
+		}
+	} else {
+		t.Fatal(err)
+	}
+
+	if _, err := parseNat("4503/tcpgarbage"); err == nil {
+		t.Fatal("-p 4503/tcpgarbage should have failed")
+	}
+
+	if _, err := parseNat("4503/tcp/udp"); err == nil {
+		t.Fatal("-p 4503/tcp/udp should have failed")
+	}
+
+	if _, err := parseNat("4503/"); err == nil {
+		t.Fatal("-p 4503/ should have failed")
+	}
 }
 
 func TestPortAllocation(t *testing.T) {
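
Taken together, the cases above pin down parseNat's behavior precisely. A minimal sketch consistent with them (the real implementation lives in network.go; Nat's field shapes are inferred from the assertions, not copied from the source):

    // assumes: import ("fmt"; "strconv"; "strings")
    // and a struct like: type Nat struct { Proto string; Frontend, Backend int }
    func parseNat(spec string) (*Nat, error) {
            nat := &Nat{Proto: "tcp"} // tcp is the default protocol
            if parts := strings.SplitN(spec, "/", 2); len(parts) == 2 {
                    if parts[1] != "tcp" && parts[1] != "udp" {
                            return nil, fmt.Errorf("invalid port format: %s", spec)
                    }
                    nat.Proto = parts[1]
                    spec = parts[0]
            }
            ports := strings.SplitN(spec, ":", 2)
            backend, err := strconv.Atoi(ports[len(ports)-1])
            if err != nil {
                    return nil, err
            }
            nat.Backend = backend
            if len(ports) == 2 {
                    if ports[0] == "" {
                            nat.Frontend = backend // ":4501" exposes the same port on both sides
                    } else if nat.Frontend, err = strconv.Atoi(ports[0]); err != nil {
                            return nil, err
                    }
            }
            // a bare "4500" leaves Frontend at 0: allocate a random public port
            return nat, nil
    }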

+ 1 - 1
packaging/ubuntu/lxc-docker.prerm

@@ -1,4 +1,4 @@
 #!/bin/sh
 
 # Stop docker
-/sbin/stop docker
+if [ "`pgrep -f '/usr/bin/docker -d'`" != "" ]; then /sbin/stop docker; fi

+ 113 - 61
registry/registry.go

@@ -12,11 +12,91 @@ import (
 	"net/http"
 	"net/http/cookiejar"
 	"net/url"
+	"regexp"
 	"strconv"
 	"strings"
 )
 
 var ErrAlreadyExists = errors.New("Image already exists")
+var ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
+
+func pingRegistryEndpoint(endpoint string) error {
+	if endpoint == auth.IndexServerAddress() {
+		// Skip the check, we know this one is valid
+		// (and we never want to fallback to http in case of error)
+		return nil
+	}
+	resp, err := http.Get(endpoint + "_ping")
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.Header.Get("X-Docker-Registry-Version") == "" {
+		return errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)")
+	}
+	return nil
+}
+
+func validateRepositoryName(repositoryName string) error {
+	var (
+		namespace string
+		name      string
+	)
+	nameParts := strings.SplitN(repositoryName, "/", 2)
+	if len(nameParts) < 2 {
+		namespace = "library"
+		name = nameParts[0]
+	} else {
+		namespace = nameParts[0]
+		name = nameParts[1]
+	}
+	validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
+	if !validNamespace.MatchString(namespace) {
+		return fmt.Errorf("Invalid namespace name (%s), only [a-z0-9_] are allowed, size between 4 and 30", namespace)
+	}
+	validRepo := regexp.MustCompile(`^([a-zA-Z0-9-_.]+)$`)
+	if !validRepo.MatchString(name) {
+		return fmt.Errorf("Invalid repository name (%s), only [a-zA-Z0-9-_.] are allowed", name)
+	}
+	return nil
+}
+
+// ResolveRepositoryName resolves a repository name to an endpoint + name
+func ResolveRepositoryName(reposName string) (string, string, error) {
+	if strings.Contains(reposName, "://") {
+		// It cannot contain a scheme!
+		return "", "", ErrInvalidRepositoryName
+	}
+	nameParts := strings.SplitN(reposName, "/", 2)
+	if !strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") {
+		// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
+		err := validateRepositoryName(reposName)
+		return auth.IndexServerAddress(), reposName, err
+	}
+	if len(nameParts) < 2 {
+		// The first (and only) part contains a '.' or ':', so it looks like
+		// a registry address without a repository name: reject it.
+		return "", "", ErrInvalidRepositoryName
+	}
+	hostname := nameParts[0]
+	reposName = nameParts[1]
+	if strings.Contains(hostname, "index.docker.io") {
+		return "", "", fmt.Errorf("Invalid repository name, try \"%s\" instead", reposName)
+	}
+	if err := validateRepositoryName(reposName); err != nil {
+		return "", "", err
+	}
+	endpoint := fmt.Sprintf("https://%s/v1/", hostname)
+	if err := pingRegistryEndpoint(endpoint); err != nil {
+		utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
+		endpoint = fmt.Sprintf("http://%s/v1/", hostname)
+		if err = pingRegistryEndpoint(endpoint); err != nil {
+			// TODO: triggering highland build can be done here without "failing"
+			return "", "", errors.New("Invalid Registry endpoint: " + err.Error())
+		}
+	}
+	return endpoint, reposName, nil // reposName was already validated above
+}
 
 func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
 	for _, cookie := range c.Jar.Cookies(req.URL) {
@@ -27,8 +107,8 @@ func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
 
 // Retrieve the history of a given image from the Registry.
 // Return a list of the parent's json (requested image included)
-func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]string, error) {
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/ancestry", nil)
+func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
+	req, err := http.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil)
 	if err != nil {
 		return nil, err
 	}
@@ -36,7 +116,7 @@ func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]s
 	res, err := r.client.Do(req)
 	if err != nil || res.StatusCode != 200 {
 		if res != nil {
-			return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
+			return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgID)
 		}
 		return nil, err
 	}
@@ -56,60 +136,25 @@ func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]s
 }
 
 // Check if an image exists in the Registry
-func (r *Registry) LookupRemoteImage(imgId, registry string, authConfig *auth.AuthConfig) bool {
+func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
 	rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
 
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
+	req, err := http.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
 	if err != nil {
 		return false
 	}
-	req.SetBasicAuth(authConfig.Username, authConfig.Password)
 	res, err := rt.RoundTrip(req)
 	if err != nil {
 		return false
 	}
 	res.Body.Close()
-	return res.StatusCode == 307
-}
-
-func (r *Registry) getImagesInRepository(repository string, authConfig *auth.AuthConfig) ([]map[string]string, error) {
-	u := auth.IndexServerAddress() + "/repositories/" + repository + "/images"
-	req, err := http.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	if authConfig != nil && len(authConfig.Username) > 0 {
-		req.SetBasicAuth(authConfig.Username, authConfig.Password)
-	}
-	res, err := r.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-
-	// Repository doesn't exist yet
-	if res.StatusCode == 404 {
-		return nil, nil
-	}
-
-	jsonData, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	imageList := []map[string]string{}
-	if err := json.Unmarshal(jsonData, &imageList); err != nil {
-		utils.Debugf("Body: %s (%s)\n", res.Body, u)
-		return nil, err
-	}
-
-	return imageList, nil
+	return res.StatusCode == 200
 }
 
 // Retrieve an image from the Registry.
-func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([]byte, int, error) {
+func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) {
 	// Get the JSON
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/json", nil)
+	req, err := http.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
 	if err != nil {
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
 	}
@@ -135,8 +180,8 @@ func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([
 	return jsonString, imageSize, nil
 }
 
-func (r *Registry) GetRemoteImageLayer(imgId, registry string, token []string) (io.ReadCloser, error) {
-	req, err := http.NewRequest("GET", registry+"/images/"+imgId+"/layer", nil)
+func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (io.ReadCloser, error) {
+	req, err := http.NewRequest("GET", registry+"images/"+imgID+"/layer", nil)
 	if err != nil {
 		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
 	}
@@ -155,7 +200,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
 		repository = "library/" + repository
 	}
 	for _, host := range registries {
-		endpoint := fmt.Sprintf("https://%s/v1/repositories/%s/tags", host, repository)
+		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
 		req, err := r.opaqueRequest("GET", endpoint, nil)
 		if err != nil {
 			return nil, err
@@ -165,6 +210,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
 		if err != nil {
 			return nil, err
 		}
+
 		utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
 		defer res.Body.Close()
 
@@ -187,8 +233,8 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
 	return nil, fmt.Errorf("Could not reach any registry endpoint")
 }
 
-func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
-	repositoryTarget := auth.IndexServerAddress() + "/repositories/" + remote + "/images"
+func (r *Registry) GetRepositoryData(indexEp, remote string) (*RepositoryData, error) {
+	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
 
 	req, err := r.opaqueRequest("GET", repositoryTarget, nil)
 	if err != nil {
@@ -219,8 +265,12 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
 	}
 
 	var endpoints []string
+	var urlScheme = indexEp[:strings.Index(indexEp, ":")]
 	if res.Header.Get("X-Docker-Endpoints") != "" {
-		endpoints = res.Header["X-Docker-Endpoints"]
+		// The Registry's URL scheme has to match the Index's
+		for _, ep := range res.Header["X-Docker-Endpoints"] {
+			endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep))
+		}
 	} else {
 		return nil, fmt.Errorf("Index response didn't contain any endpoints")
 	}
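
Concretely, with indexEp == "https://index.docker.io/v1/" and an X-Docker-Endpoints header naming a host (the hostname below is illustrative):

    urlScheme := indexEp[:strings.Index(indexEp, ":")]    // "https"
    ep := "registry.example.com"                          // from X-Docker-Endpoints
    endpoint := fmt.Sprintf("%s://%s/v1/", urlScheme, ep) // "https://registry.example.com/v1/"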
@@ -249,9 +299,8 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
 
 // Push a local image to the registry
 func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
-	registry = "https://" + registry + "/v1"
 	// FIXME: try json with UTF8
-	req, err := http.NewRequest("PUT", registry+"/images/"+imgData.ID+"/json", strings.NewReader(string(jsonRaw)))
+	req, err := http.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", strings.NewReader(string(jsonRaw)))
 	if err != nil {
 		return err
 	}
@@ -284,9 +333,8 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis
 	return nil
 }
 
-func (r *Registry) PushImageLayerRegistry(imgId string, layer io.Reader, registry string, token []string) error {
-	registry = "https://" + registry + "/v1"
-	req, err := http.NewRequest("PUT", registry+"/images/"+imgId+"/layer", layer)
+func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string) error {
+	req, err := http.NewRequest("PUT", registry+"images/"+imgID+"/layer", layer)
 	if err != nil {
 		return err
 	}
@@ -323,9 +371,8 @@ func (r *Registry) opaqueRequest(method, urlStr string, body io.Reader) (*http.R
 func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
 	// "jsonify" the string
 	revision = "\"" + revision + "\""
-	registry = "https://" + registry + "/v1"
 
-	req, err := r.opaqueRequest("PUT", registry+"/repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
+	req, err := r.opaqueRequest("PUT", registry+"repositories/"+remote+"/tags/"+tag, strings.NewReader(revision))
 	if err != nil {
 		return err
 	}
@@ -343,7 +390,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
 	return nil
 }
 
-func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+func (r *Registry) PushImageJSONIndex(indexEp, remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
 	imgListJSON, err := json.Marshal(imgList)
 	if err != nil {
 		return nil, err
@@ -353,9 +400,10 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
 		suffix = "images"
 	}
 
+	u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix)
+	utils.Debugf("PUT %s", u)
 	utils.Debugf("Image list pushed to index:\n%s\n", imgListJSON)
-
-	req, err := r.opaqueRequest("PUT", auth.IndexServerAddress()+"/repositories/"+remote+"/"+suffix, bytes.NewReader(imgListJSON))
+	req, err := r.opaqueRequest("PUT", u, bytes.NewReader(imgListJSON))
 	if err != nil {
 		return nil, err
 	}
@@ -393,6 +441,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
 	}
 
 	var tokens, endpoints []string
+	var urlScheme = indexEp[:strings.Index(indexEp, ":")]
 	if !validate {
 		if res.StatusCode != 200 && res.StatusCode != 201 {
 			errBody, err := ioutil.ReadAll(res.Body)
@@ -409,7 +458,10 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
 		}
 
 		if res.Header.Get("X-Docker-Endpoints") != "" {
-			endpoints = res.Header["X-Docker-Endpoints"]
+			// The Registry's URL scheme has to match the Index's
+			for _, ep := range res.Header["X-Docker-Endpoints"] {
+				endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep))
+			}
 		} else {
 			return nil, fmt.Errorf("Index response didn't contain any endpoints")
 		}
@@ -431,7 +483,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
 }
 
 func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
-	u := auth.IndexServerAddress() + "/search?q=" + url.QueryEscape(term)
+	u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term)
 	req, err := http.NewRequest("GET", u, nil)
 	if err != nil {
 		return nil, err
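
To summarize the resolution rules introduced at the top of this file (the outcomes follow directly from the code above):

    ep, name, _ := ResolveRepositoryName("samalba/hipache")
    // -> ep == auth.IndexServerAddress(), name == "samalba/hipache"

    ep, name, _ = ResolveRepositoryName("registry.domain.tld/myrepos")
    // -> ep == "https://registry.domain.tld/v1/" (or the http fallback if
    //    the https _ping fails), name == "myrepos"

    _, _, err := ResolveRepositoryName("https://registry.domain.tld/myrepos")
    // -> err == ErrInvalidRepositoryName: the name must not carry a scheme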

+ 0 - 3
runtime.go

@@ -108,9 +108,6 @@ func (runtime *Runtime) Register(container *Container) error {
 	// init the wait lock
 	container.waitLock = make(chan struct{})
 
-	// Even if not running, we init the lock (prevents races in start/stop/kill)
-	container.State.initLock()
-
 	container.runtime = runtime
 
 	// Attach to stdout and stderr

+ 133 - 65
runtime_test.go

@@ -1,6 +1,7 @@
 package docker
 
 import (
+	"bytes"
 	"fmt"
 	"github.com/dotcloud/docker/utils"
 	"io"
@@ -17,11 +18,12 @@ import (
 )
 
 const (
-	unitTestImageName = "docker-ut"
-	unitTestImageId   = "e9aa60c60128cad1"
-	unitTestStoreBase = "/var/lib/docker/unit-tests"
-	testDaemonAddr    = "127.0.0.1:4270"
-	testDaemonProto   = "tcp"
+	unitTestImageName     = "docker-test-image"
+	unitTestImageID       = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
+	unitTestNetworkBridge = "testdockbr0"
+	unitTestStoreBase     = "/var/lib/docker/unit-tests"
+	testDaemonAddr        = "127.0.0.1:4270"
+	testDaemonProto       = "tcp"
 )
 
 var globalRuntime *Runtime
@@ -49,7 +51,7 @@ func cleanup(runtime *Runtime) error {
 		return err
 	}
 	for _, image := range images {
-		if image.ID != unitTestImageId {
+		if image.ID != unitTestImageID {
 			runtime.graph.Delete(image.ID)
 		}
 	}
@@ -73,10 +75,10 @@ func init() {
 	}
 
 	if uid := syscall.Geteuid(); uid != 0 {
-		log.Fatal("docker tests needs to be run as root")
+		log.Fatal("docker tests need to be run as root")
 	}
 
-	NetworkBridgeIface = "testdockbr0"
+	NetworkBridgeIface = unitTestNetworkBridge
 
 	// Make it our Store root
 	runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false)
@@ -89,15 +91,16 @@ func init() {
 	srv := &Server{
 		runtime:     runtime,
 		enableCors:  false,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
-	// Retrieve the Image
-	if err := srv.ImagePull(unitTestImageName, "", "", os.Stdout, utils.NewStreamFormatter(false), nil); err != nil {
-		panic(err)
+	// If the unit test image is not found, try to download it.
+	if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
+		// Retrieve the Image
+		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil); err != nil {
+			panic(err)
+		}
 	}
-
 	// Spawn a Daemon
 	go func() {
 		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
@@ -135,10 +138,13 @@ func GetTestImage(runtime *Runtime) *Image {
 	imgs, err := runtime.graph.All()
 	if err != nil {
 		panic(err)
-	} else if len(imgs) < 1 {
-		panic("GASP")
 	}
-	return imgs[0]
+	for i := range imgs {
+		if imgs[i].ID == unitTestImageID {
+			return imgs[i]
+		}
+	}
+	panic(fmt.Errorf("Test image %v not found", unitTestImageID))
 }
 
 func TestRuntimeCreate(t *testing.T) {
@@ -316,82 +322,144 @@ func TestGet(t *testing.T) {
 
 }
 
-func findAvailalblePort(runtime *Runtime, port int) (*Container, error) {
-	strPort := strconv.Itoa(port)
-	container, err := NewBuilder(runtime).Create(&Config{
-		Image:     GetTestImage(runtime).ID,
-		Cmd:       []string{"sh", "-c", "echo well hello there | nc -l -p " + strPort},
-		PortSpecs: []string{strPort},
-	},
-	)
-	if err != nil {
-		return nil, err
-	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
-		if strings.Contains(err.Error(), "address already in use") {
-			return nil, nil
-		}
-		return nil, err
-	}
-	return container, nil
-}
-
-// Run a container with a TCP port allocated, and test that it can receive connections on localhost
-func TestAllocatePortLocalhost(t *testing.T) {
+func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
 	runtime, err := newTestRuntime()
 	if err != nil {
 		t.Fatal(err)
 	}
-	port := 5554
 
+	port := 5554
 	var container *Container
+	var strPort string
 	for {
 		port += 1
-		log.Println("Trying port", port)
-		t.Log("Trying port", port)
-		container, err = findAvailalblePort(runtime, port)
+		strPort = strconv.Itoa(port)
+		var cmd string
+		if proto == "tcp" {
+			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
+		} else if proto == "udp" {
+			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
+		} else {
+			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
+		}
+		t.Log("Trying port", strPort)
+		container, err = NewBuilder(runtime).Create(&Config{
+			Image:     GetTestImage(runtime).ID,
+			Cmd:       []string{"sh", "-c", cmd},
+			PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
+		})
 		if container != nil {
 			break
 		}
 		if err != nil {
+			nuke(runtime)
 			t.Fatal(err)
 		}
-		log.Println("Port", port, "already in use")
-		t.Log("Port", port, "already in use")
+		t.Logf("Port %v already in use", strPort)
 	}
 
-	defer container.Kill()
+	hostConfig := &HostConfig{}
+	if err := container.Start(hostConfig); err != nil {
+		nuke(runtime)
+		t.Fatal(err)
+	}
 
 	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
-		for {
-			if container.State.Running {
-				break
-			}
+		for !container.State.Running {
 			time.Sleep(10 * time.Millisecond)
 		}
 	})
 
-	conn, err := net.Dial("tcp",
-		fmt.Sprintf(
-			"localhost:%s", container.NetworkSettings.PortMapping[strconv.Itoa(port)],
-		),
-	)
-	if err != nil {
-		t.Fatal(err)
+	// Even if the state is running, let's give lxc some time to spawn the process
+	container.WaitTimeout(500 * time.Millisecond)
+
+	strPort = container.NetworkSettings.PortMapping[strings.Title(proto)][strPort]
+	return runtime, container, strPort
+}
+
+// Run a container with a TCP port allocated, and test that it can receive connections on localhost
+func TestAllocateTCPPortLocalhost(t *testing.T) {
+	runtime, container, port := startEchoServerContainer(t, "tcp")
+	defer nuke(runtime)
+	defer container.Kill()
+
+	for i := 0; i != 10; i++ {
+		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer conn.Close()
+
+		input := bytes.NewBufferString("well hello there\n")
+		_, err = conn.Write(input.Bytes())
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := make([]byte, 16)
+		read := 0
+		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
+		read, err = conn.Read(buf)
+		if err != nil {
+			if err, ok := err.(*net.OpError); ok {
+				if err.Err == syscall.ECONNRESET {
+					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
+					conn.Close()
+					time.Sleep(time.Second)
+					continue
+				}
+				if err.Timeout() {
+					t.Log("Timeout, trying again")
+					conn.Close()
+					continue
+				}
+			}
+			t.Fatal(err)
+		}
+		output := string(buf[:read])
+		if !strings.Contains(output, "well hello there") {
+			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
+		} else {
+			return
+		}
 	}
-	defer conn.Close()
-	output, err := ioutil.ReadAll(conn)
+
+	t.Fatal("No reply from the container")
+}
+
+// Run a container with a UDP port allocated, and test that it can receive traffic on localhost
+func TestAllocateUDPPortLocalhost(t *testing.T) {
+	runtime, container, port := startEchoServerContainer(t, "udp")
+	defer nuke(runtime)
+	defer container.Kill()
+
+	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
 	if err != nil {
 		t.Fatal(err)
 	}
-	if string(output) != "well hello there\n" {
-		t.Fatalf("Received wrong output from network connection: should be '%s', not '%s'",
-			"well hello there\n",
-			string(output),
-		)
+	defer conn.Close()
+
+	input := bytes.NewBufferString("well hello there\n")
+	buf := make([]byte, 16)
+	// Try for a minute; for some reason the select in socat may take ages
+	// to return even though everything on the path seems fine (i.e. the
+	// UDPProxy forwards the traffic correctly and you can see the packets
+	// on the interface from within the container).
+	for i := 0; i != 120; i++ {
+		_, err := conn.Write(input.Bytes())
+		if err != nil {
+			t.Fatal(err)
+		}
+		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
+		read, err := conn.Read(buf)
+		if err == nil {
+			output := string(buf[:read])
+			if strings.Contains(output, "well hello there") {
+				return
+			}
+		}
 	}
-	container.Wait()
+
+	t.Fatal("No reply from the container")
 }
 
 func TestRestore(t *testing.T) {
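
Note the shape change picked up by startEchoServerContainer: NetworkSettings.PortMapping is now keyed by protocol first ("Tcp"/"Udp", hence the strings.Title call), then by container port. For example (values illustrative):

    hostPort := container.NetworkSettings.PortMapping["Tcp"]["5555"] // e.g. "49153"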

+ 107 - 79
server.go

@@ -31,7 +31,7 @@ func (srv *Server) DockerVersion() APIVersion {
 func (srv *Server) ContainerKill(name string) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Kill(); err != nil {
-			return fmt.Errorf("Error restarting container %s: %s", name, err.Error())
+			return fmt.Errorf("Error killing container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -351,8 +351,8 @@ func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
 	return nil
 }
 
-func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoint string, token []string, sf *utils.StreamFormatter) error {
-	history, err := r.GetRemoteHistory(imgId, endpoint, token)
+func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
+	history, err := r.GetRemoteHistory(imgID, endpoint, token)
 	if err != nil {
 		return err
 	}
@@ -387,9 +387,10 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoin
 	return nil
 }
 
-func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, remote, askedTag string, sf *utils.StreamFormatter) error {
-	out.Write(sf.FormatStatus("Pulling repository %s from %s", local, auth.IndexServerAddress()))
-	repoData, err := r.GetRepositoryData(remote)
+func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag, indexEp string, sf *utils.StreamFormatter) error {
+	out.Write(sf.FormatStatus("Pulling repository %s", localName))
+
+	repoData, err := r.GetRepositoryData(indexEp, remoteName)
 	if err != nil {
 		return err
 	}
@@ -401,12 +402,22 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
 	}
 
 	utils.Debugf("Retrieving the tag list")
-	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remote, repoData.Tokens)
+	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
 	if err != nil {
+		utils.Debugf("%v", err)
 		return err
 	}
+
+	for tag, id := range tagsList {
+		repoData.ImgList[id] = &registry.ImgData{
+			ID:       id,
+			Tag:      tag,
+			Checksum: "",
+		}
+	}
+
 	utils.Debugf("Registering tags")
-	// If not specific tag have been asked, take all
+	// If no tag has been specified, pull them all
 	if askedTag == "" {
 		for tag, id := range tagsList {
 			repoData.ImgList[id].Tag = tag
@@ -415,7 +426,7 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
 		// Otherwise, check that the tag exists and use only that one
 		id, exists := tagsList[askedTag]
 		if !exists {
-			return fmt.Errorf("Tag %s not found in repositoy %s", askedTag, local)
+			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
 		}
 		repoData.ImgList[id].Tag = askedTag
 	}
@@ -425,10 +436,15 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
 			utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
 			continue
 		}
-		out.Write(sf.FormatStatus("Pulling image %s (%s) from %s", img.ID, img.Tag, remote))
+
+		if img.Tag == "" {
+			utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
+			continue
+		}
+		out.Write(sf.FormatStatus("Pulling image %s (%s) from %s", img.ID, img.Tag, localName))
 		success := false
 		for _, ep := range repoData.Endpoints {
-			if err := srv.pullImage(r, out, img.ID, "https://"+ep+"/v1", repoData.Tokens, sf); err != nil {
+			if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 				out.Write(sf.FormatStatus("Error while retrieving image for tag: %s (%s); checking next endpoint", askedTag, err))
 				continue
 			}
@@ -443,7 +459,7 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
 		if askedTag != "" && tag != askedTag {
 			continue
 		}
-		if err := srv.runtime.repositories.Set(local, tag, id, true); err != nil {
+		if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil {
 			return err
 		}
 	}
@@ -455,8 +471,8 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
 }
 
 func (srv *Server) poolAdd(kind, key string) error {
-	srv.lock.Lock()
-	defer srv.lock.Unlock()
+	srv.Lock()
+	defer srv.Unlock()
 
 	if _, exists := srv.pullingPool[key]; exists {
 		return fmt.Errorf("%s %s is already in progress", key, kind)
@@ -489,31 +505,36 @@ func (srv *Server) poolRemove(kind, key string) error {
 	return nil
 }
 
-func (srv *Server) ImagePull(name, tag, endpoint string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
+func (srv *Server) ImagePull(localName string, tag string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
 	r, err := registry.NewRegistry(srv.runtime.root, authConfig)
 	if err != nil {
 		return err
 	}
-	if err := srv.poolAdd("pull", name+":"+tag); err != nil {
+	if err := srv.poolAdd("pull", localName+":"+tag); err != nil {
+		return err
+	}
+	defer srv.poolRemove("pull", localName+":"+tag)
+
+	// Resolve the Repository name from fqn to endpoint + name
+	endpoint, remoteName, err := registry.ResolveRepositoryName(localName)
+	if err != nil {
 		return err
 	}
-	defer srv.poolRemove("pull", name+":"+tag)
+
+	if endpoint == auth.IndexServerAddress() {
+		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
+		// If we pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
+	}
 
 	out = utils.NewWriteFlusher(out)
-	if endpoint != "" {
-		if err := srv.pullImage(r, out, name, endpoint, nil, sf); err != nil {
+	err = srv.pullRepository(r, out, localName, remoteName, tag, endpoint, sf)
+	if err != nil {
+		if err := srv.pullImage(r, out, remoteName, endpoint, nil, sf); err != nil {
 			return err
 		}
 		return nil
 	}
-	remote := name
-	parts := strings.Split(name, "/")
-	if len(parts) > 2 {
-		remote = fmt.Sprintf("src/%s", url.QueryEscape(strings.Join(parts, "/")))
-	}
-	if err := srv.pullRepository(r, out, name, remote, tag, sf); err != nil {
-		return err
-	}
+
 	return nil
 }
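
The new pull flow, traced end to end (names illustrative, following the code above):

    // docker pull registry.domain.tld/myrepos
    //   ResolveRepositoryName -> endpoint "https://registry.domain.tld/v1/", remoteName "myrepos"
    //   pullRepository(localName, remoteName, tag, endpoint, ...)
    //   on failure, pullImage retries the name as a plain image ID against that endpoint
    // docker pull index.docker.io/foo/bar
    //   endpoint == auth.IndexServerAddress(), so localName is rewritten to "foo/bar"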
 
@@ -523,20 +544,20 @@ func (srv *Server) ImagePull(name, tag, endpoint string, out io.Writer, sf *util
 // - Check if the archive exists; if it does not, ask the registry
 // - If the archive does exist, process the checksum from it
 // - If the archive does not exist and is not found on the registry, process the checksum from the layer
-func (srv *Server) getChecksum(imageId string) (string, error) {
+func (srv *Server) getChecksum(imageID string) (string, error) {
 	// FIXME: Use in-memory map instead of reading the file each time
 	if sums, err := srv.runtime.graph.getStoredChecksums(); err != nil {
 		return "", err
-	} else if checksum, exists := sums[imageId]; exists {
+	} else if checksum, exists := sums[imageID]; exists {
 		return checksum, nil
 	}
 
-	img, err := srv.runtime.graph.Get(imageId)
+	img, err := srv.runtime.graph.Get(imageID)
 	if err != nil {
 		return "", err
 	}
 
-	if _, err := os.Stat(layerArchivePath(srv.runtime.graph.imageRoot(imageId))); err != nil {
+	if _, err := os.Stat(layerArchivePath(srv.runtime.graph.imageRoot(imageID))); err != nil {
 		if os.IsNotExist(err) {
 			// TODO: Ask the registry for the checksum
 			//       As the archive is not there, it is supposed to come from a pull.
@@ -583,7 +604,7 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]*registry.ImgDat
 	return imgList, nil
 }
 
-func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, name string, localRepo map[string]string, sf *utils.StreamFormatter) error {
+func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, indexEp string, sf *utils.StreamFormatter) error {
 	out = utils.NewWriteFlusher(out)
 	out.Write(sf.FormatStatus("Processing checksums"))
 	imgList, err := srv.getImageList(localRepo)
@@ -592,64 +613,63 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, name stri
 	}
 	out.Write(sf.FormatStatus("Sending image list"))
 
-	srvName := name
-	parts := strings.Split(name, "/")
-	if len(parts) > 2 {
-		srvName = fmt.Sprintf("src/%s", url.QueryEscape(strings.Join(parts, "/")))
-	}
-
-	repoData, err := r.PushImageJSONIndex(srvName, imgList, false, nil)
+	var repoData *registry.RepositoryData
+	repoData, err = r.PushImageJSONIndex(indexEp, remoteName, imgList, false, nil)
 	if err != nil {
 		return err
 	}
 
 	for _, ep := range repoData.Endpoints {
-		out.Write(sf.FormatStatus("Pushing repository %s to %s (%d tags)", name, ep, len(localRepo)))
+		out.Write(sf.FormatStatus("Pushing repository %s (%d tags)", localName, len(localRepo)))
 		// For each image within the repo, push them
 		for _, elem := range imgList {
 			if _, exists := repoData.ImgList[elem.ID]; exists {
-				out.Write(sf.FormatStatus("Image %s already on registry, skipping", name))
+				out.Write(sf.FormatStatus("Image %s already pushed, skipping", elem.ID))
+				continue
+			} else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
+				out.Write(sf.FormatStatus("Image %s already pushed, skipping", elem.ID))
 				continue
 			}
-			if err := srv.pushImage(r, out, name, elem.ID, ep, repoData.Tokens, sf); err != nil {
+			if err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf); err != nil {
 				// FIXME: Continue on error?
 				return err
 			}
-			out.Write(sf.FormatStatus("Pushing tags for rev [%s] on {%s}", elem.ID, ep+"/repositories/"+srvName+"/tags/"+elem.Tag))
-			if err := r.PushRegistryTag(srvName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
+			out.Write(sf.FormatStatus("Pushing tags for rev [%s] on {%s}", elem.ID, ep+"repositories/"+remoteName+"/tags/"+elem.Tag))
+			if err := r.PushRegistryTag(remoteName, elem.ID, elem.Tag, ep, repoData.Tokens); err != nil {
 				return err
 			}
 		}
 	}
 
-	if _, err := r.PushImageJSONIndex(srvName, imgList, true, repoData.Endpoints); err != nil {
+	if _, err := r.PushImageJSONIndex(indexEp, remoteName, imgList, true, repoData.Endpoints); err != nil {
 		return err
 	}
+
 	return nil
 }
 
-func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId, ep string, token []string, sf *utils.StreamFormatter) error {
+func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) error {
 	out = utils.NewWriteFlusher(out)
-	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgId, "json"))
+	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
 	if err != nil {
-		return fmt.Errorf("Error while retreiving the path for {%s}: %s", imgId, err)
+		return fmt.Errorf("Error while retrieving the path for {%s}: %s", imgID, err)
 	}
-	out.Write(sf.FormatStatus("Pushing %s", imgId))
+	out.Write(sf.FormatStatus("Pushing %s", imgID))
 
 	// Make sure we have the image's checksum
-	checksum, err := srv.getChecksum(imgId)
+	checksum, err := srv.getChecksum(imgID)
 	if err != nil {
 		return err
 	}
 	imgData := &registry.ImgData{
-		ID:       imgId,
+		ID:       imgID,
 		Checksum: checksum,
 	}
 
 	// Send the json
 	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
 		if err == registry.ErrAlreadyExists {
-			out.Write(sf.FormatStatus("Image %s already uploaded ; skipping", imgData.ID))
+			out.Write(sf.FormatStatus("Image %s already pushed, skipping", imgData.ID))
 			return nil
 		}
 		return err
@@ -658,11 +678,11 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId,
 	// Retrieve the tarball to be sent
 	var layerData *TempArchive
 	// If the archive exists, use it
-	file, err := os.Open(layerArchivePath(srv.runtime.graph.imageRoot(imgId)))
+	file, err := os.Open(layerArchivePath(srv.runtime.graph.imageRoot(imgID)))
 	if err != nil {
 		if os.IsNotExist(err) {
 			// If the archive does not exist, create one from the layer
-			layerData, err = srv.runtime.graph.TempLayerArchive(imgId, Xz, out)
+			layerData, err = srv.runtime.graph.TempLayerArchive(imgID, Xz, sf, out)
 			if err != nil {
 				return fmt.Errorf("Failed to generate layer archive: %s", err)
 			}
@@ -689,32 +709,41 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId,
 }
 
 // FIXME: Allow interrupting the current push when a new push of the same image is started.
-func (srv *Server) ImagePush(name, endpoint string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
-	if err := srv.poolAdd("push", name); err != nil {
+func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
+	if err := srv.poolAdd("push", localName); err != nil {
+		return err
+	}
+	defer srv.poolRemove("push", localName)
+
+	// Resolve the Repository name from fqn to endpoint + name
+	endpoint, remoteName, err := registry.ResolveRepositoryName(localName)
+	if err != nil {
 		return err
 	}
-	defer srv.poolRemove("push", name)
 
 	out = utils.NewWriteFlusher(out)
-	img, err := srv.runtime.graph.Get(name)
+	img, err := srv.runtime.graph.Get(localName)
 	r, err2 := registry.NewRegistry(srv.runtime.root, authConfig)
 	if err2 != nil {
 		return err2
 	}
+
 	if err != nil {
-		out.Write(sf.FormatStatus("The push refers to a repository [%s] (len: %d)", name, len(srv.runtime.repositories.Repositories[name])))
+		reposLen := len(srv.runtime.repositories.Repositories[localName])
+		out.Write(sf.FormatStatus("The push refers to a repository [%s] (len: %d)", localName, reposLen))
 		// If it fails, try to get the repository
-		if localRepo, exists := srv.runtime.repositories.Repositories[name]; exists {
-			if err := srv.pushRepository(r, out, name, localRepo, sf); err != nil {
+		if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
+			if err := srv.pushRepository(r, out, localName, remoteName, localRepo, endpoint, sf); err != nil {
 				return err
 			}
 			return nil
 		}
-
 		return err
 	}
-	out.Write(sf.FormatStatus("The push refers to an image: [%s]", name))
-	if err := srv.pushImage(r, out, name, img.ID, endpoint, nil, sf); err != nil {
+
+	var token []string
+	out.Write(sf.FormatStatus("The push refers to an image: [%s]", localName))
+	if err := srv.pushImage(r, out, remoteName, img.ID, endpoint, token, sf); err != nil {
 		return err
 	}
 	return nil
@@ -786,7 +815,7 @@ func (srv *Server) ContainerCreate(config *Config) (string, error) {
 func (srv *Server) ContainerRestart(name string, t int) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Restart(t); err != nil {
-			return fmt.Errorf("Error restarting container %s: %s", name, err.Error())
+			return fmt.Errorf("Error restarting container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -805,7 +834,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
 			volumes[volumeId] = struct{}{}
 		}
 		if err := srv.runtime.Destroy(container); err != nil {
-			return fmt.Errorf("Error destroying container %s: %s", name, err.Error())
+			return fmt.Errorf("Error destroying container %s: %s", name, err)
 		}
 
 		if removeVolume {
@@ -894,9 +923,9 @@ func (srv *Server) deleteImageParents(img *Image, imgs *[]APIRmi) error {
 	return nil
 }
 
-func (srv *Server) deleteImage(img *Image, repoName, tag string) (*[]APIRmi, error) {
+func (srv *Server) deleteImage(img *Image, repoName, tag string) ([]APIRmi, error) {
 	//Untag the current image
-	var imgs []APIRmi
+	imgs := []APIRmi{}
 	tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
 	if err != nil {
 		return nil, err
@@ -907,25 +936,25 @@ func (srv *Server) deleteImage(img *Image, repoName, tag string) (*[]APIRmi, err
 	if len(srv.runtime.repositories.ByID()[img.ID]) == 0 {
 		if err := srv.deleteImageAndChildren(img.ID, &imgs); err != nil {
 			if err != ErrImageReferenced {
-				return &imgs, err
+				return imgs, err
 			}
 		} else if err := srv.deleteImageParents(img, &imgs); err != nil {
 			if err != ErrImageReferenced {
-				return &imgs, err
+				return imgs, err
 			}
 		}
 	}
-	return &imgs, nil
+	return imgs, nil
 }
 
-func (srv *Server) ImageDelete(name string, autoPrune bool) (*[]APIRmi, error) {
+func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 	img, err := srv.runtime.repositories.LookupImage(name)
 	if err != nil {
 		return nil, fmt.Errorf("No such image: %s", name)
 	}
 	if !autoPrune {
 		if err := srv.runtime.graph.Delete(img.ID); err != nil {
-			return nil, fmt.Errorf("Error deleting image %s: %s", name, err.Error())
+			return nil, fmt.Errorf("Error deleting image %s: %s", name, err)
 		}
 		return nil, nil
 	}
@@ -940,7 +969,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) (*[]APIRmi, error) {
 	return srv.deleteImage(img, name, tag)
 }
 
-func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error) {
+func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {
 
 	// Retrieve all images
 	images, err := srv.runtime.graph.All()
@@ -958,7 +987,7 @@ func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error)
 	}
 
 	// Loop on the children of the given image and check the config
-	for elem := range imageMap[imgId] {
+	for elem := range imageMap[imgID] {
 		img, err := srv.runtime.graph.Get(elem)
 		if err != nil {
 			return nil, err
@@ -973,7 +1002,7 @@ func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error)
 func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Start(hostConfig); err != nil {
-			return fmt.Errorf("Error starting container %s: %s", name, err.Error())
+			return fmt.Errorf("Error starting container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -984,7 +1013,7 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
 func (srv *Server) ContainerStop(name string, t int) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Stop(t); err != nil {
-			return fmt.Errorf("Error stopping container %s: %s", name, err.Error())
+			return fmt.Errorf("Error stopping container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -1096,7 +1125,6 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 	srv := &Server{
 		runtime:     runtime,
 		enableCors:  enableCors,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
@@ -1105,9 +1133,9 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 }
 
 type Server struct {
+	sync.Mutex
 	runtime     *Runtime
 	enableCors  bool
-	lock        *sync.Mutex
 	pullingPool map[string]struct{}
 	pushingPool map[string]struct{}
 }

+ 11 - 6
server_test.go

@@ -13,6 +13,11 @@ func TestContainerTagImageDelete(t *testing.T) {
 
 	srv := &Server{runtime: runtime}
 
+	initialImages, err := srv.Images(false, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil {
 		t.Fatal(err)
 	}
@@ -25,8 +30,8 @@ func TestContainerTagImageDelete(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images) != 3 {
-		t.Errorf("Excepted 3 images, %d found", len(images))
+	if len(images) != len(initialImages)+2 {
+		t.Errorf("Expected %d images, %d found", len(initialImages)+2, len(images))
 	}
 
 	if _, err := srv.ImageDelete("utest/docker:tag2", true); err != nil {
@@ -38,8 +43,8 @@ func TestContainerTagImageDelete(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images) != 2 {
-		t.Errorf("Excepted 2 images, %d found", len(images))
+	if len(images) != len(initialImages)+1 {
+		t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images))
 	}
 
 	if _, err := srv.ImageDelete("utest:tag1", true); err != nil {
@@ -51,8 +56,8 @@ func TestContainerTagImageDelete(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if len(images) != 1 {
-		t.Errorf("Excepted 1 image, %d found", len(images))
+	if len(images) != len(initialImages) {
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
 	}
 }
 

+ 2 - 13
state.go

@@ -8,11 +8,11 @@ import (
 )
 
 type State struct {
+	sync.Mutex
 	Running   bool
 	Pid       int
 	ExitCode  int
 	StartedAt time.Time
-	l         *sync.Mutex
 	Ghost     bool
 }
 
@@ -29,6 +29,7 @@ func (s *State) String() string {
 
 func (s *State) setRunning(pid int) {
 	s.Running = true
+	s.Ghost = false
 	s.ExitCode = 0
 	s.Pid = pid
 	s.StartedAt = time.Now()
@@ -39,15 +40,3 @@ func (s *State) setStopped(exitCode int) {
 	s.Pid = 0
 	s.ExitCode = exitCode
 }
-
-func (s *State) initLock() {
-	s.l = &sync.Mutex{}
-}
-
-func (s *State) lock() {
-	s.l.Lock()
-}
-
-func (s *State) unlock() {
-	s.l.Unlock()
-}
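
With the mutex embedded, the zero value of State is ready to use, which is why runtime.go (above) could drop the initLock call from Register. Call sites presumably lock the state directly now, along the lines of:

    container.State.Lock()
    defer container.State.Unlock()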

+ 0 - 3
sysinit.go

@@ -60,9 +60,6 @@ func cleanupEnv(env ListOpts) {
 		if len(parts) == 1 {
 			parts = append(parts, "")
 		}
-		if parts[0] == "container" {
-			continue
-		}
 		os.Setenv(parts[0], parts[1])
 	}
 }

+ 7 - 10
tags.go

@@ -70,11 +70,11 @@ func (store *TagStore) LookupImage(name string) (*Image, error) {
 	if err != nil {
 		// FIXME: standardize on returning nil when the image doesn't exist, and err for everything else
 		// (so we can pass all errors here)
-		repoAndTag := strings.SplitN(name, ":", 2)
-		if len(repoAndTag) == 1 {
-			repoAndTag = append(repoAndTag, DEFAULTTAG)
+		repos, tag := utils.ParseRepositoryTag(name)
+		if tag == "" {
+			tag = DEFAULTTAG
 		}
-		if i, err := store.GetImage(repoAndTag[0], repoAndTag[1]); err != nil {
+		if i, err := store.GetImage(repos, tag); err != nil {
 			return nil, err
 		} else if i == nil {
 			return nil, fmt.Errorf("Image does not exist: %s", name)
@@ -197,7 +197,7 @@ func (store *TagStore) Get(repoName string) (Repository, error) {
 	return nil, nil
 }
 
-func (store *TagStore) GetImage(repoName, tagOrId string) (*Image, error) {
+func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) {
 	repo, err := store.Get(repoName)
 	if err != nil {
 		return nil, err
@@ -206,11 +206,11 @@ func (store *TagStore) GetImage(repoName, tagOrId string) (*Image, error) {
 	}
 	//go through all the tags, to see if tag is in fact an ID
 	for _, revision := range repo {
-		if strings.HasPrefix(revision, tagOrId) {
+		if strings.HasPrefix(revision, tagOrID) {
 			return store.graph.Get(revision)
 		}
 	}
-	if revision, exists := repo[tagOrId]; exists {
+	if revision, exists := repo[tagOrID]; exists {
 		return store.graph.Get(revision)
 	}
 	return nil, nil
@@ -221,9 +221,6 @@ func validateRepoName(name string) error {
 	if name == "" {
 		return fmt.Errorf("Repository name can't be empty")
 	}
-	if strings.Contains(name, ":") {
-		return fmt.Errorf("Illegal repository name: %s", name)
-	}
 	return nil
 }
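
LookupImage now defers the "name[:tag]" split to utils.ParseRepositoryTag. From how the result is used above, the helper's behavior amounts to (a sketch; the helper itself is not shown in these hunks):

    repos, tag := utils.ParseRepositoryTag("ubuntu:12.04") // -> "ubuntu", "12.04"
    repos, tag = utils.ParseRepositoryTag("ubuntu")        // -> "ubuntu", "" (LookupImage then falls back to DEFAULTTAG)

Dropping the blanket ':' ban from validateRepoName fits the same change, presumably because repository names may now embed a registry host:port, so a colon is no longer proof of an illegal name.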
 

+ 2 - 2
tags_test.go

@@ -35,13 +35,13 @@ func TestLookupImage(t *testing.T) {
 		t.Errorf("Expected 0 image, 1 found")
 	}
 
-	if img, err := runtime.repositories.LookupImage(unitTestImageId); err != nil {
+	if img, err := runtime.repositories.LookupImage(unitTestImageID); err != nil {
 		t.Fatal(err)
 	} else if img == nil {
 		t.Errorf("Expected 1 image, none found")
 	}
 
-	if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageId); err != nil {
+	if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageID); err != nil {
 		t.Fatal(err)
 	} else if img == nil {
 		t.Errorf("Expected 1 image, none found")

+ 1 - 1
term/term.go

@@ -12,8 +12,8 @@ type State struct {
 }
 
 type Winsize struct {
-	Width  uint16
 	Height uint16
+	Width  uint16
 	x      uint16
 	y      uint16
 }
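
(This reorder presumably aligns Winsize with the kernel's struct winsize, whose fields are ws_row, ws_col, ws_xpixel, ws_ypixel — rows first — so that the TIOCGWINSZ/TIOCSWINSZ ioctls read and write the right fields.)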

+ 10 - 0
testing/README.rst

@@ -30,6 +30,16 @@ Deployment
   export AWS_KEYPAIR_NAME=xxxxxxxxxxxx
   export AWS_SSH_PRIVKEY=xxxxxxxxxxxx
 
+  # Define email recipient and IRC channel
+  export EMAIL_RCP=xxxxxx@domain.com
+  export IRC_CHANNEL=docker
+
+  # Define buildbot credentials
+  export BUILDBOT_PWD=xxxxxxxxxxxx
+  export IRC_PWD=xxxxxxxxxxxx
+  export SMTP_USER=xxxxxxxxxxxx
+  export SMTP_PWD=xxxxxxxxxxxx
+
   # Checkout docker
   git clone git://github.com/dotcloud/docker.git
 

+ 7 - 3
testing/Vagrantfile

@@ -19,17 +19,20 @@ Vagrant::Config.run do |config|
   config.vm.share_folder "v-data", DOCKER_PATH, "#{File.dirname(__FILE__)}/.."
   config.vm.network :hostonly, BUILDBOT_IP
 
+
   # Deploy buildbot and its dependencies if it was not done
   if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
-    pkg_cmd = "apt-get update -qq; apt-get install -q -y linux-image-3.8.0-19-generic; "
+    pkg_cmd = "apt-get update -qq; apt-get install -q -y linux-image-generic-lts-raring; "
     # Deploy buildbot CI
     pkg_cmd << "apt-get install -q -y python-dev python-pip supervisor; " \
       "pip install -r #{CFG_PATH}/requirements.txt; " \
       "chown #{USER}.#{USER} /data; cd /data; " \
-      "#{CFG_PATH}/setup.sh #{USER} #{CFG_PATH}; "
+      "#{CFG_PATH}/setup.sh #{USER} #{CFG_PATH} #{ENV['BUILDBOT_PWD']} " \
+        "#{ENV['IRC_PWD']} #{ENV['IRC_CHANNEL']} #{ENV['SMTP_USER']} " \
+        "#{ENV['SMTP_PWD']} #{ENV['EMAIL_RCP']}; "
     # Install docker dependencies
     pkg_cmd << "apt-get install -q -y python-software-properties; " \
-      "add-apt-repository -y ppa:gophers/go/ubuntu; apt-get update -qq; " \
+      "add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu; apt-get update -qq; " \
       "DEBIAN_FRONTEND=noninteractive apt-get install -q -y lxc git golang-stable aufs-tools make; "
     # Activate new kernel
     pkg_cmd << "shutdown -r +1; "
@@ -40,6 +43,7 @@ end
 # Providers were added on Vagrant >= 1.1.0
 Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
   config.vm.provider :aws do |aws, override|
+    aws.tags = { 'Name' => 'docker-ci' }
     aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
     aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
     aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]

+ 30 - 16
testing/buildbot/master.cfg

@@ -5,9 +5,11 @@ from buildbot.schedulers.basic import SingleBranchScheduler
 from buildbot.changes import filter
 from buildbot.config import BuilderConfig
 from buildbot.process.factory import BuildFactory
+from buildbot.process.properties import Interpolate
 from buildbot.steps.shell import ShellCommand
-from buildbot.status import html
+from buildbot.status import html, words
 from buildbot.status.web import authz, auth
+from buildbot.status.mail import MailNotifier
 
 PORT_WEB = 80           # Buildbot webserver port
 PORT_GITHUB = 8011      # Buildbot github hook port
 PORT_MASTER = 9989      # Port where the buildbot master listens for buildworkers
 TEST_USER = 'buildbot'  # Credential to authenticate build triggers
 TEST_PWD = 'docker'     # Credential to authenticate build triggers
 BUILDER_NAME = 'docker'
-BUILDPASSWORD = 'pass-docker'  # Credential to authenticate buildworkers
-GITHUB_DOCKER = "github.com/dotcloud/docker"
-DOCKER_PATH = "/data/docker"
-BUILDER_PATH = "/data/buildbot/slave/{0}/build".format(BUILDER_NAME)
+GITHUB_DOCKER = 'github.com/dotcloud/docker'
+DOCKER_PATH = '/data/docker'
+BUILDER_PATH = '/data/buildbot/slave/{0}/build'.format(BUILDER_NAME)
 DOCKER_BUILD_PATH = BUILDER_PATH + '/src/github.com/dotcloud/docker'
 
+# Credentials set by setup.sh and Vagrantfile
+BUILDBOT_PWD = ''
+IRC_PWD = ''
+IRC_CHANNEL = ''
+SMTP_USER = ''
+SMTP_PWD = ''
+EMAIL_RCP = ''
+
 
 c = BuildmasterConfig = {}
 
 c['title'] = "Docker"
 c['titleURL'] = "waterfall"
-c['buildbotURL'] = "http://0.0.0.0:{0}/".format(PORT_WEB)
+c['buildbotURL'] = "http://docker-ci.dotcloud.com/"
 c['db'] = {'db_url':"sqlite:///state.sqlite"}
-c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
+c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
 c['slavePortnum'] = PORT_MASTER
 
 c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
@@ -36,20 +45,25 @@ c['schedulers'].append(SingleBranchScheduler(name="all",
     change_filter=filter.ChangeFilter(branch='master'),treeStableTimer=None,
     builderNames=[BUILDER_NAME]))
 
-# Docker test command
-test_cmd = ("cd /tmp; rm -rf {0}; export GOPATH={0}; go get -d {1}; cd {2}; "
-    "go test").format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH)
-
 # Builder
 factory = BuildFactory()
-factory.addStep(ShellCommand(description='Docker',logEnviron=False,
-    usePTY=True,command=test_cmd))
+factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
+    command=["sh", "-c", Interpolate("cd ..; rm -rf build; export GOPATH={0}; "
+    "go get -d {1}; cd {2}; git reset --hard %(src::revision:-unknown)s; "
+    "go test -v".format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH))]))
 c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
     factory=factory)]
 
 # Status
-authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]),
+authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),
     forceBuild='auth')
 c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
-c['status'].append(html.WebStatus(http_port=PORT_GITHUB,allowForce=True,
-    change_hook_dialects={ 'github' : True }))
+c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True,
+    change_hook_dialects={ 'github': True }))
+c['status'].append(MailNotifier(fromaddr='buildbot@docker.io',
+    sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP],
+    mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True,
+    smtpUser=SMTP_USER, smtpPassword=SMTP_PWD))
+c['status'].append(words.IRC("irc.freenode.net", "dockerqabot",
+    channels=[IRC_CHANNEL], password=IRC_PWD, allowForce=True,
+    notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1}))

+ 12 - 2
testing/buildbot/setup.sh

@@ -6,11 +6,16 @@
 
 USER=$1
 CFG_PATH=$2
+BUILDBOT_PWD=$3
+IRC_PWD=$4
+IRC_CHANNEL=$5
+SMTP_USER=$6
+SMTP_PWD=$7
+EMAIL_RCP=$8
 BUILDBOT_PATH="/data/buildbot"
 DOCKER_PATH="/data/docker"
 SLAVE_NAME="buildworker"
 SLAVE_SOCKET="localhost:9989"
-BUILDBOT_PWD="pass-docker"
 export PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"
 
 function run { su $USER -c "$1"; }
@@ -23,7 +28,12 @@ run "mkdir -p $BUILDBOT_PATH"
 cd $BUILDBOT_PATH
 run "buildbot create-master master"
 run "cp $CFG_PATH/master.cfg master"
-run "sed -i -E 's#(DOCKER_PATH = ).+#\1\"$DOCKER_PATH\"#' master/master.cfg"
+run "sed -i -E 's#(BUILDBOT_PWD = ).+#\1\"$BUILDBOT_PWD\"#' master/master.cfg"
+run "sed -i -E 's#(IRC_PWD = ).+#\1\"$IRC_PWD\"#' master/master.cfg"
+run "sed -i -E 's#(IRC_CHANNEL = ).+#\1\"$IRC_CHANNEL\"#' master/master.cfg"
+run "sed -i -E 's#(SMTP_USER = ).+#\1\"$SMTP_USER\"#' master/master.cfg"
+run "sed -i -E 's#(SMTP_PWD = ).+#\1\"$SMTP_PWD\"#' master/master.cfg"
+run "sed -i -E 's#(EMAIL_RCP = ).+#\1\"$EMAIL_RCP\"#' master/master.cfg"
 run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
 
 # Allow buildbot subprocesses (docker tests) to properly run in containers,

+ 13 - 5
utils.go

@@ -20,7 +20,8 @@ func CompareConfig(a, b *Config) bool {
 	if len(a.Cmd) != len(b.Cmd) ||
 		len(a.Dns) != len(b.Dns) ||
 		len(a.Env) != len(b.Env) ||
-		len(a.PortSpecs) != len(b.PortSpecs) {
+		len(a.PortSpecs) != len(b.PortSpecs) ||
+		len(a.Entrypoint) != len(b.Entrypoint) {
 		return false
 	}
 
@@ -44,14 +45,15 @@ func CompareConfig(a, b *Config) bool {
 			return false
 		}
 	}
-
+	for i := 0; i < len(a.Entrypoint); i++ {
+		if a.Entrypoint[i] != b.Entrypoint[i] {
+			return false
+		}
+	}
 	return true
 }
 
 func MergeConfig(userConf, imageConf *Config) {
-	if userConf.Hostname == "" {
-		userConf.Hostname = imageConf.Hostname
-	}
 	if userConf.User == "" {
 		userConf.User = imageConf.User
 	}
@@ -85,4 +87,10 @@ func MergeConfig(userConf, imageConf *Config) {
 	if userConf.Dns == nil || len(userConf.Dns) == 0 {
 		userConf.Dns = imageConf.Dns
 	}
+	if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
+		userConf.Entrypoint = imageConf.Entrypoint
+	}
+	if userConf.Volumes == nil || len(userConf.Volumes) == 0 {
+		userConf.Volumes = imageConf.Volumes
+	}
 }

+ 31 - 18
utils/utils.go

@@ -170,10 +170,9 @@ func SelfPath() string {
 	return path
 }
 
-type NopWriter struct {
-}
+type NopWriter struct{}
 
-func (w *NopWriter) Write(buf []byte) (int, error) {
+func (*NopWriter) Write(buf []byte) (int, error) {
 	return len(buf), nil
 }
 
@@ -188,10 +187,10 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
 }
 
 type bufReader struct {
+	sync.Mutex
 	buf    *bytes.Buffer
 	reader io.Reader
 	err    error
-	l      sync.Mutex
 	wait   sync.Cond
 }
 
@@ -200,7 +199,7 @@ func NewBufReader(r io.Reader) *bufReader {
 		buf:    &bytes.Buffer{},
 		reader: r,
 	}
-	reader.wait.L = &reader.l
+	reader.wait.L = &reader.Mutex
 	go reader.drain()
 	return reader
 }
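
NewBufReader points the condition variable at the embedded mutex and starts drain in a background goroutine; Read then sleeps on wait until drain signals that new bytes (or an error) have arrived. A stripped-down sketch of the same mutex-plus-Cond handshake, using an illustrative queue type rather than the diff's own types:

package main

import (
    "fmt"
    "sync"
)

// queue is an illustrative producer/consumer pair built the same way as
// bufReader: a sync.Cond sharing the struct's embedded mutex.
type queue struct {
    sync.Mutex
    wait  sync.Cond
    items []int
}

func newQueue() *queue {
    q := &queue{}
    q.wait.L = &q.Mutex // the Cond locks the same embedded mutex
    return q
}

func (q *queue) push(v int) {
    q.Lock()
    q.items = append(q.items, v)
    q.wait.Signal() // wake one blocked pop, like drain() waking Read()
    q.Unlock()
}

func (q *queue) pop() int {
    q.Lock()
    defer q.Unlock()
    for len(q.items) == 0 {
        q.wait.Wait() // releases the mutex while sleeping
    }
    v := q.items[0]
    q.items = q.items[1:]
    return v
}

func main() {
    q := newQueue()
    go func() {
        for i := 1; i <= 3; i++ {
            q.push(i)
        }
    }()
    for i := 0; i < 3; i++ {
        fmt.Println(q.pop())
    }
}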
@@ -209,14 +208,14 @@ func (r *bufReader) drain() {
 	buf := make([]byte, 1024)
 	for {
 		n, err := r.reader.Read(buf)
-		r.l.Lock()
+		r.Lock()
 		if err != nil {
 			r.err = err
 		} else {
 			r.buf.Write(buf[0:n])
 		}
 		r.wait.Signal()
-		r.l.Unlock()
+		r.Unlock()
 		if err != nil {
 			break
 		}
@@ -224,8 +223,8 @@ func (r *bufReader) drain() {
 }
 
 func (r *bufReader) Read(p []byte) (n int, err error) {
-	r.l.Lock()
-	defer r.l.Unlock()
+	r.Lock()
+	defer r.Unlock()
 	for {
 		n, err = r.buf.Read(p)
 		if n > 0 {
@@ -247,27 +246,27 @@ func (r *bufReader) Close() error {
 }
 
 type WriteBroadcaster struct {
-	mu      sync.Mutex
+	sync.Mutex
 	writers map[io.WriteCloser]struct{}
 }
 
 func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) {
-	w.mu.Lock()
+	w.Lock()
 	w.writers[writer] = struct{}{}
-	w.mu.Unlock()
+	w.Unlock()
 }
 
 // FIXME: Is that function used?
 // FIXME: This relies on the concrete writer type in use having an equality operator
 func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) {
-	w.mu.Lock()
+	w.Lock()
 	delete(w.writers, writer)
-	w.mu.Unlock()
+	w.Unlock()
 }
 
 func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	w.Lock()
+	defer w.Unlock()
 	for writer := range w.writers {
 		if n, err := writer.Write(p); err != nil || n != len(p) {
 			// On error, evict the writer
@@ -278,8 +277,8 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
 }
 
 func (w *WriteBroadcaster) CloseWriters() error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	w.Lock()
+	defer w.Unlock()
 	for writer := range w.writers {
 		writer.Close()
 	}
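
Both bufReader and WriteBroadcaster now embed sync.Mutex instead of holding it in a named field (r.l, w.mu), so Lock and Unlock are promoted to methods on the struct itself. A small illustration of the idiom (SafeCounter is a made-up type, not part of this change):

package main

import (
    "fmt"
    "sync"
)

// SafeCounter embeds sync.Mutex, so Lock/Unlock are promoted methods:
// c.Lock() instead of c.mu.Lock(). The trade-off is that Lock and Unlock
// also become part of the type's exported method set.
type SafeCounter struct {
    sync.Mutex
    n int
}

func (c *SafeCounter) Inc() {
    c.Lock()
    defer c.Unlock()
    c.n++
}

func main() {
    var c SafeCounter
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() { defer wg.Done(); c.Inc() }()
    }
    wg.Wait()
    fmt.Println(c.n) // 10
}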
@@ -687,3 +686,17 @@ func ParseHost(host string, port int, addr string) string {
 	}
 	return fmt.Sprintf("tcp://%s:%d", host, port)
 }
+
+// ParseRepositoryTag splits a repository name into its repository and tag parts.
+// The tag separator can be ambiguous because a repository name may include a registry port.
+//     Ex: localhost.localdomain:5000/samalba/hipache:latest
+func ParseRepositoryTag(repos string) (string, string) {
+	n := strings.LastIndex(repos, ":")
+	if n < 0 {
+		return repos, ""
+	}
+	if tag := repos[n+1:]; !strings.Contains(tag, "/") {
+		return repos[:n], tag
+	}
+	return repos, ""
+}
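
ParseRepositoryTag disambiguates the tag separator from a registry port by checking whether anything after the last ':' contains a '/'; only a '/'-free suffix counts as a tag. A usage sketch (the helper simply mirrors the function added above, and the expected outputs follow from its body):

package main

import (
    "fmt"
    "strings"
)

// parseRepositoryTag mirrors utils.ParseRepositoryTag from the diff above.
func parseRepositoryTag(repos string) (string, string) {
    n := strings.LastIndex(repos, ":")
    if n < 0 {
        return repos, ""
    }
    // A ':' only marks a tag if no '/' follows it; otherwise it is part of
    // a registry address such as localhost.localdomain:5000.
    if tag := repos[n+1:]; !strings.Contains(tag, "/") {
        return repos[:n], tag
    }
    return repos, ""
}

func main() {
    for _, name := range []string{
        "ubuntu",                                            // no tag
        "ubuntu:12.04",                                      // plain tag
        "localhost.localdomain:5000/samalba/hipache:latest", // port and tag
        "localhost.localdomain:5000/samalba/hipache",        // port, no tag
    } {
        repo, tag := parseRepositoryTag(name)
        fmt.Printf("%-52s -> repo=%q tag=%q\n", name, repo, tag)
    }
}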

Some files were not shown because too many files changed in this diff