Merging from master

Sam Alba, 12 years ago
commit be49f0a118
47 changed files with 599 additions and 399 deletions
  1. .gitignore (+1 -0)
  2. api.go (+2 -2)
  3. api_test.go (+13 -13)
  4. archive.go (+1 -2)
  5. archive_test.go (+4 -4)
  6. auth/auth_test.go (+1 -1)
  7. buildfile_test.go (+2 -4)
  8. commands.go (+1 -1)
  9. commands_test.go (+2 -2)
  10. container.go (+8 -8)
  11. docker/docker.go (+1 -1)
  12. docs/sources/concepts/images/dockerlogo-h.png (BIN)
  13. docs/sources/concepts/images/dockerlogo-v.png (BIN)
  14. docs/sources/concepts/index.rst (+4 -4)
  15. docs/sources/concepts/manifesto.rst (+190 -0)
  16. docs/sources/index.rst (+27 -116)
  17. docs/sources/installation/index.rst (+9 -4)
  18. docs/sources/terms/container.rst (+40 -0)
  19. docs/sources/terms/filesystem.rst (+38 -0)
  20. docs/sources/terms/fundamentals.rst (+0 -97)
  21. docs/sources/terms/image.rst (+38 -0)
  22. docs/sources/terms/images/docker-filesystems-busyboxrw.png (BIN)
  23. docs/sources/terms/images/docker-filesystems-debian.png (BIN)
  24. docs/sources/terms/images/docker-filesystems-debianrw.png (BIN)
  25. docs/sources/terms/images/docker-filesystems-generic.png (BIN)
  26. docs/sources/terms/images/docker-filesystems-multilayer.png (BIN)
  27. docs/sources/terms/images/docker-filesystems-multiroot.png (BIN)
  28. docs/sources/terms/images/docker-filesystems.svg (+37 -23)
  29. docs/sources/terms/index.rst (+8 -4)
  30. docs/sources/terms/layer.rst (+40 -0)
  31. docs/sources/toctree.rst (+3 -2)
  32. docs/sources/use/workingwithrepository.rst (+7 -7)
  33. network.go (+5 -5)
  34. registry/registry.go (+3 -3)
  35. runtime.go (+0 -3)
  36. runtime_test.go (+5 -6)
  37. server.go (+28 -26)
  38. server_test.go (+3 -3)
  39. state.go (+1 -13)
  40. tags.go (+3 -3)
  41. tags_test.go (+2 -2)
  42. testing/README.rst (+10 -0)
  43. testing/Vagrantfile (+3 -1)
  44. testing/buildbot/master.cfg (+30 -16)
  45. testing/buildbot/setup.sh (+12 -2)
  46. utils.go (+0 -3)
  47. utils/utils.go (+17 -18)

+ 1 - 0
.gitignore

@@ -15,3 +15,4 @@ docs/_build
 docs/_static
 docs/_templates
 .gopath/
+.dotcloud

+ 2 - 2
api.go

@@ -170,7 +170,7 @@ func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r
 	name := vars["name"]
 
 	if err := srv.ContainerExport(name, w); err != nil {
-		utils.Debugf("%s", err.Error())
+		utils.Debugf("%s", err)
 		return err
 	}
 	return nil
@@ -306,7 +306,7 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req
 	}
 	config := &Config{}
 	if err := json.NewDecoder(r.Body).Decode(config); err != nil {
-		utils.Debugf("%s", err.Error())
+		utils.Debugf("%s", err)
 	}
 	repo := r.Form.Get("repo")
 	tag := r.Form.Get("tag")

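The err.Error() → err substitutions in this file (and throughout the commit) work because fmt's %s and %v verbs call an error's Error() method automatically; passing the value also avoids a panic when the error happens to be nil. A minimal standalone sketch, not taken from the commit:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("connection refused")
	// %s invokes err.Error() under the hood.
	fmt.Printf("%s\n", err) // connection refused

	var nilErr error
	// A nil error prints a placeholder instead of panicking...
	fmt.Printf("%s\n", nilErr) // %!s(<nil>)
	// ...whereas calling nilErr.Error() would panic at runtime.
}
```
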
+ 13 - 13
api_test.go

@@ -99,7 +99,7 @@ func TestGetVersion(t *testing.T) {
 		t.Fatal(err)
 	}
 	if v.Version != VERSION {
-		t.Errorf("Excepted version %s, %s found", VERSION, v.Version)
+		t.Errorf("Expected version %s, %s found", VERSION, v.Version)
 	}
 }
 
@@ -129,7 +129,7 @@ func TestGetInfo(t *testing.T) {
 		t.Fatal(err)
 	}
 	if infos.Images != len(initialImages) {
-		t.Errorf("Excepted images: %d, %d found", len(initialImages), infos.Images)
+		t.Errorf("Expected images: %d, %d found", len(initialImages), infos.Images)
 	}
 }
 
@@ -166,7 +166,7 @@ func TestGetImagesJSON(t *testing.T) {
 	}
 
 	if len(images) != len(initialImages) {
-		t.Errorf("Excepted %d image, %d found", len(initialImages), len(images))
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
 	}
 
 	found := false
@@ -177,7 +177,7 @@ func TestGetImagesJSON(t *testing.T) {
 		}
 	}
 	if !found {
-		t.Errorf("Excepted image %s, %+v found", unitTestImageName, images)
+		t.Errorf("Expected image %s, %+v found", unitTestImageName, images)
 	}
 
 	r2 := httptest.NewRecorder()
@@ -204,7 +204,7 @@ func TestGetImagesJSON(t *testing.T) {
 	}
 
 	if len(images2) != len(initialImages) {
-		t.Errorf("Excepted %d image, %d found", len(initialImages), len(images2))
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images2))
 	}
 
 	found = false
@@ -236,7 +236,7 @@ func TestGetImagesJSON(t *testing.T) {
 	}
 
 	if len(images3) != 0 {
-		t.Errorf("Excepted 0 image, %d found", len(images3))
+		t.Errorf("Expected 0 image, %d found", len(images3))
 	}
 
 	r4 := httptest.NewRecorder()
@@ -282,7 +282,7 @@ func TestGetImagesViz(t *testing.T) {
 		t.Fatal(err)
 	}
 	if line != "digraph docker {\n" {
-		t.Errorf("Excepted digraph docker {\n, %s found", line)
+		t.Errorf("Expected digraph docker {\n, %s found", line)
 	}
 }
 
@@ -313,7 +313,7 @@ func TestGetImagesSearch(t *testing.T) {
 		t.Fatal(err)
 	}
 	if len(results) < 2 {
-		t.Errorf("Excepted at least 2 lines, %d found", len(results))
+		t.Errorf("Expected at least 2 lines, %d found", len(results))
 	}
 }
 
@@ -337,7 +337,7 @@ func TestGetImagesHistory(t *testing.T) {
 		t.Fatal(err)
 	}
 	if len(history) != 1 {
-		t.Errorf("Excepted 1 line, %d found", len(history))
+		t.Errorf("Expected 1 line, %d found", len(history))
 	}
 }
 
@@ -359,7 +359,7 @@ func TestGetImagesByName(t *testing.T) {
 	if err := json.Unmarshal(r.Body.Bytes(), img); err != nil {
 		t.Fatal(err)
 	}
-	if img.ID != unitTestImageId {
+	if img.ID != unitTestImageID {
 		t.Errorf("Error inspecting image")
 	}
 }
@@ -396,7 +396,7 @@ func TestGetContainersJSON(t *testing.T) {
 		t.Fatal(err)
 	}
 	if len(containers) != 1 {
-		t.Fatalf("Excepted %d container, %d found", 1, len(containers))
+		t.Fatalf("Expected %d container, %d found", 1, len(containers))
 	}
 	if containers[0].ID != container.ID {
 		t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ID, containers[0].ID)
@@ -1356,7 +1356,7 @@ func TestDeleteImages(t *testing.T) {
 	}
 
 	if len(images) != len(initialImages)+1 {
-		t.Errorf("Excepted %d images, %d found", len(initialImages)+1, len(images))
+		t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images))
 	}
 
 	req, err := http.NewRequest("DELETE", "/images/test:test", nil)
@@ -1385,7 +1385,7 @@ func TestDeleteImages(t *testing.T) {
 	}
 
 	if len(images) != len(initialImages) {
-		t.Errorf("Excepted %d image, %d found", len(initialImages), len(images))
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
 	}
 
 	/*	if c := runtime.Get(container.Id); c != nil {

+ 1 - 2
archive.go

@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"bufio"
 	"bytes"
-	"errors"
 	"fmt"
 	"github.com/dotcloud/docker/utils"
 	"io"
@@ -251,7 +250,7 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
 		}
 		errText := <-errChan
 		if err := cmd.Wait(); err != nil {
-			pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
 		} else {
 			pipeW.Close()
 		}

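fmt.Errorf composes the combined message in one call, replacing the errors.New concatenation. A sketch of the same pattern, assuming a Unix system with a false binary on the PATH:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("false")
	errText := []byte("phase pulser failure") // stand-in for captured stderr
	if err := cmd.Run(); err != nil {
		// One call formats both values; no manual string building.
		wrapped := fmt.Errorf("%s: %s", err, errText)
		fmt.Println(wrapped) // exit status 1: phase pulser failure
	}
}
```
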
+ 4 - 4
archive_test.go

@@ -16,7 +16,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {
 	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
 	out, err := CmdStream(cmd)
 	if err != nil {
-		t.Fatalf("Failed to start command: " + err.Error())
+		t.Fatalf("Failed to start command: %s", err)
 	}
 	errCh := make(chan error)
 	go func() {
@@ -26,7 +26,7 @@ func TestCmdStreamLargeStderr(t *testing.T) {
 	select {
 	case err := <-errCh:
 		if err != nil {
-			t.Fatalf("Command should not have failed (err=%s...)", err.Error()[:100])
+			t.Fatalf("Command should not have failed (err=%.100s...)", err)
 		}
 	case <-time.After(5 * time.Second):
 		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
@@ -37,12 +37,12 @@ func TestCmdStreamBad(t *testing.T) {
 	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
 	out, err := CmdStream(badCmd)
 	if err != nil {
-		t.Fatalf("Failed to start command: " + err.Error())
+		t.Fatalf("Failed to start command: %s", err)
 	}
 	if output, err := ioutil.ReadAll(out); err == nil {
 		t.Fatalf("Command should have failed")
 	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
-		t.Fatalf("Wrong error value (%s)", err.Error())
+		t.Fatalf("Wrong error value (%s)", err)
 	} else if s := string(output); s != "hello\n" {
 		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
 	}

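Replacing err.Error()[:100] with the %.100s verb is more than style: slicing panics when the message is shorter than 100 bytes, while a precision on %s truncates safely. For instance:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("short")
	// Precision caps output at 100 bytes; shorter input passes through.
	fmt.Printf("(err=%.100s...)\n", err) // (err=short...)
	// err.Error()[:100] would panic here: slice bounds out of range.
}
```
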
+ 1 - 1
auth/auth_test.go

@@ -68,6 +68,6 @@ func TestCreateAccount(t *testing.T) {
 	expectedError := "Login: Account is not Active"
 
 	if !strings.Contains(err.Error(), expectedError) {
-		t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err.Error())
+		t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err)
 	}
 }

+ 2 - 4
buildfile_test.go

@@ -3,14 +3,13 @@ package docker
 import (
 	"fmt"
 	"io/ioutil"
-	"sync"
 	"testing"
 )
 
 // mkTestContext generates a build context from the contents of the provided dockerfile.
 // This context is suitable for use as an argument to BuildFile.Build()
 func mkTestContext(dockerfile string, files [][2]string, t *testing.T) Archive {
-	context, err := mkBuildContext(fmt.Sprintf(dockerfile, unitTestImageId), files)
+	context, err := mkBuildContext(fmt.Sprintf(dockerfile, unitTestImageID), files)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -27,7 +26,7 @@ type testContextTemplate struct {
 
 // A table of all the contexts to build and test.
 // A new docker runtime will be created and torn down for each context.
-var testContexts []testContextTemplate = []testContextTemplate{
+var testContexts = []testContextTemplate{
 	{
 		`
 from   %s
@@ -105,7 +104,6 @@ func TestBuild(t *testing.T) {
 
 		srv := &Server{
 			runtime:     runtime,
-			lock:        &sync.Mutex{},
 			pullingPool: make(map[string]struct{}),
 			pushingPool: make(map[string]struct{}),
 		}

+ 1 - 1
commands.go

@@ -1548,7 +1548,7 @@ func Subcmd(name, signature, description string) *flag.FlagSet {
 
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
 	var (
-		isTerminal bool = false
+		isTerminal = false
 		terminalFd uintptr
 	)
 

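This hunk, like the var testContexts change in buildfile_test.go above, drops a type annotation the initializer already implies; golint reports such declarations with "should omit type ... from declaration". For example:

```go
package main

import "fmt"

func main() {
	// Redundant: var isTerminal bool = false
	var isTerminal = false // bool is inferred from the literal

	// Redundant: var names []string = []string{"a", "b"}
	var names = []string{"a", "b"}

	fmt.Println(isTerminal, names)
}
```
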
+ 2 - 2
commands_test.go

@@ -142,7 +142,7 @@ func TestRunHostname(t *testing.T) {
 	c := make(chan struct{})
 	go func() {
 		defer close(c)
-		if err := cli.CmdRun("-h", "foobar", unitTestImageId, "hostname"); err != nil {
+		if err := cli.CmdRun("-h", "foobar", unitTestImageID, "hostname"); err != nil {
 			t.Fatal(err)
 		}
 	}()
@@ -335,7 +335,7 @@ func TestRunAttachStdin(t *testing.T) {
 	ch := make(chan struct{})
 	go func() {
 		defer close(ch)
-		cli.CmdRun("-i", "-a", "stdin", unitTestImageId, "sh", "-c", "echo hello && cat")
+		cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat")
 	}()
 
 	// Send input to the command, close stdin

+ 8 - 8
container.go

@@ -466,8 +466,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 }
 
 func (container *Container) Start(hostConfig *HostConfig) error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 
 	if container.State.Running {
 		return fmt.Errorf("The container %s is already running.", container.ID)
@@ -494,7 +494,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 	// Create the requested bind mounts
 	binds := make(map[string]BindMap)
 	// Define illegal container destinations
-	illegal_dsts := []string{"/", "."}
+	illegalDsts := []string{"/", "."}
 
 	for _, bind := range hostConfig.Binds {
 		// FIXME: factorize bind parsing in parseBind
@@ -513,7 +513,7 @@ func (container *Container) Start(hostConfig *HostConfig) error {
 		}
 
 		// Bail if trying to mount to an illegal destination
-		for _, illegal := range illegal_dsts {
+		for _, illegal := range illegalDsts {
 			if dst == illegal {
 				return fmt.Errorf("Illegal bind destination: %s", dst)
 			}
@@ -821,8 +821,8 @@ func (container *Container) kill() error {
 }
 
 func (container *Container) Kill() error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}
@@ -830,8 +830,8 @@ func (container *Container) Kill() error {
 }
 
 func (container *Container) Stop(seconds int) error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}

+ 1 - 1
docker/docker.go

@@ -37,7 +37,7 @@ func main() {
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Parse()
 	if len(flHosts) > 1 {
-		flHosts = flHosts[1:len(flHosts)] //trick to display a nice defaul value in the usage
+		flHosts = flHosts[1:] //trick to display a nice defaul value in the usage
 	}
 	for i, flHost := range flHosts {
 		flHosts[i] = utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)

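flHosts[1:len(flHosts)] and flHosts[1:] are equivalent: an omitted high bound defaults to the slice's length. A one-line demonstration with illustrative values, not from the commit:

```go
package main

import "fmt"

func main() {
	flHosts := []string{"default", "tcp://127.0.0.1:4243"}
	// The high bound defaults to len(flHosts) when omitted.
	fmt.Println(flHosts[1:]) // [tcp://127.0.0.1:4243]
}
```
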
BIN
docs/sources/concepts/images/dockerlogo-h.png


BIN
docs/sources/concepts/images/dockerlogo-v.png


+ 4 - 4
docs/sources/concepts/index.rst

@@ -1,10 +1,10 @@
-:title: Concepts
-:description: -- todo: change me
+:title: Overview
+:description: Docker documentation summary
 :keywords: concepts, documentation, docker, containers
 
 
 
-Concepts
+Overview
 ========
 
 Contents:
@@ -13,4 +13,4 @@ Contents:
    :maxdepth: 1
 
    ../index
-
+   manifesto

+ 190 - 0
docs/sources/concepts/manifesto.rst

@@ -0,0 +1,190 @@
+:title: Manifesto
+:description: An overview of Docker and standard containers
+:keywords: containers, lxc, concepts, explanation
+
+.. _dockermanifesto:
+
+*(This was our original Welcome page, but it is a bit forward-looking
+for docs, and maybe not enough vision for a true manifesto. We'll
+reveal more vision in the future to make it more Manifesto-y.)*
+
+Docker Manifesto
+----------------
+
+Docker complements LXC with a high-level API which operates at the
+process level. It runs unix processes with strong guarantees of
+isolation and repeatability across servers.
+
+Docker is a great building block for automating distributed systems:
+large-scale web deployments, database clusters, continuous deployment
+systems, private PaaS, service-oriented architectures, etc.
+
+- **Heterogeneous payloads** Any combination of binaries, libraries,
+  configuration files, scripts, virtualenvs, jars, gems, tarballs, you
+  name it. No more juggling between domain-specific tools. Docker can
+  deploy and run them all.
+- **Any server** Docker can run on any x64 machine with a modern linux
+  kernel - whether it's a laptop, a bare metal server or a VM. This
+  makes it perfect for multi-cloud deployments.
+- **Isolation** docker isolates processes from each other and from the
+  underlying host, using lightweight containers.
+- **Repeatability** Because containers are isolated in their own
+  filesystem, they behave the same regardless of where, when, and
+  alongside what they run.
+
+.. image:: images/lego_docker.jpg
+   :target: http://bricks.argz.com/ins/7823-1/12
+
+What is a Standard Container?
+.............................
+
+Docker defines a unit of software delivery called a Standard
+Container. The goal of a Standard Container is to encapsulate a
+software component and all its dependencies in a format that is
+self-describing and portable, so that any compliant runtime can run it
+without extra dependency, regardless of the underlying machine and the
+contents of the container.
+
+The spec for Standard Containers is currently work in progress, but it
+is very straightforward. It mostly defines 1) an image format, 2) a
+set of standard operations, and 3) an execution environment.
+
+A great analogy for this is the shipping container. Just like Standard
+Containers are a fundamental unit of software delivery, shipping
+containers are a fundamental unit of physical delivery.
+
+Standard operations
+~~~~~~~~~~~~~~~~~~~
+
+Just like shipping containers, Standard Containers define a set of
+STANDARD OPERATIONS. Shipping containers can be lifted, stacked,
+locked, loaded, unloaded and labelled. Similarly, standard containers
+can be started, stopped, copied, snapshotted, downloaded, uploaded and
+tagged.
+
+
+Content-agnostic
+~~~~~~~~~~~~~~~~~~~
+
+Just like shipping containers, Standard Containers are
+CONTENT-AGNOSTIC: all standard operations have the same effect
+regardless of the contents. A shipping container will be stacked in
+exactly the same way whether it contains Vietnamese powder coffee or
+spare Maserati parts. Similarly, Standard Containers are started or
+uploaded in the same way whether they contain a postgres database, a
+php application with its dependencies and application server, or Java
+build artifacts.
+
+Infrastructure-agnostic
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be
+transported to thousands of facilities around the world, and
+manipulated by a wide variety of equipment. A shipping container can
+be packed in a factory in Ukraine, transported by truck to the nearest
+routing center, stacked onto a train, loaded into a German boat by an
+Australian-built crane, stored in a warehouse at a US facility,
+etc. Similarly, a standard container can be bundled on my laptop,
+uploaded to S3, downloaded, run and snapshotted by a build server at
+Equinix in Virginia, uploaded to 10 staging servers in a home-made
+Openstack cluster, then sent to 30 production instances across 3 EC2
+regions.
+
+
+Designed for automation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Because they offer the same standard operations regardless of content
+and infrastructure, Standard Containers, just like their physical
+counterpart, are extremely well-suited for automation. In fact, you
+could say automation is their secret weapon.
+
+Many things that once required time-consuming and error-prone human
+effort can now be programmed. Before shipping containers, a bag of
+powder coffee was hauled, dragged, dropped, rolled and stacked by 10
+different people in 10 different locations by the time it reached its
+destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The
+process was slow, inefficient and cost a fortune - and was entirely
+different depending on the facility and the type of goods.
+
+Similarly, before Standard Containers, by the time a software
+component ran in production, it had been individually built,
+configured, bundled, documented, patched, vendored, templated, tweaked
+and instrumented by 10 different people on 10 different
+computers. Builds failed, libraries conflicted, mirrors crashed,
+post-it notes were lost, logs were misplaced, cluster updates were
+half-broken. The process was slow, inefficient and cost a fortune -
+and was entirely different depending on the language and
+infrastructure provider.
+
+Industrial-grade delivery
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are 17 million shipping containers in existence, packed with
+every physical good imaginable. Every single one of them can be loaded
+on the same boats, by the same cranes, in the same facilities, and
+sent anywhere in the World with incredible efficiency. It is
+embarrassing to think that a 30 ton shipment of coffee can safely
+travel half-way across the World in *less time* than it takes a
+software team to deliver its code from one datacenter to another
+sitting 10 miles away.
+
+With Standard Containers we can put an end to that embarrassment, by
+making INDUSTRIAL-GRADE DELIVERY of software a reality.
+
+Standard Container Specification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+(TODO)
+
+Image format
+~~~~~~~~~~~~
+
+Standard operations
+~~~~~~~~~~~~~~~~~~~
+
+-  Copy
+-  Run
+-  Stop
+-  Wait
+-  Commit
+-  Attach standard streams
+-  List filesystem changes
+-  ...
+
+Execution environment
+~~~~~~~~~~~~~~~~~~~~~
+
+Root filesystem
+^^^^^^^^^^^^^^^
+
+Environment variables
+^^^^^^^^^^^^^^^^^^^^^
+
+Process arguments
+^^^^^^^^^^^^^^^^^
+
+Networking
+^^^^^^^^^^
+
+Process namespacing
+^^^^^^^^^^^^^^^^^^^
+
+Resource limits
+^^^^^^^^^^^^^^^
+
+Process monitoring
+^^^^^^^^^^^^^^^^^^
+
+Logging
+^^^^^^^
+
+Signals
+^^^^^^^
+
+Pseudo-terminal allocation
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Security
+^^^^^^^^
+

+ 27 - 116
docs/sources/index.rst

@@ -1,127 +1,38 @@
-:title: Introduction
-:description: An introduction to docker and standard containers?
+:title: Welcome to the Docker Documentation
+:description: An overview of the Docker Documentation
 :keywords: containers, lxc, concepts, explanation
 
 .. _introduction:
 
-Introduction
-============
+Welcome
+=======
 
-Docker -- The Linux container runtime
--------------------------------------
+.. image:: concepts/images/dockerlogo-h.png
 
-Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
+``docker``, the Linux Container Runtime, runs Unix processes with
+strong guarantees of isolation across servers. Your software runs
+repeatably everywhere because its :ref:`container_def` includes any
+dependencies.
 
-Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
+``docker`` runs three ways:
 
+* as a daemon to manage LXC containers on your :ref:`Linux host
+  <kernel>` (``sudo docker -d``)
+* as a :ref:`CLI <cli>` which talks to the daemon's `REST API
+  <api/docker_remote_api>`_ (``docker run ...``)
+* as a client of :ref:`Repositories <working_with_the_repository>`
+  that let you share what you've built (``docker pull, docker
+  commit``).
 
-- **Heterogeneous payloads** Any combination of binaries, libraries, configuration files, scripts, virtualenvs, jars, gems, tarballs, you name it. No more juggling between domain-specific tools. Docker can deploy and run them all.
-- **Any server** Docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
-- **Isolation** docker isolates processes from each other and from the underlying host, using lightweight containers.
-- **Repeatability** Because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
+Each use of ``docker`` is documented here. The features of Docker are
+currently in active development, so this documention will change
+frequently.
 
-.. image:: concepts/images/lego_docker.jpg
-
-
-What is a Standard Container?
------------------------------
-
-Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
-a format that is self-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.
-
-The spec for Standard Containers is currently work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
-
-A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.
-
-Standard operations
-~~~~~~~~~~~~~~~~~~~
-
-Just like shipping containers, Standard Containers define a set of STANDARD OPERATIONS. Shipping containers can be lifted, stacked, locked, loaded, unloaded and labelled. Similarly, standard containers can be started, stopped, copied, snapshotted, downloaded, uploaded and tagged.
-
-
-Content-agnostic
-~~~~~~~~~~~~~~~~~~~
-
-Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
-
-
-Infrastructure-agnostic
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Both types of containers are INFRASTRUCTURE-AGNOSTIC: they can be transported to thousands of facilities around the world, and manipulated by a wide variety of equipment. A shipping container can be packed in a factory in Ukraine, transported by truck to the nearest routing center, stacked onto a train, loaded into a German boat by an Australian-built crane, stored in a warehouse at a US facility, etc. Similarly, a standard container can be bundled on my laptop, uploaded to S3, downloaded, run and snapshotted by a build server at Equinix in Virginia, uploaded to 10 staging servers in a home-made Openstack cluster, then sent to 30 production instances across 3 EC2 regions.
-
-
-Designed for automation
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Because they offer the same standard operations regardless of content and infrastructure, Standard Containers, just like their physical counterpart, are extremely well-suited for automation. In fact, you could say automation is their secret weapon.
-
-Many things that once required time-consuming and error-prone human effort can now be programmed. Before shipping containers, a bag of powder coffee was hauled, dragged, dropped, rolled and stacked by 10 different people in 10 different locations by the time it reached its destination. 1 out of 50 disappeared. 1 out of 20 was damaged. The process was slow, inefficient and cost a fortune - and was entirely different depending on the facility and the type of goods.
-
-Similarly, before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers. Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken. The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
-
-
-Industrial-grade delivery
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are 17 million shipping containers in existence, packed with every physical good imaginable. Every single one of them can be loaded on the same boats, by the same cranes, in the same facilities, and sent anywhere in the World with incredible efficiency. It is embarrassing to think that a 30 ton shipment of coffee can safely travel half-way across the World in *less time* than it takes a software team to deliver its code from one datacenter to another sitting 10 miles away.
-
-With Standard Containers we can put an end to that embarrassment, by making INDUSTRIAL-GRADE DELIVERY of software a reality.
-
-
-Standard Container Specification
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-(TODO)
-
-Image format
-~~~~~~~~~~~~
-
-Standard operations
-~~~~~~~~~~~~~~~~~~~
-
--  Copy
--  Run
--  Stop
--  Wait
--  Commit
--  Attach standard streams
--  List filesystem changes
--  ...
-
-Execution environment
-~~~~~~~~~~~~~~~~~~~~~
-
-Root filesystem
-^^^^^^^^^^^^^^^
-
-Environment variables
-^^^^^^^^^^^^^^^^^^^^^
-
-Process arguments
-^^^^^^^^^^^^^^^^^
-
-Networking
-^^^^^^^^^^
-
-Process namespacing
-^^^^^^^^^^^^^^^^^^^
-
-Resource limits
-^^^^^^^^^^^^^^^
-
-Process monitoring
-^^^^^^^^^^^^^^^^^^
-
-Logging
-^^^^^^^
-
-Signals
-^^^^^^^
-
-Pseudo-terminal allocation
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Security
-^^^^^^^^
+For an overview of Docker, please see the `Introduction
+<http://www.docker.io>`_. When you're ready to start working with
+Docker, we have a `quick start <http://www.docker.io/gettingstarted>`_
+and a more in-depth guide to :ref:`ubuntu_linux` and other
+:ref:`installation_list` paths including prebuilt binaries,
+Vagrant-created VMs, Rackspace and Amazon instances.
 
+Enough reading! :ref:`Try it out! <running_examples>`

+ 9 - 4
docs/sources/installation/index.rst

@@ -1,12 +1,17 @@
-:title: Documentation
-:description: -- todo: change me
-:keywords: todo, docker, documentation, installation, OS support
-
+:title: Docker Installation
+:description: many ways to install Docker
+:keywords: docker, installation
 
+.. _installation_list:
 
 Installation
 ============
 
+There are a number of ways to install Docker, depending on where you
+want to run the daemon. The :ref:`ubuntu_linux` installation is the
+officially-tested version, and the community adds more techniques for
+installing Docker all the time.
+
 Contents:
 
 .. toctree::

+ 40 - 0
docs/sources/terms/container.rst

@@ -0,0 +1,40 @@
+:title: Container
+:description: Definitions of a container
+:keywords: containers, lxc, concepts, explanation, image, container
+
+.. _container_def:
+
+Container
+=========
+
+.. image:: images/docker-filesystems-busyboxrw.png
+
+Once you start a process in Docker from an :ref:`image_def`, Docker
+fetches the image and its :ref:`parent_image_def`, and repeats the
+process until it reaches the :ref:`base_image_def`. Then the
+:ref:`ufs_def` adds a read-write layer on top. That read-write layer,
+plus the information about its :ref:`parent_image_def` and some
+additional information like its unique id, networking configuration,
+and resource limits is called a **container**.
+
+.. _container_state_def:
+
+Container State
+...............
+
+Containers can change, and so they have state. A container may be
+**running** or **exited**. 
+
+When a container is running, the idea of a "container" also includes a
+tree of processes running on the CPU, isolated from the other
+processes running on the host.
+
+When the container is exited, the state of the file system and
+its exit value is preserved. You can start, stop, and restart a
+container. The processes restart from scratch (their memory state is
+**not** preserved in a container), but the file system is just as it
+was when the container was stopped.
+
+You can promote a container to an :ref:`image_def` with ``docker
+commit``. Once a container is an image, you can use it as a parent for
+new containers.

+ 38 - 0
docs/sources/terms/filesystem.rst

@@ -0,0 +1,38 @@
+:title: File Systems
+:description: How Linux organizes its persistent storage
+:keywords: containers, files, linux
+
+.. _filesystem_def:
+
+File System
+===========
+
+.. image:: images/docker-filesystems-generic.png
+
+In order for a Linux system to run, it typically needs two `file
+systems <http://en.wikipedia.org/wiki/Filesystem>`_:
+
+1. boot file system (bootfs)
+2. root file system (rootfs)
+
+The **boot file system** contains the bootloader and the kernel. The
+user never makes any changes to the boot file system. In fact, soon
+after the boot process is complete, the entire kernel is in memory,
+and the boot file system is unmounted to free up the RAM associated
+with the initrd disk image.
+
+
+The **root file system** includes the typical directory structure we
+associate with Unix-like operating systems: ``/dev, /proc, /bin, /etc,
+/lib, /usr,`` and ``/tmp`` plus all the configuration files, binaries
+and libraries required to run user applications (like bash, ls, and so
+forth). 
+
+While there can be important kernel differences between different
+Linux distributions, the contents and organization of the root file
+system are usually what make your software packages dependent on one
+distribution versus another. Docker can help solve this problem by
+running multiple distributions at the same time.
+
+.. image:: images/docker-filesystems-multiroot.png
+

+ 0 - 97
docs/sources/terms/fundamentals.rst

@@ -1,97 +0,0 @@
-:title: Image & Container
-:description: Definitions of an image and container
-:keywords: containers, lxc, concepts, explanation, image, container
-
-File Systems
-============
-
-.. image:: images/docker-filesystems-generic.png
-
-In order for a Linux system to run, it typically needs two `file
-systems <http://en.wikipedia.org/wiki/Filesystem>`_:
-
-1. boot file system (bootfs)
-2. root file system (rootfs)
-
-The **boot file system** contains the bootloader and the kernel. The
-user never makes any changes to the boot file system. In fact, soon
-after the boot process is complete, the entire kernel is in memory,
-and the boot file system is unmounted to free up the RAM associated
-with the initrd disk image.
-
-The **root file system** includes the typical directory structure we
-associate with Unix-like operating systems: ``/dev, /proc, /bin, /etc,
-/lib, /usr,`` and ``/tmp`` plus all the configuration files, binaries
-and libraries required to run user applications (like bash, ls, and so
-forth). 
-
-While there can be important kernal differences between different
-Linux distributions, the contents and organization of the root file
-system are usually what make your software packages dependent on one
-distribution versus another. Docker can help solve this problem by
-running multiple distributions at the same time.
-
-.. image:: images/docker-filesystems-multiroot.png
-
-Layers and Union Mounts
-=======================
-
-In a traditional Linux boot, the kernel first mounts the root file
-system as read-only, checks its integrity, and then switches the whole
-rootfs volume to read-write mode. Docker does something similar,
-*except* that instead of changing the file system to read-write mode,
-it takes advantage of a `union mount
-<http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file
-system *over* the read-only file system. In fact there may be multiple
-read-only file systems stacked on top of each other.
-
-.. image:: images/docker-filesystems-multilayer.png
-
-At first, the top layer has nothing in it, but any time a process
-creates a file, this happens in the top layer. And if something needs
-to update an existing file in a lower layer, then the file gets copied
-to the upper layer and changes go into the copy. The version of the
-file on the lower layer cannot be seen by the applications anymore,
-but it is there, unchanged.
-
-We call the union of the read-write layer and all the read-only layers
-a **union file system**.
-
-Image
-=====
-
-In Docker terminology, a read-only layer is called an **image**. An
-image never changes. Because Docker uses a union file system, the
-applications think the whole file system is mounted read-write,
-because any file can be changed. But all the changes go to the
-top-most layer, and underneath, the image is unchanged. Since they
-don't change, images do not have state.
-
-Each image may depend on one more image which forms the layer beneath
-it. We sometimes say that the lower image is the **parent** of the
-upper image.
-
-Base Image
-==========
-
-An image that has no parent is a **base image**.
-
-Container
-=========
-
-Once you start a process in Docker from an image, Docker fetches the
-image and its parent, and repeats the process until it reaches the
-base image. Then the union file system adds a read-write layer on
-top. That read-write layer, plus the information about its parent and
-some additional information like its unique id, is called a
-**container**. 
-
-Containers can change, and so they have state. A container may be
-running or exited. In either case, the state of the file system and
-its exit value is preserved. You can start, stop, and restart a
-container. The processes restart from scratch (their memory state is
-**not** preserved in a container), but the file system is just as it
-was when the container was stopped.
-
-You can promote a container to an image with ``docker commit``. Once a
-container is an image, you can use it as a parent for new containers.

+ 38 - 0
docs/sources/terms/image.rst

@@ -0,0 +1,38 @@
+:title: Images
+:description: Definition of an image
+:keywords: containers, lxc, concepts, explanation, image, container
+
+.. _image_def:
+
+Image
+=====
+
+.. image:: images/docker-filesystems-debian.png
+
+In Docker terminology, a read-only :ref:`layer_def` is called an
+**image**. An image never changes. 
+
+Since Docker uses a :ref:`ufs_def`, the processes think the whole file
+system is mounted read-write. But all the changes go to the top-most
+writeable layer, and underneath, the original file in the read-only
+image is unchanged. Since images don't change, images do not have state.
+
+.. image:: images/docker-filesystems-debianrw.png
+
+.. _parent_image_def:
+
+Parent Image
+............
+
+.. image:: images/docker-filesystems-multilayer.png
+
+Each image may depend on one more image which forms the layer beneath
+it. We sometimes say that the lower image is the **parent** of the
+upper image.
+
+.. _base_image_def:
+
+Base Image
+..........
+
+An image that has no parent is a **base image**.

BIN
docs/sources/terms/images/docker-filesystems-busyboxrw.png


BIN
docs/sources/terms/images/docker-filesystems-debian.png


BIN
docs/sources/terms/images/docker-filesystems-debianrw.png


BIN
docs/sources/terms/images/docker-filesystems-generic.png


BIN
docs/sources/terms/images/docker-filesystems-multilayer.png


BIN
docs/sources/terms/images/docker-filesystems-multiroot.png


File diff suppressed because it is too large
+ 37 - 23
docs/sources/terms/images/docker-filesystems.svg


+ 8 - 4
docs/sources/terms/index.rst

@@ -1,11 +1,11 @@
-:title: Terms
+:title: Glossary
 :description: Definitions of terms used in Docker documentation
 :keywords: concepts, documentation, docker, containers
 
 
 
-Terms
-=====
+Glossary
+========
 
 Definitions of terms used in Docker documentation.
 
@@ -14,5 +14,9 @@ Contents:
 .. toctree::
    :maxdepth: 1
 
-   fundamentals
+   filesystem
+   layer
+   image
+   container
+
 

+ 40 - 0
docs/sources/terms/layer.rst

@@ -0,0 +1,40 @@
+:title: Layers
+:description: Organizing the Docker Root File System
+:keywords: containers, lxc, concepts, explanation, image, container
+
+Layers
+======
+
+In a traditional Linux boot, the kernel first mounts the root
+:ref:`filesystem_def` as read-only, checks its integrity, and then
+switches the whole rootfs volume to read-write mode.
+
+.. _layer_def:
+
+Layer
+.....
+
+When Docker mounts the rootfs, it starts read-only, as in a tradtional
+Linux boot, but then, instead of changing the file system to
+read-write mode, it takes advantage of a `union mount
+<http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file
+system *over* the read-only file system. In fact there may be multiple
+read-only file systems stacked on top of each other. We think of each
+one of these file systems as a **layer**.
+
+.. image:: images/docker-filesystems-multilayer.png
+
+At first, the top read-write layer has nothing in it, but any time a
+process creates a file, this happens in the top layer. And if
+something needs to update an existing file in a lower layer, then the
+file gets copied to the upper layer and changes go into the copy. The
+version of the file on the lower layer cannot be seen by the
+applications anymore, but it is there, unchanged.
+
+.. _ufs_def:
+
+Union File System
+.................
+
+We call the union of the read-write layer and all the read-only layers
+a **union file system**.

+ 3 - 2
docs/sources/toctree.rst

@@ -17,7 +17,8 @@ This documentation has the following resources:
    commandline/index
    contributing/index
    api/index
-   faq
    terms/index
+   faq
+
+
 
-.. image:: concepts/images/lego_docker.jpg

+ 7 - 7
docs/sources/use/workingwithrepository.rst

@@ -1,21 +1,21 @@
 :title: Working With Repositories
-:description: Generally, there are two types of repositories: Top-level repositories which are controlled by the people behind Docker, and user repositories.
+:description: Repositories allow users to share images.
 :keywords: repo, repositiores, usage, pull image, push image, image, documentation
 
 .. _working_with_the_repository:
 
-Working with the Repository
-===========================
+Working with Repositories
+=========================
 
 
 Top-level repositories and user repositories
 --------------------------------------------
 
-Generally, there are two types of repositories: Top-level repositories which are controlled by the people behind
-Docker, and user repositories.
+Generally, there are two types of repositories: Top-level repositories
+which are controlled by the people behind Docker, and user
+repositories.
 
-* Top-level repositories can easily be recognized by not having a ``/`` (slash) in their name. These repositories can
-  generally be trusted.
+* Top-level repositories can easily be recognized by not having a ``/`` (slash) in their name. These repositories can  generally be trusted.
 * User repositories always come in the form of ``<username>/<repo_name>``. This is what your published images will look like.
 * User images are not checked, it is therefore up to you whether or not you trust the creator of this image.
 

+ 5 - 5
network.go

@@ -301,9 +301,9 @@ func newPortMapper() (*PortMapper, error) {
 
 // Port allocator: Atomatically allocate and release networking ports
 type PortAllocator struct {
+	sync.Mutex
 	inUse    map[int]struct{}
 	fountain chan (int)
-	lock     sync.Mutex
 }
 
 func (alloc *PortAllocator) runFountain() {
@@ -317,9 +317,9 @@ func (alloc *PortAllocator) runFountain() {
 // FIXME: Release can no longer fail, change its prototype to reflect that.
 func (alloc *PortAllocator) Release(port int) error {
 	utils.Debugf("Releasing %d", port)
-	alloc.lock.Lock()
+	alloc.Lock()
 	delete(alloc.inUse, port)
-	alloc.lock.Unlock()
+	alloc.Unlock()
 	return nil
 }
 
@@ -334,8 +334,8 @@ func (alloc *PortAllocator) Acquire(port int) (int, error) {
 		}
 		return -1, fmt.Errorf("Port generator ended unexpectedly")
 	}
-	alloc.lock.Lock()
-	defer alloc.lock.Unlock()
+	alloc.Lock()
+	defer alloc.Unlock()
 	if _, inUse := alloc.inUse[port]; inUse {
 		return -1, fmt.Errorf("Port already in use: %d", port)
 	}

+ 3 - 3
registry/registry.go

@@ -96,7 +96,7 @@ func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]s
 	res, err := r.client.Do(req)
 	if err != nil || res.StatusCode != 200 {
 		if res != nil {
-			return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
+			return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgID)
 		}
 		return nil, err
 	}
@@ -116,7 +116,7 @@ func (r *Registry) GetRemoteHistory(imgId, registry string, token []string) ([]s
 }
 
 // Check if an image exists in the Registry
-func (r *Registry) LookupRemoteImage(imgId, registry string, token []string) bool {
+func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool {
 	rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
 
 	req, err := http.NewRequest("GET", registry+"images/"+imgId+"/json", nil)
@@ -132,7 +132,7 @@ func (r *Registry) LookupRemoteImage(imgId, registry string, token []string) boo
 }
 
 // Retrieve an image from the Registry.
-func (r *Registry) GetRemoteImageJSON(imgId, registry string, token []string) ([]byte, int, error) {
+func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) {
 	// Get the JSON
 	req, err := http.NewRequest("GET", registry+"images/"+imgId+"/json", nil)
 	if err != nil {

+ 0 - 3
runtime.go

@@ -108,9 +108,6 @@ func (runtime *Runtime) Register(container *Container) error {
 	// init the wait lock
 	container.waitLock = make(chan struct{})
 
-	// Even if not running, we init the lock (prevents races in start/stop/kill)
-	container.State.initLock()
-
 	container.runtime = runtime
 
 	// Attach to stdout and stderr

+ 5 - 6
runtime_test.go

@@ -18,7 +18,7 @@ import (
 
 const (
 	unitTestImageName = "docker-unit-tests"
-	unitTestImageId   = "e9aa60c60128cad1"
+	unitTestImageID   = "e9aa60c60128cad1"
 	unitTestStoreBase = "/var/lib/docker/unit-tests"
 	testDaemonAddr    = "127.0.0.1:4270"
 	testDaemonProto   = "tcp"
@@ -49,7 +49,7 @@ func cleanup(runtime *Runtime) error {
 		return err
 	}
 	for _, image := range images {
-		if image.ID != unitTestImageId {
+		if image.ID != unitTestImageID {
 			runtime.graph.Delete(image.ID)
 		}
 	}
@@ -73,7 +73,7 @@ func init() {
 	}
 
 	if uid := syscall.Geteuid(); uid != 0 {
-		log.Fatal("docker tests needs to be run as root")
+		log.Fatal("docker tests need to be run as root")
 	}
 
 	NetworkBridgeIface = "testdockbr0"
@@ -89,7 +89,6 @@ func init() {
 	srv := &Server{
 		runtime:     runtime,
 		enableCors:  false,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
@@ -136,11 +135,11 @@ func GetTestImage(runtime *Runtime) *Image {
 		panic(err)
 	}
 	for i := range imgs {
-		if imgs[i].ID == unitTestImageId {
+		if imgs[i].ID == unitTestImageID {
 			return imgs[i]
 		}
 	}
-	panic(fmt.Errorf("Test image %v not found", unitTestImageId))
+	panic(fmt.Errorf("Test image %v not found", unitTestImageID))
 }
 
 func TestRuntimeCreate(t *testing.T) {

+ 28 - 26
server.go

@@ -29,7 +29,7 @@ func (srv *Server) DockerVersion() APIVersion {
 func (srv *Server) ContainerKill(name string) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Kill(); err != nil {
-			return fmt.Errorf("Error restarting container %s: %s", name, err.Error())
+			return fmt.Errorf("Error restarting container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -315,8 +315,8 @@ func (srv *Server) ContainerTag(name, repo, tag string, force bool) error {
 	return nil
 }
 
-func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgId, endpoint string, token []string, sf *utils.StreamFormatter) error {
-	history, err := r.GetRemoteHistory(imgId, endpoint, token)
+func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
+	history, err := r.GetRemoteHistory(imgID, endpoint, token)
 	if err != nil {
 		return err
 	}
@@ -435,8 +435,8 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, name, ask
 }
 
 func (srv *Server) poolAdd(kind, key string) error {
-	srv.lock.Lock()
-	defer srv.lock.Unlock()
+	srv.Lock()
+	defer srv.Unlock()
 
 	if _, exists := srv.pullingPool[key]; exists {
 		return fmt.Errorf("%s %s is already in progress", key, kind)
@@ -504,20 +504,20 @@ func (srv *Server) ImagePull(name string, tag string, out io.Writer, sf *utils.S
 // - Check if the archive exists, if it does not, ask the registry
 // - If the archive does exists, process the checksum from it
 // - If the archive does not exists and not found on registry, process checksum from layer
-func (srv *Server) getChecksum(imageId string) (string, error) {
+func (srv *Server) getChecksum(imageID string) (string, error) {
 	// FIXME: Use in-memory map instead of reading the file each time
 	if sums, err := srv.runtime.graph.getStoredChecksums(); err != nil {
 		return "", err
-	} else if checksum, exists := sums[imageId]; exists {
+	} else if checksum, exists := sums[imageID]; exists {
 		return checksum, nil
 	}
 
-	img, err := srv.runtime.graph.Get(imageId)
+	img, err := srv.runtime.graph.Get(imageID)
 	if err != nil {
 		return "", err
 	}
 
-	if _, err := os.Stat(layerArchivePath(srv.runtime.graph.imageRoot(imageId))); err != nil {
+	if _, err := os.Stat(layerArchivePath(srv.runtime.graph.imageRoot(imageID))); err != nil {
 		if os.IsNotExist(err) {
 			// TODO: Ask the registry for the checksum
 			//       As the archive is not there, it is supposed to come from a pull.
@@ -591,6 +591,9 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, name stri
 			if _, exists := repoData.ImgList[elem.ID]; exists {
 				out.Write(sf.FormatStatus("Image %s already on registry, skipping", name))
 				continue
+			} else if r.LookupRemoteImage(elem.ID, ep, repoData.Tokens) {
+				fmt.Fprintf(out, "Image %s already on registry, skipping\n", name)
+				continue
 			}
 			if err := srv.pushImage(r, out, name, elem.ID, ep, repoData.Tokens, sf); err != nil {
 				// FIXME: Continue on error?
@@ -610,21 +613,21 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, name stri
 	return nil
 }
 
-func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId, ep string, token []string, sf *utils.StreamFormatter) error {
+func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) error {
 	out = utils.NewWriteFlusher(out)
-	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgId, "json"))
+	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
 	if err != nil {
-		return fmt.Errorf("Error while retreiving the path for {%s}: %s", imgId, err)
+		return fmt.Errorf("Error while retreiving the path for {%s}: %s", imgID, err)
 	}
-	out.Write(sf.FormatStatus("Pushing %s", imgId))
+	out.Write(sf.FormatStatus("Pushing %s", imgID))
 
 	// Make sure we have the image's checksum
-	checksum, err := srv.getChecksum(imgId)
+	checksum, err := srv.getChecksum(imgID)
 	if err != nil {
 		return err
 	}
 	imgData := &registry.ImgData{
-		ID:       imgId,
+		ID:       imgID,
 		Checksum: checksum,
 	}
 
@@ -640,11 +643,11 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgId,
 	// Retrieve the tarball to be sent
 	var layerData *TempArchive
 	// If the archive exists, use it
-	file, err := os.Open(layerArchivePath(srv.runtime.graph.imageRoot(imgId)))
+	file, err := os.Open(layerArchivePath(srv.runtime.graph.imageRoot(imgID)))
 	if err != nil {
 		if os.IsNotExist(err) {
 			// If the archive does not exist, create one from the layer
-			layerData, err = srv.runtime.graph.TempLayerArchive(imgId, Xz, out)
+			layerData, err = srv.runtime.graph.TempLayerArchive(imgID, Xz, out)
 			if err != nil {
 				return fmt.Errorf("Failed to generate layer archive: %s", err)
 			}
@@ -780,7 +783,7 @@ func (srv *Server) ContainerCreate(config *Config) (string, error) {
 func (srv *Server) ContainerRestart(name string, t int) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Restart(t); err != nil {
-			return fmt.Errorf("Error restarting container %s: %s", name, err.Error())
+			return fmt.Errorf("Error restarting container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -799,7 +802,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume bool) error {
 			volumes[volumeId] = struct{}{}
 		}
 		if err := srv.runtime.Destroy(container); err != nil {
-			return fmt.Errorf("Error destroying container %s: %s", name, err.Error())
+			return fmt.Errorf("Error destroying container %s: %s", name, err)
 		}
 
 		if removeVolume {
@@ -919,7 +922,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 	}
 	if !autoPrune {
 		if err := srv.runtime.graph.Delete(img.ID); err != nil {
-			return nil, fmt.Errorf("Error deleting image %s: %s", name, err.Error())
+			return nil, fmt.Errorf("Error deleting image %s: %s", name, err)
 		}
 		return nil, nil
 	}
@@ -934,7 +937,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
 	return srv.deleteImage(img, name, tag)
 }
 
-func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error) {
+func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error) {
 
 	// Retrieve all images
 	images, err := srv.runtime.graph.All()
@@ -952,7 +955,7 @@ func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error)
 	}
 
 	// Loop on the children of the given image and check the config
-	for elem := range imageMap[imgId] {
+	for elem := range imageMap[imgID] {
 		img, err := srv.runtime.graph.Get(elem)
 		if err != nil {
 			return nil, err
@@ -967,7 +970,7 @@ func (srv *Server) ImageGetCached(imgId string, config *Config) (*Image, error)
 func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Start(hostConfig); err != nil {
-			return fmt.Errorf("Error starting container %s: %s", name, err.Error())
+			return fmt.Errorf("Error starting container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -978,7 +981,7 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
 func (srv *Server) ContainerStop(name string, t int) error {
 	if container := srv.runtime.Get(name); container != nil {
 		if err := container.Stop(t); err != nil {
-			return fmt.Errorf("Error stopping container %s: %s", name, err.Error())
+			return fmt.Errorf("Error stopping container %s: %s", name, err)
 		}
 	} else {
 		return fmt.Errorf("No such container: %s", name)
@@ -1090,7 +1093,6 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 	srv := &Server{
 		runtime:     runtime,
 		enableCors:  enableCors,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
@@ -1099,9 +1101,9 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 }
 
 type Server struct {
+	sync.Mutex
 	runtime     *Runtime
 	enableCors  bool
-	lock        *sync.Mutex
 	pullingPool map[string]struct{}
 	pushingPool map[string]struct{}
 }

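Folding the lock field into an embedded sync.Mutex (last hunk above) lets methods call srv.Lock() directly. A minimal sketch of the poolAdd duplicate-operation guard under that layout, with the unrelated Server fields left out:

```go
package main

import (
	"fmt"
	"sync"
)

// Embedding sync.Mutex promotes Lock/Unlock onto Server itself.
type Server struct {
	sync.Mutex
	pullingPool map[string]struct{}
}

func (srv *Server) poolAdd(key string) error {
	srv.Lock()
	defer srv.Unlock()
	// A key already in the pool means the pull is in progress elsewhere.
	if _, exists := srv.pullingPool[key]; exists {
		return fmt.Errorf("pull of %s is already in progress", key)
	}
	srv.pullingPool[key] = struct{}{}
	return nil
}

func main() {
	srv := &Server{pullingPool: make(map[string]struct{})}
	fmt.Println(srv.poolAdd("ubuntu")) // <nil>
	fmt.Println(srv.poolAdd("ubuntu")) // error: already in progress
}
```
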
+ 3 - 3
server_test.go

@@ -31,7 +31,7 @@ func TestContainerTagImageDelete(t *testing.T) {
 	}
 
 	if len(images) != len(initialImages)+2 {
-		t.Errorf("Excepted %d images, %d found", len(initialImages)+2, len(images))
+		t.Errorf("Expected %d images, %d found", len(initialImages)+2, len(images))
 	}
 
 	if _, err := srv.ImageDelete("utest/docker:tag2", true); err != nil {
@@ -44,7 +44,7 @@ func TestContainerTagImageDelete(t *testing.T) {
 	}
 
 	if len(images) != len(initialImages)+1 {
-		t.Errorf("Excepted %d images, %d found", len(initialImages)+1, len(images))
+		t.Errorf("Expected %d images, %d found", len(initialImages)+1, len(images))
 	}
 
 	if _, err := srv.ImageDelete("utest:tag1", true); err != nil {
@@ -57,7 +57,7 @@ func TestContainerTagImageDelete(t *testing.T) {
 	}
 
 	if len(images) != len(initialImages) {
-		t.Errorf("Excepted %d image, %d found", len(initialImages), len(images))
+		t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
 	}
 }
 

+ 1 - 13
state.go

@@ -8,11 +8,11 @@ import (
 )
 
 type State struct {
+	sync.Mutex
 	Running   bool
 	Pid       int
 	ExitCode  int
 	StartedAt time.Time
-	l         *sync.Mutex
 	Ghost     bool
 }
 
@@ -39,15 +39,3 @@ func (s *State) setStopped(exitCode int) {
 	s.Pid = 0
 	s.ExitCode = exitCode
 }
-
-func (s *State) initLock() {
-	s.l = &sync.Mutex{}
-}
-
-func (s *State) lock() {
-	s.l.Lock()
-}
-
-func (s *State) unlock() {
-	s.l.Unlock()
-}

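Embedding the mutex also explains why runtime.go (above) could drop the initLock() call: the zero value of sync.Mutex is ready to use, so a State works the moment it is allocated. A short sketch:

```go
package main

import "sync"

// Embedding sync.Mutex promotes Lock/Unlock onto State, and the
// zero-value mutex is valid, so no initLock() step is required.
type State struct {
	sync.Mutex
	Running bool
}

func main() {
	s := &State{} // usable immediately
	s.Lock()
	s.Running = true
	s.Unlock()
}
```
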
+ 3 - 3
tags.go

@@ -197,7 +197,7 @@ func (store *TagStore) Get(repoName string) (Repository, error) {
 	return nil, nil
 }
 
-func (store *TagStore) GetImage(repoName, tagOrId string) (*Image, error) {
+func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) {
 	repo, err := store.Get(repoName)
 	if err != nil {
 		return nil, err
@@ -206,11 +206,11 @@ func (store *TagStore) GetImage(repoName, tagOrId string) (*Image, error) {
 	}
 	//go through all the tags, to see if tag is in fact an ID
 	for _, revision := range repo {
-		if strings.HasPrefix(revision, tagOrId) {
+		if strings.HasPrefix(revision, tagOrID) {
 			return store.graph.Get(revision)
 		}
 	}
-	if revision, exists := repo[tagOrId]; exists {
+	if revision, exists := repo[tagOrID]; exists {
 		return store.graph.Get(revision)
 	}
 	return nil, nil

+ 2 - 2
tags_test.go

@@ -35,13 +35,13 @@ func TestLookupImage(t *testing.T) {
 		t.Errorf("Expected 0 image, 1 found")
 	}
 
-	if img, err := runtime.repositories.LookupImage(unitTestImageId); err != nil {
+	if img, err := runtime.repositories.LookupImage(unitTestImageID); err != nil {
 		t.Fatal(err)
 	} else if img == nil {
 		t.Errorf("Expected 1 image, none found")
 	}
 
-	if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageId); err != nil {
+	if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageID); err != nil {
 		t.Fatal(err)
 	} else if img == nil {
 		t.Errorf("Expected 1 image, none found")

+ 10 - 0
testing/README.rst

@@ -30,6 +30,16 @@ Deployment
   export AWS_KEYPAIR_NAME=xxxxxxxxxxxx
   export AWS_SSH_PRIVKEY=xxxxxxxxxxxx
 
+  # Define email recipient and IRC channel
+  export EMAIL_RCP=xxxxxx@domain.com
+  export IRC_CHANNEL=docker
+
+  # Define buildbot credentials
+  export BUILDBOT_PWD=xxxxxxxxxxxx
+  export IRC_PWD=xxxxxxxxxxxx
+  export SMTP_USER=xxxxxxxxxxxx
+  export SMTP_PWD=xxxxxxxxxxxx
+
   # Checkout docker
   git clone git://github.com/dotcloud/docker.git
 

+ 3 - 1
testing/Vagrantfile

@@ -27,7 +27,9 @@ Vagrant::Config.run do |config|
     pkg_cmd << "apt-get install -q -y python-dev python-pip supervisor; " \
       "pip install -r #{CFG_PATH}/requirements.txt; " \
       "chown #{USER}.#{USER} /data; cd /data; " \
-      "#{CFG_PATH}/setup.sh #{USER} #{CFG_PATH}; "
+      "#{CFG_PATH}/setup.sh #{USER} #{CFG_PATH} #{ENV['BUILDBOT_PWD']} " \
+        "#{ENV['IRC_PWD']} #{ENV['IRC_CHANNEL']} #{ENV['SMTP_USER']} " \
+        "#{ENV['SMTP_PWD']} #{ENV['EMAIL_RCP']}; "
     # Install docker dependencies
     pkg_cmd << "apt-get install -q -y python-software-properties; " \
       "add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu; apt-get update -qq; " \

+ 30 - 16
testing/buildbot/master.cfg

@@ -5,9 +5,11 @@ from buildbot.schedulers.basic import SingleBranchScheduler
 from buildbot.changes import filter
 from buildbot.config import BuilderConfig
 from buildbot.process.factory import BuildFactory
+from buildbot.process.properties import Interpolate
 from buildbot.steps.shell import ShellCommand
-from buildbot.status import html
+from buildbot.status import html, words
 from buildbot.status.web import authz, auth
+from buildbot.status.mail import MailNotifier
 
 PORT_WEB = 80           # Buildbot webserver port
 PORT_GITHUB = 8011      # Buildbot github hook port
@@ -15,20 +17,27 @@ PORT_MASTER = 9989      # Port where buildbot master listens for buildworkers
 TEST_USER = 'buildbot'  # Credential to authenticate build triggers
 TEST_PWD = 'docker'     # Credential to authenticate build triggers
 BUILDER_NAME = 'docker'
-BUILDPASSWORD = 'pass-docker'  # Credential to authenticate buildworkers
-GITHUB_DOCKER = "github.com/dotcloud/docker"
-DOCKER_PATH = "/data/docker"
-BUILDER_PATH = "/data/buildbot/slave/{0}/build".format(BUILDER_NAME)
+GITHUB_DOCKER = 'github.com/dotcloud/docker'
+DOCKER_PATH = '/data/docker'
+BUILDER_PATH = '/data/buildbot/slave/{0}/build'.format(BUILDER_NAME)
 DOCKER_BUILD_PATH = BUILDER_PATH + '/src/github.com/dotcloud/docker'
 
+# Credentials set by setup.sh and Vagrantfile
+BUILDBOT_PWD = ''
+IRC_PWD = ''
+IRC_CHANNEL = ''
+SMTP_USER = ''
+SMTP_PWD = ''
+EMAIL_RCP = ''
+
 
 c = BuildmasterConfig = {}
 
 c['title'] = "Docker"
 c['titleURL'] = "waterfall"
-c['buildbotURL'] = "http://0.0.0.0:{0}/".format(PORT_WEB)
+c['buildbotURL'] = "http://docker-ci.dotcloud.com/"
 c['db'] = {'db_url':"sqlite:///state.sqlite"}
-c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
+c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
 c['slavePortnum'] = PORT_MASTER
 
 c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
@@ -36,20 +45,25 @@ c['schedulers'].append(SingleBranchScheduler(name="all",
     change_filter=filter.ChangeFilter(branch='master'),treeStableTimer=None,
     builderNames=[BUILDER_NAME]))
 
-# Docker test command
-test_cmd = ("cd /tmp; rm -rf {0}; export GOPATH={0}; go get -d {1}; cd {2}; "
-    "go test").format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH)
-
 # Builder
 factory = BuildFactory()
-factory.addStep(ShellCommand(description='Docker',logEnviron=False,
-    usePTY=True,command=test_cmd))
+factory.addStep(ShellCommand(description='Docker',logEnviron=False,usePTY=True,
+    command=["sh", "-c", Interpolate("cd ..; rm -rf build; export GOPATH={0}; "
+    "go get -d {1}; cd {2}; git reset --hard %(src::revision:-unknown)s; "
+    "go test -v".format(BUILDER_PATH,GITHUB_DOCKER,DOCKER_BUILD_PATH))]))
 c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
     factory=factory)]
 
 # Status
-authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]),
+authz_cfg = authz.Authz(auth=auth.BasicAuth([(TEST_USER, TEST_PWD)]),
     forceBuild='auth')
 c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
-c['status'].append(html.WebStatus(http_port=PORT_GITHUB,allowForce=True,
-    change_hook_dialects={ 'github' : True }))
+c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True,
+    change_hook_dialects={ 'github': True }))
+c['status'].append(MailNotifier(fromaddr='buildbot@docker.io',
+    sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP],
+    mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True,
+    smtpUser=SMTP_USER, smtpPassword=SMTP_PWD))
+c['status'].append(words.IRC("irc.freenode.net", "dockerqabot",
+    channels=[IRC_CHANNEL], password=IRC_PWD, allowForce=True,
+    notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1}))

+ 12 - 2
testing/buildbot/setup.sh

@@ -6,11 +6,16 @@
 
 USER=$1
 CFG_PATH=$2
+BUILDBOT_PWD=$3
+IRC_PWD=$4
+IRC_CHANNEL=$5
+SMTP_USER=$6
+SMTP_PWD=$7
+EMAIL_RCP=$8
 BUILDBOT_PATH="/data/buildbot"
 DOCKER_PATH="/data/docker"
 SLAVE_NAME="buildworker"
 SLAVE_SOCKET="localhost:9989"
-BUILDBOT_PWD="pass-docker"
 export PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"
 
 function run { su $USER -c "$1"; }
@@ -23,7 +28,12 @@ run "mkdir -p $BUILDBOT_PATH"
 cd $BUILDBOT_PATH
 run "buildbot create-master master"
 run "cp $CFG_PATH/master.cfg master"
-run "sed -i -E 's#(DOCKER_PATH = ).+#\1\"$DOCKER_PATH\"#' master/master.cfg"
+run "sed -i -E 's#(BUILDBOT_PWD = ).+#\1\"$BUILDBOT_PWD\"#' master/master.cfg"
+run "sed -i -E 's#(IRC_PWD = ).+#\1\"$IRC_PWD\"#' master/master.cfg"
+run "sed -i -E 's#(IRC_CHANNEL = ).+#\1\"$IRC_CHANNEL\"#' master/master.cfg"
+run "sed -i -E 's#(SMTP_USER = ).+#\1\"$SMTP_USER\"#' master/master.cfg"
+run "sed -i -E 's#(SMTP_PWD = ).+#\1\"$SMTP_PWD\"#' master/master.cfg"
+run "sed -i -E 's#(EMAIL_RCP = ).+#\1\"$EMAIL_RCP\"#' master/master.cfg"
 run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
 
 # Allow buildbot subprocesses (docker tests) to properly run in containers,

+ 0 - 3
utils.go

@@ -53,9 +53,6 @@ func CompareConfig(a, b *Config) bool {
 }
 
 func MergeConfig(userConf, imageConf *Config) {
-	if userConf.Hostname == "" {
-		userConf.Hostname = imageConf.Hostname
-	}
 	if userConf.User == "" {
 		userConf.User = imageConf.User
 	}
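
Removing the Hostname branch means a container no longer inherits the image's hostname during the config merge; only the remaining fields fall back to image defaults. A minimal sketch of the fallback-merge idea, with an assumed two-field Config:

package main

import "fmt"

type Config struct {
	Hostname string
	User     string
}

func mergeConfig(userConf, imageConf *Config) {
	if userConf.User == "" {
		userConf.User = imageConf.User // fall back to the image default
	}
	// Hostname is intentionally no longer inherited from the image.
}

func main() {
	user := &Config{}
	image := &Config{Hostname: "img", User: "root"}
	mergeConfig(user, image)
	fmt.Printf("%+v\n", *user) // {Hostname: User:root}
}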

+ 17 - 18
utils/utils.go

@@ -170,10 +170,9 @@ func SelfPath() string {
 	return path
 }
 
-type NopWriter struct {
-}
+type NopWriter struct{}
 
-func (w *NopWriter) Write(buf []byte) (int, error) {
+func (*NopWriter) Write(buf []byte) (int, error) {
 	return len(buf), nil
 }
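
The reformatted NopWriter also drops the unused receiver name, the idiomatic signature when a method never reads its receiver. A self-contained sketch of the same shape:

package main

import "fmt"

// An empty struct with an unnamed receiver is the idiomatic shape for
// a writer that ignores its input (compare ioutil.Discard).
type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

func main() {
	n, err := nopWriter{}.Write([]byte("discarded"))
	fmt.Println(n, err) // 9 <nil>
}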
 
@@ -188,10 +187,10 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
 }
 
 type bufReader struct {
+	sync.Mutex
 	buf    *bytes.Buffer
 	reader io.Reader
 	err    error
-	l      sync.Mutex
 	wait   sync.Cond
 }
 
@@ -200,7 +199,7 @@ func NewBufReader(r io.Reader) *bufReader {
 		buf:    &bytes.Buffer{},
 		reader: r,
 	}
-	reader.wait.L = &reader.l
+	reader.wait.L = &reader.Mutex
 	go reader.drain()
 	return reader
 }
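
Pointing the condition variable at the embedded mutex (reader.wait.L = &reader.Mutex) keeps drain and Read synchronized on a single lock. A minimal sketch of that producer/consumer handshake, not the Docker code:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

type condBuf struct {
	sync.Mutex
	buf  bytes.Buffer
	wait sync.Cond
}

func newCondBuf() *condBuf {
	b := &condBuf{}
	b.wait.L = &b.Mutex // the Cond locks the embedded mutex
	return b
}

func (b *condBuf) produce(p []byte) {
	b.Lock()
	b.buf.Write(p)
	b.wait.Signal() // wake a blocked consumer
	b.Unlock()
}

func (b *condBuf) consume() []byte {
	b.Lock()
	defer b.Unlock()
	for b.buf.Len() == 0 {
		b.wait.Wait() // atomically unlocks, sleeps, relocks
	}
	return b.buf.Next(b.buf.Len())
}

func main() {
	b := newCondBuf()
	go b.produce([]byte("hello"))
	fmt.Printf("%s\n", b.consume())
}

Wait atomically releases the lock while sleeping and reacquires it before returning, which is why the emptiness check sits in a loop.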
@@ -209,14 +208,14 @@ func (r *bufReader) drain() {
 	buf := make([]byte, 1024)
 	for {
 		n, err := r.reader.Read(buf)
-		r.l.Lock()
+		r.Lock()
 		if err != nil {
 			r.err = err
 		} else {
 			r.buf.Write(buf[0:n])
 		}
 		r.wait.Signal()
-		r.l.Unlock()
+		r.Unlock()
 		if err != nil {
 			break
 		}
@@ -224,8 +223,8 @@ func (r *bufReader) drain() {
 }
 
 func (r *bufReader) Read(p []byte) (n int, err error) {
-	r.l.Lock()
-	defer r.l.Unlock()
+	r.Lock()
+	defer r.Unlock()
 	for {
 		n, err = r.buf.Read(p)
 		if n > 0 {
@@ -247,27 +246,27 @@ func (r *bufReader) Close() error {
 }
 
 type WriteBroadcaster struct {
-	mu      sync.Mutex
+	sync.Mutex
 	writers map[io.WriteCloser]struct{}
 }
 
 func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) {
-	w.mu.Lock()
+	w.Lock()
 	w.writers[writer] = struct{}{}
-	w.mu.Unlock()
+	w.Unlock()
 }
 
 // FIXME: Is that function used?
 // FIXME: This relies on the concrete writer type used having equality operator
 func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) {
-	w.mu.Lock()
+	w.Lock()
 	delete(w.writers, writer)
-	w.mu.Unlock()
+	w.Unlock()
 }
 
 func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	w.Lock()
+	defer w.Unlock()
 	for writer := range w.writers {
 		if n, err := writer.Write(p); err != nil || n != len(p) {
 			// On error, evict the writer
@@ -278,8 +277,8 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
 }
 
 func (w *WriteBroadcaster) CloseWriters() error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	w.Lock()
+	defer w.Unlock()
 	for writer := range w.writers {
 		writer.Close()
 	}
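
WriteBroadcaster now locks through the embedded mutex as well; one Write fans out to every registered stream and evicts any writer that errors or comes up short, exactly as the eviction comment above describes. A minimal standalone sketch of the same fan-out, not the Docker code:

package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

type broadcaster struct {
	sync.Mutex
	writers map[io.WriteCloser]struct{}
}

func newBroadcaster() *broadcaster {
	return &broadcaster{writers: make(map[io.WriteCloser]struct{})}
}

func (b *broadcaster) AddWriter(w io.WriteCloser) {
	b.Lock()
	b.writers[w] = struct{}{}
	b.Unlock()
}

func (b *broadcaster) Write(p []byte) (int, error) {
	b.Lock()
	defer b.Unlock()
	for w := range b.writers {
		if n, err := w.Write(p); err != nil || n != len(p) {
			delete(b.writers, w) // on error, evict the writer
		}
	}
	return len(p), nil
}

func main() {
	b := newBroadcaster()
	b.AddWriter(os.Stdout)
	fmt.Fprintln(b, "fan out one write to every attached stream")
}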

Some files were not shown because too many files changed in this diff