ソースを参照

Merge remote-tracking branch 'dotcloud/master' into docs-smart-changes

Conflicts:
	docs/sources/conf.py
Thatcher Peskens 12 年 前
コミット
f127c471a1
52 ファイル変更873 行追加130 行削除
  1. 1 3
      .gitignore
  2. 2 1
      .mailmap
  3. 3 0
      AUTHORS
  4. 3 0
      Dockerfile
  5. 6 6
      MAINTAINERS
  6. 21 3
      api.go
  7. 5 5
      api_test.go
  8. 1 1
      archive.go
  9. 1 1
      buildfile.go
  10. 7 19
      commands.go
  11. 1 1
      commands_test.go
  12. 18 11
      container_test.go
  13. 455 0
      contrib/docker.bash
  14. 1 1
      docker/docker.go
  15. 2 2
      docs/MAINTAINERS
  16. 1 1
      docs/README.md
  17. 1 1
      docs/sources/api/MAINTAINERS
  18. 2 2
      docs/sources/api/docker_remote_api.rst
  19. 2 2
      docs/sources/api/docker_remote_api_v1.1.rst
  20. 2 2
      docs/sources/api/docker_remote_api_v1.2.rst
  21. 2 2
      docs/sources/api/docker_remote_api_v1.3.rst
  22. 2 2
      docs/sources/api/docker_remote_api_v1.4.rst
  23. 3 2
      docs/sources/api/registry_index_spec.rst
  24. 1 1
      docs/sources/commandline/command/run.rst
  25. 1 1
      docs/sources/conf.py
  26. 1 1
      docs/sources/contributing/contributing.rst
  27. 2 1
      docs/sources/examples/index.rst
  28. 158 0
      docs/sources/examples/postgresql_service.rst
  29. 7 1
      docs/sources/examples/python_web_app.rst
  30. 2 2
      docs/sources/examples/running_ssh_service.rst
  31. 118 18
      docs/sources/faq.rst
  32. 1 1
      docs/sources/index.rst
  33. 1 1
      docs/sources/installation/rackspace.rst
  34. 1 1
      docs/sources/terms/layer.rst
  35. 2 0
      docs/sources/use/builder.rst
  36. 2 2
      docs/sources/use/workingwithrepository.rst
  37. 1 1
      docs/theme/MAINTAINERS
  38. 1 1
      hack/dockerbuilder/Dockerfile
  39. 1 1
      hack/dockerbuilder/MAINTAINERS
  40. 2 2
      hack/infrastructure/MAINTAINERS
  41. 1 1
      library/MAINTAINERS
  42. 2 2
      network.go
  43. 1 1
      packaging/MAINTAINERS
  44. 3 1
      packaging/ubuntu/docker.upstart
  45. 3 3
      registry/MAINTAINERS
  46. 1 1
      registry/registry.go
  47. 5 5
      runtime.go
  48. 5 5
      server.go
  49. 4 4
      server_test.go
  50. 2 2
      term/MAINTAINERS
  51. 1 1
      testing/MAINTAINERS
  52. 1 1
      utils/utils_test.go

+ 1 - 3
.gitignore

@@ -5,10 +5,7 @@ docker/docker
 a.out
 a.out
 *.orig
 *.orig
 build_src
 build_src
-command-line-arguments.test
 .flymake*
 .flymake*
-docker.test
-auth/auth.test
 .idea
 .idea
 .DS_Store
 .DS_Store
 docs/_build
 docs/_build
@@ -16,3 +13,4 @@ docs/_static
 docs/_templates
 docs/_templates
 .gopath/
 .gopath/
 .dotcloud
 .dotcloud
+*.test

+ 2 - 1
.mailmap

@@ -1,4 +1,4 @@
-# Generate AUTHORS: git log --all --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
+# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf | grep -v vagrant-ubuntu-12
 <charles.hooper@dotcloud.com> <chooper@plumata.com> 
 <charles.hooper@dotcloud.com> <chooper@plumata.com> 
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
 <daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
@@ -25,3 +25,4 @@ Walter Stanish <walter@pratyeka.org>
 Roberto Hashioka <roberto_hashioka@hotmail.com>
 Roberto Hashioka <roberto_hashioka@hotmail.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
 Konstantin Pelykh <kpelykh@zettaset.com>
 David Sissitka <me@dsissitka.com>
 David Sissitka <me@dsissitka.com>
+Nolan Darilek <nolan@thewordnerd.info>

+ 3 - 0
AUTHORS

@@ -40,6 +40,7 @@ Erno Hopearuoho <erno.hopearuoho@gmail.com>
 Evan Wies <evan@neomantra.net>
 Evan Wies <evan@neomantra.net>
 ezbercih <cem.ezberci@gmail.com>
 ezbercih <cem.ezberci@gmail.com>
 Fabrizio Regini <freegenie@gmail.com>
 Fabrizio Regini <freegenie@gmail.com>
+Fareed Dudhia <fareeddudhia@googlemail.com>
 Flavio Castelli <fcastelli@suse.com>
 Flavio Castelli <fcastelli@suse.com>
 Francisco Souza <f@souza.cc>
 Francisco Souza <f@souza.cc>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
@@ -83,6 +84,7 @@ Nelson Chen <crazysim@gmail.com>
 Niall O'Higgins <niallo@unworkable.org>
 Niall O'Higgins <niallo@unworkable.org>
 Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
 Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
 Nick Stinemates <nick@stinemates.org>
 Nick Stinemates <nick@stinemates.org>
+Nolan Darilek <nolan@thewordnerd.info>
 odk- <github@odkurzacz.org>
 odk- <github@odkurzacz.org>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Bowsher <pbowsher@globalpersonals.co.uk>
 Paul Hammond <paul@paulhammond.org>
 Paul Hammond <paul@paulhammond.org>
@@ -106,6 +108,7 @@ Thomas Hansen <thomas.hansen@gmail.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
 Tobias Bieniek <Tobias.Bieniek@gmx.de>
 Tobias Bieniek <Tobias.Bieniek@gmx.de>
+Tobias Schmidt <ts@soundcloud.com>
 Tobias Schwab <tobias.schwab@dynport.de>
 Tobias Schwab <tobias.schwab@dynport.de>
 Tom Hulihan <hulihan.tom159@gmail.com>
 Tom Hulihan <hulihan.tom159@gmail.com>
 unclejack <unclejacksons@gmail.com>
 unclejack <unclejacksons@gmail.com>

+ 3 - 0
Dockerfile

@@ -22,6 +22,9 @@ run	echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt
 run	apt-get update
 run	apt-get update
 run	apt-get install -y lxc
 run	apt-get install -y lxc
 run	apt-get install -y aufs-tools
 run	apt-get install -y aufs-tools
+# Docker requires code.google.com/p/go.net/websocket
+run	apt-get install -y -q mercurial
+run	PKG=code.google.com/p/go.net REV=78ad7f42aa2e;	 hg clone https://$PKG /go/src/$PKG && cd /go/src/$PKG && hg checkout -r $REV
 # Upload docker source
 # Upload docker source
 add	.       /go/src/github.com/dotcloud/docker
 add	.       /go/src/github.com/dotcloud/docker
 # Build the binary
 # Build the binary

+ 6 - 6
MAINTAINERS

@@ -1,6 +1,6 @@
-Solomon Hykes <solomon@dotcloud.com>
-Guillaume Charmes <guillaume@dotcloud.com>
-Victor Vieux <victor@dotcloud.com>
-Michael Crosby <michael@crosbymichael.com>
-api.go: Victor Vieux <victor@dotcloud.com>
-Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com>
+Solomon Hykes <solomon@dotcloud.com> (@shykes)
+Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Victor Vieux <victor@dotcloud.com> (@vieux)
+Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+api.go: Victor Vieux <victor@dotcloud.com> (@vieux)
+Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)

+ 21 - 3
api.go

@@ -15,6 +15,7 @@ import (
 	"net/http"
 	"net/http"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
+	"regexp"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 )
 )
@@ -236,8 +237,7 @@ func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
 			}
 			}
 		}
 		}
 	}
 	}
-	for {
-		event := <-listener
+	for event := range listener {
 		err := sendEvent(wf, &event)
 		err := sendEvent(wf, &event)
 		if err != nil && err.Error() == "JSON error" {
 		if err != nil && err.Error() == "JSON error" {
 			continue
 			continue
@@ -1087,7 +1087,25 @@ func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
 		return e
 		return e
 	}
 	}
 	if proto == "unix" {
 	if proto == "unix" {
-		os.Chmod(addr, 0700)
+		if err := os.Chmod(addr, 0660); err != nil {
+			return err
+		}
+
+		groups, err := ioutil.ReadFile("/etc/group")
+		if err != nil {
+			return err
+		}
+		re := regexp.MustCompile("(^|\n)docker:.*?:([0-9]+)")
+		if gidMatch := re.FindStringSubmatch(string(groups)); gidMatch != nil {
+			gid, err := strconv.Atoi(gidMatch[2])
+			if err != nil {
+				return err
+			}
+			utils.Debugf("docker group found. gid: %d", gid)
+			if err := os.Chown(addr, 0, gid); err != nil {
+				return err
+			}
+		}
 	}
 	}
 	httpSrv := http.Server{Addr: addr, Handler: r}
 	httpSrv := http.Server{Addr: addr, Handler: r}
 	return httpSrv.Serve(l)
 	return httpSrv.Serve(l)

+ 5 - 5
api_test.go

@@ -471,7 +471,7 @@ func TestGetContainersTop(t *testing.T) {
 	}
 	}
 	defer runtime.Destroy(container)
 	defer runtime.Destroy(container)
 	defer func() {
 	defer func() {
-		// Make sure the process dies before destorying runtime
+		// Make sure the process dies before destroying runtime
 		container.stdin.Close()
 		container.stdin.Close()
 		container.WaitTimeout(2 * time.Second)
 		container.WaitTimeout(2 * time.Second)
 	}()
 	}()
@@ -563,7 +563,7 @@ func TestGetContainersByName(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if outContainer.ID != container.ID {
 	if outContainer.ID != container.ID {
-		t.Fatalf("Wrong containers retrieved. Expected %s, recieved %s", container.ID, outContainer.ID)
+		t.Fatalf("Wrong containers retrieved. Expected %s, received %s", container.ID, outContainer.ID)
 	}
 	}
 }
 }
 
 
@@ -802,7 +802,7 @@ func TestPostContainersStart(t *testing.T) {
 
 
 	r = httptest.NewRecorder()
 	r = httptest.NewRecorder()
 	if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err == nil {
 	if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err == nil {
-		t.Fatalf("A running containter should be able to be started")
+		t.Fatalf("A running container should be able to be started")
 	}
 	}
 
 
 	if err := container.Kill(); err != nil {
 	if err := container.Kill(); err != nil {
@@ -926,7 +926,7 @@ func TestPostContainersAttach(t *testing.T) {
 	stdin, stdinPipe := io.Pipe()
 	stdin, stdinPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
 	stdout, stdoutPipe := io.Pipe()
 
 
-	// Try to avoid the timeoout in destroy. Best effort, don't check error
+	// Try to avoid the timeout in destroy. Best effort, don't check error
 	defer func() {
 	defer func() {
 		closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 		closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 		container.Kill()
 		container.Kill()
@@ -982,7 +982,7 @@ func TestPostContainersAttach(t *testing.T) {
 		t.Fatalf("/bin/cat is not running after closing stdin")
 		t.Fatalf("/bin/cat is not running after closing stdin")
 	}
 	}
 
 
-	// Try to avoid the timeoout in destroy. Best effort, don't check error
+	// Try to avoid the timeout in destroy. Best effort, don't check error
 	cStdin, _ := container.StdinPipe()
 	cStdin, _ := container.StdinPipe()
 	cStdin.Close()
 	cStdin.Close()
 	container.Wait()
 	container.Wait()

+ 1 - 1
archive.go

@@ -98,7 +98,7 @@ func TarFilter(path string, compression Compression, filter []string) (io.Reader
 
 
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `path`.
 // and unpacks it into the directory at `path`.
-// The archive may be compressed with one of the following algorithgms:
+// The archive may be compressed with one of the following algorithms:
 //  identity (uncompressed), gzip, bzip2, xz.
 //  identity (uncompressed), gzip, bzip2, xz.
 // FIXME: specify behavior when target path exists vs. doesn't exist.
 // FIXME: specify behavior when target path exists vs. doesn't exist.
 func Untar(archive io.Reader, path string) error {
 func Untar(archive io.Reader, path string) error {

+ 1 - 1
buildfile.go

@@ -509,7 +509,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
 		fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
 		fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
 		return b.image, nil
 		return b.image, nil
 	}
 	}
-	return "", fmt.Errorf("An error occured during the build\n")
+	return "", fmt.Errorf("An error occurred during the build\n")
 }
 }
 
 
 func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache bool) BuildFile {
 func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache bool) BuildFile {

+ 7 - 19
commands.go

@@ -194,7 +194,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
 	}
 	}
 	var body io.Reader
 	var body io.Reader
 	// Setup an upload progress bar
 	// Setup an upload progress bar
-	// FIXME: ProgressReader shouldn't be this annoyning to use
+	// FIXME: ProgressReader shouldn't be this annoying to use
 	if context != nil {
 	if context != nil {
 		sf := utils.NewStreamFormatter(false)
 		sf := utils.NewStreamFormatter(false)
 		body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf.FormatProgress("", "Uploading context", "%v bytes%0.0s%0.0s"), sf, true)
 		body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf.FormatProgress("", "Uploading context", "%v bytes%0.0s%0.0s"), sf, true)
@@ -857,10 +857,12 @@ func (cli *DockerCli) CmdPush(args ...string) error {
 	}
 	}
 
 
 	if err := push(); err != nil {
 	if err := push(); err != nil {
-		if err == fmt.Errorf("Authentication is required.") {
-			if err = cli.checkIfLogged("push"); err == nil {
-				return push()
+		if err.Error() == "Authentication is required." {
+			fmt.Fprintln(cli.out, "\nPlease login prior to push:")
+			if err := cli.CmdLogin(""); err != nil {
+				return err
 			}
 			}
+			return push()
 		}
 		}
 		return err
 		return err
 	}
 	}
@@ -1512,19 +1514,6 @@ func (cli *DockerCli) CmdCp(args ...string) error {
 	return nil
 	return nil
 }
 }
 
 
-func (cli *DockerCli) checkIfLogged(action string) error {
-	// If condition AND the login failed
-	if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
-		if err := cli.CmdLogin(""); err != nil {
-			return err
-		}
-		if cli.configFile.Configs[auth.IndexServerAddress()].Username == "" {
-			return fmt.Errorf("Please login prior to %s. ('docker login')", action)
-		}
-	}
-	return nil
-}
-
 func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
 func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
 	var params io.Reader
 	var params io.Reader
 	if data != nil {
 	if data != nil {
@@ -1728,8 +1717,7 @@ func (cli *DockerCli) monitorTtySize(id string) error {
 	sigchan := make(chan os.Signal, 1)
 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGWINCH)
 	signal.Notify(sigchan, syscall.SIGWINCH)
 	go func() {
 	go func() {
-		for {
-			<-sigchan
+		for _ = range sigchan {
 			cli.resizeTty(id)
 			cli.resizeTty(id)
 		}
 		}
 	}()
 	}()

+ 1 - 1
commands_test.go

@@ -373,7 +373,7 @@ func TestAttachDisconnect(t *testing.T) {
 		t.Fatalf("/bin/cat is not running after closing stdin")
 		t.Fatalf("/bin/cat is not running after closing stdin")
 	}
 	}
 
 
-	// Try to avoid the timeoout in destroy. Best effort, don't check error
+	// Try to avoid the timeout in destroy. Best effort, don't check error
 	cStdin, _ := container.StdinPipe()
 	cStdin, _ := container.StdinPipe()
 	cStdin.Close()
 	cStdin.Close()
 	container.Wait()
 	container.Wait()

+ 18 - 11
container_test.go

@@ -186,7 +186,7 @@ func TestDiff(t *testing.T) {
 		}
 		}
 	}
 	}
 
 
-	// Create a new containere
+	// Create a new container
 	container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
 	container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
 	defer runtime.Destroy(container3)
 	defer runtime.Destroy(container3)
 
 
@@ -351,10 +351,10 @@ func TestStart(t *testing.T) {
 		t.Errorf("Container should be running")
 		t.Errorf("Container should be running")
 	}
 	}
 	if err := container.Start(hostConfig); err == nil {
 	if err := container.Start(hostConfig); err == nil {
-		t.Fatalf("A running containter should be able to be started")
+		t.Fatalf("A running container should be able to be started")
 	}
 	}
 
 
-	// Try to avoid the timeoout in destroy. Best effort, don't check error
+	// Try to avoid the timeout in destroy. Best effort, don't check error
 	cStdin.Close()
 	cStdin.Close()
 	container.WaitTimeout(2 * time.Second)
 	container.WaitTimeout(2 * time.Second)
 }
 }
@@ -401,22 +401,24 @@ func TestOutput(t *testing.T) {
 func TestKillDifferentUser(t *testing.T) {
 func TestKillDifferentUser(t *testing.T) {
 	runtime := mkRuntime(t)
 	runtime := mkRuntime(t)
 	defer nuke(runtime)
 	defer nuke(runtime)
+
 	container, err := NewBuilder(runtime).Create(&Config{
 	container, err := NewBuilder(runtime).Create(&Config{
-		Image: GetTestImage(runtime).ID,
-		Cmd:   []string{"tail", "-f", "/etc/resolv.conf"},
-		User:  "daemon",
+		Image:     GetTestImage(runtime).ID,
+		Cmd:       []string{"cat"},
+		OpenStdin: true,
+		User:      "daemon",
 	},
 	},
 	)
 	)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	defer runtime.Destroy(container)
 	defer runtime.Destroy(container)
+	defer container.stdin.Close()
 
 
 	if container.State.Running {
 	if container.State.Running {
 		t.Errorf("Container shouldn't be running")
 		t.Errorf("Container shouldn't be running")
 	}
 	}
-	hostConfig := &HostConfig{}
-	if err := container.Start(hostConfig); err != nil {
+	if err := container.Start(&HostConfig{}); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
@@ -426,8 +428,13 @@ func TestKillDifferentUser(t *testing.T) {
 		}
 		}
 	})
 	})
 
 
-	// Even if the state is running, lets give some time to lxc to spawn the process
-	container.WaitTimeout(500 * time.Millisecond)
+	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
+		out, _ := container.StdoutPipe()
+		in, _ := container.StdinPipe()
+		if err := assertPipe("hello\n", "hello", out, in, 15); err != nil {
+			t.Fatal(err)
+		}
+	})
 
 
 	if err := container.Kill(); err != nil {
 	if err := container.Kill(); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
@@ -764,7 +771,7 @@ func TestUser(t *testing.T) {
 		Image: GetTestImage(runtime).ID,
 		Image: GetTestImage(runtime).ID,
 		Cmd:   []string{"id"},
 		Cmd:   []string{"id"},
 
 
-		User: "unkownuser",
+		User: "unknownuser",
 	},
 	},
 	)
 	)
 	if err != nil {
 	if err != nil {

+ 455 - 0
contrib/docker.bash

@@ -0,0 +1,455 @@
+#!bash
+#
+# bash completion file for core docker commands
+#
+# This script provides supports completion of:
+#  - commands and their options
+#  - container ids
+#  - image repos and tags
+#  - filepaths
+#
+# To enable the completions either:
+#  - place this file in /etc/bash_completion.d
+#  or
+#  - copy this file and add the line below to your .bashrc after
+#    bash completion features are loaded
+#     . docker.bash
+#
+# Note:
+# Currently, the completions will not work if the docker daemon is not
+# bound to the default communication port/socket
+# If the docker daemon is using a unix socket for communication your user
+# must have access to the socket for the completions to function correctly
+
+have docker && {
+__docker_containers()
+{
+	local containers
+	containers="$( docker ps -a -q )"
+	COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
+}
+
+__docker_image_repos()
+{
+	local repos
+	repos="$( docker images | awk 'NR>1{print $1}' )"
+	COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) )
+}
+
+__docker_images()
+{
+	local images
+	images="$( docker images | awk 'NR>1{print $1":"$2}' )"
+	COMPREPLY=( $( compgen -W "$images" -- "$cur" ) )
+	__ltrim_colon_completions "$cur"
+}
+
+__docker_image_repos_and_tags()
+{
+	local repos images
+	repos="$( docker images | awk 'NR>1{print $1}' )"
+	images="$( docker images | awk 'NR>1{print $1":"$2}' )"
+	COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) )
+	__ltrim_colon_completions "$cur"
+}
+
+__docker_containers_and_images()
+{
+	local containers images
+	containers="$( docker ps -a -q )"
+	images="$( docker images | awk 'NR>1{print $1":"$2}' )"
+	COMPREPLY=( $( compgen -W "$images $containers" -- "$cur" ) )
+	__ltrim_colon_completions "$cur"
+}
+
+_docker_docker()
+{
+	case "$prev" in
+		-H)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-H" -- "$cur" ) )
+			;;
+		*)
+			COMPREPLY=( $( compgen -W "$commands help" -- "$cur" ) )
+			;;
+	esac
+}
+
+_docker_attach()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers
+	fi
+}
+
+_docker_build()
+{
+	case "$prev" in
+		-t)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-t -q" -- "$cur" ) )
+			;;
+		*)
+			_filedir
+			;;
+	esac
+}
+
+_docker_commit()
+{
+	case "$prev" in
+		-author|-m|-run)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-author -m -run" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers
+			;;
+	esac
+}
+
+_docker_diff()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers
+	fi
+}
+
+_docker_events()
+{
+	COMPREPLY=( $( compgen -W "-since" -- "$cur" ) )
+}
+
+_docker_export()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers
+	fi
+}
+
+_docker_help()
+{
+	if [ $cpos -eq $cword ]; then
+		COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) )
+	fi
+}
+
+_docker_history()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_image_repos_and_tags
+	fi
+}
+
+_docker_images()
+{
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) )
+			;;
+		*)
+			local counter=$cpos
+			while [ $counter -le $cword ]; do
+				case "${words[$counter]}" in
+					-*)
+						;;
+					*)
+						break
+						;;
+				esac
+				(( counter++ ))
+			done
+
+			if [ $counter -eq $cword ]; then
+				__docker_image_repos
+			fi
+			;;
+	esac
+}
+
+_docker_import()
+{
+	return
+}
+
+_docker_info()
+{
+	return
+}
+
+_docker_insert()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_image_repos_and_tags
+	fi
+}
+
+_docker_inspect()
+{
+	__docker_containers_and_images
+}
+
+_docker_kill()
+{
+	__docker_containers
+}
+
+_docker_login()
+{
+	COMPREPLY=( $( compgen -W "-e -p -u" -- "$cur" ) )
+}
+
+_docker_logs()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers
+	fi
+}
+
+_docker_port()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers
+	fi
+}
+
+_docker_ps()
+{
+	COMPREPLY=( $( compgen -W "-a -beforeId -l -n -notrunc -q -s -sinceId" -- "$cur" ) )
+}
+
+_docker_pull()
+{
+	COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
+}
+
+_docker_push()
+{
+	return
+}
+
+_docker_restart()
+{
+	case "$prev" in
+		-t)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers
+			;;
+	esac
+}
+
+_docker_rm()
+{
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-v" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers
+			;;
+	esac
+}
+
+_docker_rmi()
+{
+	__docker_image_repos_and_tags
+}
+
+_docker_run()
+{
+	case "$prev" in
+		-cidfile)
+			_filedir
+			;;
+		-volumes-from)
+			__docker_containers
+			;;
+		-a|-c|-dns|-e|-entrypoint|-h|-m|-p|-u|-v)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-a -c -cidfile -d -dns -e -entrypoint -h -i -m -n -p -t -u -v -volumes-from" -- "$cur" ) )
+			;;
+		*)
+			case "$cur" in
+				-*)
+					COMPREPLY=( $( compgen -W "-a -notrunc -q -viz" -- "$cur" ) )
+					;;
+				*)
+					local counter=$cpos
+					while [ $counter -le $cword ]; do
+						case "${words[$counter]}" in
+							-a|-c|-cidfile|-dns|-e|-entrypoint|-h|-m|-p|-u|-v|-volumes-from)
+								(( counter++ ))
+								;;
+							-*)
+								;;
+							*)
+								break
+								;;
+						esac
+						(( counter++ ))
+					done
+
+					if [ $counter -eq $cword ]; then
+						__docker_image_repos_and_tags
+					fi
+					;;
+			esac
+			;;
+	esac
+}
+
+_docker_search()
+{
+	COMPREPLY=( $( compgen -W "-notrunc" -- "$cur" ) )
+}
+
+_docker_start()
+{
+	__docker_containers
+}
+
+_docker_stop()
+{
+	case "$prev" in
+		-t)
+			return
+			;;
+		*)
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "-t" -- "$cur" ) )
+			;;
+		*)
+			__docker_containers
+			;;
+	esac
+}
+
+_docker_tag()
+{
+	COMPREPLY=( $( compgen -W "-f" -- "$cur" ) )
+}
+
+_docker_top()
+{
+	if [ $cpos -eq $cword ]; then
+		__docker_containers
+	fi
+}
+
+_docker_version()
+{
+	return
+}
+
+_docker_wait()
+{
+	__docker_containers
+}
+
+_docker()
+{
+	local cur prev words cword command="docker" counter=1 word cpos
+	local commands="
+			attach
+			build
+			commit
+			diff
+			events
+			export
+			history
+			images
+			import
+			info
+			insert
+			inspect
+			kill
+			login
+			logs
+			port
+			ps
+			pull
+			push
+			restart
+			rm
+			rmi
+			run
+			search
+			start
+			stop
+			tag
+			top
+			version
+			wait
+		"
+
+	COMPREPLY=()
+	_get_comp_words_by_ref -n : cur prev words cword
+
+	while [ $counter -lt $cword ]; do
+		word="${words[$counter]}"
+		case "$word" in
+			-H)
+				(( counter++ ))
+				;;
+			-*)
+				;;
+			*)
+				command="$word"
+				cpos=$counter
+				(( cpos++ ))
+				break
+				;;
+		esac
+		(( counter++ ))
+	done
+
+	local completions_func=_docker_${command}
+	declare -F $completions_func >/dev/null && $completions_func
+
+	return 0
+}
+
+complete -F _docker docker
+}

+ 1 - 1
docker/docker.go

@@ -37,7 +37,7 @@ func main() {
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
 	flag.Parse()
 	flag.Parse()
 	if len(flHosts) > 1 {
 	if len(flHosts) > 1 {
-		flHosts = flHosts[1:] //trick to display a nice defaul value in the usage
+		flHosts = flHosts[1:] //trick to display a nice default value in the usage
 	}
 	}
 	for i, flHost := range flHosts {
 	for i, flHost := range flHosts {
 		flHosts[i] = utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
 		flHosts[i] = utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)

+ 2 - 2
docs/MAINTAINERS

@@ -1,2 +1,2 @@
-Andy Rothfusz <andy@dotcloud.com>
-Ken Cochrane <ken@dotcloud.com>
+Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
+Ken Cochrane <ken@dotcloud.com> (@kencochrane)

+ 1 - 1
docs/README.md

@@ -28,7 +28,7 @@ Usage
 Working using GitHub's file editor
 Working using GitHub's file editor
 ----------------------------------
 ----------------------------------
 Alternatively, for small changes and typo's you might want to use GitHub's built in file editor. It allows
 Alternatively, for small changes and typo's you might want to use GitHub's built in file editor. It allows
-you to preview your changes right online. Just be carefull not to create many commits.
+you to preview your changes right online. Just be careful not to create many commits.
 
 
 Images
 Images
 ------
 ------

+ 1 - 1
docs/sources/api/MAINTAINERS

@@ -1 +1 @@
-Solomon Hykes <solomon@dotcloud.com>
+Solomon Hykes <solomon@dotcloud.com> (@shykes)

+ 2 - 2
docs/sources/api/docker_remote_api.rst

@@ -26,7 +26,7 @@ Docker Remote API
 2. Versions
 2. Versions
 ===========
 ===========
 
 
-The current verson of the API is 1.4
+The current version of the API is 1.4
 
 
 Calling /images/<name>/insert is the same as calling
 Calling /images/<name>/insert is the same as calling
 /v1.4/images/<name>/insert 
 /v1.4/images/<name>/insert 
@@ -107,7 +107,7 @@ The client should send it's authConfig as POST on each call of
   Only checks the configuration but doesn't store it on the server
   Only checks the configuration but doesn't store it on the server
 
 
   Deleting an image is now improved, will only untag the image if it
   Deleting an image is now improved, will only untag the image if it
-  has chidren and remove all the untagged parents if has any.
+  has children and remove all the untagged parents if has any.
 
 
 .. http:post:: /images/<name>/delete 
 .. http:post:: /images/<name>/delete 
 
 

+ 2 - 2
docs/sources/api/docker_remote_api_v1.1.rst

@@ -305,8 +305,8 @@ Start a container
 	:statuscode 500: server error
 	:statuscode 500: server error
 
 
 
 
-Stop a contaier
-***************
+Stop a container
+****************
 
 
 .. http:post:: /containers/(id)/stop
 .. http:post:: /containers/(id)/stop
 
 

+ 2 - 2
docs/sources/api/docker_remote_api_v1.2.rst

@@ -317,8 +317,8 @@ Start a container
 	:statuscode 500: server error
 	:statuscode 500: server error
 
 
 
 
-Stop a contaier
-***************
+Stop a container
+****************
 
 
 .. http:post:: /containers/(id)/stop
 .. http:post:: /containers/(id)/stop
 
 

+ 2 - 2
docs/sources/api/docker_remote_api_v1.3.rst

@@ -365,8 +365,8 @@ Start a container
         :statuscode 500: server error
         :statuscode 500: server error
 
 
 
 
-Stop a contaier
-***************
+Stop a container
+****************
 
 
 .. http:post:: /containers/(id)/stop
 .. http:post:: /containers/(id)/stop
 
 

+ 2 - 2
docs/sources/api/docker_remote_api_v1.4.rst

@@ -368,8 +368,8 @@ Start a container
         :statuscode 500: server error
         :statuscode 500: server error
 
 
 
 
-Stop a contaier
-***************
+Stop a container
+****************
 
 
 .. http:post:: /containers/(id)/stop
 .. http:post:: /containers/(id)/stop
 
 

+ 3 - 2
docs/sources/api/registry_index_spec.rst

@@ -2,9 +2,10 @@
 :description: Documentation for docker Registry and Registry API
 :description: Documentation for docker Registry and Registry API
 :keywords: docker, registry, api, index
 :keywords: docker, registry, api, index
 
 
+.. _registryindexspec:
 
 
 =====================
 =====================
-Registry & index Spec
+Registry & Index Spec
 =====================
 =====================
 
 
 .. contents:: Table of Contents
 .. contents:: Table of Contents
@@ -154,7 +155,7 @@ API (pulling repository foo/bar):
 
 
 .. note::
 .. note::
 
 
-    **It’s possible not to use the Index at all!** In this case, a deployed version of the Registry is deployed to store and serve images. Those images are not authentified and the security is not guaranteed.
+    **It’s possible not to use the Index at all!** In this case, a deployed version of the Registry is deployed to store and serve images. Those images are not authenticated and the security is not guaranteed.
 
 
 .. note::
 .. note::
 
 

+ 1 - 1
docs/sources/commandline/command/run.rst

@@ -24,7 +24,7 @@
       -p=[]: Map a network port to the container
       -p=[]: Map a network port to the container
       -t=false: Allocate a pseudo-tty
       -t=false: Allocate a pseudo-tty
       -u="": Username or UID
       -u="": Username or UID
-      -d=[]: Set custom dns servers for the container
+      -dns=[]: Set custom dns servers for the container
       -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
       -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume.
       -volumes-from="": Mount all volumes from the given container.
       -volumes-from="": Mount all volumes from the given container.
       -entrypoint="": Overwrite the default entrypoint set by the image.
       -entrypoint="": Overwrite the default entrypoint set by the image.

+ 1 - 1
docs/sources/conf.py

@@ -18,7 +18,7 @@ import sys, os
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #sys.path.insert(0, os.path.abspath('.'))
 #sys.path.insert(0, os.path.abspath('.'))
 
 
-# -- General configuratiofn -----------------------------------------------------
+# -- General configuration -----------------------------------------------------
 
 
 
 
 
 

+ 1 - 1
docs/sources/contributing/contributing.rst

@@ -1,5 +1,5 @@
 :title: Contribution Guidelines
 :title: Contribution Guidelines
-:description: Contribution guidelines: create issues, convetions, pull requests
+:description: Contribution guidelines: create issues, conventions, pull requests
 :keywords: contributing, docker, documentation, help, guideline
 :keywords: contributing, docker, documentation, help, guideline
 
 
 Contributing to Docker
 Contributing to Docker

+ 2 - 1
docs/sources/examples/index.rst

@@ -1,6 +1,6 @@
 :title: Docker Examples
 :title: Docker Examples
 :description: Examples on how to use Docker
 :description: Examples on how to use Docker
-:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples
+:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples, postgresql
 
 
 
 
 
 
@@ -20,3 +20,4 @@ Contents:
    running_redis_service
    running_redis_service
    running_ssh_service
    running_ssh_service
    couchdb_data_volumes
    couchdb_data_volumes
+   postgresql_service

+ 158 - 0
docs/sources/examples/postgresql_service.rst

@@ -0,0 +1,158 @@
+:title: PostgreSQL service How-To
+:description: Running and installing a PostgreSQL service
+:keywords: docker, example, package installation, postgresql
+
+.. _postgresql_service:
+
+PostgreSQL Service
+==================
+
+.. note::
+
+    A shorter version of `this blog post`_.
+
+.. note::
+
+    As of version 0.5.2, docker requires root privileges to run.
+    You have to either manually adjust your system configuration (permissions on
+    /var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
+    `this thread`_ for details.
+
+.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
+.. _this thread: https://groups.google.com/forum/?fromgroups#!topic/docker-club/P3xDLqmLp0E
+
+Installing PostgreSQL on Docker
+-------------------------------
+
+For clarity I won't be showing command output.
+
+
+Run an interactive shell in Docker container.
+
+.. code-block:: bash
+
+    docker run -i -t ubuntu /bin/bash
+
+Update its dependencies.
+
+.. code-block:: bash
+
+    apt-get update
+
+Install ``python-software-properties``.
+
+.. code-block:: bash
+
+    apt-get install python-software-properties
+    apt-get install software-properties-common
+
+Add Pitti's PostgreSQL repository. It contains the most recent stable release
+of PostgreSQL i.e. ``9.2``.
+
+.. code-block:: bash
+
+    add-apt-repository ppa:pitti/postgresql
+    apt-get update
+
+Finally, install PostgreSQL 9.2
+
+.. code-block:: bash
+
+    apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
+
+Now, create a PostgreSQL superuser role that can create databases and other roles.
+Following Vagrant's convention the role will be named `docker` with `docker`
+password assigned to it.
+
+.. code-block:: bash
+
+    sudo -u postgres createuser -P -d -r -s docker
+
+Create a test database also named ``docker`` owned by previously created ``docker``
+role.
+
+.. code-block:: bash
+
+    sudo -u postgres createdb -O docker docker
+
+Adjust PostgreSQL configuration so that remote connections to the database are
+possible. Make sure that inside ``/etc/postgresql/9.2/main/pg_hba.conf`` you have
+following line:
+
+.. code-block:: bash
+
+    host    all             all             0.0.0.0/0               md5
+
+Additionally, inside ``/etc/postgresql/9.2/main/postgresql.conf`` uncomment
+``listen_addresses`` so it is as follows:
+
+.. code-block:: bash
+
+    listen_addresses='*'
+
+*Note:* this PostgreSQL setup is for development purposes only. Refer to the
+PostgreSQL documentation for how to fine-tune these settings so that it is
+secure enough.
+
+Create an image and assign it a name. ``<container_id>`` is in the Bash prompt;
+you can also locate it using ``docker ps -a``.
+
+.. code-block:: bash
+
+    docker commit <container_id> <your username>/postgresql
+
+Finally, run PostgreSQL server via ``docker``.
+
+.. code-block:: bash
+
+    CONTAINER=$(docker run -d -p 5432 \
+      -t <your username>/postgresql \
+      /bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
+        -D /var/lib/postgresql/9.2/main \
+        -c config_file=/etc/postgresql/9.2/main/postgresql.conf')
+
+Connect the PostgreSQL server using ``psql``.
+
+.. code-block:: bash
+
+    CONTAINER_IP=$(docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
+    psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
+
+As before, create roles or databases if needed.
+
+.. code-block:: bash
+
+    psql (9.2.4)
+    Type "help" for help.
+
+    docker=# CREATE DATABASE foo OWNER=docker;
+    CREATE DATABASE
+
+Additionally, publish there your newly created image on Docker Index.
+
+.. code-block:: bash
+
+    docker login
+    Username: <your username>
+    [...]
+
+.. code-block:: bash
+
+    docker push <your username>/postgresql
+
+PostgreSQL service auto-launch
+------------------------------
+
+Running our image seems complicated. We have to specify the whole command with
+``docker run``. Let's simplify it so the service starts automatically when the
+container starts.
+
+.. code-block:: bash
+
+    docker commit <container_id> <your username>/postgresql -run='{"Cmd": \
+      ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
+      /var/lib/postgresql/9.2/main -c \
+      config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}'
+
+From now on, just type ``docker run <your username>/postgresql`` and PostgreSQL
+should automatically start.

+ 7 - 1
docs/sources/examples/python_web_app.rst

@@ -36,7 +36,13 @@ Inside of the "shykes/pybuilder" image there is a command called buildapp, we ar
     docker attach $BUILD_JOB
     docker attach $BUILD_JOB
     [...]
     [...]
 
 
-We attach to the new container to see what is going on. Ctrl-C to disconnect
+While this container is running, we can attach to the new container to see what is going on. Ctrl-C to disconnect.
+
+.. code-block:: bash
+
+    docker ps -a
+    
+List all docker containers. If this container has already finished running, it will still be listed here.
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 

+ 2 - 2
docs/sources/examples/running_ssh_service.rst

@@ -33,7 +33,7 @@ The password is 'screencast'
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 
-         # Hello! We are going to try and install openssh on a container and run it as a servic 
+         # Hello! We are going to try and install openssh on a container and run it as a service
          # let's pull ubuntu to get a base ubuntu image. 
          # let's pull ubuntu to get a base ubuntu image. 
          $ docker pull ubuntu
          $ docker pull ubuntu
          # I had it so it was quick
          # I had it so it was quick
@@ -46,7 +46,7 @@ The password is 'screencast'
          $ apt-get install openssh-server
          $ apt-get install openssh-server
          # ok. lets see if we can run it.
          # ok. lets see if we can run it.
          $ which sshd
          $ which sshd
-         # we need to create priviledge separation directory
+         # we need to create privilege separation directory
          $ mkdir /var/run/sshd
          $ mkdir /var/run/sshd
          $ /usr/sbin/sshd
          $ /usr/sbin/sshd
          $ exit
          $ exit

+ 118 - 18
docs/sources/faq.rst

@@ -9,40 +9,140 @@ FAQ
 Most frequently asked questions.
 Most frequently asked questions.
 --------------------------------
 --------------------------------
 
 
-1. **How much does Docker cost?**
+How much does Docker cost?
+..........................
 
 
    Docker is 100% free, it is open source, so you can use it without paying.
    Docker is 100% free, it is open source, so you can use it without paying.
 
 
-2. **What open source license are you using?**
-
-   We are using the Apache License Version 2.0, see it here: https://github.com/dotcloud/docker/blob/master/LICENSE
-
-3. **Does Docker run on Mac OS X or Windows?**
-
-   Not at this time, Docker currently only runs on Linux, but you can use VirtualBox to run Docker in a
-   virtual machine on your box, and get the best of both worlds. Check out the :ref:`install_using_vagrant` and :ref:`windows` installation guides.
-
-4. **How do containers compare to virtual machines?**
-
-   They are complementary. VMs are best used to allocate chunks of hardware resources. Containers operate at the process level, which makes them very lightweight and perfect as a unit of software delivery.
-
-5. **Can I help by adding some questions and answers?**
+What open source license are you using?
+.......................................
+
+   We are using the Apache License Version 2.0, see it here:
+   https://github.com/dotcloud/docker/blob/master/LICENSE
+
+Does Docker run on Mac OS X or Windows?
+.......................................
+
+   Not at this time, Docker currently only runs on Linux, but you can
+   use VirtualBox to run Docker in a virtual machine on your box, and
+   get the best of both worlds. Check out the
+   :ref:`install_using_vagrant` and :ref:`windows` installation
+   guides.
+
+How do containers compare to virtual machines?
+..............................................
+
+   They are complementary. VMs are best used to allocate chunks of
+   hardware resources. Containers operate at the process level, which
+   makes them very lightweight and perfect as a unit of software
+   delivery.
+
+What does Docker add to just plain LXC?
+.......................................
+
+   Docker is not a replacement for LXC. "LXC" refers to capabilities
+   of the Linux kernel (specifically namespaces and control groups)
+   which allow sandboxing processes from one another, and controlling
+   their resource allocations.  On top of this low-level foundation of
+   kernel features, Docker offers a high-level tool with several
+   powerful functionalities:
+
+   * *Portable deployment across machines.* 
+      Docker defines a format for bundling an application and all its
+      dependencies into a single object which can be transferred to
+      any Docker-enabled machine, and executed there with the
+      guarantee that the execution environment exposed to the
+      application will be the same. LXC implements process sandboxing,
+      which is an important pre-requisite for portable deployment, but
+      that alone is not enough for portable deployment. If you sent me
+      a copy of your application installed in a custom LXC
+      configuration, it would almost certainly not run on my machine
+      the way it does on yours, because it is tied to your machine's
+      specific configuration: networking, storage, logging, distro,
+      etc. Docker defines an abstraction for these machine-specific
+      settings, so that the exact same Docker container can run -
+      unchanged - on many different machines, with many different
+      configurations.
+
+   * *Application-centric.* 
+      Docker is optimized for the deployment of applications, as
+      opposed to machines. This is reflected in its API, user
+      interface, design philosophy and documentation. By contrast, the
+      ``lxc`` helper scripts focus on containers as lightweight
+      machines - basically servers that boot faster and need less
+      RAM. We think there's more to containers than just that.
+
+   * *Automatic build.* 
+      Docker includes :ref:`a tool for developers to automatically
+      assemble a container from their source code <dockerbuilder>`,
+      with full control over application dependencies, build tools,
+      packaging etc. They are free to use ``make, maven, chef, puppet,
+      salt,`` Debian packages, RPMs, source tarballs, or any
+      combination of the above, regardless of the configuration of the
+      machines.
+
+   * *Versioning.* 
+      Docker includes git-like capabilities for tracking successive
+      versions of a container, inspecting the diff between versions,
+      committing new versions, rolling back etc. The history also
+      includes how a container was assembled and by whom, so you get
+      full traceability from the production server all the way back to
+      the upstream developer. Docker also implements incremental
+      uploads and downloads, similar to ``git pull``, so new versions
+      of a container can be transferred by only sending diffs.
+
+   * *Component re-use.* 
+      Any container can be used as a :ref:`"base image"
+      <base_image_def>` to create more specialized components. This
+      can be done manually or as part of an automated build. For
+      example you can prepare the ideal Python environment, and use it
+      as a base for 10 different applications. Your ideal Postgresql
+      setup can be re-used for all your future projects. And so on.
+
+   * *Sharing.*
+      Docker has access to a `public registry
+      <http://index.docker.io>`_ where thousands of people have
+      uploaded useful containers: anything from Redis, CouchDB,
+      Postgres to IRC bouncers to Rails app servers to Hadoop to base
+      images for various Linux distros. The :ref:`registry
+      <registryindexspec>` also includes an official "standard
+      library" of useful containers maintained by the Docker team. The
+      registry itself is open-source, so anyone can deploy their own
+      registry to store and transfer private containers, for internal
+      server deployments for example.
+
+   * *Tool ecosystem.* 
+      Docker defines an API for automating and customizing the
+      creation and deployment of containers. There are a huge number
+      of tools integrating with Docker to extend its
+      capabilities. PaaS-like deployment (Dokku, Deis, Flynn),
+      multi-node orchestration (Maestro, Salt, Mesos, Openstack Nova),
+      management dashboards (docker-ui, Openstack Horizon, Shipyard),
+      configuration management (Chef, Puppet), continuous integration
+      (Jenkins, Strider, Travis), etc. Docker is rapidly establishing
+      itself as the standard for container-based tooling.
+
+Can I help by adding some questions and answers?
+................................................
 
 
    Definitely! You can fork `the repo`_ and edit the documentation sources.
    Definitely! You can fork `the repo`_ and edit the documentation sources.
 
 
 
 
-42. **Where can I find more answers?**
+Where can I find more answers?
+..............................
 
 
     You can find more answers on:
     You can find more answers on:
 
 
-    * `Docker club mailinglist`_
+    * `Docker user mailinglist`_
+    * `Docker developer mailinglist`_
     * `IRC, docker on freenode`_
     * `IRC, docker on freenode`_
     * `Github`_
     * `Github`_
     * `Ask questions on Stackoverflow`_
     * `Ask questions on Stackoverflow`_
     * `Join the conversation on Twitter`_
     * `Join the conversation on Twitter`_
 
 
 
 
-    .. _Docker club mailinglist: https://groups.google.com/d/forum/docker-club
+    .. _Docker user mailinglist: https://groups.google.com/d/forum/docker-user
+    .. _Docker developer mailinglist: https://groups.google.com/d/forum/docker-dev
     .. _the repo: http://www.github.com/dotcloud/docker
     .. _the repo: http://www.github.com/dotcloud/docker
     .. _IRC, docker on freenode: irc://chat.freenode.net#docker
     .. _IRC, docker on freenode: irc://chat.freenode.net#docker
     .. _Github: http://www.github.com/dotcloud/docker
     .. _Github: http://www.github.com/dotcloud/docker

+ 1 - 1
docs/sources/index.rst

@@ -23,7 +23,7 @@ dependencies.
   commit``).
   commit``).
 
 
 Each use of ``docker`` is documented here. The features of Docker are
 Each use of ``docker`` is documented here. The features of Docker are
-currently in active development, so this documention will change
+currently in active development, so this documentation will change
 frequently.
 frequently.
 
 
 For an overview of Docker, please see the `Introduction
 For an overview of Docker, please see the `Introduction

+ 1 - 1
docs/sources/installation/rackspace.rst

@@ -10,7 +10,7 @@ Rackspace Cloud
   :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
   :ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
 
 
 
 
-Installing Docker on Ubuntu proviced by Rackspace is pretty straightforward, and you should mostly be able to follow the
+Installing Docker on Ubuntu provided by Rackspace is pretty straightforward, and you should mostly be able to follow the
 :ref:`ubuntu_linux` installation guide.
 :ref:`ubuntu_linux` installation guide.
 
 
 **However, there is one caveat:**
 **However, there is one caveat:**

+ 1 - 1
docs/sources/terms/layer.rst

@@ -14,7 +14,7 @@ switches the whole rootfs volume to read-write mode.
 Layer
 Layer
 .....
 .....
 
 
-When Docker mounts the rootfs, it starts read-only, as in a tradtional
+When Docker mounts the rootfs, it starts read-only, as in a traditional
 Linux boot, but then, instead of changing the file system to
 Linux boot, but then, instead of changing the file system to
 read-write mode, it takes advantage of a `union mount
 read-write mode, it takes advantage of a `union mount
 <http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file
 <http://en.wikipedia.org/wiki/Union_mount>`_ to add a read-write file

+ 2 - 0
docs/sources/use/builder.rst

@@ -2,6 +2,8 @@
 :description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
 :description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
 :keywords: builder, docker, Dockerfile, automation, image creation
 :keywords: builder, docker, Dockerfile, automation, image creation
 
 
+.. _dockerbuilder:
+
 ==================
 ==================
 Dockerfile Builder
 Dockerfile Builder
 ==================
 ==================

+ 2 - 2
docs/sources/use/workingwithrepository.rst

@@ -1,6 +1,6 @@
 :title: Working With Repositories
 :title: Working With Repositories
 :description: Repositories allow users to share images.
 :description: Repositories allow users to share images.
-:keywords: repo, repositiores, usage, pull image, push image, image, documentation
+:keywords: repo, repositories, usage, pull image, push image, image, documentation
 
 
 .. _working_with_the_repository:
 .. _working_with_the_repository:
 
 
@@ -71,7 +71,7 @@ function completely independently from the Central Index.
 Find public images available on the Central Index
 Find public images available on the Central Index
 -------------------------------------------------
 -------------------------------------------------
 
 
-Seach by name, namespace or description
+Search by name, namespace or description
 
 
 .. code-block:: bash
 .. code-block:: bash
 
 

+ 1 - 1
docs/theme/MAINTAINERS

@@ -1 +1 @@
-Thatcher Peskens <thatcher@dotcloud.com>
+Thatcher Peskens <thatcher@dotcloud.com> (@dhrp)

+ 1 - 1
hack/dockerbuilder/Dockerfile

@@ -23,7 +23,7 @@ run	add-apt-repository -y ppa:dotcloud/docker-golang/ubuntu
 run	apt-get update
 run	apt-get update
 # Packages required to checkout, build and upload docker
 # Packages required to checkout, build and upload docker
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl
 run	DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd curl
-run	curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.1.linux-amd64.tar.gz
+run	curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz
 run	tar -C /usr/local -xzf /go.tar.gz
 run	tar -C /usr/local -xzf /go.tar.gz
 run	echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc
 run	echo "export PATH=/usr/local/go/bin:$PATH" > /.bashrc
 run	echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile
 run	echo "export PATH=/usr/local/go/bin:$PATH" > /.bash_profile

+ 1 - 1
hack/dockerbuilder/MAINTAINERS

@@ -1 +1 @@
-Daniel Mizyrycki <daniel@dotcloud.com>
+Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)

+ 2 - 2
hack/infrastructure/MAINTAINERS

@@ -1,2 +1,2 @@
-Ken Cochrane <ken@dotcloud.com>
-Jerome Petazzoni <jerome@dotcloud.com>
+Ken Cochrane <ken@dotcloud.com> (@kencochrane)
+Jerome Petazzoni <jerome@dotcloud.com> (@jpetazzo)

+ 1 - 1
library/MAINTAINERS

@@ -1 +1 @@
-Joffrey Fuhrer <joffrey@dotcloud.com>
+Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)

+ 2 - 2
network.go

@@ -332,7 +332,7 @@ func newPortMapper() (*PortMapper, error) {
 	return mapper, nil
 	return mapper, nil
 }
 }
 
 
-// Port allocator: Atomatically allocate and release networking ports
+// Port allocator: Automatically allocate and release networking ports
 type PortAllocator struct {
 type PortAllocator struct {
 	sync.Mutex
 	sync.Mutex
 	inUse    map[int]struct{}
 	inUse    map[int]struct{}
@@ -385,7 +385,7 @@ func newPortAllocator() (*PortAllocator, error) {
 	return allocator, nil
 	return allocator, nil
 }
 }
 
 
-// IP allocator: Atomatically allocate and release networking ports
+// IP allocator: Automatically allocate and release networking ports
 type IPAllocator struct {
 type IPAllocator struct {
 	network       *net.IPNet
 	network       *net.IPNet
 	queueAlloc    chan allocatedIP
 	queueAlloc    chan allocatedIP

+ 1 - 1
packaging/MAINTAINERS

@@ -1 +1 @@
-Daniel Mizyrycki <daniel@dotcloud.com>
+Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)

+ 3 - 1
packaging/ubuntu/docker.upstart

@@ -5,4 +5,6 @@ stop on runlevel [!2345]
 
 
 respawn
 respawn
 
 
-exec /usr/bin/docker -d
+script
+    /usr/bin/docker -d
+end script

+ 3 - 3
registry/MAINTAINERS

@@ -1,3 +1,3 @@
-Sam Alba <sam@dotcloud.com>
-Joffrey Fuhrer <joffrey@dotcloud.com>
-Ken Cochrane <ken@dotcloud.com>
+Sam Alba <sam@dotcloud.com> (@samalba)
+Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)
+Ken Cochrane <ken@dotcloud.com> (@kencochrane)

+ 1 - 1
registry/registry.go

@@ -384,7 +384,7 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis
 	if res.StatusCode != 200 {
 	if res.StatusCode != 200 {
 		errBody, err := ioutil.ReadAll(res.Body)
 		errBody, err := ioutil.ReadAll(res.Body)
 		if err != nil {
 		if err != nil {
-			return utils.NewHTTPRequestError(fmt.Sprint("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+			return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
 		}
 		}
 		var jsonBody map[string]string
 		var jsonBody map[string]string
 		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
 		if err := json.Unmarshal(errBody, &jsonBody); err != nil {

+ 5 - 5
runtime.go

@@ -241,12 +241,12 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
 		if !runtime.capabilities.SwapLimit && !quiet {
 		if !runtime.capabilities.SwapLimit && !quiet {
 			log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
 			log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
 		}
 		}
+	}
 
 
-		content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward")
-		runtime.capabilities.IPv4Forwarding = err3 == nil && len(content) > 0 && content[0] == '1'
-		if !runtime.capabilities.IPv4Forwarding && !quiet {
-			log.Printf("WARNING: IPv4 forwarding is disabled.")
-		}
+	content, err3 := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward")
+	runtime.capabilities.IPv4Forwarding = err3 == nil && len(content) > 0 && content[0] == '1'
+	if !runtime.capabilities.IPv4Forwarding && !quiet {
+		log.Printf("WARNING: IPv4 forwarding is disabled.")
 	}
 	}
 }
 }
 
 

+ 5 - 5
server.go

@@ -425,7 +425,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
 			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling", "metadata"))
 			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 			imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
 			if err != nil {
 			if err != nil {
-				// FIXME: Keep goging in case of error?
+				// FIXME: Keep going in case of error?
 				return err
 				return err
 			}
 			}
 			img, err := NewImgJSON(imgJSON)
 			img, err := NewImgJSON(imgJSON)
@@ -565,7 +565,7 @@ func (srv *Server) poolAdd(kind, key string) error {
 		srv.pushingPool[key] = struct{}{}
 		srv.pushingPool[key] = struct{}{}
 		break
 		break
 	default:
 	default:
-		return fmt.Errorf("Unkown pool type")
+		return fmt.Errorf("Unknown pool type")
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -579,7 +579,7 @@ func (srv *Server) poolRemove(kind, key string) error {
 		delete(srv.pushingPool, key)
 		delete(srv.pushingPool, key)
 		break
 		break
 	default:
 	default:
-		return fmt.Errorf("Unkown pool type")
+		return fmt.Errorf("Unknown pool type")
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -693,7 +693,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 	out = utils.NewWriteFlusher(out)
 	out = utils.NewWriteFlusher(out)
 	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
 	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
 	if err != nil {
 	if err != nil {
-		return "", fmt.Errorf("Error while retreiving the path for {%s}: %s", imgID, err)
+		return "", fmt.Errorf("Error while retrieving the path for {%s}: %s", imgID, err)
 	}
 	}
 	out.Write(sf.FormatStatus("", "Pushing %s", imgID))
 	out.Write(sf.FormatStatus("", "Pushing %s", imgID))
 
 
@@ -731,7 +731,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
 	return imgData.Checksum, nil
 	return imgData.Checksum, nil
 }
 }
 
 
-// FIXME: Allow to interupt current push when new push of same image is done.
+// FIXME: Allow to interrupt current push when new push of same image is done.
 func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
 func (srv *Server) ImagePush(localName string, out io.Writer, sf *utils.StreamFormatter, authConfig *auth.AuthConfig) error {
 	if err := srv.poolAdd("push", localName); err != nil {
 	if err := srv.poolAdd("push", localName); err != nil {
 		return err
 		return err

+ 4 - 4
server_test.go

@@ -283,8 +283,8 @@ func TestPools(t *testing.T) {
 		t.Fatalf("Expected `pull test1 is already in progress`")
 		t.Fatalf("Expected `pull test1 is already in progress`")
 	}
 	}
 	err = srv.poolAdd("wait", "test3")
 	err = srv.poolAdd("wait", "test3")
-	if err == nil || err.Error() != "Unkown pool type" {
-		t.Fatalf("Expected `Unkown pool type`")
+	if err == nil || err.Error() != "Unknown pool type" {
+		t.Fatalf("Expected `Unknown pool type`")
 	}
 	}
 
 
 	err = srv.poolRemove("pull", "test2")
 	err = srv.poolRemove("pull", "test2")
@@ -304,8 +304,8 @@ func TestPools(t *testing.T) {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	err = srv.poolRemove("wait", "test3")
 	err = srv.poolRemove("wait", "test3")
-	if err == nil || err.Error() != "Unkown pool type" {
-		t.Fatalf("Expected `Unkown pool type`")
+	if err == nil || err.Error() != "Unknown pool type" {
+		t.Fatalf("Expected `Unknown pool type`")
 	}
 	}
 }
 }
 
 

+ 2 - 2
term/MAINTAINERS

@@ -1,2 +1,2 @@
-Guillaume Charmes <guillaume@dotcloud.com>
-Solomon Hykes <solomon@dotcloud.com>
+Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Solomon Hykes <solomon@dotcloud.com> (@shykes)

+ 1 - 1
testing/MAINTAINERS

@@ -1 +1 @@
-Daniel Mizyrycki <daniel@dotcloud.com>
+Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)

+ 1 - 1
utils/utils_test.go

@@ -219,7 +219,7 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
 
 
 func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
 func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
 	if r := CompareKernelVersion(a, b); r != result {
 	if r := CompareKernelVersion(a, b); r != result {
-		t.Fatalf("Unepected kernel version comparaison result. Found %d, expected %d", r, result)
+		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
 	}
 	}
 }
 }